-rw-r--r--  .gitignore | 1
-rw-r--r--  Documentation/DocBook/Makefile | 2
-rw-r--r--  Documentation/filesystems/ubifs.txt | 26
-rw-r--r--  Documentation/i2c/busses/i2c-i801 | 1
-rw-r--r--  Documentation/i2c/writing-clients | 2
-rw-r--r--  Documentation/input/elantech.txt | 123
-rw-r--r--  Documentation/input/rotary-encoder.txt | 13
-rw-r--r--  Documentation/kbuild/kbuild.txt | 13
-rw-r--r--  Documentation/kbuild/makefiles.txt | 53
-rw-r--r--  MAINTAINERS | 7
-rw-r--r--  Makefile | 50
-rw-r--r--  arch/alpha/kernel/vmlinux.lds.S | 2
-rw-r--r--  arch/arm/kernel/vmlinux.lds.S | 2
-rw-r--r--  arch/arm/mach-tegra/include/mach/kbc.h | 4
-rw-r--r--  arch/arm/mach-ux500/board-mop500.c | 14
-rw-r--r--  arch/arm/plat-nomadik/include/plat/i2c.h | 8
-rw-r--r--  arch/blackfin/kernel/vmlinux.lds.S | 2
-rw-r--r--  arch/cris/kernel/vmlinux.lds.S | 2
-rw-r--r--  arch/frv/kernel/vmlinux.lds.S | 2
-rw-r--r--  arch/m32r/kernel/vmlinux.lds.S | 2
-rw-r--r--  arch/m68k/Kconfig | 1
-rw-r--r--  arch/m68k/include/asm/bitops_no.h | 18
-rw-r--r--  arch/m68k/include/asm/io_no.h | 8
-rw-r--r--  arch/m68k/kernel/asm-offsets.c | 106
-rw-r--r--  arch/m68k/kernel/asm-offsets_mm.c | 100
-rw-r--r--  arch/m68k/kernel/asm-offsets_no.c | 76
-rw-r--r--  arch/m68k/kernel/entry_no.S | 1
-rw-r--r--  arch/m68k/kernel/irq.c | 28
-rw-r--r--  arch/m68k/kernel/m68k_ksyms.c | 36
-rw-r--r--  arch/m68k/kernel/m68k_ksyms_mm.c | 16
-rw-r--r--  arch/m68k/kernel/m68k_ksyms_no.c | 78
-rw-r--r--  arch/m68k/kernel/process_no.c | 2
-rw-r--r--  arch/m68k/kernel/sys_m68k.c | 581
-rw-r--r--  arch/m68k/kernel/sys_m68k_mm.c | 546
-rw-r--r--  arch/m68k/kernel/sys_m68k_no.c | 94
-rw-r--r--  arch/m68k/kernel/syscalltable.S | 1
-rw-r--r--  arch/m68k/lib/Makefile | 13
-rw-r--r--  arch/m68k/lib/Makefile_mm | 6
-rw-r--r--  arch/m68k/lib/Makefile_no | 7
-rw-r--r--  arch/m68k/lib/checksum.c | 5
-rw-r--r--  arch/m68k/lib/checksum_no.c | 3
-rw-r--r--  arch/m68k/lib/memcpy.c | 128
-rw-r--r--  arch/m68k/lib/memmove.c | 2
-rw-r--r--  arch/m68k/lib/memset.c | 114
-rw-r--r--  arch/m68k/lib/muldi3.c | 99
-rw-r--r--  arch/m68k/lib/muldi3_mm.c | 63
-rw-r--r--  arch/m68k/lib/muldi3_no.c | 86
-rw-r--r--  arch/m68k/lib/string.c | 223
-rw-r--r--  arch/m68k/mm/Makefile | 14
-rw-r--r--  arch/m68k/mm/Makefile_mm | 8
-rw-r--r--  arch/m68k/mm/Makefile_no | 5
-rw-r--r--  arch/m68k/mm/init_no.c | 51
-rw-r--r--  arch/m68k/mm/kmap.c | 368
-rw-r--r--  arch/m68k/mm/kmap_mm.c | 367
-rw-r--r--  arch/m68k/mm/kmap_no.c | 45
-rw-r--r--  arch/m68k/platform/68328/entry.S | 7
-rw-r--r--  arch/m68k/platform/68360/entry.S | 7
-rw-r--r--  arch/m68k/platform/coldfire/dma.c | 3
-rw-r--r--  arch/m68k/platform/coldfire/entry.S | 11
-rw-r--r--  arch/m68k/platform/coldfire/head.S | 1
-rw-r--r--  arch/mips/kernel/vmlinux.lds.S | 2
-rw-r--r--  arch/mn10300/kernel/vmlinux.lds.S | 2
-rw-r--r--  arch/parisc/kernel/vmlinux.lds.S | 2
-rw-r--r--  arch/powerpc/kernel/vmlinux.lds.S | 2
-rw-r--r--  arch/s390/Kconfig | 11
-rw-r--r--  arch/s390/appldata/appldata_base.c | 2
-rw-r--r--  arch/s390/include/asm/cmpxchg.h | 1
-rw-r--r--  arch/s390/include/asm/elf.h | 12
-rw-r--r--  arch/s390/include/asm/hugetlb.h | 17
-rw-r--r--  arch/s390/include/asm/irq.h | 1
-rw-r--r--  arch/s390/include/asm/lowcore.h | 4
-rw-r--r--  arch/s390/include/asm/mmu.h | 9
-rw-r--r--  arch/s390/include/asm/mmu_context.h | 6
-rw-r--r--  arch/s390/include/asm/page.h | 60
-rw-r--r--  arch/s390/include/asm/percpu.h | 68
-rw-r--r--  arch/s390/include/asm/pgalloc.h | 57
-rw-r--r--  arch/s390/include/asm/pgtable.h | 607
-rw-r--r--  arch/s390/include/asm/processor.h | 1
-rw-r--r--  arch/s390/include/asm/tlbflush.h | 13
-rw-r--r--  arch/s390/include/asm/unistd.h | 1
-rw-r--r--  arch/s390/kernel/asm-offsets.c | 4
-rw-r--r--  arch/s390/kernel/entry.S | 1
-rw-r--r--  arch/s390/kernel/entry64.S | 1
-rw-r--r--  arch/s390/kernel/irq.c | 1
-rw-r--r--  arch/s390/kernel/process.c | 19
-rw-r--r--  arch/s390/kernel/setup.c | 31
-rw-r--r--  arch/s390/kernel/smp.c | 30
-rw-r--r--  arch/s390/kernel/time.c | 4
-rw-r--r--  arch/s390/kernel/topology.c | 16
-rw-r--r--  arch/s390/kernel/vdso32/Makefile | 3
-rw-r--r--  arch/s390/kernel/vdso64/Makefile | 3
-rw-r--r--  arch/s390/kernel/vmlinux.lds.S | 2
-rw-r--r--  arch/s390/mm/extmem.c | 6
-rw-r--r--  arch/s390/mm/fault.c | 187
-rw-r--r--  arch/s390/mm/hugetlbpage.c | 10
-rw-r--r--  arch/s390/mm/init.c | 3
-rw-r--r--  arch/s390/mm/pageattr.c | 2
-rw-r--r--  arch/s390/mm/pgtable.c | 68
-rw-r--r--  arch/s390/mm/vmem.c | 14
-rw-r--r--  arch/s390/oprofile/hwsampler.c | 21
-rw-r--r--  arch/sh/kernel/vmlinux.lds.S | 2
-rw-r--r--  arch/sparc/kernel/vmlinux.lds.S | 2
-rw-r--r--  arch/tile/kernel/vmlinux.lds.S | 2
-rw-r--r--  arch/um/Kconfig.x86 | 4
-rw-r--r--  arch/um/include/asm/common.lds.S | 2
-rw-r--r--  arch/x86/include/asm/linkage.h | 5
-rw-r--r--  arch/x86/include/asm/percpu.h | 7
-rw-r--r--  arch/x86/kernel/apic/io_apic.c | 4
-rw-r--r--  arch/x86/kernel/ptrace.c | 4
-rw-r--r--  arch/x86/kernel/vmlinux.lds.S | 2
-rw-r--r--  arch/xtensa/kernel/vmlinux.lds.S | 2
-rw-r--r--  drivers/ata/pata_pcmcia.c | 2
-rw-r--r--  drivers/bluetooth/bluecard_cs.c | 2
-rw-r--r--  drivers/bluetooth/bt3c_cs.c | 2
-rw-r--r--  drivers/bluetooth/btuart_cs.c | 2
-rw-r--r--  drivers/bluetooth/dtl1_cs.c | 2
-rw-r--r--  drivers/char/agp/intel-agp.c | 3
-rw-r--r--  drivers/char/agp/intel-agp.h | 8
-rw-r--r--  drivers/char/agp/intel-gtt.c | 10
-rw-r--r--  drivers/char/agp/uninorth-agp.c | 2
-rw-r--r--  drivers/char/pcmcia/cm4000_cs.c | 2
-rw-r--r--  drivers/char/pcmcia/cm4040_cs.c | 2
-rw-r--r--  drivers/char/pcmcia/synclink_cs.c | 2
-rw-r--r--  drivers/crypto/Kconfig | 2
-rw-r--r--  drivers/gpu/drm/drm_edid.c | 61
-rw-r--r--  drivers/gpu/drm/drm_fb_helper.c | 207
-rw-r--r--  drivers/gpu/drm/drm_irq.c | 9
-rw-r--r--  drivers/gpu/drm/drm_modes.c | 156
-rw-r--r--  drivers/gpu/drm/drm_stub.c | 21
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c | 131
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c | 60
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 68
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 113
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 36
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c | 35
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_tiling.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 311
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 35
-rw-r--r--  drivers/gpu/drm/i915/i915_suspend.c | 3
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.c | 6
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c | 24
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 2303
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 19
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c | 88
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h | 35
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c | 10
-rw-r--r--  drivers/gpu/drm/i915/intel_tv.c | 13
-rw-r--r--  drivers/gpu/drm/nouveau/Kconfig | 2
-rw-r--r--  drivers/gpu/drm/nouveau/Makefile | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_acpi.c | 108
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bios.c | 7
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bios.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_channel.c | 20
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_connector.c | 8
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_display.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drv.c | 25
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drv.h | 208
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_grctx.h | 10
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_mem.c | 66
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_object.c | 118
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_perf.c | 92
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_pm.c | 21
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_reg.h | 14
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_state.c | 212
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_vm.h | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_volt.c | 10
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_crtc.c | 9
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_graph.c | 383
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_instmem.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nv10_graph.c | 212
-rw-r--r--  drivers/gpu/drm/nouveau/nv20_graph.c | 510
-rw-r--r--  drivers/gpu/drm/nouveau/nv40_fifo.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nv40_graph.c | 323
-rw-r--r--  drivers/gpu/drm/nouveau/nv40_mpeg.c | 311
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_calc.c | 68
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_crtc.c | 13
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_display.c | 18
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_graph.c | 442
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_grctx.c | 10
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_mpeg.c | 256
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_pm.c | 15
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_vm.c | 12
-rw-r--r--  drivers/gpu/drm/nouveau/nv84_crypt.c | 135
-rw-r--r--  drivers/gpu/drm/nouveau/nva3_copy.c | 226
-rw-r--r--  drivers/gpu/drm/nouveau/nva3_copy.fuc | 870
-rw-r--r--  drivers/gpu/drm/nouveau/nva3_copy.fuc.h | 534
-rw-r--r--  drivers/gpu/drm/nouveau/nva3_pm.c | 169
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_copy.c | 243
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_copy.fuc.h | 527
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_fifo.c | 142
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_graph.c | 600
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_graph.h | 29
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_grctx.c | 20
-rw-r--r--  drivers/gpu/drm/radeon/atom.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/atombios.h | 22
-rw-r--r--  drivers/gpu/drm/radeon/atombios_crtc.c | 132
-rw-r--r--  drivers/gpu/drm/radeon/atombios_dp.c | 1046
-rw-r--r--  drivers/gpu/drm/radeon/evergreen.c | 14
-rw-r--r--  drivers/gpu/drm/radeon/evergreend.h | 2
-rw-r--r--  drivers/gpu/drm/radeon/ni.c | 8
-rw-r--r--  drivers/gpu/drm/radeon/nid.h | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_combios.c | 117
-rw-r--r--  drivers/gpu/drm/radeon/radeon_connectors.c | 607
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/radeon_display.c | 10
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/radeon_encoders.c | 252
-rw-r--r--  drivers/gpu/drm/radeon/radeon_i2c.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_mode.h | 19
-rw-r--r--  drivers/gpu/vga/vga_switcheroo.c | 6
-rw-r--r--  drivers/gpu/vga/vgaarb.c | 113
-rw-r--r--  drivers/i2c/busses/Kconfig | 12
-rw-r--r--  drivers/i2c/busses/i2c-eg20t.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-i801.c | 61
-rw-r--r--  drivers/i2c/busses/i2c-nomadik.c | 276
-rw-r--r--  drivers/i2c/busses/i2c-parport-light.c | 10
-rw-r--r--  drivers/i2c/busses/i2c-parport.c | 30
-rw-r--r--  drivers/i2c/busses/i2c-parport.h | 74
-rw-r--r--  drivers/i2c/busses/i2c-sh_mobile.c | 19
-rw-r--r--  drivers/i2c/busses/i2c-tegra.c | 39
-rw-r--r--  drivers/ide/ide-cs.c | 2
-rw-r--r--  drivers/input/evdev.c | 19
-rw-r--r--  drivers/input/input-polldev.c | 56
-rw-r--r--  drivers/input/input.c | 1
-rw-r--r--  drivers/input/joydev.c | 1
-rw-r--r--  drivers/input/keyboard/Kconfig | 22
-rw-r--r--  drivers/input/keyboard/Makefile | 2
-rw-r--r--  drivers/input/keyboard/adp5589-keys.c | 771
-rw-r--r--  drivers/input/keyboard/gpio_keys.c | 11
-rw-r--r--  drivers/input/keyboard/mpr121_touchkey.c | 339
-rw-r--r--  drivers/input/keyboard/omap-keypad.c | 6
-rw-r--r--  drivers/input/keyboard/qt1070.c | 1
-rw-r--r--  drivers/input/keyboard/sh_keysc.c | 53
-rw-r--r--  drivers/input/keyboard/tegra-kbc.c | 60
-rw-r--r--  drivers/input/misc/ad714x.c | 129
-rw-r--r--  drivers/input/misc/ati_remote2.c | 9
-rw-r--r--  drivers/input/misc/rotary_encoder.c | 119
-rw-r--r--  drivers/input/misc/twl4030-pwrbutton.c | 2
-rw-r--r--  drivers/input/mouse/elantech.c | 72
-rw-r--r--  drivers/input/mouse/elantech.h | 6
-rw-r--r--  drivers/input/mousedev.c | 1
-rw-r--r--  drivers/input/touchscreen/Kconfig | 12
-rw-r--r--  drivers/input/touchscreen/Makefile | 1
-rw-r--r--  drivers/input/touchscreen/ads7846.c | 26
-rw-r--r--  drivers/input/touchscreen/atmel_mxt_ts.c | 92
-rw-r--r--  drivers/input/touchscreen/atmel_tsadcc.c | 2
-rw-r--r--  drivers/input/touchscreen/h3600_ts_input.c | 8
-rw-r--r--  drivers/input/touchscreen/max11801_ts.c | 272
-rw-r--r--  drivers/input/touchscreen/tsc2007.c | 26
-rw-r--r--  drivers/isdn/hardware/avm/avm_cs.c | 2
-rw-r--r--  drivers/isdn/hisax/avma1_cs.c | 2
-rw-r--r--  drivers/isdn/hisax/elsa_cs.c | 2
-rw-r--r--  drivers/isdn/hisax/sedlbauer_cs.c | 2
-rw-r--r--  drivers/isdn/hisax/teles_cs.c | 2
-rw-r--r--  drivers/mmc/host/sdricoh_cs.c | 2
-rw-r--r--  drivers/mtd/maps/pcmciamtd.c | 2
-rw-r--r--  drivers/mtd/ubi/cdev.c | 14
-rw-r--r--  drivers/mtd/ubi/debug.c | 21
-rw-r--r--  drivers/mtd/ubi/debug.h | 161
-rw-r--r--  drivers/mtd/ubi/io.c | 6
-rw-r--r--  drivers/mtd/ubi/scan.c | 2
-rw-r--r--  drivers/mtd/ubi/ubi-media.h | 6
-rw-r--r--  drivers/mtd/ubi/ubi.h | 4
-rw-r--r--  drivers/mtd/ubi/wl.c | 3
-rw-r--r--  drivers/net/can/softing/softing_cs.c | 2
-rw-r--r--  drivers/net/pcmcia/3c574_cs.c | 2
-rw-r--r--  drivers/net/pcmcia/3c589_cs.c | 2
-rw-r--r--  drivers/net/pcmcia/axnet_cs.c | 2
-rw-r--r--  drivers/net/pcmcia/com20020_cs.c | 2
-rw-r--r--  drivers/net/pcmcia/fmvj18x_cs.c | 2
-rw-r--r--  drivers/net/pcmcia/ibmtr_cs.c | 2
-rw-r--r--  drivers/net/pcmcia/nmclan_cs.c | 2
-rw-r--r--  drivers/net/pcmcia/pcnet_cs.c | 2
-rw-r--r--  drivers/net/pcmcia/smc91c92_cs.c | 2
-rw-r--r--  drivers/net/pcmcia/xirc2ps_cs.c | 2
-rw-r--r--  drivers/net/wireless/airo_cs.c | 2
-rw-r--r--  drivers/net/wireless/atmel_cs.c | 4
-rw-r--r--  drivers/net/wireless/b43/pcmcia.c | 2
-rw-r--r--  drivers/net/wireless/hostap/hostap_cs.c | 2
-rw-r--r--  drivers/net/wireless/libertas/if_cs.c | 2
-rw-r--r--  drivers/net/wireless/orinoco/orinoco_cs.c | 2
-rw-r--r--  drivers/net/wireless/orinoco/spectrum_cs.c | 2
-rw-r--r--  drivers/net/wireless/ray_cs.c | 2
-rw-r--r--  drivers/net/wireless/wl3501_cs.c | 2
-rw-r--r--  drivers/parport/parport_cs.c | 2
-rw-r--r--  drivers/pci/pci.c | 25
-rw-r--r--  drivers/pcmcia/ds.c | 6
-rw-r--r--  drivers/pcmcia/sa1100_generic.c | 2
-rw-r--r--  drivers/platform/x86/Kconfig | 7
-rw-r--r--  drivers/platform/x86/Makefile | 1
-rw-r--r--  drivers/platform/x86/mxm-wmi.c | 111
-rw-r--r--  drivers/s390/block/dasd_alias.c | 4
-rw-r--r--  drivers/s390/block/dasd_eckd.c | 11
-rw-r--r--  drivers/s390/char/Kconfig | 12
-rw-r--r--  drivers/s390/char/Makefile | 3
-rw-r--r--  drivers/s390/char/monwriter.c | 4
-rw-r--r--  drivers/s390/char/raw3270.c | 2
-rw-r--r--  drivers/s390/char/sclp.h | 24
-rw-r--r--  drivers/s390/char/sclp_config.c | 14
-rw-r--r--  drivers/s390/char/sclp_ocf.c | 145
-rw-r--r--  drivers/s390/char/sclp_sdias.c | 3
-rw-r--r--  drivers/s390/char/sclp_tty.c | 122
-rw-r--r--  drivers/s390/char/tape_3590.c | 11
-rw-r--r--  drivers/s390/char/tape_block.c | 444
-rw-r--r--  drivers/s390/char/tape_std.c | 3
-rw-r--r--  drivers/s390/cio/chsc.c | 35
-rw-r--r--  drivers/s390/cio/device_fsm.c | 8
-rw-r--r--  drivers/s390/cio/device_ops.c | 3
-rw-r--r--  drivers/s390/cio/qdio_main.c | 2
-rw-r--r--  drivers/s390/crypto/ap_bus.c | 8
-rw-r--r--  drivers/scsi/pcmcia/aha152x_stub.c | 2
-rw-r--r--  drivers/scsi/pcmcia/fdomain_stub.c | 2
-rw-r--r--  drivers/scsi/pcmcia/nsp_cs.c | 2
-rw-r--r--  drivers/scsi/pcmcia/qlogic_stub.c | 2
-rw-r--r--  drivers/scsi/pcmcia/sym53c500_cs.c | 2
-rw-r--r--  drivers/spi/coldfire_qspi.c | 1
-rw-r--r--  drivers/staging/comedi/drivers/cb_das16_cs.c | 2
-rw-r--r--  drivers/staging/comedi/drivers/das08_cs.c | 2
-rw-r--r--  drivers/staging/comedi/drivers/ni_daq_700.c | 2
-rw-r--r--  drivers/staging/comedi/drivers/ni_daq_dio24.c | 2
-rw-r--r--  drivers/staging/comedi/drivers/ni_labpc_cs.c | 2
-rw-r--r--  drivers/staging/comedi/drivers/ni_mio_cs.c | 2
-rw-r--r--  drivers/staging/comedi/drivers/quatech_daqp_cs.c | 2
-rw-r--r--  drivers/staging/ft1000/ft1000-pcmcia/ft1000_cs.c | 2
-rw-r--r--  drivers/staging/wlags49_h2/wl_cs.c | 2
-rw-r--r--  drivers/telephony/ixj_pcmcia.c | 2
-rw-r--r--  drivers/tty/ipwireless/main.c | 2
-rw-r--r--  drivers/tty/serial/serial_cs.c | 2
-rw-r--r--  drivers/usb/host/sl811_cs.c | 2
-rw-r--r--  fs/binfmt_flat.c | 8
-rw-r--r--  fs/dlm/config.c | 9
-rw-r--r--  fs/dlm/config.h | 1
-rw-r--r--  fs/dlm/dlm_internal.h | 3
-rw-r--r--  fs/dlm/lock.c | 182
-rw-r--r--  fs/dlm/lock.h | 1
-rw-r--r--  fs/dlm/lockspace.c | 6
-rw-r--r--  fs/dlm/plock.c | 65
-rw-r--r--  fs/dlm/user.c | 1
-rw-r--r--  fs/ext2/super.c | 3
-rw-r--r--  fs/ext3/namei.c | 80
-rw-r--r--  fs/jbd/commit.c | 15
-rw-r--r--  fs/jbd/journal.c | 16
-rw-r--r--  fs/jbd/transaction.c | 3
-rw-r--r--  fs/jbd2/commit.c | 6
-rw-r--r--  fs/ocfs2/Makefile | 1
-rw-r--r--  fs/ocfs2/ioctl.c | 468
-rw-r--r--  fs/ocfs2/move_extents.c | 1153
-rw-r--r--  fs/ocfs2/move_extents.h | 22
-rw-r--r--  fs/ocfs2/ocfs2_ioctl.h | 68
-rw-r--r--  fs/ocfs2/refcounttree.c | 58
-rw-r--r--  fs/ocfs2/refcounttree.h | 11
-rw-r--r--  fs/ubifs/budget.c | 104
-rw-r--r--  fs/ubifs/commit.c | 2
-rw-r--r--  fs/ubifs/debug.c | 167
-rw-r--r--  fs/ubifs/debug.h | 178
-rw-r--r--  fs/ubifs/dir.c | 4
-rw-r--r--  fs/ubifs/file.c | 28
-rw-r--r--  fs/ubifs/find.c | 10
-rw-r--r--  fs/ubifs/gc.c | 71
-rw-r--r--  fs/ubifs/io.c | 33
-rw-r--r--  fs/ubifs/journal.c | 29
-rw-r--r--  fs/ubifs/log.c | 28
-rw-r--r--  fs/ubifs/lprops.c | 115
-rw-r--r--  fs/ubifs/lpt_commit.c | 55
-rw-r--r--  fs/ubifs/master.c | 8
-rw-r--r--  fs/ubifs/misc.h | 17
-rw-r--r--  fs/ubifs/orphan.c | 3
-rw-r--r--  fs/ubifs/recovery.c | 354
-rw-r--r--  fs/ubifs/replay.c | 468
-rw-r--r--  fs/ubifs/sb.c | 153
-rw-r--r--  fs/ubifs/super.c | 46
-rw-r--r--  fs/ubifs/tnc.c | 10
-rw-r--r--  fs/ubifs/tnc_commit.c | 18
-rw-r--r--  fs/ubifs/ubifs-media.h | 30
-rw-r--r--  fs/ubifs/ubifs.h | 86
-rw-r--r--  fs/ubifs/xattr.c | 8
-rw-r--r--  include/asm-generic/pgtable.h | 12
-rw-r--r--  include/asm-generic/vmlinux.lds.h | 61
-rw-r--r--  include/drm/drmP.h | 49
-rw-r--r--  include/drm/drm_crtc.h | 6
-rw-r--r--  include/drm/drm_dp_helper.h | 5
-rw-r--r--  include/drm/drm_edid.h | 25
-rw-r--r--  include/drm/drm_fb_helper.h | 16
-rw-r--r--  include/linux/capability.h | 5
-rw-r--r--  include/linux/dlm_plock.h | 6
-rw-r--r--  include/linux/gpio_keys.h | 8
-rw-r--r--  include/linux/i2c/i2c-sh_mobile.h | 10
-rw-r--r--  include/linux/i2c/mpr121_touchkey.h | 20
-rw-r--r--  include/linux/i2c/tsc2007.h | 7
-rw-r--r--  include/linux/init_task.h | 7
-rw-r--r--  include/linux/input/ad714x.h | 3
-rw-r--r--  include/linux/input/adp5589.h | 213
-rw-r--r--  include/linux/key.h | 13
-rw-r--r--  include/linux/kmod.h | 3
-rw-r--r--  include/linux/linkage.h | 4
-rw-r--r--  include/linux/lsm_audit.h | 11
-rw-r--r--  include/linux/mtd/ubi.h | 4
-rw-r--r--  include/linux/mxm-wmi.h | 33
-rw-r--r--  include/linux/page-flags.h | 2
-rw-r--r--  include/linux/pci.h | 7
-rw-r--r--  include/linux/rotary_encoder.h | 1
-rw-r--r--  include/linux/spi/ads7846.h | 3
-rw-r--r--  include/mtd/ubi-user.h | 40
-rw-r--r--  include/pcmcia/ds.h | 2
-rw-r--r--  init/Kconfig | 30
-rw-r--r--  kernel/capability.c | 4
-rw-r--r--  kernel/cred.c | 6
-rw-r--r--  kernel/kmod.c | 100
-rw-r--r--  kernel/sysctl.c | 6
-rw-r--r--  kernel/workqueue.c | 4
-rw-r--r--  lib/flex_array.c | 26
-rw-r--r--  mm/percpu.c | 6
-rw-r--r--  mm/rmap.c | 11
-rw-r--r--  net/dns_resolver/dns_key.c | 10
-rw-r--r--  scripts/.gitignore | 1
-rw-r--r--  scripts/Kbuild.include | 12
-rw-r--r--  scripts/Makefile | 7
-rw-r--r--  scripts/Makefile.asm-generic | 23
-rw-r--r--  scripts/Makefile.build | 74
-rw-r--r--  scripts/Makefile.headersinst | 10
-rw-r--r--  scripts/Makefile.lib | 2
-rw-r--r--  scripts/basic/.gitignore | 2
-rw-r--r--  scripts/basic/Makefile | 3
-rw-r--r--  scripts/docproc.c (renamed from scripts/basic/docproc.c) | 0
-rw-r--r--  scripts/gen_initramfs_list.sh | 27
-rw-r--r--  scripts/kallsyms.c | 2
-rwxr-xr-x  scripts/mkcompile_h | 30
-rw-r--r--  security/Kconfig | 1
-rw-r--r--  security/commoncap.c | 13
-rw-r--r--  security/keys/internal.h | 4
-rw-r--r--  security/keys/keyctl.c | 6
-rw-r--r--  security/keys/keyring.c | 37
-rw-r--r--  security/keys/proc.c | 2
-rw-r--r--  security/keys/process_keys.c | 12
-rw-r--r--  security/keys/request_key.c | 3
-rw-r--r--  security/keys/request_key_auth.c | 3
-rw-r--r--  security/keys/user_defined.c | 4
-rw-r--r--  security/lsm_audit.c | 59
-rw-r--r--  security/selinux/avc.c | 2
-rw-r--r--  security/selinux/hooks.c | 92
-rw-r--r--  security/selinux/include/security.h | 9
-rw-r--r--  security/selinux/netnode.c | 1
-rw-r--r--  security/selinux/selinuxfs.c | 28
-rw-r--r--  security/selinux/ss/policydb.c | 244
-rw-r--r--  security/selinux/ss/policydb.h | 12
-rw-r--r--  security/selinux/ss/services.c | 72
-rw-r--r--  security/smack/smack.h | 11
-rw-r--r--  security/smack/smack_lsm.c | 48
-rw-r--r--  security/tomoyo/common.c | 17
-rw-r--r--  security/tomoyo/file.c | 1
-rw-r--r--  security/tomoyo/memory.c | 1
-rw-r--r--  security/tomoyo/mount.c | 1
-rw-r--r--  security/tomoyo/util.c | 2
-rw-r--r--  sound/pcmcia/pdaudiocf/pdaudiocf.c | 2
-rw-r--r--  sound/pcmcia/vx/vxpocket.c | 2
-rw-r--r--  usr/gen_init_cpio.c | 53
457 files changed, 18478 insertions(+), 9570 deletions(-)
diff --git a/.gitignore b/.gitignore
index 5d56a3fd0de6..9dacde0a4b2d 100644
--- a/.gitignore
+++ b/.gitignore
@@ -57,6 +57,7 @@ modules.builtin
 include/config
 include/linux/version.h
 include/generated
+arch/*/include/generated
 
 # stgit generated dirs
 patches-*
diff --git a/Documentation/DocBook/Makefile b/Documentation/DocBook/Makefile
index 8436b018c289..3cebfa0d1611 100644
--- a/Documentation/DocBook/Makefile
+++ b/Documentation/DocBook/Makefile
@@ -73,7 +73,7 @@ installmandocs: mandocs
 ###
 #External programs used
 KERNELDOC = $(srctree)/scripts/kernel-doc
-DOCPROC   = $(objtree)/scripts/basic/docproc
+DOCPROC   = $(objtree)/scripts/docproc
 
 XMLTOFLAGS = -m $(srctree)/Documentation/DocBook/stylesheet.xsl
 XMLTOFLAGS += --skip-validation
diff --git a/Documentation/filesystems/ubifs.txt b/Documentation/filesystems/ubifs.txt
index d7b13b01e980..8e4fab639d9c 100644
--- a/Documentation/filesystems/ubifs.txt
+++ b/Documentation/filesystems/ubifs.txt
@@ -115,28 +115,8 @@ ubi.mtd=0 root=ubi0:rootfs rootfstype=ubifs
 Module Parameters for Debugging
 ===============================
 
-When UBIFS has been compiled with debugging enabled, there are 3 module
+When UBIFS has been compiled with debugging enabled, there are 2 module
 parameters that are available to control aspects of testing and debugging.
-The parameters are unsigned integers where each bit controls an option.
-The parameters are:
-
-debug_msgs      Selects which debug messages to display, as follows:
-
-                Message Type                    Flag value
-
-                General messages                1
-                Journal messages                2
-                Mount messages                  4
-                Commit messages                 8
-                LEB search messages             16
-                Budgeting messages              32
-                Garbage collection messages     64
-                Tree Node Cache (TNC) messages  128
-                LEB properties (lprops) messages 256
-                Input/output messages           512
-                Log messages                    1024
-                Scan messages                   2048
-                Recovery messages               4096
 
 debug_chks      Selects extra checks that UBIFS can do while running:
 
@@ -154,11 +134,9 @@ debug_tsts Selects a mode of testing, as follows:
 
                 Test mode                       Flag value
 
-                Force in-the-gaps method        2
                 Failure mode for recovery testing 4
 
-For example, set debug_msgs to 5 to display General messages and Mount
-messages.
+For example, set debug_chks to 3 to enable general and TNC checks.
 
 
 References
diff --git a/Documentation/i2c/busses/i2c-i801 b/Documentation/i2c/busses/i2c-i801
index 6df69765ccb7..2871fd500349 100644
--- a/Documentation/i2c/busses/i2c-i801
+++ b/Documentation/i2c/busses/i2c-i801
@@ -19,6 +19,7 @@ Supported adapters:
   * Intel 6 Series (PCH)
   * Intel Patsburg (PCH)
   * Intel DH89xxCC (PCH)
+  * Intel Panther Point (PCH)
    Datasheets: Publicly available at the Intel website
 
 On Intel Patsburg and later chipsets, both the normal host SMBus controller
diff --git a/Documentation/i2c/writing-clients b/Documentation/i2c/writing-clients
index 5ebf5af1d716..5aa53374ea2a 100644
--- a/Documentation/i2c/writing-clients
+++ b/Documentation/i2c/writing-clients
@@ -38,7 +38,7 @@ static struct i2c_driver foo_driver = {
38 .name = "foo", 38 .name = "foo",
39 }, 39 },
40 40
41 .id_table = foo_ids, 41 .id_table = foo_idtable,
42 .probe = foo_probe, 42 .probe = foo_probe,
43 .remove = foo_remove, 43 .remove = foo_remove,
44 /* if device autodetection is needed: */ 44 /* if device autodetection is needed: */
diff --git a/Documentation/input/elantech.txt b/Documentation/input/elantech.txt
index 56941ae1f5db..db798af5ef98 100644
--- a/Documentation/input/elantech.txt
+++ b/Documentation/input/elantech.txt
@@ -34,7 +34,8 @@ Contents
 Currently the Linux Elantech touchpad driver is aware of two different
 hardware versions unimaginatively called version 1 and version 2. Version 1
 is found in "older" laptops and uses 4 bytes per packet. Version 2 seems to
-be introduced with the EeePC and uses 6 bytes per packet.
+be introduced with the EeePC and uses 6 bytes per packet, and provides
+additional features such as position of two fingers, and width of the touch.
 
 The driver tries to support both hardware versions and should be compatible
 with the Xorg Synaptics touchpad driver and its graphical configuration
@@ -94,18 +95,44 @@ Currently the Linux Elantech touchpad driver provides two extra knobs under
    can check these bits and reject any packet that appears corrupted. Using
    this knob you can bypass that check.
 
-   It is not known yet whether hardware version 2 provides the same parity
-   bits. Hence checking is disabled by default. Currently even turning it on
-   will do nothing.
-
+   Hardware version 2 does not provide the same parity bits. Only some basic
+   data consistency checking can be done. For now checking is disabled by
+   default. Currently even turning it on will do nothing.
 
 /////////////////////////////////////////////////////////////////////////////
 
+3. Differentiating hardware versions
+   =================================
+
+To detect the hardware version, read the version number as param[0].param[1].param[2]
+
+4 bytes version: (after the arrow is the name given in the Dell-provided driver)
+        02.00.22 => EF013
+        02.06.00 => EF019
+In the wild, there appear to be more versions, such as 00.01.64, 01.00.21,
+02.00.00, 02.00.04, 02.00.06.
+
+6 bytes:
+        02.00.30 => EF113
+        02.08.00 => EF023
+        02.08.XX => EF123
+        02.0B.00 => EF215
+        04.01.XX => Scroll_EF051
+        04.02.XX => EF051
+In the wild, there appear to be more versions, such as 04.03.01, 04.04.11. There
+appears to be almost no difference, except for EF113, which does not report
+pressure/width and has different data consistency checks.
+
+Probably all the versions with param[0] <= 01 can be considered as
+4 bytes/firmware 1. The versions < 02.08.00, with the exception of 02.00.30, as
+4 bytes/firmware 2. Everything >= 02.08.00 can be considered as 6 bytes.
+
+/////////////////////////////////////////////////////////////////////////////
 
-3. Hardware version 1
+4. Hardware version 1
    ==================
 
-3.1 Registers
+4.1 Registers
    ~~~~~~~~~
 
 By echoing a hexadecimal value to a register it contents can be altered.
@@ -168,7 +195,7 @@ For example:
            smart edge activation area width?
 
 
-3.2 Native relative mode 4 byte packet format
+4.2 Native relative mode 4 byte packet format
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
 byte 0:
@@ -226,9 +253,13 @@ byte 3:
            positive = down
 
 
-3.3 Native absolute mode 4 byte packet format
+4.3 Native absolute mode 4 byte packet format
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
+EF013 and EF019 have a special behaviour (due to a bug in the firmware?), and
+when 1 finger is touching, the first 2 position reports must be discarded.
+This counting is reset whenever a different number of fingers is reported.
+
 byte 0:
    firmware version 1.x:
 
@@ -279,11 +310,11 @@ byte 3:
 /////////////////////////////////////////////////////////////////////////////
 
 
-4. Hardware version 2
+5. Hardware version 2
    ==================
 
 
-4.1 Registers
+5.1 Registers
    ~~~~~~~~~
 
 By echoing a hexadecimal value to a register it contents can be altered.
@@ -316,16 +347,41 @@ For example:
            0x7f = never i.e. tap again to release)
 
 
-4.2 Native absolute mode 6 byte packet format
+5.2 Native absolute mode 6 byte packet format
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-4.2.1 One finger touch
+5.2.1 Parity checking and packet re-synchronization
+There is no parity checking, however some consistency checks can be performed.
+
+For instance for EF113:
+        SA1= packet[0];
+        A1 = packet[1];
+        B1 = packet[2];
+        SB1= packet[3];
+        C1 = packet[4];
+        D1 = packet[5];
+        if( (((SA1 & 0x3C) != 0x3C) && ((SA1 & 0xC0) != 0x80)) || // check Byte 1
+            (((SA1 & 0x0C) != 0x0C) && ((SA1 & 0xC0) == 0x80)) || // check Byte 1 (one finger pressed)
+            (((SA1 & 0xC0) != 0x80) && (( A1 & 0xF0) != 0x00)) || // check Byte 2
+            (((SB1 & 0x3E) != 0x38) && ((SA1 & 0xC0) != 0x80)) || // check Byte 4
+            (((SB1 & 0x0E) != 0x08) && ((SA1 & 0xC0) == 0x80)) || // check Byte 4 (one finger pressed)
+            (((SA1 & 0xC0) != 0x80) && (( C1 & 0xF0) != 0x00)) )  // check Byte 5
+                 // error detected
+
+For all the other ones, there are just a few constant bits:
+        if( ((packet[0] & 0x0C) != 0x04) ||
+            ((packet[3] & 0x0f) != 0x02) )
+                 // error detected
+
+
+In case an error is detected, all the packets are shifted by one (and packet[0] is discarded).
+
+5.2.1 One/Three finger touch
    ~~~~~~~~~~~~~~~~
 
 byte 0:
 
    bit   7   6   5   4   3   2   1   0
-        n1  n0   .   .   .   .   R   L
+        n1  n0  w3  w2   .   .   R   L
 
    L, R = 1 when Left, Right mouse button pressed
    n1..n0 = numbers of fingers on touchpad
@@ -333,24 +389,40 @@ byte 0:
 byte 1:
 
    bit   7   6   5   4   3   2   1   0
-         .   .   .   .   .  x10  x9  x8
+        p7  p6  p5  p4   .  x10  x9  x8
 
 byte 2:
 
    bit   7   6   5   4   3   2   1   0
-        x7  x6  x5  x4  x4  x2  x1  x0
+        x7  x6  x5  x4  x3  x2  x1  x0
 
    x10..x0 = absolute x value (horizontal)
 
 byte 3:
 
    bit   7   6   5   4   3   2   1   0
-         .   .   .   .   .   .   .   .
+        n4  vf  w1  w0   .   .   .  b2
+
+   n4 = set if more than 3 fingers (only in 3 fingers mode)
+   vf = a kind of flag ? (only on EF123, 0 when finger is over one
+        of the buttons, 1 otherwise)
+   w3..w0 = width of the finger touch (not EF113)
+   b2 (on EF113 only, 0 otherwise), b2.R.L indicates one button pressed:
+        0 = none
+        1 = Left
+        2 = Right
+        3 = Middle (Left and Right)
+        4 = Forward
+        5 = Back
+        6 = Another one
+        7 = Another one
 
 byte 4:
 
    bit   7   6   5   4   3   2   1   0
-         .   .   .   .   .   .  y9  y8
+        p3  p1  p2  p0   .   .  y9  y8
+
+   p7..p0 = pressure (not EF113)
 
 byte 5:
 
@@ -363,6 +435,11 @@ byte 5:
 4.2.2 Two finger touch
    ~~~~~~~~~~~~~~~~
 
+Note that the two pairs of coordinates are not exactly the coordinates of the
+two fingers, but only the pair of the lower-left and upper-right coordinates.
+So the actual fingers might be situated on the other diagonal of the square
+defined by these two points.
+
 byte 0:
 
    bit   7   6   5   4   3   2   1   0
@@ -376,14 +453,14 @@ byte 1:
    bit   7   6   5   4   3   2   1   0
        ax7 ax6 ax5 ax4 ax3 ax2 ax1 ax0
 
-   ax8..ax0 = first finger absolute x value
+   ax8..ax0 = lower-left finger absolute x value
 
 byte 2:
 
    bit   7   6   5   4   3   2   1   0
        ay7 ay6 ay5 ay4 ay3 ay2 ay1 ay0
 
-   ay8..ay0 = first finger absolute y value
+   ay8..ay0 = lower-left finger absolute y value
 
 byte 3:
 
@@ -395,11 +472,11 @@ byte 4:
    bit   7   6   5   4   3   2   1   0
        bx7 bx6 bx5 bx4 bx3 bx2 bx1 bx0
 
-   bx8..bx0 = second finger absolute x value
+   bx8..bx0 = upper-right finger absolute x value
 
 byte 5:
 
    bit   7   6   5   4   3   2   1   0
        by7 by8 by5 by4 by3 by2 by1 by0
 
-   by8..by0 = second finger absolute y value
+   by8..by0 = upper-right finger absolute y value
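The version 2 byte layout above is enough to decode a one/three-finger report in a few lines. The following is a hedged sketch built only from the tables in this document; the struct and function names are illustrative and are not the in-tree drivers/input/mouse/elantech.c code, and the pressure/width fields are meaningless on EF113, which does not report them.

/*
 * Minimal sketch: decode a hardware version 2 one/three-finger packet
 * using only the bit layout documented above.  Names are illustrative,
 * not taken from drivers/input/mouse/elantech.c.
 */
#include <stdint.h>

struct etp_v2_report {
	unsigned int fingers;		/* n1..n0 (byte 0, bits 7-6) */
	unsigned int left, right;	/* L and R buttons (byte 0, bits 0-1) */
	unsigned int x;			/* x10..x0 */
	unsigned int y;			/* y9..y0 */
	unsigned int pressure;		/* p7..p0, not reported by EF113 */
	unsigned int width;		/* w3..w0, not reported by EF113 */
};

static void etp_v2_decode(const uint8_t p[6], struct etp_v2_report *r)
{
	r->fingers = (p[0] & 0xc0) >> 6;
	r->left    =  p[0] & 0x01;
	r->right   = (p[0] & 0x02) >> 1;

	/* x10..x8 come from byte 1, x7..x0 from byte 2 */
	r->x = ((p[1] & 0x07) << 8) | p[2];

	/* y9..y8 come from byte 4, y7..y0 from byte 5 */
	r->y = ((p[4] & 0x03) << 8) | p[5];

	/* high pressure nibble from byte 1, low nibble from byte 4
	 * (the low-nibble bit order follows the table above) */
	r->pressure = (p[1] & 0xf0) | ((p[4] & 0xf0) >> 4);

	/* w3..w2 from byte 0, w1..w0 from byte 3 */
	r->width = ((p[0] & 0x30) >> 2) | ((p[3] & 0x30) >> 4);
}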
diff --git a/Documentation/input/rotary-encoder.txt b/Documentation/input/rotary-encoder.txt
index 943e8f6f2b15..92e68bce13a4 100644
--- a/Documentation/input/rotary-encoder.txt
+++ b/Documentation/input/rotary-encoder.txt
@@ -9,6 +9,9 @@ peripherals with two wires. The outputs are phase-shifted by 90 degrees
 and by triggering on falling and rising edges, the turn direction can
 be determined.
 
+Some encoders have both outputs low in stable states, whereas others also have
+a stable state with both outputs high (half-period mode).
+
 The phase diagram of these two outputs look like this:
 
           _____       _____       _____
@@ -26,6 +29,8 @@ The phase diagram of these two outputs look like this:
26 |<-------->| 29 |<-------->|
27 one step 30 one step
28 31
32 |<-->|
33 one step (half-period mode)
29 34
30For more information, please see 35For more information, please see
31 http://en.wikipedia.org/wiki/Rotary_encoder 36 http://en.wikipedia.org/wiki/Rotary_encoder
@@ -34,6 +39,13 @@ For more information, please see
 1. Events / state machine
 -------------------------
 
+In half-period mode, state a) and c) above are used to determine the
+rotational direction based on the last stable state. Events are reported in
+states b) and d) given that the new stable state is different from the last
+(i.e. the rotation was not reversed half-way).
+
+Otherwise, the following apply:
+
 a) Rising edge on channel A, channel B in low state
    This state is used to recognize a clockwise turn
 
@@ -96,6 +108,7 @@ static struct rotary_encoder_platform_data my_rotary_encoder_info = {
 	.gpio_b		= GPIO_ROTARY_B,
 	.inverted_a	= 0,
 	.inverted_b	= 0,
+	.half_period	= false,
 };
 
 static struct platform_device rotary_encoder_device = {
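As a concrete illustration of the half-period rule described in the documentation above, here is a hedged sketch of the decision logic, assuming state is the 2-bit code (A << 1) | B with 00 and 11 as the stable states. The names and the sign convention are assumptions for the sketch; this is not the logic from drivers/input/misc/rotary_encoder.c.

/*
 * Sketch of the half-period decision described above.  Illustrative
 * only; not the actual drivers/input/misc/rotary_encoder.c code.
 * state = (A << 1) | B; 0x0 and 0x3 are the stable states.
 */
static int last_stable;	/* 0x0 or 0x3 */
static int direction;	/* +1 = clockwise, -1 = counterclockwise (assumed) */

/* Returns +1/-1 when one half-period step completes, 0 otherwise. */
static int half_period_step(int state)
{
	switch (state) {
	case 0x0:
	case 0x3:
		/* New stable state: report only if it differs from the
		 * last one, i.e. the rotation was not reversed half-way. */
		if (state != last_stable) {
			last_stable = state;
			return direction;
		}
		return 0;
	case 0x1:
	case 0x2:
		/* Transition state: per the phase diagram, A moving first
		 * from 00 (or falling first from 11) means clockwise. */
		direction = ((state == 0x2) == (last_stable == 0x0)) ? 1 : -1;
		return 0;
	}
	return 0;
}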
diff --git a/Documentation/kbuild/kbuild.txt b/Documentation/kbuild/kbuild.txt
index 7c2a89ba674c..68e32bb6bd80 100644
--- a/Documentation/kbuild/kbuild.txt
+++ b/Documentation/kbuild/kbuild.txt
@@ -201,3 +201,16 @@ KBUILD_ENABLE_EXTRA_GCC_CHECKS
 --------------------------------------------------
 If enabled over the make command line with "W=1", it turns on additional
 gcc -W... options for more extensive build-time checking.
+
+KBUILD_BUILD_TIMESTAMP
+--------------------------------------------------
+Setting this to a date string overrides the timestamp used in the
+UTS_VERSION definition (uname -v in the running kernel). The value has to
+be a string that can be passed to date -d. The default value
+is the output of the date command at one point during build.
+
+KBUILD_BUILD_USER, KBUILD_BUILD_HOST
+--------------------------------------------------
+These two variables allow to override the user@host string displayed during
+boot and in /proc/version. The default value is the output of the commands
+whoami and host, respectively.
diff --git a/Documentation/kbuild/makefiles.txt b/Documentation/kbuild/makefiles.txt
index 5d145bb443c0..47435e56c5da 100644
--- a/Documentation/kbuild/makefiles.txt
+++ b/Documentation/kbuild/makefiles.txt
@@ -40,11 +40,13 @@ This document describes the Linux kernel Makefiles.
 	--- 6.6 Commands useful for building a boot image
 	--- 6.7 Custom kbuild commands
 	--- 6.8 Preprocessing linker scripts
+	--- 6.9 Generic header files
 
 	=== 7 Kbuild syntax for exported headers
 	--- 7.1 header-y
 	--- 7.2 objhdr-y
 	--- 7.3 destination-y
+	--- 7.4 generic-y
 
 	=== 8 Kbuild Variables
 	=== 9 Makefile language
@@ -499,6 +501,18 @@ more details, with real examples.
 	gcc >= 3.00. For gcc < 3.00, -malign-functions=4 is used.
 	Note: cc-option-align uses KBUILD_CFLAGS for $(CC) options
 
+    cc-disable-warning
+	cc-disable-warning checks if gcc supports a given warning and returns
+	the commandline switch to disable it. This special function is needed,
+	because gcc 4.4 and later accept any unknown -Wno-* option and only
+	warn about it if there is another warning in the source file.
+
+	Example:
+		KBUILD_CFLAGS += $(call cc-disable-warning, unused-but-set-variable)
+
+	In the above example, -Wno-unused-but-set-variable will be added to
+	KBUILD_CFLAGS only if gcc really accepts it.
+
     cc-version
 	cc-version returns a numerical version of the $(CC) compiler version.
 	The format is <major><minor> where both are two digits. So for example
@@ -955,6 +969,11 @@ When kbuild executes, the following steps are followed (roughly):
 	used when linking modules. This is often a linker script.
 	From commandline LDFLAGS_MODULE shall be used (see kbuild.txt).
 
+    KBUILD_ARFLAGS   Options for $(AR) when creating archives
+
+	$(KBUILD_ARFLAGS) set by the top level Makefile to "D" (deterministic
+	mode) if this option is supported by $(AR).
+
 --- 6.2 Add prerequisites to archprepare:
 
 	The archprepare: rule is used to list prerequisites that need to be
@@ -1209,6 +1228,14 @@ When kbuild executes, the following steps are followed (roughly):
 	The kbuild infrastructure for *lds file are used in several
 	architecture-specific files.
 
+--- 6.9 Generic header files
+
+	The directory include/asm-generic contains the header files
+	that may be shared between individual architectures.
+	The recommended approach how to use a generic header file is
+	to list the file in the Kbuild file.
+	See "7.4 generic-y" for further info on syntax etc.
+
 === 7 Kbuild syntax for exported headers
 
 The kernel include a set of headers that is exported to userspace.
@@ -1265,6 +1292,32 @@ See subsequent chapter for the syntax of the Kbuild file.
 	In the example above all exported headers in the Kbuild file
 	will be located in the directory "include/linux" when exported.
 
+	--- 7.4 generic-y
+
+	If an architecture uses a verbatim copy of a header from
+	include/asm-generic then this is listed in the file
+	arch/$(ARCH)/include/asm/Kbuild like this:
+
+		Example:
+			#arch/x86/include/asm/Kbuild
+			generic-y += termios.h
+			generic-y += rtc.h
+
+	During the prepare phase of the build a wrapper include
+	file is generated in the directory:
+
+		arch/$(ARCH)/include/generated/asm
+
+	When a header is exported where the architecture uses
+	the generic header a similar wrapper is generated as part
+	of the set of exported headers in the directory:
+
+		usr/include/asm
+
+	The generated wrapper will in both cases look like the following:
+
+		Example: termios.h
+			#include <asm-generic/termios.h>
 
 === 8 Kbuild Variables
 
diff --git a/MAINTAINERS b/MAINTAINERS
index 0b415248ae25..98c324b07a16 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2245,10 +2245,10 @@ F: drivers/gpu/drm/
 F:	include/drm/
 
 INTEL DRM DRIVERS (excluding Poulsbo, Moorestown and derivative chipsets)
-M:	Chris Wilson <chris@chris-wilson.co.uk>
+M:	Keith Packard <keithp@keithp.com>
 L:	intel-gfx@lists.freedesktop.org (subscribers-only)
 L:	dri-devel@lists.freedesktop.org
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/ickle/drm-intel.git
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/keithp/linux-2.6.git
 S:	Supported
 F:	drivers/gpu/drm/i915
 F:	include/drm/i915*
@@ -5592,10 +5592,11 @@ M: James Morris <jmorris@namei.org>
 M:	Eric Paris <eparis@parisplace.org>
 L:	selinux@tycho.nsa.gov (subscribers-only, general discussion)
 W:	http://selinuxproject.org
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/jmorris/security-testing-2.6.git
+T:	git git://git.infradead.org/users/eparis/selinux.git
 S:	Supported
 F:	include/linux/selinux*
 F:	security/selinux/
+F:	scripts/selinux/
 
 APPARMOR SECURITY MODULE
 M:	John Johansen <john.johansen@canonical.com>
diff --git a/Makefile b/Makefile
index a0344a81a893..6b73d1eed1ea 100644
--- a/Makefile
+++ b/Makefile
@@ -103,7 +103,7 @@ ifeq ("$(origin O)", "command line")
 endif
 
 ifeq ("$(origin W)", "command line")
-  export KBUILD_ENABLE_EXTRA_GCC_CHECKS := 1
+  export KBUILD_ENABLE_EXTRA_GCC_CHECKS := $(W)
 endif
 
 # That's our default target when none is given on the command line
@@ -349,7 +349,8 @@ CFLAGS_GCOV = -fprofile-arcs -ftest-coverage
 
 # Use LINUXINCLUDE when you must reference the include/ directory.
 # Needed to be compatible with the O= option
-LINUXINCLUDE    := -I$(srctree)/arch/$(hdr-arch)/include -Iinclude \
+LINUXINCLUDE    := -I$(srctree)/arch/$(hdr-arch)/include \
+                   -Iarch/$(hdr-arch)/include/generated -Iinclude \
                    $(if $(KBUILD_SRC), -I$(srctree)/include) \
                    -include include/generated/autoconf.h
 
@@ -382,6 +383,7 @@ export KBUILD_CFLAGS CFLAGS_KERNEL CFLAGS_MODULE CFLAGS_GCOV
 export KBUILD_AFLAGS AFLAGS_KERNEL AFLAGS_MODULE
 export KBUILD_AFLAGS_MODULE KBUILD_CFLAGS_MODULE KBUILD_LDFLAGS_MODULE
 export KBUILD_AFLAGS_KERNEL KBUILD_CFLAGS_KERNEL
+export KBUILD_ARFLAGS
 
 # When compiling out-of-tree modules, put MODVERDIR in the module
 # tree rather than in the kernel tree. The kernel tree might
@@ -416,6 +418,12 @@ ifneq ($(KBUILD_SRC),)
 	$(srctree) $(objtree) $(VERSION) $(PATCHLEVEL)
 endif
 
+# Support for using generic headers in asm-generic
+PHONY += asm-generic
+asm-generic:
+	$(Q)$(MAKE) -f $(srctree)/scripts/Makefile.asm-generic \
+	            obj=arch/$(SRCARCH)/include/generated/asm
+
 # To make sure we do not include .config for any of the *config targets
 # catch them early, and hand them over to scripts/kconfig/Makefile
 # It is allowed to specify more targets when calling make, including
@@ -559,6 +567,10 @@ ifndef CONFIG_CC_STACKPROTECTOR
 KBUILD_CFLAGS += $(call cc-option, -fno-stack-protector)
 endif
 
+# This warning generated too much noise in a regular build.
+# Use make W=1 to enable this warning (see scripts/Makefile.build)
+KBUILD_CFLAGS += $(call cc-disable-warning, unused-but-set-variable)
+
 ifdef CONFIG_FRAME_POINTER
 KBUILD_CFLAGS += -fno-omit-frame-pointer -fno-optimize-sibling-calls
 else
@@ -604,7 +616,7 @@ CHECKFLAGS += $(NOSTDINC_FLAGS)
 KBUILD_CFLAGS += $(call cc-option,-Wdeclaration-after-statement,)
 
 # disable pointer signed / unsigned warnings in gcc 4.0
-KBUILD_CFLAGS += $(call cc-option,-Wno-pointer-sign,)
+KBUILD_CFLAGS += $(call cc-disable-warning, pointer-sign)
 
 # disable invalid "can't wrap" optimizations for signed / pointers
 KBUILD_CFLAGS += $(call cc-option,-fno-strict-overflow)
@@ -612,6 +624,9 @@ KBUILD_CFLAGS += $(call cc-option,-fno-strict-overflow)
 # conserve stack if available
 KBUILD_CFLAGS += $(call cc-option,-fconserve-stack)
 
+# use the deterministic mode of AR if available
+KBUILD_ARFLAGS := $(call ar-option,D)
+
 # check for 'asm goto'
 ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC)), y)
 	KBUILD_CFLAGS += -DCC_HAVE_ASM_GOTO
@@ -797,15 +812,17 @@ ifdef CONFIG_KALLSYMS
 # o The correct .tmp_kallsyms2.o is linked into the final vmlinux.
 # o Verify that the System.map from vmlinux matches the map from
 #   .tmp_vmlinux2, just in case we did not generate kallsyms correctly.
-# o If CONFIG_KALLSYMS_EXTRA_PASS is set, do an extra pass using
+# o If 'make KALLSYMS_EXTRA_PASS=1" was used, do an extra pass using
 #   .tmp_vmlinux3 and .tmp_kallsyms3.o. This is only meant as a
 #   temporary bypass to allow the kernel to be built while the
 #   maintainers work out what went wrong with kallsyms.
 
-ifdef CONFIG_KALLSYMS_EXTRA_PASS
-last_kallsyms := 3
-else
 last_kallsyms := 2
+
+ifdef KALLSYMS_EXTRA_PASS
+ifneq ($(KALLSYMS_EXTRA_PASS),0)
+last_kallsyms := 3
+endif
 endif
 
 kallsyms.o := .tmp_kallsyms$(last_kallsyms).o
@@ -816,7 +833,8 @@ define verify_kallsyms
 	$(cmd_sysmap) .tmp_vmlinux$(last_kallsyms) .tmp_System.map
 	$(Q)cmp -s System.map .tmp_System.map || \
 		(echo Inconsistent kallsyms data; \
-		 echo Try setting CONFIG_KALLSYMS_EXTRA_PASS; \
+		 echo This is a bug - please report about it; \
+		 echo Try "make KALLSYMS_EXTRA_PASS=1" as a workaround; \
 		 rm .tmp_kallsyms* ; /bin/false )
 endef
 
@@ -947,7 +965,7 @@ ifneq ($(KBUILD_SRC),)
 endif
 
 # prepare2 creates a makefile if using a separate output directory
-prepare2: prepare3 outputmakefile
+prepare2: prepare3 outputmakefile asm-generic
 
 prepare1: prepare2 include/linux/version.h include/generated/utsrelease.h \
                    include/config/auto.conf
@@ -1021,7 +1039,7 @@ hdr-inst := -rR -f $(srctree)/scripts/Makefile.headersinst obj
 hdr-dst = $(if $(KBUILD_HEADERS), dst=include/asm-$(hdr-arch), dst=include/asm)
 
 PHONY += __headers
-__headers: include/linux/version.h scripts_basic FORCE
+__headers: include/linux/version.h scripts_basic asm-generic FORCE
 	$(Q)$(MAKE) $(build)=scripts build_unifdef
 
 PHONY += headers_install_all
@@ -1136,7 +1154,8 @@ CLEAN_FILES += vmlinux System.map \
 		  .tmp_kallsyms* .tmp_version .tmp_vmlinux* .tmp_System.map
 
 # Directories & files removed with 'make mrproper'
-MRPROPER_DIRS  += include/config usr/include include/generated
+MRPROPER_DIRS  += include/config usr/include include/generated          \
+                  arch/*/include/generated
 MRPROPER_FILES += .config .config.old .version .old_version \
                   include/linux/version.h \
                   Module.symvers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS
@@ -1267,7 +1286,11 @@ help:
 	@echo  '  make O=dir [targets] Locate all output files in "dir", including .config'
 	@echo  '  make C=1   [targets] Check all c source with $$CHECK (sparse by default)'
 	@echo  '  make C=2   [targets] Force check of all c source with $$CHECK'
-	@echo  '  make W=1   [targets] Enable extra gcc checks'
+	@echo  '  make W=n   [targets] Enable extra gcc checks, n=1,2,3 where'
+	@echo  '		1: warnings which may be relevant and do not occur too often'
+	@echo  '		2: warnings which occur quite often but may still be relevant'
+	@echo  '		3: more obscure warnings, can most likely be ignored'
+	@echo  '		Multiple levels can be combined with W=12 or W=123'
 	@echo  '  make RECORDMCOUNT_WARN=1 [targets] Warn about ignored mcount sections'
 	@echo  ''
 	@echo  'Execute "make" or "make all" to build all targets marked with [*] '
@@ -1291,6 +1314,7 @@ $(help-board-dirs): help-%:
 # Documentation targets
 # ---------------------------------------------------------------------------
 %docs: scripts_basic FORCE
+	$(Q)$(MAKE) $(build)=scripts build_docproc
 	$(Q)$(MAKE) $(build)=Documentation/DocBook $@
 
 else # KBUILD_EXTMOD
@@ -1375,7 +1399,7 @@ endif # KBUILD_EXTMOD
 clean: $(clean-dirs)
 	$(call cmd,rmdirs)
 	$(call cmd,rmfiles)
-	@find $(or $(KBUILD_EXTMOD), .) $(RCS_FIND_IGNORE) \
+	@find $(if $(KBUILD_EXTMOD), $(KBUILD_EXTMOD), .) $(RCS_FIND_IGNORE) \
 		\( -name '*.[oas]' -o -name '*.ko' -o -name '.*.cmd' \
 		-o -name '.*.d' -o -name '.*.tmp' -o -name '*.mod.c' \
 		-o -name '*.symtypes' -o -name 'modules.order' \
diff --git a/arch/alpha/kernel/vmlinux.lds.S b/arch/alpha/kernel/vmlinux.lds.S
index 3d890a98a08b..f937ad123852 100644
--- a/arch/alpha/kernel/vmlinux.lds.S
+++ b/arch/alpha/kernel/vmlinux.lds.S
@@ -39,7 +39,7 @@ SECTIONS
 	__init_begin = ALIGN(PAGE_SIZE);
 	INIT_TEXT_SECTION(PAGE_SIZE)
 	INIT_DATA_SECTION(16)
-	PERCPU(L1_CACHE_BYTES, PAGE_SIZE)
+	PERCPU_SECTION(L1_CACHE_BYTES)
 	/* Align to THREAD_SIZE rather than PAGE_SIZE here so any padding page
 	   needed for the THREAD_SIZE aligned init_task gets freed after init */
 	. = ALIGN(THREAD_SIZE);
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index b4348e62ef06..e5287f21badc 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -82,7 +82,7 @@ SECTIONS
 #endif
 	}
 
-	PERCPU(32, PAGE_SIZE)
+	PERCPU_SECTION(32)
 
 #ifndef CONFIG_XIP_KERNEL
 	. = ALIGN(PAGE_SIZE);
diff --git a/arch/arm/mach-tegra/include/mach/kbc.h b/arch/arm/mach-tegra/include/mach/kbc.h
index 04c779832c78..4f3572a1c684 100644
--- a/arch/arm/mach-tegra/include/mach/kbc.h
+++ b/arch/arm/mach-tegra/include/mach/kbc.h
@@ -50,13 +50,11 @@ struct tegra_kbc_platform_data {
 	unsigned int debounce_cnt;
 	unsigned int repeat_cnt;
 
-	unsigned int wake_cnt; /* 0:wake on any key >1:wake on wake_cfg */
-	const struct tegra_kbc_wake_key *wake_cfg;
-
 	struct tegra_kbc_pin_cfg pin_cfg[KBC_MAX_GPIO];
 	const struct matrix_keymap_data *keymap_data;
 
 	bool wakeup;
 	bool use_fn_map;
+	bool use_ghost_filter;
 };
 #endif
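With the wake-key fields gone, a board only fills in the matrix and the remaining flags. A hypothetical board file for the trimmed structure above might look like the following sketch; all values and the keymap symbol are assumptions, not taken from an in-tree board.

/* Hypothetical board setup for the trimmed platform data above;
 * values and the keymap symbol are illustrative only. */
#include <mach/kbc.h>

static struct tegra_kbc_platform_data board_kbc_pdata = {
	.debounce_cnt		= 20,
	.repeat_cnt		= 50 * 32,
	.keymap_data		= &board_keymap_data,	/* assumed to exist */
	.wakeup			= true,
	.use_fn_map		= false,
	.use_ghost_filter	= true,	/* new: reject ghost key presses */
};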
diff --git a/arch/arm/mach-ux500/board-mop500.c b/arch/arm/mach-ux500/board-mop500.c
index 6e1907fa94f0..bb26f40493e6 100644
--- a/arch/arm/mach-ux500/board-mop500.c
+++ b/arch/arm/mach-ux500/board-mop500.c
@@ -204,7 +204,7 @@ static struct i2c_board_info __initdata mop500_i2c2_devices[] = {
 	},
 };
 
-#define U8500_I2C_CONTROLLER(id, _slsu, _tft, _rft, clk, _sm) \
+#define U8500_I2C_CONTROLLER(id, _slsu, _tft, _rft, clk, t_out, _sm) \
 static struct nmk_i2c_controller u8500_i2c##id##_data = { \
 	/* \
 	 * slave data setup time, which is \
@@ -219,19 +219,21 @@ static struct nmk_i2c_controller u8500_i2c##id##_data = { \
 	.rft = _rft, \
 	/* std. mode operation */ \
 	.clk_freq = clk, \
+	/* Slave response timeout(ms) */\
+	.timeout = t_out, \
 	.sm = _sm, \
 }
 
 /*
  * The board uses 4 i2c controllers, initialize all of
  * them with slave data setup time of 250 ns,
- * Tx & Rx FIFO threshold values as 1 and standard
+ * Tx & Rx FIFO threshold values as 8 and standard
  * mode of operation
  */
-U8500_I2C_CONTROLLER(0, 0xe, 1, 1, 100000, I2C_FREQ_MODE_STANDARD);
-U8500_I2C_CONTROLLER(1, 0xe, 1, 1, 100000, I2C_FREQ_MODE_STANDARD);
-U8500_I2C_CONTROLLER(2, 0xe, 1, 1, 100000, I2C_FREQ_MODE_STANDARD);
-U8500_I2C_CONTROLLER(3, 0xe, 1, 1, 100000, I2C_FREQ_MODE_STANDARD);
+U8500_I2C_CONTROLLER(0, 0xe, 1, 8, 100000, 200, I2C_FREQ_MODE_FAST);
+U8500_I2C_CONTROLLER(1, 0xe, 1, 8, 100000, 200, I2C_FREQ_MODE_FAST);
+U8500_I2C_CONTROLLER(2, 0xe, 1, 8, 100000, 200, I2C_FREQ_MODE_FAST);
+U8500_I2C_CONTROLLER(3, 0xe, 1, 8, 100000, 200, I2C_FREQ_MODE_FAST);
 
 static void __init mop500_i2c_init(void)
 {
diff --git a/arch/arm/plat-nomadik/include/plat/i2c.h b/arch/arm/plat-nomadik/include/plat/i2c.h
index 1621db67a53d..8ba70ffc31ec 100644
--- a/arch/arm/plat-nomadik/include/plat/i2c.h
+++ b/arch/arm/plat-nomadik/include/plat/i2c.h
@@ -11,8 +11,8 @@
 enum i2c_freq_mode {
 	I2C_FREQ_MODE_STANDARD,		/* up to 100 Kb/s */
 	I2C_FREQ_MODE_FAST,		/* up to 400 Kb/s */
+	I2C_FREQ_MODE_HIGH_SPEED,	/* up to 3.4 Mb/s */
 	I2C_FREQ_MODE_FAST_PLUS,	/* up to 1 Mb/s */
-	I2C_FREQ_MODE_HIGH_SPEED	/* up to 3.4 Mb/s */
 };
 
 /**
@@ -24,13 +24,15 @@ enum i2c_freq_mode {
  *		to the values of 14, 6, 2 for a 48 MHz i2c clk
  * @tft: Tx FIFO Threshold in bytes
  * @rft: Rx FIFO Threshold in bytes
+ * @timeout Slave response timeout(ms)
  * @sm: speed mode
  */
 struct nmk_i2c_controller {
 	unsigned long	clk_freq;
 	unsigned short	slsu;
 	unsigned char	tft;
 	unsigned char	rft;
+	int		timeout;
 	enum i2c_freq_mode	sm;
 };
 
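With the header change above, board code supplies the new slave-response timeout alongside the existing FIFO and clock settings, exactly as the mop500 macro does. A hypothetical board file might fill the struct like this (the values are illustrative, not from the patch):

#include <plat/i2c.h>

static struct nmk_i2c_controller demo_i2c0_data = {
	.clk_freq = 100000,		/* bus clock in Hz */
	.slsu     = 0xe,		/* slave data setup time */
	.tft      = 1,			/* Tx FIFO threshold in bytes */
	.rft      = 8,			/* Rx FIFO threshold in bytes */
	.timeout  = 200,		/* slave response timeout in ms */
	.sm       = I2C_FREQ_MODE_FAST,
};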
diff --git a/arch/blackfin/kernel/vmlinux.lds.S b/arch/blackfin/kernel/vmlinux.lds.S
index 854fa49f1c3e..8d85c8c6f857 100644
--- a/arch/blackfin/kernel/vmlinux.lds.S
+++ b/arch/blackfin/kernel/vmlinux.lds.S
@@ -136,7 +136,7 @@ SECTIONS
 
 	. = ALIGN(16);
 	INIT_DATA_SECTION(16)
-	PERCPU(32, PAGE_SIZE)
+	PERCPU_SECTION(32)
 
 	.exit.data :
 	{
diff --git a/arch/cris/kernel/vmlinux.lds.S b/arch/cris/kernel/vmlinux.lds.S
index 728bbd9e7d4c..a6990cb0f098 100644
--- a/arch/cris/kernel/vmlinux.lds.S
+++ b/arch/cris/kernel/vmlinux.lds.S
@@ -102,7 +102,7 @@ SECTIONS
 #endif
 	__vmlinux_end = .;		/* Last address of the physical file. */
 #ifdef CONFIG_ETRAX_ARCH_V32
-	PERCPU(32, PAGE_SIZE)
+	PERCPU_SECTION(32)
 
 	.init.ramfs : {
 		INIT_RAM_FS
diff --git a/arch/frv/kernel/vmlinux.lds.S b/arch/frv/kernel/vmlinux.lds.S
index 0daae8af5787..7e958d829ec9 100644
--- a/arch/frv/kernel/vmlinux.lds.S
+++ b/arch/frv/kernel/vmlinux.lds.S
@@ -37,7 +37,7 @@ SECTIONS
 	_einittext = .;
 
 	INIT_DATA_SECTION(8)
-	PERCPU(L1_CACHE_BYTES, 4096)
+	PERCPU_SECTION(L1_CACHE_BYTES)
 
 	. = ALIGN(PAGE_SIZE);
 	__init_end = .;
diff --git a/arch/m32r/kernel/vmlinux.lds.S b/arch/m32r/kernel/vmlinux.lds.S
index cf95aec77460..018e4a711d79 100644
--- a/arch/m32r/kernel/vmlinux.lds.S
+++ b/arch/m32r/kernel/vmlinux.lds.S
@@ -54,7 +54,7 @@ SECTIONS
 	__init_begin = .;
 	INIT_TEXT_SECTION(PAGE_SIZE)
 	INIT_DATA_SECTION(16)
-	PERCPU(32, PAGE_SIZE)
+	PERCPU_SECTION(32)
 	. = ALIGN(PAGE_SIZE);
 	__init_end = .;
 	/* freed after init ends here */
diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig
index 75531da02a40..d66e34c718d0 100644
--- a/arch/m68k/Kconfig
+++ b/arch/m68k/Kconfig
@@ -5,6 +5,7 @@ config M68K
 	select HAVE_AOUT if MMU
 	select GENERIC_ATOMIC64 if MMU
 	select HAVE_GENERIC_HARDIRQS if !MMU
+	select GENERIC_IRQ_SHOW if !MMU
 
 config RWSEM_GENERIC_SPINLOCK
 	bool
diff --git a/arch/m68k/include/asm/bitops_no.h b/arch/m68k/include/asm/bitops_no.h
index 7d3779fdc5b6..6b0e2d349f0e 100644
--- a/arch/m68k/include/asm/bitops_no.h
+++ b/arch/m68k/include/asm/bitops_no.h
@@ -246,23 +246,7 @@ static inline int __test_and_clear_bit_le(int nr, volatile void *addr)
 	return retval;
 }
 
-#define ext2_set_bit_atomic(lock, nr, addr)			\
-	({							\
-		int ret;					\
-		spin_lock(lock);				\
-		ret = __test_and_set_bit_le((nr), (addr));	\
-		spin_unlock(lock);				\
-		ret;						\
-	})
-
-#define ext2_clear_bit_atomic(lock, nr, addr)			\
-	({							\
-		int ret;					\
-		spin_lock(lock);				\
-		ret = __test_and_clear_bit_le((nr), (addr));	\
-		spin_unlock(lock);				\
-		ret;						\
-	})
+#include <asm-generic/bitops/ext2-atomic.h>
 
 static inline int test_bit_le(int nr, const volatile void *addr)
 {
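The deleted macros are not lost: <asm-generic/bitops/ext2-atomic.h> supplies the same lock-protected sequence for architectures whose test-and-set primitives are not atomic on their own. A sketch of what the generic header provides (paraphrased from memory, not a verbatim copy):

/* Same semantics as the macros removed above: serialize the
 * non-atomic little-endian test-and-set with the caller's spinlock
 * and hand back the previous bit value.
 */
#define ext2_set_bit_atomic(lock, nr, addr)		\
	({						\
		int ret;				\
		spin_lock(lock);			\
		ret = __test_and_set_bit_le(nr, addr);	\
		spin_unlock(lock);			\
		ret;					\
	})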
diff --git a/arch/m68k/include/asm/io_no.h b/arch/m68k/include/asm/io_no.h
index cf20f3097af6..353bf754a972 100644
--- a/arch/m68k/include/asm/io_no.h
+++ b/arch/m68k/include/asm/io_no.h
@@ -144,8 +144,10 @@ static inline void io_insl(unsigned int addr, void *buf, int len)
 #define IOMAP_NOCACHE_NONSER	2
 #define IOMAP_WRITETHROUGH	3
 
-extern void *__ioremap(unsigned long physaddr, unsigned long size, int cacheflag);
-
+static inline void *__ioremap(unsigned long physaddr, unsigned long size, int cacheflag)
+{
+	return (void *) physaddr;
+}
 static inline void *ioremap(unsigned long physaddr, unsigned long size)
 {
 	return __ioremap(physaddr, size, IOMAP_NOCACHE_SER);
@@ -163,7 +165,7 @@ static inline void *ioremap_fullcache(unsigned long physaddr, unsigned long size
 	return __ioremap(physaddr, size, IOMAP_FULL_CACHING);
 }
 
-extern void iounmap(void *addr);
+#define	iounmap(addr)	do { } while(0)
 
 /*
  * Convert a physical pointer to a virtual kernel pointer for /dev/mem
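This is the usual nommu shortcut: with no MMU there is nothing to remap, so __ioremap() degenerates to an identity cast and iounmap() to a no-op, letting the out-of-line definitions (and their exports in m68k_ksyms_no.c below) disappear. Callers are unaffected; a sketch, with the register address made up:

#include <asm/io.h>

static unsigned long demo_read_reg(void)
{
	/* On nommu m68k this ioremap() now compiles down to a cast. */
	void *regs = ioremap(0xfffff000, 0x1000);
	unsigned long v = *(volatile unsigned long *)regs;

	iounmap(regs);		/* now a no-op macro */
	return v;
}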
diff --git a/arch/m68k/kernel/asm-offsets.c b/arch/m68k/kernel/asm-offsets.c
index 59a69a5c62f2..983fed9d469b 100644
--- a/arch/m68k/kernel/asm-offsets.c
+++ b/arch/m68k/kernel/asm-offsets.c
@@ -1,5 +1,105 @@
-#ifdef CONFIG_MMU
-#include "asm-offsets_mm.c"
+/*
+ * This program is used to generate definitions needed by
+ * assembly language modules.
+ *
+ * We use the technique used in the OSF Mach kernel code:
+ * generate asm statements containing #defines,
+ * compile this file to assembler, and then extract the
+ * #defines from the assembly-language output.
+ */
+
+#define ASM_OFFSETS_C
+
+#include <linux/stddef.h>
+#include <linux/sched.h>
+#include <linux/kernel_stat.h>
+#include <linux/kbuild.h>
+#include <asm/bootinfo.h>
+#include <asm/irq.h>
+#include <asm/amigahw.h>
+#include <linux/font.h>
+
+int main(void)
+{
+	/* offsets into the task struct */
+	DEFINE(TASK_THREAD, offsetof(struct task_struct, thread));
+	DEFINE(TASK_MM, offsetof(struct task_struct, mm));
+	DEFINE(TASK_INFO, offsetof(struct task_struct, thread.info));
+	DEFINE(TASK_TINFO, offsetof(struct task_struct, thread.info));
+
+	/* offsets into the thread struct */
+	DEFINE(THREAD_KSP, offsetof(struct thread_struct, ksp));
+	DEFINE(THREAD_USP, offsetof(struct thread_struct, usp));
+	DEFINE(THREAD_SR, offsetof(struct thread_struct, sr));
+	DEFINE(THREAD_FS, offsetof(struct thread_struct, fs));
+	DEFINE(THREAD_CRP, offsetof(struct thread_struct, crp));
+	DEFINE(THREAD_ESP0, offsetof(struct thread_struct, esp0));
+	DEFINE(THREAD_FPREG, offsetof(struct thread_struct, fp));
+	DEFINE(THREAD_FPCNTL, offsetof(struct thread_struct, fpcntl));
+	DEFINE(THREAD_FPSTATE, offsetof(struct thread_struct, fpstate));
+
+	/* offsets into the thread_info struct */
+	DEFINE(TINFO_PREEMPT, offsetof(struct thread_info, preempt_count));
+	DEFINE(TINFO_FLAGS, offsetof(struct thread_info, flags));
+
+	/* offsets into the pt_regs */
+	DEFINE(PT_OFF_D0, offsetof(struct pt_regs, d0));
+	DEFINE(PT_OFF_ORIG_D0, offsetof(struct pt_regs, orig_d0));
+	DEFINE(PT_OFF_D1, offsetof(struct pt_regs, d1));
+	DEFINE(PT_OFF_D2, offsetof(struct pt_regs, d2));
+	DEFINE(PT_OFF_D3, offsetof(struct pt_regs, d3));
+	DEFINE(PT_OFF_D4, offsetof(struct pt_regs, d4));
+	DEFINE(PT_OFF_D5, offsetof(struct pt_regs, d5));
+	DEFINE(PT_OFF_A0, offsetof(struct pt_regs, a0));
+	DEFINE(PT_OFF_A1, offsetof(struct pt_regs, a1));
+	DEFINE(PT_OFF_A2, offsetof(struct pt_regs, a2));
+	DEFINE(PT_OFF_PC, offsetof(struct pt_regs, pc));
+	DEFINE(PT_OFF_SR, offsetof(struct pt_regs, sr));
+
+	/* bitfields are a bit difficult */
+#ifdef CONFIG_COLDFIRE
+	DEFINE(PT_OFF_FORMATVEC, offsetof(struct pt_regs, sr) - 2);
 #else
-#include "asm-offsets_no.c"
+	DEFINE(PT_OFF_FORMATVEC, offsetof(struct pt_regs, pc) + 4);
+#endif
+
+	/* offsets into the irq_cpustat_t struct */
+	DEFINE(CPUSTAT_SOFTIRQ_PENDING, offsetof(irq_cpustat_t, __softirq_pending));
+
+	/* signal defines */
+	DEFINE(LSIGSEGV, SIGSEGV);
+	DEFINE(LSEGV_MAPERR, SEGV_MAPERR);
+	DEFINE(LSIGTRAP, SIGTRAP);
+	DEFINE(LTRAP_TRACE, TRAP_TRACE);
+
+#ifdef CONFIG_MMU
+	/* offsets into the bi_record struct */
+	DEFINE(BIR_TAG, offsetof(struct bi_record, tag));
+	DEFINE(BIR_SIZE, offsetof(struct bi_record, size));
+	DEFINE(BIR_DATA, offsetof(struct bi_record, data));
+
+	/* offsets into font_desc (drivers/video/console/font.h) */
+	DEFINE(FONT_DESC_IDX, offsetof(struct font_desc, idx));
+	DEFINE(FONT_DESC_NAME, offsetof(struct font_desc, name));
+	DEFINE(FONT_DESC_WIDTH, offsetof(struct font_desc, width));
+	DEFINE(FONT_DESC_HEIGHT, offsetof(struct font_desc, height));
+	DEFINE(FONT_DESC_DATA, offsetof(struct font_desc, data));
+	DEFINE(FONT_DESC_PREF, offsetof(struct font_desc, pref));
+
+	/* offsets into the custom struct */
+	DEFINE(CUSTOMBASE, &amiga_custom);
+	DEFINE(C_INTENAR, offsetof(struct CUSTOM, intenar));
+	DEFINE(C_INTREQR, offsetof(struct CUSTOM, intreqr));
+	DEFINE(C_INTENA, offsetof(struct CUSTOM, intena));
+	DEFINE(C_INTREQ, offsetof(struct CUSTOM, intreq));
+	DEFINE(C_SERDATR, offsetof(struct CUSTOM, serdatr));
+	DEFINE(C_SERDAT, offsetof(struct CUSTOM, serdat));
+	DEFINE(C_SERPER, offsetof(struct CUSTOM, serper));
+	DEFINE(CIAABASE, &ciaa);
+	DEFINE(CIABBASE, &ciab);
+	DEFINE(C_PRA, offsetof(struct CIA, pra));
+	DEFINE(ZTWOBASE, zTwoBase);
 #endif
+
+	return 0;
+}
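For readers new to asm-offsets.c: the file is compiled to assembly, never linked, and the DEFINE() markers are scraped out of the generated .s output to produce asm-offsets.h for entry.S and friends. <linux/kbuild.h> implements DEFINE() roughly as below (a sketch of the standard mechanism, not part of this patch):

/* The asm() body survives into the generated assembly as
 * "->SYMBOL <value>", which kbuild's sed script rewrites into
 * "#define SYMBOL <value>" in the generated asm-offsets.h.
 */
#define DEFINE(sym, val) \
	asm volatile("\n->" #sym " %0 " #val : : "i" (val))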
diff --git a/arch/m68k/kernel/asm-offsets_mm.c b/arch/m68k/kernel/asm-offsets_mm.c
deleted file mode 100644
index 78e59b82ebc3..000000000000
--- a/arch/m68k/kernel/asm-offsets_mm.c
+++ /dev/null
@@ -1,100 +0,0 @@
1/*
2 * This program is used to generate definitions needed by
3 * assembly language modules.
4 *
5 * We use the technique used in the OSF Mach kernel code:
6 * generate asm statements containing #defines,
7 * compile this file to assembler, and then extract the
8 * #defines from the assembly-language output.
9 */
10
11#define ASM_OFFSETS_C
12
13#include <linux/stddef.h>
14#include <linux/sched.h>
15#include <linux/kernel_stat.h>
16#include <linux/kbuild.h>
17#include <asm/bootinfo.h>
18#include <asm/irq.h>
19#include <asm/amigahw.h>
20#include <linux/font.h>
21
22int main(void)
23{
24 /* offsets into the task struct */
25 DEFINE(TASK_THREAD, offsetof(struct task_struct, thread));
26 DEFINE(TASK_INFO, offsetof(struct task_struct, thread.info));
27 DEFINE(TASK_MM, offsetof(struct task_struct, mm));
28#ifdef CONFIG_MMU
29 DEFINE(TASK_TINFO, offsetof(struct task_struct, thread.info));
30#endif
31
32 /* offsets into the thread struct */
33 DEFINE(THREAD_KSP, offsetof(struct thread_struct, ksp));
34 DEFINE(THREAD_USP, offsetof(struct thread_struct, usp));
35 DEFINE(THREAD_SR, offsetof(struct thread_struct, sr));
36 DEFINE(THREAD_FS, offsetof(struct thread_struct, fs));
37 DEFINE(THREAD_CRP, offsetof(struct thread_struct, crp));
38 DEFINE(THREAD_ESP0, offsetof(struct thread_struct, esp0));
39 DEFINE(THREAD_FPREG, offsetof(struct thread_struct, fp));
40 DEFINE(THREAD_FPCNTL, offsetof(struct thread_struct, fpcntl));
41 DEFINE(THREAD_FPSTATE, offsetof(struct thread_struct, fpstate));
42
43 /* offsets into the thread_info struct */
44 DEFINE(TINFO_PREEMPT, offsetof(struct thread_info, preempt_count));
45 DEFINE(TINFO_FLAGS, offsetof(struct thread_info, flags));
46
47 /* offsets into the pt_regs */
48 DEFINE(PT_OFF_D0, offsetof(struct pt_regs, d0));
49 DEFINE(PT_OFF_ORIG_D0, offsetof(struct pt_regs, orig_d0));
50 DEFINE(PT_OFF_D1, offsetof(struct pt_regs, d1));
51 DEFINE(PT_OFF_D2, offsetof(struct pt_regs, d2));
52 DEFINE(PT_OFF_D3, offsetof(struct pt_regs, d3));
53 DEFINE(PT_OFF_D4, offsetof(struct pt_regs, d4));
54 DEFINE(PT_OFF_D5, offsetof(struct pt_regs, d5));
55 DEFINE(PT_OFF_A0, offsetof(struct pt_regs, a0));
56 DEFINE(PT_OFF_A1, offsetof(struct pt_regs, a1));
57 DEFINE(PT_OFF_A2, offsetof(struct pt_regs, a2));
58 DEFINE(PT_OFF_PC, offsetof(struct pt_regs, pc));
59 DEFINE(PT_OFF_SR, offsetof(struct pt_regs, sr));
60 /* bitfields are a bit difficult */
61 DEFINE(PT_OFF_FORMATVEC, offsetof(struct pt_regs, pc) + 4);
62
63 /* offsets into the irq_cpustat_t struct */
64 DEFINE(CPUSTAT_SOFTIRQ_PENDING, offsetof(irq_cpustat_t, __softirq_pending));
65
66 /* offsets into the bi_record struct */
67 DEFINE(BIR_TAG, offsetof(struct bi_record, tag));
68 DEFINE(BIR_SIZE, offsetof(struct bi_record, size));
69 DEFINE(BIR_DATA, offsetof(struct bi_record, data));
70
71 /* offsets into font_desc (drivers/video/console/font.h) */
72 DEFINE(FONT_DESC_IDX, offsetof(struct font_desc, idx));
73 DEFINE(FONT_DESC_NAME, offsetof(struct font_desc, name));
74 DEFINE(FONT_DESC_WIDTH, offsetof(struct font_desc, width));
75 DEFINE(FONT_DESC_HEIGHT, offsetof(struct font_desc, height));
76 DEFINE(FONT_DESC_DATA, offsetof(struct font_desc, data));
77 DEFINE(FONT_DESC_PREF, offsetof(struct font_desc, pref));
78
79 /* signal defines */
80 DEFINE(LSIGSEGV, SIGSEGV);
81 DEFINE(LSEGV_MAPERR, SEGV_MAPERR);
82 DEFINE(LSIGTRAP, SIGTRAP);
83 DEFINE(LTRAP_TRACE, TRAP_TRACE);
84
85 /* offsets into the custom struct */
86 DEFINE(CUSTOMBASE, &amiga_custom);
87 DEFINE(C_INTENAR, offsetof(struct CUSTOM, intenar));
88 DEFINE(C_INTREQR, offsetof(struct CUSTOM, intreqr));
89 DEFINE(C_INTENA, offsetof(struct CUSTOM, intena));
90 DEFINE(C_INTREQ, offsetof(struct CUSTOM, intreq));
91 DEFINE(C_SERDATR, offsetof(struct CUSTOM, serdatr));
92 DEFINE(C_SERDAT, offsetof(struct CUSTOM, serdat));
93 DEFINE(C_SERPER, offsetof(struct CUSTOM, serper));
94 DEFINE(CIAABASE, &ciaa);
95 DEFINE(CIABBASE, &ciab);
96 DEFINE(C_PRA, offsetof(struct CIA, pra));
97 DEFINE(ZTWOBASE, zTwoBase);
98
99 return 0;
100}
diff --git a/arch/m68k/kernel/asm-offsets_no.c b/arch/m68k/kernel/asm-offsets_no.c
deleted file mode 100644
index ffe02f41ad46..000000000000
--- a/arch/m68k/kernel/asm-offsets_no.c
+++ /dev/null
@@ -1,76 +0,0 @@
1/*
2 * This program is used to generate definitions needed by
3 * assembly language modules.
4 *
5 * We use the technique used in the OSF Mach kernel code:
6 * generate asm statements containing #defines,
7 * compile this file to assembler, and then extract the
8 * #defines from the assembly-language output.
9 */
10
11#include <linux/stddef.h>
12#include <linux/sched.h>
13#include <linux/kernel_stat.h>
14#include <linux/ptrace.h>
15#include <linux/hardirq.h>
16#include <linux/kbuild.h>
17#include <asm/bootinfo.h>
18#include <asm/irq.h>
19#include <asm/thread_info.h>
20
21int main(void)
22{
23 /* offsets into the task struct */
24 DEFINE(TASK_THREAD, offsetof(struct task_struct, thread));
25 DEFINE(TASK_MM, offsetof(struct task_struct, mm));
26
27 /* offsets into the irq_cpustat_t struct */
28 DEFINE(CPUSTAT_SOFTIRQ_PENDING, offsetof(irq_cpustat_t, __softirq_pending));
29
30 /* offsets into the thread struct */
31 DEFINE(THREAD_KSP, offsetof(struct thread_struct, ksp));
32 DEFINE(THREAD_USP, offsetof(struct thread_struct, usp));
33 DEFINE(THREAD_SR, offsetof(struct thread_struct, sr));
34 DEFINE(THREAD_FS, offsetof(struct thread_struct, fs));
35 DEFINE(THREAD_CRP, offsetof(struct thread_struct, crp));
36 DEFINE(THREAD_ESP0, offsetof(struct thread_struct, esp0));
37 DEFINE(THREAD_FPREG, offsetof(struct thread_struct, fp));
38 DEFINE(THREAD_FPCNTL, offsetof(struct thread_struct, fpcntl));
39 DEFINE(THREAD_FPSTATE, offsetof(struct thread_struct, fpstate));
40
41 /* offsets into the pt_regs */
42 DEFINE(PT_OFF_D0, offsetof(struct pt_regs, d0));
43 DEFINE(PT_OFF_ORIG_D0, offsetof(struct pt_regs, orig_d0));
44 DEFINE(PT_OFF_D1, offsetof(struct pt_regs, d1));
45 DEFINE(PT_OFF_D2, offsetof(struct pt_regs, d2));
46 DEFINE(PT_OFF_D3, offsetof(struct pt_regs, d3));
47 DEFINE(PT_OFF_D4, offsetof(struct pt_regs, d4));
48 DEFINE(PT_OFF_D5, offsetof(struct pt_regs, d5));
49 DEFINE(PT_OFF_A0, offsetof(struct pt_regs, a0));
50 DEFINE(PT_OFF_A1, offsetof(struct pt_regs, a1));
51 DEFINE(PT_OFF_A2, offsetof(struct pt_regs, a2));
52 DEFINE(PT_OFF_PC, offsetof(struct pt_regs, pc));
53 DEFINE(PT_OFF_SR, offsetof(struct pt_regs, sr));
54
55#ifdef CONFIG_COLDFIRE
56 /* bitfields are a bit difficult */
57 DEFINE(PT_OFF_FORMATVEC, offsetof(struct pt_regs, sr) - 2);
58#else
59 /* bitfields are a bit difficult */
60 DEFINE(PT_OFF_FORMATVEC, offsetof(struct pt_regs, pc) + 4);
61#endif
62
63 /* signal defines */
64 DEFINE(SIGSEGV, SIGSEGV);
65 DEFINE(SEGV_MAPERR, SEGV_MAPERR);
66 DEFINE(SIGTRAP, SIGTRAP);
67 DEFINE(TRAP_TRACE, TRAP_TRACE);
68
69 DEFINE(PT_PTRACED, PT_PTRACED);
70
71 /* Offsets in thread_info structure */
72 DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
73 DEFINE(TI_PREEMPTCOUNT, offsetof(struct thread_info, preempt_count));
74
75 return 0;
76}
diff --git a/arch/m68k/kernel/entry_no.S b/arch/m68k/kernel/entry_no.S
index 2783f25e38bd..5f0f6b598b5a 100644
--- a/arch/m68k/kernel/entry_no.S
+++ b/arch/m68k/kernel/entry_no.S
@@ -24,7 +24,6 @@
  *	linux 2.4 support David McCullough <davidm@snapgear.com>
  */
 
-#include <linux/sys.h>
 #include <linux/linkage.h>
 #include <asm/errno.h>
 #include <asm/setup.h>
diff --git a/arch/m68k/kernel/irq.c b/arch/m68k/kernel/irq.c
index 15dbc3e9d20c..544b8717d499 100644
--- a/arch/m68k/kernel/irq.c
+++ b/arch/m68k/kernel/irq.c
@@ -28,31 +28,3 @@ asmlinkage void do_IRQ(int irq, struct pt_regs *regs)
 
 	set_irq_regs(oldregs);
 }
-
-int show_interrupts(struct seq_file *p, void *v)
-{
-	struct irqaction *ap;
-	int irq = *((loff_t *) v);
-
-	if (irq == 0)
-		seq_puts(p, "           CPU0\n");
-
-	if (irq < NR_IRQS) {
-		struct irq_desc *desc = irq_to_desc(irq);
-
-		ap = desc->action;
-		if (ap) {
-			seq_printf(p, "%3d: ", irq);
-			seq_printf(p, "%10u ", kstat_irqs(irq));
-			seq_printf(p, "%14s  ", irq_desc_get_chip(desc)->name);
-
-			seq_printf(p, "%s", ap->name);
-			for (ap = ap->next; ap; ap = ap->next)
-				seq_printf(p, ", %s", ap->name);
-			seq_putc(p, '\n');
-		}
-	}
-
-	return 0;
-}
-
diff --git a/arch/m68k/kernel/m68k_ksyms.c b/arch/m68k/kernel/m68k_ksyms.c
index 4752c28ce0ac..33f82769547c 100644
--- a/arch/m68k/kernel/m68k_ksyms.c
+++ b/arch/m68k/kernel/m68k_ksyms.c
@@ -1,5 +1,33 @@
-#ifdef CONFIG_MMU
-#include "m68k_ksyms_mm.c"
-#else
-#include "m68k_ksyms_no.c"
+#include <linux/module.h>
+
+asmlinkage long long __ashldi3 (long long, int);
+asmlinkage long long __ashrdi3 (long long, int);
+asmlinkage long long __lshrdi3 (long long, int);
+asmlinkage long long __muldi3 (long long, long long);
+
+/* The following are special because they're not called
+   explicitly (the C compiler generates them).  Fortunately,
+   their interface isn't gonna change any time soon now, so
+   it's OK to leave it out of version control.  */
+EXPORT_SYMBOL(__ashldi3);
+EXPORT_SYMBOL(__ashrdi3);
+EXPORT_SYMBOL(__lshrdi3);
+EXPORT_SYMBOL(__muldi3);
+
+#if !defined(__mc68020__) && !defined(__mc68030__) && \
+    !defined(__mc68040__) && !defined(__mc68060__) && !defined(__mcpu32__)
+/*
+ * Simpler 68k and ColdFire parts also need a few other gcc functions.
+ */
+extern long long __divsi3(long long, long long);
+extern long long __modsi3(long long, long long);
+extern long long __mulsi3(long long, long long);
+extern long long __udivsi3(long long, long long);
+extern long long __umodsi3(long long, long long);
+
+EXPORT_SYMBOL(__divsi3);
+EXPORT_SYMBOL(__modsi3);
+EXPORT_SYMBOL(__mulsi3);
+EXPORT_SYMBOL(__udivsi3);
+EXPORT_SYMBOL(__umodsi3);
 #endif
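These exports matter because gcc emits calls to its libgcc helpers behind the programmer's back: a 64-bit multiply in a module on m68k becomes a call to __muldi3, so the symbol must be in the kernel's export table. An illustrative module-side function (not from the patch):

#include <linux/module.h>
#include <linux/types.h>

/* gcc lowers the 64x64 multiply below to a __muldi3 call on m68k;
 * the module loader resolves that reference via the export above.
 */
u64 demo_scale(u64 bytes, u64 factor)
{
	return bytes * factor;
}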
diff --git a/arch/m68k/kernel/m68k_ksyms_mm.c b/arch/m68k/kernel/m68k_ksyms_mm.c
deleted file mode 100644
index d900e77e5363..000000000000
--- a/arch/m68k/kernel/m68k_ksyms_mm.c
+++ /dev/null
@@ -1,16 +0,0 @@
1#include <linux/module.h>
2
3asmlinkage long long __ashldi3 (long long, int);
4asmlinkage long long __ashrdi3 (long long, int);
5asmlinkage long long __lshrdi3 (long long, int);
6asmlinkage long long __muldi3 (long long, long long);
7
8/* The following are special because they're not called
9 explicitly (the C compiler generates them). Fortunately,
10 their interface isn't gonna change any time soon now, so
11 it's OK to leave it out of version control. */
12EXPORT_SYMBOL(__ashldi3);
13EXPORT_SYMBOL(__ashrdi3);
14EXPORT_SYMBOL(__lshrdi3);
15EXPORT_SYMBOL(__muldi3);
16
diff --git a/arch/m68k/kernel/m68k_ksyms_no.c b/arch/m68k/kernel/m68k_ksyms_no.c
deleted file mode 100644
index 39fe0a7aec32..000000000000
--- a/arch/m68k/kernel/m68k_ksyms_no.c
+++ /dev/null
@@ -1,78 +0,0 @@
1#include <linux/module.h>
2#include <linux/linkage.h>
3#include <linux/sched.h>
4#include <linux/string.h>
5#include <linux/mm.h>
6#include <linux/user.h>
7#include <linux/elfcore.h>
8#include <linux/in6.h>
9#include <linux/interrupt.h>
10
11#include <asm/setup.h>
12#include <asm/machdep.h>
13#include <asm/pgalloc.h>
14#include <asm/irq.h>
15#include <asm/io.h>
16#include <asm/checksum.h>
17#include <asm/current.h>
18
19extern int dump_fpu(struct pt_regs *, elf_fpregset_t *);
20
21/* platform dependent support */
22
23EXPORT_SYMBOL(__ioremap);
24EXPORT_SYMBOL(iounmap);
25EXPORT_SYMBOL(dump_fpu);
26
27EXPORT_SYMBOL(ip_fast_csum);
28
29EXPORT_SYMBOL(kernel_thread);
30
31/* Networking helper routines. */
32EXPORT_SYMBOL(csum_partial_copy_nocheck);
33
34/* The following are special because they're not called
35 explicitly (the C compiler generates them). Fortunately,
36 their interface isn't gonna change any time soon now, so
37 it's OK to leave it out of version control. */
38EXPORT_SYMBOL(memcpy);
39EXPORT_SYMBOL(memset);
40
41/*
42 * libgcc functions - functions that are used internally by the
43 * compiler... (prototypes are not correct though, but that
44 * doesn't really matter since they're not versioned).
45 */
46extern void __ashldi3(void);
47extern void __ashrdi3(void);
48extern void __divsi3(void);
49extern void __lshrdi3(void);
50extern void __modsi3(void);
51extern void __muldi3(void);
52extern void __mulsi3(void);
53extern void __udivsi3(void);
54extern void __umodsi3(void);
55
56 /* gcc lib functions */
57EXPORT_SYMBOL(__ashldi3);
58EXPORT_SYMBOL(__ashrdi3);
59EXPORT_SYMBOL(__divsi3);
60EXPORT_SYMBOL(__lshrdi3);
61EXPORT_SYMBOL(__modsi3);
62EXPORT_SYMBOL(__muldi3);
63EXPORT_SYMBOL(__mulsi3);
64EXPORT_SYMBOL(__udivsi3);
65EXPORT_SYMBOL(__umodsi3);
66
67#ifdef CONFIG_COLDFIRE
68extern unsigned int *dma_device_address;
69extern unsigned long dma_base_addr, _ramend;
70EXPORT_SYMBOL(dma_base_addr);
71EXPORT_SYMBOL(dma_device_address);
72EXPORT_SYMBOL(_ramend);
73
74extern asmlinkage void trap(void);
75extern void *_ramvec;
76EXPORT_SYMBOL(trap);
77EXPORT_SYMBOL(_ramvec);
78#endif /* CONFIG_COLDFIRE */
diff --git a/arch/m68k/kernel/process_no.c b/arch/m68k/kernel/process_no.c
index e2a63af5d517..9b86ad11c68e 100644
--- a/arch/m68k/kernel/process_no.c
+++ b/arch/m68k/kernel/process_no.c
@@ -151,6 +151,7 @@ int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
 	set_fs(fs);
 	return retval;
 }
+EXPORT_SYMBOL(kernel_thread);
 
 void flush_thread(void)
 {
@@ -283,6 +284,7 @@ int dump_fpu(struct pt_regs *regs, struct user_m68kfp_struct *fpu)
 #endif
 	return 1;
 }
+EXPORT_SYMBOL(dump_fpu);
 
 /*
  * Generic dumping code. Used for panic and debug.
diff --git a/arch/m68k/kernel/sys_m68k.c b/arch/m68k/kernel/sys_m68k.c
index 63013df33584..8623f8dc16f8 100644
--- a/arch/m68k/kernel/sys_m68k.c
+++ b/arch/m68k/kernel/sys_m68k.c
@@ -1,5 +1,580 @@
+/*
+ * linux/arch/m68k/kernel/sys_m68k.c
+ *
+ * This file contains various random system calls that
+ * have a non-standard calling sequence on the Linux/m68k
+ * platform.
+ */
+
+#include <linux/capability.h>
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/fs.h>
+#include <linux/smp.h>
+#include <linux/sem.h>
+#include <linux/msg.h>
+#include <linux/shm.h>
+#include <linux/stat.h>
+#include <linux/syscalls.h>
+#include <linux/mman.h>
+#include <linux/file.h>
+#include <linux/ipc.h>
+
+#include <asm/setup.h>
+#include <asm/uaccess.h>
+#include <asm/cachectl.h>
+#include <asm/traps.h>
+#include <asm/page.h>
+#include <asm/unistd.h>
+#include <asm/cacheflush.h>
+
 #ifdef CONFIG_MMU
-#include "sys_m68k_mm.c"
+
+#include <asm/tlb.h>
+
+asmlinkage int do_page_fault(struct pt_regs *regs, unsigned long address,
+			     unsigned long error_code);
+
+asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
+	unsigned long prot, unsigned long flags,
+	unsigned long fd, unsigned long pgoff)
+{
+	/*
+	 * This is wrong for sun3 - there PAGE_SIZE is 8Kb,
+	 * so we need to shift the argument down by 1; m68k mmap64(3)
+	 * (in libc) expects the last argument of mmap2 in 4Kb units.
+	 */
+	return sys_mmap_pgoff(addr, len, prot, flags, fd, pgoff);
+}
+
+/* Convert virtual (user) address VADDR to physical address PADDR */
+#define virt_to_phys_040(vaddr)						\
+({									\
+	unsigned long _mmusr, _paddr;					\
+									\
+	__asm__ __volatile__ (".chip 68040\n\t"				\
+			      "ptestr (%1)\n\t"				\
+			      "movec %%mmusr,%0\n\t"			\
+			      ".chip 68k"				\
+			      : "=r" (_mmusr)				\
+			      : "a" (vaddr));				\
+	_paddr = (_mmusr & MMU_R_040) ? (_mmusr & PAGE_MASK) : 0;	\
+	_paddr;								\
+})
+
+static inline int
+cache_flush_040 (unsigned long addr, int scope, int cache, unsigned long len)
+{
+	unsigned long paddr, i;
+
+	switch (scope)
+	{
+	case FLUSH_SCOPE_ALL:
+		switch (cache)
+		{
+		case FLUSH_CACHE_DATA:
+			/* This nop is needed for some broken versions of the 68040.  */
+			__asm__ __volatile__ ("nop\n\t"
+					      ".chip 68040\n\t"
+					      "cpusha %dc\n\t"
+					      ".chip 68k");
+			break;
+		case FLUSH_CACHE_INSN:
+			__asm__ __volatile__ ("nop\n\t"
+					      ".chip 68040\n\t"
+					      "cpusha %ic\n\t"
+					      ".chip 68k");
+			break;
+		default:
+		case FLUSH_CACHE_BOTH:
+			__asm__ __volatile__ ("nop\n\t"
+					      ".chip 68040\n\t"
+					      "cpusha %bc\n\t"
+					      ".chip 68k");
+			break;
+		}
+		break;
+
+	case FLUSH_SCOPE_LINE:
+		/* Find the physical address of the first mapped page in the
+		   address range.  */
+		if ((paddr = virt_to_phys_040(addr))) {
+			paddr += addr & ~(PAGE_MASK | 15);
+			len = (len + (addr & 15) + 15) >> 4;
+		} else {
+			unsigned long tmp = PAGE_SIZE - (addr & ~PAGE_MASK);
+
+			if (len <= tmp)
+				return 0;
+			addr += tmp;
+			len -= tmp;
+			tmp = PAGE_SIZE;
+			for (;;)
+			{
+				if ((paddr = virt_to_phys_040(addr)))
+					break;
+				if (len <= tmp)
+					return 0;
+				addr += tmp;
+				len -= tmp;
+			}
+			len = (len + 15) >> 4;
+		}
+		i = (PAGE_SIZE - (paddr & ~PAGE_MASK)) >> 4;
+		while (len--)
+		{
+			switch (cache)
+			{
+			case FLUSH_CACHE_DATA:
+				__asm__ __volatile__ ("nop\n\t"
+						      ".chip 68040\n\t"
+						      "cpushl %%dc,(%0)\n\t"
+						      ".chip 68k"
+						      : : "a" (paddr));
+				break;
+			case FLUSH_CACHE_INSN:
+				__asm__ __volatile__ ("nop\n\t"
+						      ".chip 68040\n\t"
+						      "cpushl %%ic,(%0)\n\t"
+						      ".chip 68k"
+						      : : "a" (paddr));
+				break;
+			default:
+			case FLUSH_CACHE_BOTH:
+				__asm__ __volatile__ ("nop\n\t"
+						      ".chip 68040\n\t"
+						      "cpushl %%bc,(%0)\n\t"
+						      ".chip 68k"
+						      : : "a" (paddr));
+				break;
+			}
+			if (!--i && len)
+			{
+				/*
+				 * No need to page align here since it is done by
+				 * virt_to_phys_040().
+				 */
+				addr += PAGE_SIZE;
+				i = PAGE_SIZE / 16;
+				/* Recompute physical address when crossing a page
+				   boundary.  */
+				for (;;)
+				{
+					if ((paddr = virt_to_phys_040(addr)))
+						break;
+					if (len <= i)
+						return 0;
+					len -= i;
+					addr += PAGE_SIZE;
+				}
+			}
+			else
+				paddr += 16;
+		}
+		break;
+
+	default:
+	case FLUSH_SCOPE_PAGE:
+		len += (addr & ~PAGE_MASK) + (PAGE_SIZE - 1);
+		for (len >>= PAGE_SHIFT; len--; addr += PAGE_SIZE)
+		{
+			if (!(paddr = virt_to_phys_040(addr)))
+				continue;
+			switch (cache)
+			{
+			case FLUSH_CACHE_DATA:
+				__asm__ __volatile__ ("nop\n\t"
+						      ".chip 68040\n\t"
+						      "cpushp %%dc,(%0)\n\t"
+						      ".chip 68k"
+						      : : "a" (paddr));
+				break;
+			case FLUSH_CACHE_INSN:
+				__asm__ __volatile__ ("nop\n\t"
+						      ".chip 68040\n\t"
+						      "cpushp %%ic,(%0)\n\t"
+						      ".chip 68k"
+						      : : "a" (paddr));
+				break;
+			default:
+			case FLUSH_CACHE_BOTH:
+				__asm__ __volatile__ ("nop\n\t"
+						      ".chip 68040\n\t"
+						      "cpushp %%bc,(%0)\n\t"
+						      ".chip 68k"
+						      : : "a" (paddr));
+				break;
+			}
+		}
+		break;
+	}
+	return 0;
+}
+
+#define virt_to_phys_060(vaddr)				\
+({							\
+	unsigned long paddr;				\
+	__asm__ __volatile__ (".chip 68060\n\t"		\
+			      "plpar (%0)\n\t"		\
+			      ".chip 68k"		\
+			      : "=a" (paddr)		\
+			      : "0" (vaddr));		\
+	(paddr); /* XXX */				\
+})
+
+static inline int
+cache_flush_060 (unsigned long addr, int scope, int cache, unsigned long len)
+{
+	unsigned long paddr, i;
+
+	/*
+	 * 68060 manual says:
+	 *  cpush %dc : flush DC, remains valid (with our %cacr setup)
+	 *  cpush %ic : invalidate IC
+	 *  cpush %bc : flush DC + invalidate IC
+	 */
+	switch (scope)
+	{
+	case FLUSH_SCOPE_ALL:
+		switch (cache)
+		{
+		case FLUSH_CACHE_DATA:
+			__asm__ __volatile__ (".chip 68060\n\t"
+					      "cpusha %dc\n\t"
+					      ".chip 68k");
+			break;
+		case FLUSH_CACHE_INSN:
+			__asm__ __volatile__ (".chip 68060\n\t"
+					      "cpusha %ic\n\t"
+					      ".chip 68k");
+			break;
+		default:
+		case FLUSH_CACHE_BOTH:
+			__asm__ __volatile__ (".chip 68060\n\t"
+					      "cpusha %bc\n\t"
+					      ".chip 68k");
+			break;
+		}
+		break;
+
+	case FLUSH_SCOPE_LINE:
+		/* Find the physical address of the first mapped page in the
+		   address range.  */
+		len += addr & 15;
+		addr &= -16;
+		if (!(paddr = virt_to_phys_060(addr))) {
+			unsigned long tmp = PAGE_SIZE - (addr & ~PAGE_MASK);
+
+			if (len <= tmp)
+				return 0;
+			addr += tmp;
+			len -= tmp;
+			tmp = PAGE_SIZE;
+			for (;;)
+			{
+				if ((paddr = virt_to_phys_060(addr)))
+					break;
+				if (len <= tmp)
+					return 0;
+				addr += tmp;
+				len -= tmp;
+			}
+		}
+		len = (len + 15) >> 4;
+		i = (PAGE_SIZE - (paddr & ~PAGE_MASK)) >> 4;
+		while (len--)
+		{
+			switch (cache)
+			{
+			case FLUSH_CACHE_DATA:
+				__asm__ __volatile__ (".chip 68060\n\t"
+						      "cpushl %%dc,(%0)\n\t"
+						      ".chip 68k"
+						      : : "a" (paddr));
+				break;
+			case FLUSH_CACHE_INSN:
+				__asm__ __volatile__ (".chip 68060\n\t"
+						      "cpushl %%ic,(%0)\n\t"
+						      ".chip 68k"
+						      : : "a" (paddr));
+				break;
+			default:
+			case FLUSH_CACHE_BOTH:
+				__asm__ __volatile__ (".chip 68060\n\t"
+						      "cpushl %%bc,(%0)\n\t"
+						      ".chip 68k"
+						      : : "a" (paddr));
+				break;
+			}
+			if (!--i && len)
+			{
+
+				/*
+				 * We just want to jump to the first cache line
+				 * in the next page.
+				 */
+				addr += PAGE_SIZE;
+				addr &= PAGE_MASK;
+
+				i = PAGE_SIZE / 16;
+				/* Recompute physical address when crossing a page
+				   boundary.  */
+				for (;;)
+				{
+					if ((paddr = virt_to_phys_060(addr)))
+						break;
+					if (len <= i)
+						return 0;
+					len -= i;
+					addr += PAGE_SIZE;
+				}
+			}
+			else
+				paddr += 16;
+		}
+		break;
+
+	default:
+	case FLUSH_SCOPE_PAGE:
+		len += (addr & ~PAGE_MASK) + (PAGE_SIZE - 1);
+		addr &= PAGE_MASK;	/* Workaround for bug in some
+					   revisions of the 68060 */
+		for (len >>= PAGE_SHIFT; len--; addr += PAGE_SIZE)
+		{
+			if (!(paddr = virt_to_phys_060(addr)))
+				continue;
+			switch (cache)
+			{
+			case FLUSH_CACHE_DATA:
+				__asm__ __volatile__ (".chip 68060\n\t"
+						      "cpushp %%dc,(%0)\n\t"
+						      ".chip 68k"
+						      : : "a" (paddr));
+				break;
+			case FLUSH_CACHE_INSN:
+				__asm__ __volatile__ (".chip 68060\n\t"
+						      "cpushp %%ic,(%0)\n\t"
+						      ".chip 68k"
+						      : : "a" (paddr));
+				break;
+			default:
+			case FLUSH_CACHE_BOTH:
+				__asm__ __volatile__ (".chip 68060\n\t"
+						      "cpushp %%bc,(%0)\n\t"
+						      ".chip 68k"
+						      : : "a" (paddr));
+				break;
+			}
+		}
+		break;
+	}
+	return 0;
+}
+
+/* sys_cacheflush -- flush (part of) the processor cache.  */
+asmlinkage int
+sys_cacheflush (unsigned long addr, int scope, int cache, unsigned long len)
+{
+	struct vm_area_struct *vma;
+	int ret = -EINVAL;
+
+	if (scope < FLUSH_SCOPE_LINE || scope > FLUSH_SCOPE_ALL ||
+	    cache & ~FLUSH_CACHE_BOTH)
+		goto out;
+
+	if (scope == FLUSH_SCOPE_ALL) {
+		/* Only the superuser may explicitly flush the whole cache. */
+		ret = -EPERM;
+		if (!capable(CAP_SYS_ADMIN))
+			goto out;
+	} else {
+		/*
+		 * Verify that the specified address region actually belongs
+		 * to this process.
+		 */
+		vma = find_vma (current->mm, addr);
+		ret = -EINVAL;
+		/* Check for overflow.  */
+		if (addr + len < addr)
+			goto out;
+		if (vma == NULL || addr < vma->vm_start || addr + len > vma->vm_end)
+			goto out;
+	}
+
+	if (CPU_IS_020_OR_030) {
+		if (scope == FLUSH_SCOPE_LINE && len < 256) {
+			unsigned long cacr;
+			__asm__ ("movec %%cacr, %0" : "=r" (cacr));
+			if (cache & FLUSH_CACHE_INSN)
+				cacr |= 4;
+			if (cache & FLUSH_CACHE_DATA)
+				cacr |= 0x400;
+			len >>= 2;
+			while (len--) {
+				__asm__ __volatile__ ("movec %1, %%caar\n\t"
+						      "movec %0, %%cacr"
+						      : /* no outputs */
+						      : "r" (cacr), "r" (addr));
+				addr += 4;
+			}
+		} else {
+			/* Flush the whole cache, even if page granularity requested. */
+			unsigned long cacr;
+			__asm__ ("movec %%cacr, %0" : "=r" (cacr));
+			if (cache & FLUSH_CACHE_INSN)
+				cacr |= 8;
+			if (cache & FLUSH_CACHE_DATA)
+				cacr |= 0x800;
+			__asm__ __volatile__ ("movec %0, %%cacr" : : "r" (cacr));
+		}
+		ret = 0;
+		goto out;
+	} else {
+		/*
+		 * 040 or 060: don't blindly trust 'scope', someone could
+		 * try to flush a few megs of memory.
+		 */
+
+		if (len>=3*PAGE_SIZE && scope<FLUSH_SCOPE_PAGE)
+			scope=FLUSH_SCOPE_PAGE;
+		if (len>=10*PAGE_SIZE && scope<FLUSH_SCOPE_ALL)
+			scope=FLUSH_SCOPE_ALL;
+		if (CPU_IS_040) {
+			ret = cache_flush_040 (addr, scope, cache, len);
+		} else if (CPU_IS_060) {
+			ret = cache_flush_060 (addr, scope, cache, len);
+		}
+	}
+out:
+	return ret;
+}
+
+/* This syscall gets its arguments in A0 (mem), D2 (oldval) and
+   D1 (newval).  */
+asmlinkage int
+sys_atomic_cmpxchg_32(unsigned long newval, int oldval, int d3, int d4, int d5,
+		      unsigned long __user * mem)
+{
+	/* This was borrowed from ARM's implementation.  */
+	for (;;) {
+		struct mm_struct *mm = current->mm;
+		pgd_t *pgd;
+		pmd_t *pmd;
+		pte_t *pte;
+		spinlock_t *ptl;
+		unsigned long mem_value;
+
+		down_read(&mm->mmap_sem);
+		pgd = pgd_offset(mm, (unsigned long)mem);
+		if (!pgd_present(*pgd))
+			goto bad_access;
+		pmd = pmd_offset(pgd, (unsigned long)mem);
+		if (!pmd_present(*pmd))
+			goto bad_access;
+		pte = pte_offset_map_lock(mm, pmd, (unsigned long)mem, &ptl);
+		if (!pte_present(*pte) || !pte_dirty(*pte)
+		    || !pte_write(*pte)) {
+			pte_unmap_unlock(pte, ptl);
+			goto bad_access;
+		}
+
+		mem_value = *mem;
+		if (mem_value == oldval)
+			*mem = newval;
+
+		pte_unmap_unlock(pte, ptl);
+		up_read(&mm->mmap_sem);
+		return mem_value;
+
+	bad_access:
+		up_read(&mm->mmap_sem);
+		/* This is not necessarily a bad access, we can get here if
+		   a memory we're trying to write to should be copied-on-write.
+		   Make the kernel do the necessary page stuff, then re-iterate.
+		   Simulate a write access fault to do that.  */
+		{
+			/* The first argument of the function corresponds to
+			   D1, which is the first field of struct pt_regs.  */
+			struct pt_regs *fp = (struct pt_regs *)&newval;
+
+			/* '3' is an RMW flag.  */
+			if (do_page_fault(fp, (unsigned long)mem, 3))
+				/* If the do_page_fault() failed, we don't
+				   have anything meaningful to return.
+				   There should be a SIGSEGV pending for
+				   the process.  */
+				return 0xdeadbeef;
+		}
+	}
+}
+
 #else
-#include "sys_m68k_no.c"
-#endif
+
+/* sys_cacheflush -- flush (part of) the processor cache. */
+asmlinkage int
+sys_cacheflush (unsigned long addr, int scope, int cache, unsigned long len)
+{
+	flush_cache_all();
+	return 0;
+}
+
+/* This syscall gets its arguments in A0 (mem), D2 (oldval) and
+   D1 (newval).  */
+asmlinkage int
+sys_atomic_cmpxchg_32(unsigned long newval, int oldval, int d3, int d4, int d5,
+		      unsigned long __user * mem)
+{
+	struct mm_struct *mm = current->mm;
+	unsigned long mem_value;
+
+	down_read(&mm->mmap_sem);
+
+	mem_value = *mem;
+	if (mem_value == oldval)
+		*mem = newval;
+
+	up_read(&mm->mmap_sem);
+	return mem_value;
+}
+
+#endif /* CONFIG_MMU */
+
+asmlinkage int sys_getpagesize(void)
+{
+	return PAGE_SIZE;
+}
+
+/*
+ * Do a system call from kernel instead of calling sys_execve so we
+ * end up with proper pt_regs.
+ */
+int kernel_execve(const char *filename,
+		  const char *const argv[],
+		  const char *const envp[])
+{
+	register long __res asm ("%d0") = __NR_execve;
+	register long __a asm ("%d1") = (long)(filename);
+	register long __b asm ("%d2") = (long)(argv);
+	register long __c asm ("%d3") = (long)(envp);
+	asm volatile ("trap  #0" : "+d" (__res)
+			: "d" (__a), "d" (__b), "d" (__c));
+	return __res;
+}
+
+asmlinkage unsigned long sys_get_thread_area(void)
+{
+	return current_thread_info()->tp_value;
+}
+
+asmlinkage int sys_set_thread_area(unsigned long tp)
+{
+	current_thread_info()->tp_value = tp;
+	return 0;
+}
+
+asmlinkage int sys_atomic_barrier(void)
+{
+	/* no code needed for uniprocs */
+	return 0;
+}
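Both halves of the merged file implement the same user ABI; the nommu variant simply flushes everything. From user space the call looks like any other syscall. A hedged sketch of a caller, e.g. a JIT flushing freshly written code (the constants mirror <asm/cachectl.h>; error handling elided; the wrapper name is made up):

#include <unistd.h>
#include <sys/syscall.h>
#include <asm/cachectl.h>	/* FLUSH_SCOPE_*, FLUSH_CACHE_* */
#include <asm/unistd.h>		/* __NR_cacheflush */

static int demo_flush_code(void *buf, unsigned long len)
{
	/* Arguments follow sys_cacheflush(addr, scope, cache, len). */
	return syscall(__NR_cacheflush, (unsigned long)buf,
		       FLUSH_SCOPE_LINE, FLUSH_CACHE_BOTH, len);
}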
diff --git a/arch/m68k/kernel/sys_m68k_mm.c b/arch/m68k/kernel/sys_m68k_mm.c
deleted file mode 100644
index 3db2e7f902aa..000000000000
--- a/arch/m68k/kernel/sys_m68k_mm.c
+++ /dev/null
@@ -1,546 +0,0 @@
1/*
2 * linux/arch/m68k/kernel/sys_m68k.c
3 *
4 * This file contains various random system calls that
5 * have a non-standard calling sequence on the Linux/m68k
6 * platform.
7 */
8
9#include <linux/capability.h>
10#include <linux/errno.h>
11#include <linux/sched.h>
12#include <linux/mm.h>
13#include <linux/fs.h>
14#include <linux/smp.h>
15#include <linux/sem.h>
16#include <linux/msg.h>
17#include <linux/shm.h>
18#include <linux/stat.h>
19#include <linux/syscalls.h>
20#include <linux/mman.h>
21#include <linux/file.h>
22#include <linux/ipc.h>
23
24#include <asm/setup.h>
25#include <asm/uaccess.h>
26#include <asm/cachectl.h>
27#include <asm/traps.h>
28#include <asm/page.h>
29#include <asm/unistd.h>
30#include <linux/elf.h>
31#include <asm/tlb.h>
32
33asmlinkage int do_page_fault(struct pt_regs *regs, unsigned long address,
34 unsigned long error_code);
35
36asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
37 unsigned long prot, unsigned long flags,
38 unsigned long fd, unsigned long pgoff)
39{
40 /*
41 * This is wrong for sun3 - there PAGE_SIZE is 8Kb,
42 * so we need to shift the argument down by 1; m68k mmap64(3)
43 * (in libc) expects the last argument of mmap2 in 4Kb units.
44 */
45 return sys_mmap_pgoff(addr, len, prot, flags, fd, pgoff);
46}
47
48/* Convert virtual (user) address VADDR to physical address PADDR */
49#define virt_to_phys_040(vaddr) \
50({ \
51 unsigned long _mmusr, _paddr; \
52 \
53 __asm__ __volatile__ (".chip 68040\n\t" \
54 "ptestr (%1)\n\t" \
55 "movec %%mmusr,%0\n\t" \
56 ".chip 68k" \
57 : "=r" (_mmusr) \
58 : "a" (vaddr)); \
59 _paddr = (_mmusr & MMU_R_040) ? (_mmusr & PAGE_MASK) : 0; \
60 _paddr; \
61})
62
63static inline int
64cache_flush_040 (unsigned long addr, int scope, int cache, unsigned long len)
65{
66 unsigned long paddr, i;
67
68 switch (scope)
69 {
70 case FLUSH_SCOPE_ALL:
71 switch (cache)
72 {
73 case FLUSH_CACHE_DATA:
74 /* This nop is needed for some broken versions of the 68040. */
75 __asm__ __volatile__ ("nop\n\t"
76 ".chip 68040\n\t"
77 "cpusha %dc\n\t"
78 ".chip 68k");
79 break;
80 case FLUSH_CACHE_INSN:
81 __asm__ __volatile__ ("nop\n\t"
82 ".chip 68040\n\t"
83 "cpusha %ic\n\t"
84 ".chip 68k");
85 break;
86 default:
87 case FLUSH_CACHE_BOTH:
88 __asm__ __volatile__ ("nop\n\t"
89 ".chip 68040\n\t"
90 "cpusha %bc\n\t"
91 ".chip 68k");
92 break;
93 }
94 break;
95
96 case FLUSH_SCOPE_LINE:
97 /* Find the physical address of the first mapped page in the
98 address range. */
99 if ((paddr = virt_to_phys_040(addr))) {
100 paddr += addr & ~(PAGE_MASK | 15);
101 len = (len + (addr & 15) + 15) >> 4;
102 } else {
103 unsigned long tmp = PAGE_SIZE - (addr & ~PAGE_MASK);
104
105 if (len <= tmp)
106 return 0;
107 addr += tmp;
108 len -= tmp;
109 tmp = PAGE_SIZE;
110 for (;;)
111 {
112 if ((paddr = virt_to_phys_040(addr)))
113 break;
114 if (len <= tmp)
115 return 0;
116 addr += tmp;
117 len -= tmp;
118 }
119 len = (len + 15) >> 4;
120 }
121 i = (PAGE_SIZE - (paddr & ~PAGE_MASK)) >> 4;
122 while (len--)
123 {
124 switch (cache)
125 {
126 case FLUSH_CACHE_DATA:
127 __asm__ __volatile__ ("nop\n\t"
128 ".chip 68040\n\t"
129 "cpushl %%dc,(%0)\n\t"
130 ".chip 68k"
131 : : "a" (paddr));
132 break;
133 case FLUSH_CACHE_INSN:
134 __asm__ __volatile__ ("nop\n\t"
135 ".chip 68040\n\t"
136 "cpushl %%ic,(%0)\n\t"
137 ".chip 68k"
138 : : "a" (paddr));
139 break;
140 default:
141 case FLUSH_CACHE_BOTH:
142 __asm__ __volatile__ ("nop\n\t"
143 ".chip 68040\n\t"
144 "cpushl %%bc,(%0)\n\t"
145 ".chip 68k"
146 : : "a" (paddr));
147 break;
148 }
149 if (!--i && len)
150 {
151 /*
152 * No need to page align here since it is done by
153 * virt_to_phys_040().
154 */
155 addr += PAGE_SIZE;
156 i = PAGE_SIZE / 16;
157 /* Recompute physical address when crossing a page
158 boundary. */
159 for (;;)
160 {
161 if ((paddr = virt_to_phys_040(addr)))
162 break;
163 if (len <= i)
164 return 0;
165 len -= i;
166 addr += PAGE_SIZE;
167 }
168 }
169 else
170 paddr += 16;
171 }
172 break;
173
174 default:
175 case FLUSH_SCOPE_PAGE:
176 len += (addr & ~PAGE_MASK) + (PAGE_SIZE - 1);
177 for (len >>= PAGE_SHIFT; len--; addr += PAGE_SIZE)
178 {
179 if (!(paddr = virt_to_phys_040(addr)))
180 continue;
181 switch (cache)
182 {
183 case FLUSH_CACHE_DATA:
184 __asm__ __volatile__ ("nop\n\t"
185 ".chip 68040\n\t"
186 "cpushp %%dc,(%0)\n\t"
187 ".chip 68k"
188 : : "a" (paddr));
189 break;
190 case FLUSH_CACHE_INSN:
191 __asm__ __volatile__ ("nop\n\t"
192 ".chip 68040\n\t"
193 "cpushp %%ic,(%0)\n\t"
194 ".chip 68k"
195 : : "a" (paddr));
196 break;
197 default:
198 case FLUSH_CACHE_BOTH:
199 __asm__ __volatile__ ("nop\n\t"
200 ".chip 68040\n\t"
201 "cpushp %%bc,(%0)\n\t"
202 ".chip 68k"
203 : : "a" (paddr));
204 break;
205 }
206 }
207 break;
208 }
209 return 0;
210}
211
212#define virt_to_phys_060(vaddr) \
213({ \
214 unsigned long paddr; \
215 __asm__ __volatile__ (".chip 68060\n\t" \
216 "plpar (%0)\n\t" \
217 ".chip 68k" \
218 : "=a" (paddr) \
219 : "0" (vaddr)); \
220 (paddr); /* XXX */ \
221})
222
223static inline int
224cache_flush_060 (unsigned long addr, int scope, int cache, unsigned long len)
225{
226 unsigned long paddr, i;
227
228 /*
229 * 68060 manual says:
230 * cpush %dc : flush DC, remains valid (with our %cacr setup)
231 * cpush %ic : invalidate IC
232 * cpush %bc : flush DC + invalidate IC
233 */
234 switch (scope)
235 {
236 case FLUSH_SCOPE_ALL:
237 switch (cache)
238 {
239 case FLUSH_CACHE_DATA:
240 __asm__ __volatile__ (".chip 68060\n\t"
241 "cpusha %dc\n\t"
242 ".chip 68k");
243 break;
244 case FLUSH_CACHE_INSN:
245 __asm__ __volatile__ (".chip 68060\n\t"
246 "cpusha %ic\n\t"
247 ".chip 68k");
248 break;
249 default:
250 case FLUSH_CACHE_BOTH:
251 __asm__ __volatile__ (".chip 68060\n\t"
252 "cpusha %bc\n\t"
253 ".chip 68k");
254 break;
255 }
256 break;
257
258 case FLUSH_SCOPE_LINE:
259 /* Find the physical address of the first mapped page in the
260 address range. */
261 len += addr & 15;
262 addr &= -16;
263 if (!(paddr = virt_to_phys_060(addr))) {
264 unsigned long tmp = PAGE_SIZE - (addr & ~PAGE_MASK);
265
266 if (len <= tmp)
267 return 0;
268 addr += tmp;
269 len -= tmp;
270 tmp = PAGE_SIZE;
271 for (;;)
272 {
273 if ((paddr = virt_to_phys_060(addr)))
274 break;
275 if (len <= tmp)
276 return 0;
277 addr += tmp;
278 len -= tmp;
279 }
280 }
281 len = (len + 15) >> 4;
282 i = (PAGE_SIZE - (paddr & ~PAGE_MASK)) >> 4;
283 while (len--)
284 {
285 switch (cache)
286 {
287 case FLUSH_CACHE_DATA:
288 __asm__ __volatile__ (".chip 68060\n\t"
289 "cpushl %%dc,(%0)\n\t"
290 ".chip 68k"
291 : : "a" (paddr));
292 break;
293 case FLUSH_CACHE_INSN:
294 __asm__ __volatile__ (".chip 68060\n\t"
295 "cpushl %%ic,(%0)\n\t"
296 ".chip 68k"
297 : : "a" (paddr));
298 break;
299 default:
300 case FLUSH_CACHE_BOTH:
301 __asm__ __volatile__ (".chip 68060\n\t"
302 "cpushl %%bc,(%0)\n\t"
303 ".chip 68k"
304 : : "a" (paddr));
305 break;
306 }
307 if (!--i && len)
308 {
309
310 /*
311 * We just want to jump to the first cache line
312 * in the next page.
313 */
314 addr += PAGE_SIZE;
315 addr &= PAGE_MASK;
316
317 i = PAGE_SIZE / 16;
318 /* Recompute physical address when crossing a page
319 boundary. */
320 for (;;)
321 {
322 if ((paddr = virt_to_phys_060(addr)))
323 break;
324 if (len <= i)
325 return 0;
326 len -= i;
327 addr += PAGE_SIZE;
328 }
329 }
330 else
331 paddr += 16;
332 }
333 break;
334
335 default:
336 case FLUSH_SCOPE_PAGE:
337 len += (addr & ~PAGE_MASK) + (PAGE_SIZE - 1);
338 addr &= PAGE_MASK; /* Workaround for bug in some
339 revisions of the 68060 */
340 for (len >>= PAGE_SHIFT; len--; addr += PAGE_SIZE)
341 {
342 if (!(paddr = virt_to_phys_060(addr)))
343 continue;
344 switch (cache)
345 {
346 case FLUSH_CACHE_DATA:
347 __asm__ __volatile__ (".chip 68060\n\t"
348 "cpushp %%dc,(%0)\n\t"
349 ".chip 68k"
350 : : "a" (paddr));
351 break;
352 case FLUSH_CACHE_INSN:
353 __asm__ __volatile__ (".chip 68060\n\t"
354 "cpushp %%ic,(%0)\n\t"
355 ".chip 68k"
356 : : "a" (paddr));
357 break;
358 default:
359 case FLUSH_CACHE_BOTH:
360 __asm__ __volatile__ (".chip 68060\n\t"
361 "cpushp %%bc,(%0)\n\t"
362 ".chip 68k"
363 : : "a" (paddr));
364 break;
365 }
366 }
367 break;
368 }
369 return 0;
370}
371
372/* sys_cacheflush -- flush (part of) the processor cache. */
373asmlinkage int
374sys_cacheflush (unsigned long addr, int scope, int cache, unsigned long len)
375{
376 struct vm_area_struct *vma;
377 int ret = -EINVAL;
378
379 if (scope < FLUSH_SCOPE_LINE || scope > FLUSH_SCOPE_ALL ||
380 cache & ~FLUSH_CACHE_BOTH)
381 goto out;
382
383 if (scope == FLUSH_SCOPE_ALL) {
384 /* Only the superuser may explicitly flush the whole cache. */
385 ret = -EPERM;
386 if (!capable(CAP_SYS_ADMIN))
387 goto out;
388 } else {
389 /*
390 * Verify that the specified address region actually belongs
391 * to this process.
392 */
393 vma = find_vma (current->mm, addr);
394 ret = -EINVAL;
395 /* Check for overflow. */
396 if (addr + len < addr)
397 goto out;
398 if (vma == NULL || addr < vma->vm_start || addr + len > vma->vm_end)
399 goto out;
400 }
401
402 if (CPU_IS_020_OR_030) {
403 if (scope == FLUSH_SCOPE_LINE && len < 256) {
404 unsigned long cacr;
405 __asm__ ("movec %%cacr, %0" : "=r" (cacr));
406 if (cache & FLUSH_CACHE_INSN)
407 cacr |= 4;
408 if (cache & FLUSH_CACHE_DATA)
409 cacr |= 0x400;
410 len >>= 2;
411 while (len--) {
412 __asm__ __volatile__ ("movec %1, %%caar\n\t"
413 "movec %0, %%cacr"
414 : /* no outputs */
415 : "r" (cacr), "r" (addr));
416 addr += 4;
417 }
418 } else {
419 /* Flush the whole cache, even if page granularity requested. */
420 unsigned long cacr;
421 __asm__ ("movec %%cacr, %0" : "=r" (cacr));
422 if (cache & FLUSH_CACHE_INSN)
423 cacr |= 8;
424 if (cache & FLUSH_CACHE_DATA)
425 cacr |= 0x800;
426 __asm__ __volatile__ ("movec %0, %%cacr" : : "r" (cacr));
427 }
428 ret = 0;
429 goto out;
430 } else {
431 /*
432 * 040 or 060: don't blindly trust 'scope', someone could
433 * try to flush a few megs of memory.
434 */
435
436 if (len>=3*PAGE_SIZE && scope<FLUSH_SCOPE_PAGE)
437 scope=FLUSH_SCOPE_PAGE;
438 if (len>=10*PAGE_SIZE && scope<FLUSH_SCOPE_ALL)
439 scope=FLUSH_SCOPE_ALL;
440 if (CPU_IS_040) {
441 ret = cache_flush_040 (addr, scope, cache, len);
442 } else if (CPU_IS_060) {
443 ret = cache_flush_060 (addr, scope, cache, len);
444 }
445 }
446out:
447 return ret;
448}
449
450asmlinkage int sys_getpagesize(void)
451{
452 return PAGE_SIZE;
453}
454
455/*
456 * Do a system call from kernel instead of calling sys_execve so we
457 * end up with proper pt_regs.
458 */
459int kernel_execve(const char *filename,
460 const char *const argv[],
461 const char *const envp[])
462{
463 register long __res asm ("%d0") = __NR_execve;
464 register long __a asm ("%d1") = (long)(filename);
465 register long __b asm ("%d2") = (long)(argv);
466 register long __c asm ("%d3") = (long)(envp);
467 asm volatile ("trap #0" : "+d" (__res)
468 : "d" (__a), "d" (__b), "d" (__c));
469 return __res;
470}
471
472asmlinkage unsigned long sys_get_thread_area(void)
473{
474 return current_thread_info()->tp_value;
475}
476
477asmlinkage int sys_set_thread_area(unsigned long tp)
478{
479 current_thread_info()->tp_value = tp;
480 return 0;
481}
482
483/* This syscall gets its arguments in A0 (mem), D2 (oldval) and
484 D1 (newval). */
485asmlinkage int
486sys_atomic_cmpxchg_32(unsigned long newval, int oldval, int d3, int d4, int d5,
487 unsigned long __user * mem)
488{
489 /* This was borrowed from ARM's implementation. */
490 for (;;) {
491 struct mm_struct *mm = current->mm;
492 pgd_t *pgd;
493 pmd_t *pmd;
494 pte_t *pte;
495 spinlock_t *ptl;
496 unsigned long mem_value;
497
498 down_read(&mm->mmap_sem);
499 pgd = pgd_offset(mm, (unsigned long)mem);
500 if (!pgd_present(*pgd))
501 goto bad_access;
502 pmd = pmd_offset(pgd, (unsigned long)mem);
503 if (!pmd_present(*pmd))
504 goto bad_access;
505 pte = pte_offset_map_lock(mm, pmd, (unsigned long)mem, &ptl);
506 if (!pte_present(*pte) || !pte_dirty(*pte)
507 || !pte_write(*pte)) {
508 pte_unmap_unlock(pte, ptl);
509 goto bad_access;
510 }
511
512 mem_value = *mem;
513 if (mem_value == oldval)
514 *mem = newval;
515
516 pte_unmap_unlock(pte, ptl);
517 up_read(&mm->mmap_sem);
518 return mem_value;
519
520 bad_access:
521 up_read(&mm->mmap_sem);
522 /* This is not necessarily a bad access, we can get here if
523 a memory we're trying to write to should be copied-on-write.
524 Make the kernel do the necessary page stuff, then re-iterate.
525 Simulate a write access fault to do that. */
526 {
527 /* The first argument of the function corresponds to
528 D1, which is the first field of struct pt_regs. */
529 struct pt_regs *fp = (struct pt_regs *)&newval;
530
531 /* '3' is an RMW flag. */
532 if (do_page_fault(fp, (unsigned long)mem, 3))
533 /* If the do_page_fault() failed, we don't
534 have anything meaningful to return.
535 There should be a SIGSEGV pending for
536 the process. */
537 return 0xdeadbeef;
538 }
539 }
540}
541
542asmlinkage int sys_atomic_barrier(void)
543{
544 /* no code needed for uniprocs */
545 return 0;
546}
diff --git a/arch/m68k/kernel/sys_m68k_no.c b/arch/m68k/kernel/sys_m68k_no.c
deleted file mode 100644
index 68488ae47f0a..000000000000
--- a/arch/m68k/kernel/sys_m68k_no.c
+++ /dev/null
@@ -1,94 +0,0 @@
1/*
2 * linux/arch/m68knommu/kernel/sys_m68k.c
3 *
4 * This file contains various random system calls that
5 * have a non-standard calling sequence on the Linux/m68k
6 * platform.
7 */
8
9#include <linux/errno.h>
10#include <linux/sched.h>
11#include <linux/mm.h>
12#include <linux/smp.h>
13#include <linux/sem.h>
14#include <linux/msg.h>
15#include <linux/shm.h>
16#include <linux/stat.h>
17#include <linux/syscalls.h>
18#include <linux/mman.h>
19#include <linux/file.h>
20#include <linux/ipc.h>
21#include <linux/fs.h>
22
23#include <asm/setup.h>
24#include <asm/uaccess.h>
25#include <asm/cachectl.h>
26#include <asm/traps.h>
27#include <asm/cacheflush.h>
28#include <asm/unistd.h>
29
30/* sys_cacheflush -- flush (part of) the processor cache. */
31asmlinkage int
32sys_cacheflush (unsigned long addr, int scope, int cache, unsigned long len)
33{
34 flush_cache_all();
35 return(0);
36}
37
38asmlinkage int sys_getpagesize(void)
39{
40 return PAGE_SIZE;
41}
42
43/*
44 * Do a system call from kernel instead of calling sys_execve so we
45 * end up with proper pt_regs.
46 */
47int kernel_execve(const char *filename,
48 const char *const argv[],
49 const char *const envp[])
50{
51 register long __res asm ("%d0") = __NR_execve;
52 register long __a asm ("%d1") = (long)(filename);
53 register long __b asm ("%d2") = (long)(argv);
54 register long __c asm ("%d3") = (long)(envp);
55 asm volatile ("trap #0" : "+d" (__res)
56 : "d" (__a), "d" (__b), "d" (__c));
57 return __res;
58}
59
60asmlinkage unsigned long sys_get_thread_area(void)
61{
62 return current_thread_info()->tp_value;
63}
64
65asmlinkage int sys_set_thread_area(unsigned long tp)
66{
67 current_thread_info()->tp_value = tp;
68 return 0;
69}
70
71/* This syscall gets its arguments in A0 (mem), D2 (oldval) and
72 D1 (newval). */
73asmlinkage int
74sys_atomic_cmpxchg_32(unsigned long newval, int oldval, int d3, int d4, int d5,
75 unsigned long __user * mem)
76{
77 struct mm_struct *mm = current->mm;
78 unsigned long mem_value;
79
80 down_read(&mm->mmap_sem);
81
82 mem_value = *mem;
83 if (mem_value == oldval)
84 *mem = newval;
85
86 up_read(&mm->mmap_sem);
87 return mem_value;
88}
89
90asmlinkage int sys_atomic_barrier(void)
91{
92 /* no code needed for uniprocs */
93 return 0;
94}
diff --git a/arch/m68k/kernel/syscalltable.S b/arch/m68k/kernel/syscalltable.S
index 5909e392cb1e..6f7b09122a00 100644
--- a/arch/m68k/kernel/syscalltable.S
+++ b/arch/m68k/kernel/syscalltable.S
@@ -11,7 +11,6 @@
  *  Linux/m68k support by Hamish Macdonald
  */
 
-#include <linux/sys.h>
 #include <linux/linkage.h>
 
 #ifndef CONFIG_MMU
diff --git a/arch/m68k/lib/Makefile b/arch/m68k/lib/Makefile
index 1f95881d8437..df421e501436 100644
--- a/arch/m68k/lib/Makefile
+++ b/arch/m68k/lib/Makefile
@@ -1,5 +1,14 @@
+
+#
+# Makefile for m68k-specific library files..
+#
+
+lib-y	:= ashldi3.o ashrdi3.o lshrdi3.o muldi3.o \
+	   memcpy.o memset.o memmove.o
+
 ifdef CONFIG_MMU
-include arch/m68k/lib/Makefile_mm
+lib-y	+= string.o uaccess.o checksum_mm.o
 else
-include arch/m68k/lib/Makefile_no
+lib-y	+= mulsi3.o divsi3.o udivsi3.o modsi3.o umodsi3.o delay.o checksum_no.o
 endif
+
diff --git a/arch/m68k/lib/Makefile_mm b/arch/m68k/lib/Makefile_mm
deleted file mode 100644
index af9abf8d9d98..000000000000
--- a/arch/m68k/lib/Makefile_mm
+++ /dev/null
@@ -1,6 +0,0 @@
1#
2# Makefile for m68k-specific library files..
3#
4
5lib-y := ashldi3.o ashrdi3.o lshrdi3.o muldi3.o \
6 checksum.o string.o uaccess.o
diff --git a/arch/m68k/lib/Makefile_no b/arch/m68k/lib/Makefile_no
deleted file mode 100644
index 32d852e586d7..000000000000
--- a/arch/m68k/lib/Makefile_no
+++ /dev/null
@@ -1,7 +0,0 @@
1#
2# Makefile for m68knommu specific library files..
3#
4
5lib-y := ashldi3.o ashrdi3.o lshrdi3.o \
6 muldi3.o mulsi3.o divsi3.o udivsi3.o modsi3.o umodsi3.o \
7 checksum.o memcpy.o memmove.o memset.o delay.o
diff --git a/arch/m68k/lib/checksum.c b/arch/m68k/lib/checksum.c
deleted file mode 100644
index 1297536060de..000000000000
--- a/arch/m68k/lib/checksum.c
+++ /dev/null
@@ -1,5 +0,0 @@
1#ifdef CONFIG_MMU
2#include "checksum_mm.c"
3#else
4#include "checksum_no.c"
5#endif
diff --git a/arch/m68k/lib/checksum_no.c b/arch/m68k/lib/checksum_no.c
index eccf25d3d73e..e4c6354da765 100644
--- a/arch/m68k/lib/checksum_no.c
+++ b/arch/m68k/lib/checksum_no.c
@@ -101,6 +101,7 @@ __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
101{ 101{
102 return (__force __sum16)~do_csum(iph,ihl*4); 102 return (__force __sum16)~do_csum(iph,ihl*4);
103} 103}
104EXPORT_SYMBOL(ip_fast_csum);
104#endif 105#endif
105 106
106/* 107/*
@@ -140,6 +141,7 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
140 memcpy(dst, (__force const void *)src, len); 141 memcpy(dst, (__force const void *)src, len);
141 return csum_partial(dst, len, sum); 142 return csum_partial(dst, len, sum);
142} 143}
144EXPORT_SYMBOL(csum_partial_copy_from_user);
143 145
144/* 146/*
145 * copy from ds while checksumming, otherwise like csum_partial 147 * copy from ds while checksumming, otherwise like csum_partial
@@ -151,3 +153,4 @@ csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
151 memcpy(dst, src, len); 153 memcpy(dst, src, len);
152 return csum_partial(dst, len, sum); 154 return csum_partial(dst, len, sum);
153} 155}
156EXPORT_SYMBOL(csum_partial_copy_nocheck);
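
The EXPORT_SYMBOL() lines added above make the nommu checksum helpers visible to loadable modules, matching the exports the MMU flavour already had. As a hypothetical illustration of what that enables (module name and usage invented for the example):

    /* Hypothetical module: links against the newly exported ip_fast_csum(). */
    #include <linux/module.h>
    #include <linux/ip.h>
    #include <asm/checksum.h>

    static int __init csum_demo_init(void)
    {
            struct iphdr iph = { .version = 4, .ihl = 5, .ttl = 64 };

            /* .check starts at zero, so this checksums the 20-byte header. */
            iph.check = ip_fast_csum(&iph, iph.ihl);
            pr_info("csum_demo: checksum %#x\n", (unsigned int)(__force u16)iph.check);
            return 0;
    }

    static void __exit csum_demo_exit(void)
    {
    }

    module_init(csum_demo_init);
    module_exit(csum_demo_exit);
    MODULE_LICENSE("GPL");
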
diff --git a/arch/m68k/lib/memcpy.c b/arch/m68k/lib/memcpy.c
index b50dbcad4746..62182c81e91c 100644
--- a/arch/m68k/lib/memcpy.c
+++ b/arch/m68k/lib/memcpy.c
@@ -1,62 +1,80 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file COPYING in the main directory of this archive
4 * for more details.
5 */
1 6
2#include <linux/types.h> 7#include <linux/module.h>
8#include <linux/string.h>
3 9
4void * memcpy(void * to, const void * from, size_t n) 10void *memcpy(void *to, const void *from, size_t n)
5{ 11{
6#ifdef CONFIG_COLDFIRE 12 void *xto = to;
7 void *xto = to; 13 size_t temp, temp1;
8 size_t temp;
9 14
10 if (!n) 15 if (!n)
11 return xto; 16 return xto;
12 if ((long) to & 1) 17 if ((long)to & 1) {
13 { 18 char *cto = to;
14 char *cto = to; 19 const char *cfrom = from;
15 const char *cfrom = from; 20 *cto++ = *cfrom++;
16 *cto++ = *cfrom++; 21 to = cto;
17 to = cto; 22 from = cfrom;
18 from = cfrom; 23 n--;
19 n--; 24 }
20 } 25 if (n > 2 && (long)to & 2) {
21 if (n > 2 && (long) to & 2) 26 short *sto = to;
22 { 27 const short *sfrom = from;
23 short *sto = to; 28 *sto++ = *sfrom++;
24 const short *sfrom = from; 29 to = sto;
25 *sto++ = *sfrom++; 30 from = sfrom;
26 to = sto; 31 n -= 2;
27 from = sfrom; 32 }
28 n -= 2; 33 temp = n >> 2;
29 } 34 if (temp) {
30 temp = n >> 2; 35 long *lto = to;
31 if (temp) 36 const long *lfrom = from;
32 { 37#if defined(__mc68020__) || defined(__mc68030__) || \
33 long *lto = to; 38 defined(__mc68040__) || defined(__mc68060__) || defined(__mcpu32__)
34 const long *lfrom = from; 39 asm volatile (
35 for (; temp; temp--) 40 " movel %2,%3\n"
36 *lto++ = *lfrom++; 41 " andw #7,%3\n"
37 to = lto; 42 " lsrl #3,%2\n"
38 from = lfrom; 43 " negw %3\n"
39 } 44 " jmp %%pc@(1f,%3:w:2)\n"
40 if (n & 2) 45 "4: movel %0@+,%1@+\n"
41 { 46 " movel %0@+,%1@+\n"
42 short *sto = to; 47 " movel %0@+,%1@+\n"
43 const short *sfrom = from; 48 " movel %0@+,%1@+\n"
44 *sto++ = *sfrom++; 49 " movel %0@+,%1@+\n"
45 to = sto; 50 " movel %0@+,%1@+\n"
46 from = sfrom; 51 " movel %0@+,%1@+\n"
47 } 52 " movel %0@+,%1@+\n"
48 if (n & 1) 53 "1: dbra %2,4b\n"
49 { 54 " clrw %2\n"
50 char *cto = to; 55 " subql #1,%2\n"
51 const char *cfrom = from; 56 " jpl 4b"
52 *cto = *cfrom; 57 : "=a" (lfrom), "=a" (lto), "=d" (temp), "=&d" (temp1)
53 } 58 : "0" (lfrom), "1" (lto), "2" (temp));
54 return xto;
55#else 59#else
56 const char *c_from = from; 60 for (; temp; temp--)
57 char *c_to = to; 61 *lto++ = *lfrom++;
58 while (n-- > 0)
59 *c_to++ = *c_from++;
60 return((void *) to);
61#endif 62#endif
63 to = lto;
64 from = lfrom;
65 }
66 if (n & 2) {
67 short *sto = to;
68 const short *sfrom = from;
69 *sto++ = *sfrom++;
70 to = sto;
71 from = sfrom;
72 }
73 if (n & 1) {
74 char *cto = to;
75 const char *cfrom = from;
76 *cto = *cfrom;
77 }
78 return xto;
62} 79}
80EXPORT_SYMBOL(memcpy);
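
The rewritten memcpy() now has a single shape for all CPUs: align the destination to 2 and then 4 bytes, move the bulk as longwords, and mop up the 2- and 1-byte tail. On 020 and later, the longword stage uses the inline assembly's computed jump: the longword count modulo 8 selects the entry point into the eight movel instructions, so no separate fix-up loop is needed. The portable-C analogue of that trick is Duff's device; a sketch of the idea, not the kernel's implementation:

    #include <stddef.h>

    /* Duff's device: enter the 8-way unrolled body part-way through to
     * absorb the remainder, like the movel block in the asm above. */
    static void copy_longs(long *to, const long *from, size_t nlongs)
    {
            size_t n = (nlongs + 7) / 8;    /* passes through the unrolled body */

            if (!nlongs)
                    return;
            switch (nlongs % 8) {
            case 0: do {    *to++ = *from++;
            case 7:         *to++ = *from++;
            case 6:         *to++ = *from++;
            case 5:         *to++ = *from++;
            case 4:         *to++ = *from++;
            case 3:         *to++ = *from++;
            case 2:         *to++ = *from++;
            case 1:         *to++ = *from++;
                    } while (--n > 0);
            }
    }
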
diff --git a/arch/m68k/lib/memmove.c b/arch/m68k/lib/memmove.c
index b3dcfe9dab7e..6519f7f349f6 100644
--- a/arch/m68k/lib/memmove.c
+++ b/arch/m68k/lib/memmove.c
@@ -4,8 +4,6 @@
4 * for more details. 4 * for more details.
5 */ 5 */
6 6
7#define __IN_STRING_C
8
9#include <linux/module.h> 7#include <linux/module.h>
10#include <linux/string.h> 8#include <linux/string.h>
11 9
diff --git a/arch/m68k/lib/memset.c b/arch/m68k/lib/memset.c
index 1389bf455633..f649e6a2e644 100644
--- a/arch/m68k/lib/memset.c
+++ b/arch/m68k/lib/memset.c
@@ -1,47 +1,75 @@
1#include <linux/types.h> 1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file COPYING in the main directory of this archive
4 * for more details.
5 */
2 6
3void * memset(void * s, int c, size_t count) 7#include <linux/module.h>
8#include <linux/string.h>
9
10void *memset(void *s, int c, size_t count)
4{ 11{
5 void *xs = s; 12 void *xs = s;
6 size_t temp; 13 size_t temp;
7 14
8 if (!count) 15 if (!count)
9 return xs; 16 return xs;
10 c &= 0xff; 17 c &= 0xff;
11 c |= c << 8; 18 c |= c << 8;
12 c |= c << 16; 19 c |= c << 16;
13 if ((long) s & 1) 20 if ((long)s & 1) {
14 { 21 char *cs = s;
15 char *cs = s; 22 *cs++ = c;
16 *cs++ = c; 23 s = cs;
17 s = cs; 24 count--;
18 count--; 25 }
19 } 26 if (count > 2 && (long)s & 2) {
20 if (count > 2 && (long) s & 2) 27 short *ss = s;
21 { 28 *ss++ = c;
22 short *ss = s; 29 s = ss;
23 *ss++ = c; 30 count -= 2;
24 s = ss; 31 }
25 count -= 2; 32 temp = count >> 2;
26 } 33 if (temp) {
27 temp = count >> 2; 34 long *ls = s;
28 if (temp) 35#if defined(__mc68020__) || defined(__mc68030__) || \
29 { 36 defined(__mc68040__) || defined(__mc68060__) || defined(__mcpu32__)
30 long *ls = s; 37 size_t temp1;
31 for (; temp; temp--) 38 asm volatile (
32 *ls++ = c; 39 " movel %1,%2\n"
33 s = ls; 40 " andw #7,%2\n"
34 } 41 " lsrl #3,%1\n"
35 if (count & 2) 42 " negw %2\n"
36 { 43 " jmp %%pc@(2f,%2:w:2)\n"
37 short *ss = s; 44 "1: movel %3,%0@+\n"
38 *ss++ = c; 45 " movel %3,%0@+\n"
39 s = ss; 46 " movel %3,%0@+\n"
40 } 47 " movel %3,%0@+\n"
41 if (count & 1) 48 " movel %3,%0@+\n"
42 { 49 " movel %3,%0@+\n"
43 char *cs = s; 50 " movel %3,%0@+\n"
44 *cs = c; 51 " movel %3,%0@+\n"
45 } 52 "2: dbra %1,1b\n"
46 return xs; 53 " clrw %1\n"
54 " subql #1,%1\n"
55 " jpl 1b"
56 : "=a" (ls), "=d" (temp), "=&d" (temp1)
57 : "d" (c), "0" (ls), "1" (temp));
58#else
59 for (; temp; temp--)
60 *ls++ = c;
61#endif
62 s = ls;
63 }
64 if (count & 2) {
65 short *ss = s;
66 *ss++ = c;
67 s = ss;
68 }
69 if (count & 1) {
70 char *cs = s;
71 *cs = c;
72 }
73 return xs;
47} 74}
75EXPORT_SYMBOL(memset);
diff --git a/arch/m68k/lib/muldi3.c b/arch/m68k/lib/muldi3.c
index 16e0eb338ee0..079bafca073e 100644
--- a/arch/m68k/lib/muldi3.c
+++ b/arch/m68k/lib/muldi3.c
@@ -1,5 +1,98 @@
1#ifdef CONFIG_MMU 1/* muldi3.c extracted from gcc-2.7.2.3/libgcc2.c and
2#include "muldi3_mm.c" 2 gcc-2.7.2.3/longlong.h which is: */
3/* Copyright (C) 1989, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
4
5This file is part of GNU CC.
6
7GNU CC is free software; you can redistribute it and/or modify
8it under the terms of the GNU General Public License as published by
9the Free Software Foundation; either version 2, or (at your option)
10any later version.
11
12GNU CC is distributed in the hope that it will be useful,
13but WITHOUT ANY WARRANTY; without even the implied warranty of
14MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15GNU General Public License for more details.
16
17You should have received a copy of the GNU General Public License
18along with GNU CC; see the file COPYING. If not, write to
19the Free Software Foundation, 59 Temple Place - Suite 330,
20Boston, MA 02111-1307, USA. */
21
22#if defined(__mc68020__) || defined(__mc68030__) || \
23 defined(__mc68040__) || defined(__mc68060__) || defined(__mcpu32__)
24
25#define umul_ppmm(w1, w0, u, v) \
26 __asm__ ("mulu%.l %3,%1:%0" \
27 : "=d" ((USItype)(w0)), \
28 "=d" ((USItype)(w1)) \
29 : "%0" ((USItype)(u)), \
30 "dmi" ((USItype)(v)))
31
3#else 32#else
4#include "muldi3_no.c" 33
34#define SI_TYPE_SIZE 32
35#define __BITS4 (SI_TYPE_SIZE / 4)
36#define __ll_B (1L << (SI_TYPE_SIZE / 2))
37#define __ll_lowpart(t) ((USItype) (t) % __ll_B)
38#define __ll_highpart(t) ((USItype) (t) / __ll_B)
39
40#define umul_ppmm(w1, w0, u, v) \
41 do { \
42 USItype __x0, __x1, __x2, __x3; \
43 USItype __ul, __vl, __uh, __vh; \
44 \
45 __ul = __ll_lowpart (u); \
46 __uh = __ll_highpart (u); \
47 __vl = __ll_lowpart (v); \
48 __vh = __ll_highpart (v); \
49 \
50 __x0 = (USItype) __ul * __vl; \
51 __x1 = (USItype) __ul * __vh; \
52 __x2 = (USItype) __uh * __vl; \
53 __x3 = (USItype) __uh * __vh; \
54 \
55 __x1 += __ll_highpart (__x0);/* this can't give carry */ \
56 __x1 += __x2; /* but this indeed can */ \
57 if (__x1 < __x2) /* did we get it? */ \
58 __x3 += __ll_B; /* yes, add it in the proper pos. */ \
59 \
60 (w1) = __x3 + __ll_highpart (__x1); \
61 (w0) = __ll_lowpart (__x1) * __ll_B + __ll_lowpart (__x0); \
62 } while (0)
63
5#endif 64#endif
65
66#define __umulsidi3(u, v) \
67 ({DIunion __w; \
68 umul_ppmm (__w.s.high, __w.s.low, u, v); \
69 __w.ll; })
70
71typedef int SItype __attribute__ ((mode (SI)));
72typedef unsigned int USItype __attribute__ ((mode (SI)));
73typedef int DItype __attribute__ ((mode (DI)));
74typedef int word_type __attribute__ ((mode (__word__)));
75
76struct DIstruct {SItype high, low;};
77
78typedef union
79{
80 struct DIstruct s;
81 DItype ll;
82} DIunion;
83
84DItype
85__muldi3 (DItype u, DItype v)
86{
87 DIunion w;
88 DIunion uu, vv;
89
90 uu.ll = u,
91 vv.ll = v;
92
93 w.ll = __umulsidi3 (uu.s.low, vv.s.low);
94 w.s.high += ((USItype) uu.s.low * (USItype) vv.s.high
95 + (USItype) uu.s.high * (USItype) vv.s.low);
96
97 return w.ll;
98}
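
The merged muldi3.c keeps the single-instruction 32x32->64 mulu.l for the 020/030/040/060/CPU32 case and falls back to schoolbook multiplication on 16-bit halves everywhere else, folding the one possible carry from the cross terms into the high word. A standalone check of that decomposition (plain C, helper name invented for the example):

    #include <assert.h>
    #include <stdint.h>

    /* 32x32 -> 64 multiply via 16-bit halves, as in the generic
     * umul_ppmm() above. */
    static uint64_t mul32x32(uint32_t u, uint32_t v)
    {
            uint32_t ul = u & 0xffff, uh = u >> 16;
            uint32_t vl = v & 0xffff, vh = v >> 16;
            uint32_t x0 = ul * vl, x1 = ul * vh, x2 = uh * vl, x3 = uh * vh;
            uint32_t w1, w0;

            x1 += x0 >> 16;         /* cannot carry */
            x1 += x2;               /* can carry ... */
            if (x1 < x2)
                    x3 += 0x10000;  /* ... so add it in the proper position */
            w1 = x3 + (x1 >> 16);
            w0 = (x1 << 16) + (x0 & 0xffff);
            return ((uint64_t)w1 << 32) | w0;
    }

    int main(void)
    {
            assert(mul32x32(0xdeadbeef, 0xcafebabe) ==
                   (uint64_t)0xdeadbeef * 0xcafebabe);
            return 0;
    }
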
diff --git a/arch/m68k/lib/muldi3_mm.c b/arch/m68k/lib/muldi3_mm.c
deleted file mode 100644
index be4f275649e3..000000000000
--- a/arch/m68k/lib/muldi3_mm.c
+++ /dev/null
@@ -1,63 +0,0 @@
1/* muldi3.c extracted from gcc-2.7.2.3/libgcc2.c and
2 gcc-2.7.2.3/longlong.h which is: */
3/* Copyright (C) 1989, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
4
5This file is part of GNU CC.
6
7GNU CC is free software; you can redistribute it and/or modify
8it under the terms of the GNU General Public License as published by
9the Free Software Foundation; either version 2, or (at your option)
10any later version.
11
12GNU CC is distributed in the hope that it will be useful,
13but WITHOUT ANY WARRANTY; without even the implied warranty of
14MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15GNU General Public License for more details.
16
17You should have received a copy of the GNU General Public License
18along with GNU CC; see the file COPYING. If not, write to
19the Free Software Foundation, 59 Temple Place - Suite 330,
20Boston, MA 02111-1307, USA. */
21
22#define BITS_PER_UNIT 8
23
24#define umul_ppmm(w1, w0, u, v) \
25 __asm__ ("mulu%.l %3,%1:%0" \
26 : "=d" ((USItype)(w0)), \
27 "=d" ((USItype)(w1)) \
28 : "%0" ((USItype)(u)), \
29 "dmi" ((USItype)(v)))
30
31#define __umulsidi3(u, v) \
32 ({DIunion __w; \
33 umul_ppmm (__w.s.high, __w.s.low, u, v); \
34 __w.ll; })
35
36typedef int SItype __attribute__ ((mode (SI)));
37typedef unsigned int USItype __attribute__ ((mode (SI)));
38typedef int DItype __attribute__ ((mode (DI)));
39typedef int word_type __attribute__ ((mode (__word__)));
40
41struct DIstruct {SItype high, low;};
42
43typedef union
44{
45 struct DIstruct s;
46 DItype ll;
47} DIunion;
48
49DItype
50__muldi3 (DItype u, DItype v)
51{
52 DIunion w;
53 DIunion uu, vv;
54
55 uu.ll = u,
56 vv.ll = v;
57
58 w.ll = __umulsidi3 (uu.s.low, vv.s.low);
59 w.s.high += ((USItype) uu.s.low * (USItype) vv.s.high
60 + (USItype) uu.s.high * (USItype) vv.s.low);
61
62 return w.ll;
63}
diff --git a/arch/m68k/lib/muldi3_no.c b/arch/m68k/lib/muldi3_no.c
deleted file mode 100644
index 34af72c30303..000000000000
--- a/arch/m68k/lib/muldi3_no.c
+++ /dev/null
@@ -1,86 +0,0 @@
1/* muldi3.c extracted from gcc-2.7.2.3/libgcc2.c and
2 gcc-2.7.2.3/longlong.h which is: */
3/* Copyright (C) 1989, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
4
5This file is part of GNU CC.
6
7GNU CC is free software; you can redistribute it and/or modify
8it under the terms of the GNU General Public License as published by
9the Free Software Foundation; either version 2, or (at your option)
10any later version.
11
12GNU CC is distributed in the hope that it will be useful,
13but WITHOUT ANY WARRANTY; without even the implied warranty of
14MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15GNU General Public License for more details.
16
17You should have received a copy of the GNU General Public License
18along with GNU CC; see the file COPYING. If not, write to
19the Free Software Foundation, 59 Temple Place - Suite 330,
20Boston, MA 02111-1307, USA. */
21
22#define BITS_PER_UNIT 8
23#define SI_TYPE_SIZE 32
24
25#define __BITS4 (SI_TYPE_SIZE / 4)
26#define __ll_B (1L << (SI_TYPE_SIZE / 2))
27#define __ll_lowpart(t) ((USItype) (t) % __ll_B)
28#define __ll_highpart(t) ((USItype) (t) / __ll_B)
29
30#define umul_ppmm(w1, w0, u, v) \
31 do { \
32 USItype __x0, __x1, __x2, __x3; \
33 USItype __ul, __vl, __uh, __vh; \
34 \
35 __ul = __ll_lowpart (u); \
36 __uh = __ll_highpart (u); \
37 __vl = __ll_lowpart (v); \
38 __vh = __ll_highpart (v); \
39 \
40 __x0 = (USItype) __ul * __vl; \
41 __x1 = (USItype) __ul * __vh; \
42 __x2 = (USItype) __uh * __vl; \
43 __x3 = (USItype) __uh * __vh; \
44 \
45 __x1 += __ll_highpart (__x0);/* this can't give carry */ \
46 __x1 += __x2; /* but this indeed can */ \
47 if (__x1 < __x2) /* did we get it? */ \
48 __x3 += __ll_B; /* yes, add it in the proper pos. */ \
49 \
50 (w1) = __x3 + __ll_highpart (__x1); \
51 (w0) = __ll_lowpart (__x1) * __ll_B + __ll_lowpart (__x0); \
52 } while (0)
53
54#define __umulsidi3(u, v) \
55 ({DIunion __w; \
56 umul_ppmm (__w.s.high, __w.s.low, u, v); \
57 __w.ll; })
58
59typedef int SItype __attribute__ ((mode (SI)));
60typedef unsigned int USItype __attribute__ ((mode (SI)));
61typedef int DItype __attribute__ ((mode (DI)));
62typedef int word_type __attribute__ ((mode (__word__)));
63
64struct DIstruct {SItype high, low;};
65
66typedef union
67{
68 struct DIstruct s;
69 DItype ll;
70} DIunion;
71
72DItype
73__muldi3 (DItype u, DItype v)
74{
75 DIunion w;
76 DIunion uu, vv;
77
78 uu.ll = u,
79 vv.ll = v;
80
81 w.ll = __umulsidi3 (uu.s.low, vv.s.low);
82 w.s.high += ((USItype) uu.s.low * (USItype) vv.s.high
83 + (USItype) uu.s.high * (USItype) vv.s.low);
84
85 return w.ll;
86}
diff --git a/arch/m68k/lib/string.c b/arch/m68k/lib/string.c
index d399c5f25636..b9a57abfad08 100644
--- a/arch/m68k/lib/string.c
+++ b/arch/m68k/lib/string.c
@@ -20,226 +20,3 @@ char *strcat(char *dest, const char *src)
20 return __kernel_strcpy(dest + __kernel_strlen(dest), src); 20 return __kernel_strcpy(dest + __kernel_strlen(dest), src);
21} 21}
22EXPORT_SYMBOL(strcat); 22EXPORT_SYMBOL(strcat);
23
24void *memset(void *s, int c, size_t count)
25{
26 void *xs = s;
27 size_t temp, temp1;
28
29 if (!count)
30 return xs;
31 c &= 0xff;
32 c |= c << 8;
33 c |= c << 16;
34 if ((long)s & 1) {
35 char *cs = s;
36 *cs++ = c;
37 s = cs;
38 count--;
39 }
40 if (count > 2 && (long)s & 2) {
41 short *ss = s;
42 *ss++ = c;
43 s = ss;
44 count -= 2;
45 }
46 temp = count >> 2;
47 if (temp) {
48 long *ls = s;
49
50 asm volatile (
51 " movel %1,%2\n"
52 " andw #7,%2\n"
53 " lsrl #3,%1\n"
54 " negw %2\n"
55 " jmp %%pc@(2f,%2:w:2)\n"
56 "1: movel %3,%0@+\n"
57 " movel %3,%0@+\n"
58 " movel %3,%0@+\n"
59 " movel %3,%0@+\n"
60 " movel %3,%0@+\n"
61 " movel %3,%0@+\n"
62 " movel %3,%0@+\n"
63 " movel %3,%0@+\n"
64 "2: dbra %1,1b\n"
65 " clrw %1\n"
66 " subql #1,%1\n"
67 " jpl 1b"
68 : "=a" (ls), "=d" (temp), "=&d" (temp1)
69 : "d" (c), "0" (ls), "1" (temp));
70 s = ls;
71 }
72 if (count & 2) {
73 short *ss = s;
74 *ss++ = c;
75 s = ss;
76 }
77 if (count & 1) {
78 char *cs = s;
79 *cs = c;
80 }
81 return xs;
82}
83EXPORT_SYMBOL(memset);
84
85void *memcpy(void *to, const void *from, size_t n)
86{
87 void *xto = to;
88 size_t temp, temp1;
89
90 if (!n)
91 return xto;
92 if ((long)to & 1) {
93 char *cto = to;
94 const char *cfrom = from;
95 *cto++ = *cfrom++;
96 to = cto;
97 from = cfrom;
98 n--;
99 }
100 if (n > 2 && (long)to & 2) {
101 short *sto = to;
102 const short *sfrom = from;
103 *sto++ = *sfrom++;
104 to = sto;
105 from = sfrom;
106 n -= 2;
107 }
108 temp = n >> 2;
109 if (temp) {
110 long *lto = to;
111 const long *lfrom = from;
112
113 asm volatile (
114 " movel %2,%3\n"
115 " andw #7,%3\n"
116 " lsrl #3,%2\n"
117 " negw %3\n"
118 " jmp %%pc@(1f,%3:w:2)\n"
119 "4: movel %0@+,%1@+\n"
120 " movel %0@+,%1@+\n"
121 " movel %0@+,%1@+\n"
122 " movel %0@+,%1@+\n"
123 " movel %0@+,%1@+\n"
124 " movel %0@+,%1@+\n"
125 " movel %0@+,%1@+\n"
126 " movel %0@+,%1@+\n"
127 "1: dbra %2,4b\n"
128 " clrw %2\n"
129 " subql #1,%2\n"
130 " jpl 4b"
131 : "=a" (lfrom), "=a" (lto), "=d" (temp), "=&d" (temp1)
132 : "0" (lfrom), "1" (lto), "2" (temp));
133 to = lto;
134 from = lfrom;
135 }
136 if (n & 2) {
137 short *sto = to;
138 const short *sfrom = from;
139 *sto++ = *sfrom++;
140 to = sto;
141 from = sfrom;
142 }
143 if (n & 1) {
144 char *cto = to;
145 const char *cfrom = from;
146 *cto = *cfrom;
147 }
148 return xto;
149}
150EXPORT_SYMBOL(memcpy);
151
152void *memmove(void *dest, const void *src, size_t n)
153{
154 void *xdest = dest;
155 size_t temp;
156
157 if (!n)
158 return xdest;
159
160 if (dest < src) {
161 if ((long)dest & 1) {
162 char *cdest = dest;
163 const char *csrc = src;
164 *cdest++ = *csrc++;
165 dest = cdest;
166 src = csrc;
167 n--;
168 }
169 if (n > 2 && (long)dest & 2) {
170 short *sdest = dest;
171 const short *ssrc = src;
172 *sdest++ = *ssrc++;
173 dest = sdest;
174 src = ssrc;
175 n -= 2;
176 }
177 temp = n >> 2;
178 if (temp) {
179 long *ldest = dest;
180 const long *lsrc = src;
181 temp--;
182 do
183 *ldest++ = *lsrc++;
184 while (temp--);
185 dest = ldest;
186 src = lsrc;
187 }
188 if (n & 2) {
189 short *sdest = dest;
190 const short *ssrc = src;
191 *sdest++ = *ssrc++;
192 dest = sdest;
193 src = ssrc;
194 }
195 if (n & 1) {
196 char *cdest = dest;
197 const char *csrc = src;
198 *cdest = *csrc;
199 }
200 } else {
201 dest = (char *)dest + n;
202 src = (const char *)src + n;
203 if ((long)dest & 1) {
204 char *cdest = dest;
205 const char *csrc = src;
206 *--cdest = *--csrc;
207 dest = cdest;
208 src = csrc;
209 n--;
210 }
211 if (n > 2 && (long)dest & 2) {
212 short *sdest = dest;
213 const short *ssrc = src;
214 *--sdest = *--ssrc;
215 dest = sdest;
216 src = ssrc;
217 n -= 2;
218 }
219 temp = n >> 2;
220 if (temp) {
221 long *ldest = dest;
222 const long *lsrc = src;
223 temp--;
224 do
225 *--ldest = *--lsrc;
226 while (temp--);
227 dest = ldest;
228 src = lsrc;
229 }
230 if (n & 2) {
231 short *sdest = dest;
232 const short *ssrc = src;
233 *--sdest = *--ssrc;
234 dest = sdest;
235 src = ssrc;
236 }
237 if (n & 1) {
238 char *cdest = dest;
239 const char *csrc = src;
240 *--cdest = *--csrc;
241 }
242 }
243 return xdest;
244}
245EXPORT_SYMBOL(memmove);
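
The memset()/memcpy()/memmove() bodies deleted from string.c are not lost: each now lives in its own file under arch/m68k/lib/, shared by the MMU and nommu builds (memmove.c above already carries the implementation, which is why only its stale __IN_STRING_C define needed removing). The memmove() shown here uses the classic overlap rule: copy forward when dest < src, backward otherwise, so source bytes are never clobbered before they are read. The rule in its minimal byte-wise form, leaving out the alignment and longword batching of the real code:

    #include <stddef.h>

    /* Direction rule only; a sketch, not the kernel implementation. */
    static void *move_bytes(void *dest, const void *src, size_t n)
    {
            char *d = dest;
            const char *s = src;

            if (d < s)              /* forward: reads stay ahead of writes */
                    while (n--)
                            *d++ = *s++;
            else                    /* backward: start from the tail */
                    while (n--)
                            d[n] = s[n];
            return dest;
    }
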
diff --git a/arch/m68k/mm/Makefile b/arch/m68k/mm/Makefile
index b60270e4954b..09cadf1058d5 100644
--- a/arch/m68k/mm/Makefile
+++ b/arch/m68k/mm/Makefile
@@ -1,5 +1,9 @@
1ifdef CONFIG_MMU 1#
2include arch/m68k/mm/Makefile_mm 2# Makefile for the linux m68k-specific parts of the memory manager.
3else 3#
4include arch/m68k/mm/Makefile_no 4
5endif 5obj-y := init.o
6
7obj-$(CONFIG_MMU) += cache.o fault.o hwtest.o
8obj-$(CONFIG_MMU_MOTOROLA) += kmap.o memory.o motorola.o
9obj-$(CONFIG_MMU_SUN3) += sun3kmap.o sun3mmu.o
diff --git a/arch/m68k/mm/Makefile_mm b/arch/m68k/mm/Makefile_mm
deleted file mode 100644
index 5eaa43c4cb3c..000000000000
--- a/arch/m68k/mm/Makefile_mm
+++ /dev/null
@@ -1,8 +0,0 @@
1#
2# Makefile for the linux m68k-specific parts of the memory manager.
3#
4
5obj-y := cache.o init.o fault.o hwtest.o
6
7obj-$(CONFIG_MMU_MOTOROLA) += kmap.o memory.o motorola.o
8obj-$(CONFIG_MMU_SUN3) += sun3kmap.o sun3mmu.o
diff --git a/arch/m68k/mm/Makefile_no b/arch/m68k/mm/Makefile_no
deleted file mode 100644
index b54ab6b4b523..000000000000
--- a/arch/m68k/mm/Makefile_no
+++ /dev/null
@@ -1,5 +0,0 @@
1#
2# Makefile for the linux m68knommu specific parts of the memory manager.
3#
4
5obj-y += init.o kmap.o
diff --git a/arch/m68k/mm/init_no.c b/arch/m68k/mm/init_no.c
index 8a6653f56bd8..7cbd7bd1f8bc 100644
--- a/arch/m68k/mm/init_no.c
+++ b/arch/m68k/mm/init_no.c
@@ -38,28 +38,10 @@
38#include <asm/system.h> 38#include <asm/system.h>
39#include <asm/machdep.h> 39#include <asm/machdep.h>
40 40
41#undef DEBUG
42
43extern void die_if_kernel(char *,struct pt_regs *,long);
44extern void free_initmem(void);
45
46/* 41/*
47 * BAD_PAGE is the page that is used for page faults when linux
48 * is out-of-memory. Older versions of linux just did a
49 * do_exit(), but using this instead means there is less risk
50 * for a process dying in kernel mode, possibly leaving an inode
51 * unused etc.
52 *
53 * BAD_PAGETABLE is the accompanying page-table: it is initialized
54 * to point to BAD_PAGE entries.
55 *
56 * ZERO_PAGE is a special page that is used for zero-initialized 42 * ZERO_PAGE is a special page that is used for zero-initialized
57 * data and COW. 43 * data and COW.
58 */ 44 */
59static unsigned long empty_bad_page_table;
60
61static unsigned long empty_bad_page;
62
63unsigned long empty_zero_page; 45unsigned long empty_zero_page;
64 46
65extern unsigned long memory_start; 47extern unsigned long memory_start;
@@ -77,22 +59,9 @@ void __init paging_init(void)
77 * Make sure start_mem is page aligned, otherwise bootmem and 59 * Make sure start_mem is page aligned, otherwise bootmem and
78 * page_alloc get different views of the world. 60 * page_alloc get different views of the world.
79 */ 61 */
80#ifdef DEBUG
81 unsigned long start_mem = PAGE_ALIGN(memory_start);
82#endif
83 unsigned long end_mem = memory_end & PAGE_MASK; 62 unsigned long end_mem = memory_end & PAGE_MASK;
63 unsigned long zones_size[MAX_NR_ZONES] = {0, };
84 64
85#ifdef DEBUG
86 printk (KERN_DEBUG "start_mem is %#lx\nvirtual_end is %#lx\n",
87 start_mem, end_mem);
88#endif
89
90 /*
91 * Initialize the bad page table and bad page to point
92 * to a couple of allocated pages.
93 */
94 empty_bad_page_table = (unsigned long)alloc_bootmem_pages(PAGE_SIZE);
95 empty_bad_page = (unsigned long)alloc_bootmem_pages(PAGE_SIZE);
96 empty_zero_page = (unsigned long)alloc_bootmem_pages(PAGE_SIZE); 65 empty_zero_page = (unsigned long)alloc_bootmem_pages(PAGE_SIZE);
97 memset((void *)empty_zero_page, 0, PAGE_SIZE); 66 memset((void *)empty_zero_page, 0, PAGE_SIZE);
98 67
@@ -101,19 +70,8 @@ void __init paging_init(void)
101 */ 70 */
102 set_fs (USER_DS); 71 set_fs (USER_DS);
103 72
104#ifdef DEBUG 73 zones_size[ZONE_DMA] = (end_mem - PAGE_OFFSET) >> PAGE_SHIFT;
105 printk (KERN_DEBUG "before free_area_init\n"); 74 free_area_init(zones_size);
106
107 printk (KERN_DEBUG "free_area_init -> start_mem is %#lx\nvirtual_end is %#lx\n",
108 start_mem, end_mem);
109#endif
110
111 {
112 unsigned long zones_size[MAX_NR_ZONES] = {0, };
113
114 zones_size[ZONE_DMA] = (end_mem - PAGE_OFFSET) >> PAGE_SHIFT;
115 free_area_init(zones_size);
116 }
117} 75}
118 76
119void __init mem_init(void) 77void __init mem_init(void)
@@ -166,8 +124,7 @@ void free_initrd_mem(unsigned long start, unsigned long end)
166} 124}
167#endif 125#endif
168 126
169void 127void free_initmem(void)
170free_initmem()
171{ 128{
172#ifdef CONFIG_RAMKERNEL 129#ifdef CONFIG_RAMKERNEL
173 unsigned long addr; 130 unsigned long addr;
diff --git a/arch/m68k/mm/kmap.c b/arch/m68k/mm/kmap.c
index a373d136b2b2..69345849454b 100644
--- a/arch/m68k/mm/kmap.c
+++ b/arch/m68k/mm/kmap.c
@@ -1,5 +1,367 @@
1#ifdef CONFIG_MMU 1/*
2#include "kmap_mm.c" 2 * linux/arch/m68k/mm/kmap.c
3 *
4 * Copyright (C) 1997 Roman Hodek
5 *
6 * 10/01/99 cleaned up the code and changed to the same interface
7 * used by other architectures /Roman Zippel
8 */
9
10#include <linux/module.h>
11#include <linux/mm.h>
12#include <linux/kernel.h>
13#include <linux/string.h>
14#include <linux/types.h>
15#include <linux/slab.h>
16#include <linux/vmalloc.h>
17
18#include <asm/setup.h>
19#include <asm/segment.h>
20#include <asm/page.h>
21#include <asm/pgalloc.h>
22#include <asm/io.h>
23#include <asm/system.h>
24
25#undef DEBUG
26
27#define PTRTREESIZE (256*1024)
28
29/*
30 * For 040/060 we can use the virtual memory area like other architectures,
31 * but for 020/030 we want to use early termination page descriptors and we
32 * can't mix these with normal page descriptors, so we have to copy that code
33 * (mm/vmalloc.c) and return appropriately aligned addresses.
34 */
35
36#ifdef CPU_M68040_OR_M68060_ONLY
37
38#define IO_SIZE PAGE_SIZE
39
40static inline struct vm_struct *get_io_area(unsigned long size)
41{
42 return get_vm_area(size, VM_IOREMAP);
43}
44
45
46static inline void free_io_area(void *addr)
47{
48 vfree((void *)(PAGE_MASK & (unsigned long)addr));
49}
50
3#else 51#else
4#include "kmap_no.c" 52
53#define IO_SIZE (256*1024)
54
55static struct vm_struct *iolist;
56
57static struct vm_struct *get_io_area(unsigned long size)
58{
59 unsigned long addr;
60 struct vm_struct **p, *tmp, *area;
61
62 area = kmalloc(sizeof(*area), GFP_KERNEL);
63 if (!area)
64 return NULL;
65 addr = KMAP_START;
66 for (p = &iolist; (tmp = *p) ; p = &tmp->next) {
67 if (size + addr < (unsigned long)tmp->addr)
68 break;
69 if (addr > KMAP_END-size) {
70 kfree(area);
71 return NULL;
72 }
73 addr = tmp->size + (unsigned long)tmp->addr;
74 }
75 area->addr = (void *)addr;
76 area->size = size + IO_SIZE;
77 area->next = *p;
78 *p = area;
79 return area;
80}
81
82static inline void free_io_area(void *addr)
83{
84 struct vm_struct **p, *tmp;
85
86 if (!addr)
87 return;
88 addr = (void *)((unsigned long)addr & -IO_SIZE);
89 for (p = &iolist ; (tmp = *p) ; p = &tmp->next) {
90 if (tmp->addr == addr) {
91 *p = tmp->next;
92 __iounmap(tmp->addr, tmp->size);
93 kfree(tmp);
94 return;
95 }
96 }
97}
98
5#endif 99#endif
100
101/*
102 * Map some physical address range into the kernel address space.
103 */
104/* Rewritten by Andreas Schwab to remove all races. */
105
106void __iomem *__ioremap(unsigned long physaddr, unsigned long size, int cacheflag)
107{
108 struct vm_struct *area;
109 unsigned long virtaddr, retaddr;
110 long offset;
111 pgd_t *pgd_dir;
112 pmd_t *pmd_dir;
113 pte_t *pte_dir;
114
115 /*
116 * Don't allow mappings that wrap..
117 */
118 if (!size || physaddr > (unsigned long)(-size))
119 return NULL;
120
121#ifdef CONFIG_AMIGA
122 if (MACH_IS_AMIGA) {
123 if ((physaddr >= 0x40000000) && (physaddr + size < 0x60000000)
124 && (cacheflag == IOMAP_NOCACHE_SER))
125 return (void __iomem *)physaddr;
126 }
127#endif
128
129#ifdef DEBUG
130 printk("ioremap: 0x%lx,0x%lx(%d) - ", physaddr, size, cacheflag);
131#endif
132 /*
133 * Mappings have to be aligned
134 */
135 offset = physaddr & (IO_SIZE - 1);
136 physaddr &= -IO_SIZE;
137 size = (size + offset + IO_SIZE - 1) & -IO_SIZE;
138
139 /*
140 * Ok, go for it..
141 */
142 area = get_io_area(size);
143 if (!area)
144 return NULL;
145
146 virtaddr = (unsigned long)area->addr;
147 retaddr = virtaddr + offset;
148#ifdef DEBUG
149 printk("0x%lx,0x%lx,0x%lx", physaddr, virtaddr, retaddr);
150#endif
151
152 /*
153 * add cache and table flags to physical address
154 */
155 if (CPU_IS_040_OR_060) {
156 physaddr |= (_PAGE_PRESENT | _PAGE_GLOBAL040 |
157 _PAGE_ACCESSED | _PAGE_DIRTY);
158 switch (cacheflag) {
159 case IOMAP_FULL_CACHING:
160 physaddr |= _PAGE_CACHE040;
161 break;
162 case IOMAP_NOCACHE_SER:
163 default:
164 physaddr |= _PAGE_NOCACHE_S;
165 break;
166 case IOMAP_NOCACHE_NONSER:
167 physaddr |= _PAGE_NOCACHE;
168 break;
169 case IOMAP_WRITETHROUGH:
170 physaddr |= _PAGE_CACHE040W;
171 break;
172 }
173 } else {
174 physaddr |= (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY);
175 switch (cacheflag) {
176 case IOMAP_NOCACHE_SER:
177 case IOMAP_NOCACHE_NONSER:
178 default:
179 physaddr |= _PAGE_NOCACHE030;
180 break;
181 case IOMAP_FULL_CACHING:
182 case IOMAP_WRITETHROUGH:
183 break;
184 }
185 }
186
187 while ((long)size > 0) {
188#ifdef DEBUG
189 if (!(virtaddr & (PTRTREESIZE-1)))
190 printk ("\npa=%#lx va=%#lx ", physaddr, virtaddr);
191#endif
192 pgd_dir = pgd_offset_k(virtaddr);
193 pmd_dir = pmd_alloc(&init_mm, pgd_dir, virtaddr);
194 if (!pmd_dir) {
195 printk("ioremap: no mem for pmd_dir\n");
196 return NULL;
197 }
198
199 if (CPU_IS_020_OR_030) {
200 pmd_dir->pmd[(virtaddr/PTRTREESIZE) & 15] = physaddr;
201 physaddr += PTRTREESIZE;
202 virtaddr += PTRTREESIZE;
203 size -= PTRTREESIZE;
204 } else {
205 pte_dir = pte_alloc_kernel(pmd_dir, virtaddr);
206 if (!pte_dir) {
207 printk("ioremap: no mem for pte_dir\n");
208 return NULL;
209 }
210
211 pte_val(*pte_dir) = physaddr;
212 virtaddr += PAGE_SIZE;
213 physaddr += PAGE_SIZE;
214 size -= PAGE_SIZE;
215 }
216 }
217#ifdef DEBUG
218 printk("\n");
219#endif
220 flush_tlb_all();
221
222 return (void __iomem *)retaddr;
223}
224EXPORT_SYMBOL(__ioremap);
225
226/*
227 * Unmap an ioremap()ed region again
228 */
229void iounmap(void __iomem *addr)
230{
231#ifdef CONFIG_AMIGA
232 if ((!MACH_IS_AMIGA) ||
233 (((unsigned long)addr < 0x40000000) ||
234 ((unsigned long)addr > 0x60000000)))
235 free_io_area((__force void *)addr);
236#else
237 free_io_area((__force void *)addr);
238#endif
239}
240EXPORT_SYMBOL(iounmap);
241
242/*
243 * __iounmap unmaps nearly everything, so be careful:
244 * it no longer frees pointer/page tables, but that
245 * wasn't used anyway and might be added back later.
246 */
247void __iounmap(void *addr, unsigned long size)
248{
249 unsigned long virtaddr = (unsigned long)addr;
250 pgd_t *pgd_dir;
251 pmd_t *pmd_dir;
252 pte_t *pte_dir;
253
254 while ((long)size > 0) {
255 pgd_dir = pgd_offset_k(virtaddr);
256 if (pgd_bad(*pgd_dir)) {
257 printk("iounmap: bad pgd(%08lx)\n", pgd_val(*pgd_dir));
258 pgd_clear(pgd_dir);
259 return;
260 }
261 pmd_dir = pmd_offset(pgd_dir, virtaddr);
262
263 if (CPU_IS_020_OR_030) {
264 int pmd_off = (virtaddr/PTRTREESIZE) & 15;
265 int pmd_type = pmd_dir->pmd[pmd_off] & _DESCTYPE_MASK;
266
267 if (pmd_type == _PAGE_PRESENT) {
268 pmd_dir->pmd[pmd_off] = 0;
269 virtaddr += PTRTREESIZE;
270 size -= PTRTREESIZE;
271 continue;
272 } else if (pmd_type == 0)
273 continue;
274 }
275
276 if (pmd_bad(*pmd_dir)) {
277 printk("iounmap: bad pmd (%08lx)\n", pmd_val(*pmd_dir));
278 pmd_clear(pmd_dir);
279 return;
280 }
281 pte_dir = pte_offset_kernel(pmd_dir, virtaddr);
282
283 pte_val(*pte_dir) = 0;
284 virtaddr += PAGE_SIZE;
285 size -= PAGE_SIZE;
286 }
287
288 flush_tlb_all();
289}
290
291/*
292 * Set new cache mode for some kernel address space.
293 * The caller must push data for that range itself, if such data may already
294 * be in the cache.
295 */
296void kernel_set_cachemode(void *addr, unsigned long size, int cmode)
297{
298 unsigned long virtaddr = (unsigned long)addr;
299 pgd_t *pgd_dir;
300 pmd_t *pmd_dir;
301 pte_t *pte_dir;
302
303 if (CPU_IS_040_OR_060) {
304 switch (cmode) {
305 case IOMAP_FULL_CACHING:
306 cmode = _PAGE_CACHE040;
307 break;
308 case IOMAP_NOCACHE_SER:
309 default:
310 cmode = _PAGE_NOCACHE_S;
311 break;
312 case IOMAP_NOCACHE_NONSER:
313 cmode = _PAGE_NOCACHE;
314 break;
315 case IOMAP_WRITETHROUGH:
316 cmode = _PAGE_CACHE040W;
317 break;
318 }
319 } else {
320 switch (cmode) {
321 case IOMAP_NOCACHE_SER:
322 case IOMAP_NOCACHE_NONSER:
323 default:
324 cmode = _PAGE_NOCACHE030;
325 break;
326 case IOMAP_FULL_CACHING:
327 case IOMAP_WRITETHROUGH:
328 cmode = 0;
329 }
330 }
331
332 while ((long)size > 0) {
333 pgd_dir = pgd_offset_k(virtaddr);
334 if (pgd_bad(*pgd_dir)) {
335 printk("iocachemode: bad pgd(%08lx)\n", pgd_val(*pgd_dir));
336 pgd_clear(pgd_dir);
337 return;
338 }
339 pmd_dir = pmd_offset(pgd_dir, virtaddr);
340
341 if (CPU_IS_020_OR_030) {
342 int pmd_off = (virtaddr/PTRTREESIZE) & 15;
343
344 if ((pmd_dir->pmd[pmd_off] & _DESCTYPE_MASK) == _PAGE_PRESENT) {
345 pmd_dir->pmd[pmd_off] = (pmd_dir->pmd[pmd_off] &
346 _CACHEMASK040) | cmode;
347 virtaddr += PTRTREESIZE;
348 size -= PTRTREESIZE;
349 continue;
350 }
351 }
352
353 if (pmd_bad(*pmd_dir)) {
354 printk("iocachemode: bad pmd (%08lx)\n", pmd_val(*pmd_dir));
355 pmd_clear(pmd_dir);
356 return;
357 }
358 pte_dir = pte_offset_kernel(pmd_dir, virtaddr);
359
360 pte_val(*pte_dir) = (pte_val(*pte_dir) & _CACHEMASK040) | cmode;
361 virtaddr += PAGE_SIZE;
362 size -= PAGE_SIZE;
363 }
364
365 flush_tlb_all();
366}
367EXPORT_SYMBOL(kernel_set_cachemode);
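
On the 020/030 side, get_io_area() above is a first-fit allocator over iolist, a singly linked list of regions kept sorted by address: walk the list until a large-enough gap opens before the next entry (giving up when the KMAP_START..KMAP_END window is exhausted), then splice the new area in at that position so the list stays sorted. The same pattern as a self-contained sketch, with simplified types and made-up window constants:

    #include <stdlib.h>

    #define REGION_START    0x10000000UL    /* stand-ins for KMAP_START/END */
    #define REGION_END      0x20000000UL

    struct region {
            unsigned long addr, size;
            struct region *next;
    };

    /* First-fit over an address-sorted list, as in get_io_area() above. */
    static struct region *alloc_region(struct region **list, unsigned long size)
    {
            struct region **p, *tmp, *area = malloc(sizeof(*area));
            unsigned long addr = REGION_START;

            if (!area)
                    return NULL;
            for (p = list; (tmp = *p); p = &tmp->next) {
                    if (addr + size <= tmp->addr)
                            break;          /* fits in the gap before tmp */
                    addr = tmp->addr + tmp->size;
            }
            if (addr + size > REGION_END) {
                    free(area);
                    return NULL;            /* window exhausted */
            }
            area->addr = addr;
            area->size = size;
            area->next = *p;                /* splice in, keeping sort order */
            *p = area;
            return area;
    }
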
diff --git a/arch/m68k/mm/kmap_mm.c b/arch/m68k/mm/kmap_mm.c
deleted file mode 100644
index 69345849454b..000000000000
--- a/arch/m68k/mm/kmap_mm.c
+++ /dev/null
@@ -1,367 +0,0 @@
1/*
2 * linux/arch/m68k/mm/kmap.c
3 *
4 * Copyright (C) 1997 Roman Hodek
5 *
6 * 10/01/99 cleaned up the code and changed to the same interface
7 * used by other architectures /Roman Zippel
8 */
9
10#include <linux/module.h>
11#include <linux/mm.h>
12#include <linux/kernel.h>
13#include <linux/string.h>
14#include <linux/types.h>
15#include <linux/slab.h>
16#include <linux/vmalloc.h>
17
18#include <asm/setup.h>
19#include <asm/segment.h>
20#include <asm/page.h>
21#include <asm/pgalloc.h>
22#include <asm/io.h>
23#include <asm/system.h>
24
25#undef DEBUG
26
27#define PTRTREESIZE (256*1024)
28
29/*
30 * For 040/060 we can use the virtual memory area like other architectures,
31 * but for 020/030 we want to use early termination page descriptors and we
32 * can't mix these with normal page descriptors, so we have to copy that code
33 * (mm/vmalloc.c) and return appropriately aligned addresses.
34 */
35
36#ifdef CPU_M68040_OR_M68060_ONLY
37
38#define IO_SIZE PAGE_SIZE
39
40static inline struct vm_struct *get_io_area(unsigned long size)
41{
42 return get_vm_area(size, VM_IOREMAP);
43}
44
45
46static inline void free_io_area(void *addr)
47{
48 vfree((void *)(PAGE_MASK & (unsigned long)addr));
49}
50
51#else
52
53#define IO_SIZE (256*1024)
54
55static struct vm_struct *iolist;
56
57static struct vm_struct *get_io_area(unsigned long size)
58{
59 unsigned long addr;
60 struct vm_struct **p, *tmp, *area;
61
62 area = kmalloc(sizeof(*area), GFP_KERNEL);
63 if (!area)
64 return NULL;
65 addr = KMAP_START;
66 for (p = &iolist; (tmp = *p) ; p = &tmp->next) {
67 if (size + addr < (unsigned long)tmp->addr)
68 break;
69 if (addr > KMAP_END-size) {
70 kfree(area);
71 return NULL;
72 }
73 addr = tmp->size + (unsigned long)tmp->addr;
74 }
75 area->addr = (void *)addr;
76 area->size = size + IO_SIZE;
77 area->next = *p;
78 *p = area;
79 return area;
80}
81
82static inline void free_io_area(void *addr)
83{
84 struct vm_struct **p, *tmp;
85
86 if (!addr)
87 return;
88 addr = (void *)((unsigned long)addr & -IO_SIZE);
89 for (p = &iolist ; (tmp = *p) ; p = &tmp->next) {
90 if (tmp->addr == addr) {
91 *p = tmp->next;
92 __iounmap(tmp->addr, tmp->size);
93 kfree(tmp);
94 return;
95 }
96 }
97}
98
99#endif
100
101/*
102 * Map some physical address range into the kernel address space.
103 */
104/* Rewritten by Andreas Schwab to remove all races. */
105
106void __iomem *__ioremap(unsigned long physaddr, unsigned long size, int cacheflag)
107{
108 struct vm_struct *area;
109 unsigned long virtaddr, retaddr;
110 long offset;
111 pgd_t *pgd_dir;
112 pmd_t *pmd_dir;
113 pte_t *pte_dir;
114
115 /*
116 * Don't allow mappings that wrap..
117 */
118 if (!size || physaddr > (unsigned long)(-size))
119 return NULL;
120
121#ifdef CONFIG_AMIGA
122 if (MACH_IS_AMIGA) {
123 if ((physaddr >= 0x40000000) && (physaddr + size < 0x60000000)
124 && (cacheflag == IOMAP_NOCACHE_SER))
125 return (void __iomem *)physaddr;
126 }
127#endif
128
129#ifdef DEBUG
130 printk("ioremap: 0x%lx,0x%lx(%d) - ", physaddr, size, cacheflag);
131#endif
132 /*
133 * Mappings have to be aligned
134 */
135 offset = physaddr & (IO_SIZE - 1);
136 physaddr &= -IO_SIZE;
137 size = (size + offset + IO_SIZE - 1) & -IO_SIZE;
138
139 /*
140 * Ok, go for it..
141 */
142 area = get_io_area(size);
143 if (!area)
144 return NULL;
145
146 virtaddr = (unsigned long)area->addr;
147 retaddr = virtaddr + offset;
148#ifdef DEBUG
149 printk("0x%lx,0x%lx,0x%lx", physaddr, virtaddr, retaddr);
150#endif
151
152 /*
153 * add cache and table flags to physical address
154 */
155 if (CPU_IS_040_OR_060) {
156 physaddr |= (_PAGE_PRESENT | _PAGE_GLOBAL040 |
157 _PAGE_ACCESSED | _PAGE_DIRTY);
158 switch (cacheflag) {
159 case IOMAP_FULL_CACHING:
160 physaddr |= _PAGE_CACHE040;
161 break;
162 case IOMAP_NOCACHE_SER:
163 default:
164 physaddr |= _PAGE_NOCACHE_S;
165 break;
166 case IOMAP_NOCACHE_NONSER:
167 physaddr |= _PAGE_NOCACHE;
168 break;
169 case IOMAP_WRITETHROUGH:
170 physaddr |= _PAGE_CACHE040W;
171 break;
172 }
173 } else {
174 physaddr |= (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY);
175 switch (cacheflag) {
176 case IOMAP_NOCACHE_SER:
177 case IOMAP_NOCACHE_NONSER:
178 default:
179 physaddr |= _PAGE_NOCACHE030;
180 break;
181 case IOMAP_FULL_CACHING:
182 case IOMAP_WRITETHROUGH:
183 break;
184 }
185 }
186
187 while ((long)size > 0) {
188#ifdef DEBUG
189 if (!(virtaddr & (PTRTREESIZE-1)))
190 printk ("\npa=%#lx va=%#lx ", physaddr, virtaddr);
191#endif
192 pgd_dir = pgd_offset_k(virtaddr);
193 pmd_dir = pmd_alloc(&init_mm, pgd_dir, virtaddr);
194 if (!pmd_dir) {
195 printk("ioremap: no mem for pmd_dir\n");
196 return NULL;
197 }
198
199 if (CPU_IS_020_OR_030) {
200 pmd_dir->pmd[(virtaddr/PTRTREESIZE) & 15] = physaddr;
201 physaddr += PTRTREESIZE;
202 virtaddr += PTRTREESIZE;
203 size -= PTRTREESIZE;
204 } else {
205 pte_dir = pte_alloc_kernel(pmd_dir, virtaddr);
206 if (!pte_dir) {
207 printk("ioremap: no mem for pte_dir\n");
208 return NULL;
209 }
210
211 pte_val(*pte_dir) = physaddr;
212 virtaddr += PAGE_SIZE;
213 physaddr += PAGE_SIZE;
214 size -= PAGE_SIZE;
215 }
216 }
217#ifdef DEBUG
218 printk("\n");
219#endif
220 flush_tlb_all();
221
222 return (void __iomem *)retaddr;
223}
224EXPORT_SYMBOL(__ioremap);
225
226/*
227 * Unmap an ioremap()ed region again
228 */
229void iounmap(void __iomem *addr)
230{
231#ifdef CONFIG_AMIGA
232 if ((!MACH_IS_AMIGA) ||
233 (((unsigned long)addr < 0x40000000) ||
234 ((unsigned long)addr > 0x60000000)))
235 free_io_area((__force void *)addr);
236#else
237 free_io_area((__force void *)addr);
238#endif
239}
240EXPORT_SYMBOL(iounmap);
241
242/*
243 * __iounmap unmaps nearly everything, so be careful:
244 * it no longer frees pointer/page tables, but that
245 * wasn't used anyway and might be added back later.
246 */
247void __iounmap(void *addr, unsigned long size)
248{
249 unsigned long virtaddr = (unsigned long)addr;
250 pgd_t *pgd_dir;
251 pmd_t *pmd_dir;
252 pte_t *pte_dir;
253
254 while ((long)size > 0) {
255 pgd_dir = pgd_offset_k(virtaddr);
256 if (pgd_bad(*pgd_dir)) {
257 printk("iounmap: bad pgd(%08lx)\n", pgd_val(*pgd_dir));
258 pgd_clear(pgd_dir);
259 return;
260 }
261 pmd_dir = pmd_offset(pgd_dir, virtaddr);
262
263 if (CPU_IS_020_OR_030) {
264 int pmd_off = (virtaddr/PTRTREESIZE) & 15;
265 int pmd_type = pmd_dir->pmd[pmd_off] & _DESCTYPE_MASK;
266
267 if (pmd_type == _PAGE_PRESENT) {
268 pmd_dir->pmd[pmd_off] = 0;
269 virtaddr += PTRTREESIZE;
270 size -= PTRTREESIZE;
271 continue;
272 } else if (pmd_type == 0)
273 continue;
274 }
275
276 if (pmd_bad(*pmd_dir)) {
277 printk("iounmap: bad pmd (%08lx)\n", pmd_val(*pmd_dir));
278 pmd_clear(pmd_dir);
279 return;
280 }
281 pte_dir = pte_offset_kernel(pmd_dir, virtaddr);
282
283 pte_val(*pte_dir) = 0;
284 virtaddr += PAGE_SIZE;
285 size -= PAGE_SIZE;
286 }
287
288 flush_tlb_all();
289}
290
291/*
292 * Set new cache mode for some kernel address space.
293 * The caller must push data for that range itself, if such data may already
294 * be in the cache.
295 */
296void kernel_set_cachemode(void *addr, unsigned long size, int cmode)
297{
298 unsigned long virtaddr = (unsigned long)addr;
299 pgd_t *pgd_dir;
300 pmd_t *pmd_dir;
301 pte_t *pte_dir;
302
303 if (CPU_IS_040_OR_060) {
304 switch (cmode) {
305 case IOMAP_FULL_CACHING:
306 cmode = _PAGE_CACHE040;
307 break;
308 case IOMAP_NOCACHE_SER:
309 default:
310 cmode = _PAGE_NOCACHE_S;
311 break;
312 case IOMAP_NOCACHE_NONSER:
313 cmode = _PAGE_NOCACHE;
314 break;
315 case IOMAP_WRITETHROUGH:
316 cmode = _PAGE_CACHE040W;
317 break;
318 }
319 } else {
320 switch (cmode) {
321 case IOMAP_NOCACHE_SER:
322 case IOMAP_NOCACHE_NONSER:
323 default:
324 cmode = _PAGE_NOCACHE030;
325 break;
326 case IOMAP_FULL_CACHING:
327 case IOMAP_WRITETHROUGH:
328 cmode = 0;
329 }
330 }
331
332 while ((long)size > 0) {
333 pgd_dir = pgd_offset_k(virtaddr);
334 if (pgd_bad(*pgd_dir)) {
335 printk("iocachemode: bad pgd(%08lx)\n", pgd_val(*pgd_dir));
336 pgd_clear(pgd_dir);
337 return;
338 }
339 pmd_dir = pmd_offset(pgd_dir, virtaddr);
340
341 if (CPU_IS_020_OR_030) {
342 int pmd_off = (virtaddr/PTRTREESIZE) & 15;
343
344 if ((pmd_dir->pmd[pmd_off] & _DESCTYPE_MASK) == _PAGE_PRESENT) {
345 pmd_dir->pmd[pmd_off] = (pmd_dir->pmd[pmd_off] &
346 _CACHEMASK040) | cmode;
347 virtaddr += PTRTREESIZE;
348 size -= PTRTREESIZE;
349 continue;
350 }
351 }
352
353 if (pmd_bad(*pmd_dir)) {
354 printk("iocachemode: bad pmd (%08lx)\n", pmd_val(*pmd_dir));
355 pmd_clear(pmd_dir);
356 return;
357 }
358 pte_dir = pte_offset_kernel(pmd_dir, virtaddr);
359
360 pte_val(*pte_dir) = (pte_val(*pte_dir) & _CACHEMASK040) | cmode;
361 virtaddr += PAGE_SIZE;
362 size -= PAGE_SIZE;
363 }
364
365 flush_tlb_all();
366}
367EXPORT_SYMBOL(kernel_set_cachemode);
diff --git a/arch/m68k/mm/kmap_no.c b/arch/m68k/mm/kmap_no.c
deleted file mode 100644
index ece8d5ad4e6c..000000000000
--- a/arch/m68k/mm/kmap_no.c
+++ /dev/null
@@ -1,45 +0,0 @@
1/*
2 * linux/arch/m68knommu/mm/kmap.c
3 *
4 * Copyright (C) 2000 Lineo, <davidm@snapgear.com>
5 * Copyright (C) 2000-2002 David McCullough <davidm@snapgear.com>
6 */
7
8#include <linux/mm.h>
9#include <linux/kernel.h>
10#include <linux/string.h>
11#include <linux/types.h>
12#include <linux/vmalloc.h>
13
14#include <asm/setup.h>
15#include <asm/segment.h>
16#include <asm/page.h>
17#include <asm/pgalloc.h>
18#include <asm/io.h>
19#include <asm/system.h>
20
21#undef DEBUG
22
23/*
24 * Map some physical address range into the kernel address space.
25 */
26void *__ioremap(unsigned long physaddr, unsigned long size, int cacheflag)
27{
28 return (void *)physaddr;
29}
30
31/*
32 * Unmap an ioremap()ed region again.
33 */
34void iounmap(void *addr)
35{
36}
37
38/*
39 * Set new cache mode for some kernel address space.
40 * The caller must push data for that range itself, if such data may already
41 * be in the cache.
42 */
43void kernel_set_cachemode(void *addr, unsigned long size, int cmode)
44{
45}
diff --git a/arch/m68k/platform/68328/entry.S b/arch/m68k/platform/68328/entry.S
index 676960cf022a..f68dce766c0a 100644
--- a/arch/m68k/platform/68328/entry.S
+++ b/arch/m68k/platform/68328/entry.S
@@ -10,7 +10,6 @@
10 * Linux/m68k support by Hamish Macdonald 10 * Linux/m68k support by Hamish Macdonald
11 */ 11 */
12 12
13#include <linux/sys.h>
14#include <linux/linkage.h> 13#include <linux/linkage.h>
15#include <asm/thread_info.h> 14#include <asm/thread_info.h>
16#include <asm/unistd.h> 15#include <asm/unistd.h>
@@ -80,7 +79,7 @@ ENTRY(system_call)
80 movel %sp,%d1 /* get thread_info pointer */ 79 movel %sp,%d1 /* get thread_info pointer */
81 andl #-THREAD_SIZE,%d1 80 andl #-THREAD_SIZE,%d1
82 movel %d1,%a2 81 movel %d1,%a2
83 btst #(TIF_SYSCALL_TRACE%8),%a2@(TI_FLAGS+(31-TIF_SYSCALL_TRACE)/8) 82 btst #(TIF_SYSCALL_TRACE%8),%a2@(TINFO_FLAGS+(31-TIF_SYSCALL_TRACE)/8)
84 jne do_trace 83 jne do_trace
85 cmpl #NR_syscalls,%d0 84 cmpl #NR_syscalls,%d0
86 jcc badsys 85 jcc badsys
@@ -107,12 +106,12 @@ Luser_return:
107 andl #-THREAD_SIZE,%d1 106 andl #-THREAD_SIZE,%d1
108 movel %d1,%a2 107 movel %d1,%a2
1091: 1081:
110 move %a2@(TI_FLAGS),%d1 /* thread_info->flags */ 109 move %a2@(TINFO_FLAGS),%d1 /* thread_info->flags */
111 jne Lwork_to_do 110 jne Lwork_to_do
112 RESTORE_ALL 111 RESTORE_ALL
113 112
114Lwork_to_do: 113Lwork_to_do:
115 movel %a2@(TI_FLAGS),%d1 /* thread_info->flags */ 114 movel %a2@(TINFO_FLAGS),%d1 /* thread_info->flags */
116 btst #TIF_NEED_RESCHED,%d1 115 btst #TIF_NEED_RESCHED,%d1
117 jne reschedule 116 jne reschedule
118 117
diff --git a/arch/m68k/platform/68360/entry.S b/arch/m68k/platform/68360/entry.S
index 46c1b18c9dcb..a07b14feed92 100644
--- a/arch/m68k/platform/68360/entry.S
+++ b/arch/m68k/platform/68360/entry.S
@@ -12,7 +12,6 @@
12 * M68360 Port by SED Systems, and Lineo. 12 * M68360 Port by SED Systems, and Lineo.
13 */ 13 */
14 14
15#include <linux/sys.h>
16#include <linux/linkage.h> 15#include <linux/linkage.h>
17#include <asm/thread_info.h> 16#include <asm/thread_info.h>
18#include <asm/unistd.h> 17#include <asm/unistd.h>
@@ -76,7 +75,7 @@ ENTRY(system_call)
76 movel %sp,%d1 /* get thread_info pointer */ 75 movel %sp,%d1 /* get thread_info pointer */
77 andl #-THREAD_SIZE,%d1 76 andl #-THREAD_SIZE,%d1
78 movel %d1,%a2 77 movel %d1,%a2
79 btst #(TIF_SYSCALL_TRACE%8),%a2@(TI_FLAGS+(31-TIF_SYSCALL_TRACE)/8) 78 btst #(TIF_SYSCALL_TRACE%8),%a2@(TINFO_FLAGS+(31-TIF_SYSCALL_TRACE)/8)
80 jne do_trace 79 jne do_trace
81 cmpl #NR_syscalls,%d0 80 cmpl #NR_syscalls,%d0
82 jcc badsys 81 jcc badsys
@@ -103,12 +102,12 @@ Luser_return:
103 andl #-THREAD_SIZE,%d1 102 andl #-THREAD_SIZE,%d1
104 movel %d1,%a2 103 movel %d1,%a2
1051: 1041:
106 move %a2@(TI_FLAGS),%d1 /* thread_info->flags */ 105 move %a2@(TINFO_FLAGS),%d1 /* thread_info->flags */
107 jne Lwork_to_do 106 jne Lwork_to_do
108 RESTORE_ALL 107 RESTORE_ALL
109 108
110Lwork_to_do: 109Lwork_to_do:
111 movel %a2@(TI_FLAGS),%d1 /* thread_info->flags */ 110 movel %a2@(TINFO_FLAGS),%d1 /* thread_info->flags */
112 btst #TIF_NEED_RESCHED,%d1 111 btst #TIF_NEED_RESCHED,%d1
113 jne reschedule 112 jne reschedule
114 113
diff --git a/arch/m68k/platform/coldfire/dma.c b/arch/m68k/platform/coldfire/dma.c
index e88b95e2cc62..df5ce20d181c 100644
--- a/arch/m68k/platform/coldfire/dma.c
+++ b/arch/m68k/platform/coldfire/dma.c
@@ -9,6 +9,7 @@
9/***************************************************************************/ 9/***************************************************************************/
10 10
11#include <linux/kernel.h> 11#include <linux/kernel.h>
12#include <linux/module.h>
12#include <asm/dma.h> 13#include <asm/dma.h>
13#include <asm/coldfire.h> 14#include <asm/coldfire.h>
14#include <asm/mcfsim.h> 15#include <asm/mcfsim.h>
@@ -33,7 +34,9 @@ unsigned int dma_base_addr[MAX_M68K_DMA_CHANNELS] = {
33 MCFDMA_BASE3, 34 MCFDMA_BASE3,
34#endif 35#endif
35}; 36};
37EXPORT_SYMBOL(dma_base_addr);
36 38
37unsigned int dma_device_address[MAX_M68K_DMA_CHANNELS]; 39unsigned int dma_device_address[MAX_M68K_DMA_CHANNELS];
40EXPORT_SYMBOL(dma_device_address);
38 41
39/***************************************************************************/ 42/***************************************************************************/
diff --git a/arch/m68k/platform/coldfire/entry.S b/arch/m68k/platform/coldfire/entry.S
index eab63f09965b..27c2b001161e 100644
--- a/arch/m68k/platform/coldfire/entry.S
+++ b/arch/m68k/platform/coldfire/entry.S
@@ -26,7 +26,6 @@
26 * Bug, speed and maintainability fixes by Philippe De Muyter <phdm@macqel.be> 26 * Bug, speed and maintainability fixes by Philippe De Muyter <phdm@macqel.be>
27 */ 27 */
28 28
29#include <linux/sys.h>
30#include <linux/linkage.h> 29#include <linux/linkage.h>
31#include <asm/unistd.h> 30#include <asm/unistd.h>
32#include <asm/thread_info.h> 31#include <asm/thread_info.h>
@@ -78,7 +77,7 @@ ENTRY(system_call)
78 movel %d2,%a0 77 movel %d2,%a0
79 movel %a0@,%a1 /* save top of frame */ 78 movel %a0@,%a1 /* save top of frame */
80 movel %sp,%a1@(TASK_THREAD+THREAD_ESP0) 79 movel %sp,%a1@(TASK_THREAD+THREAD_ESP0)
81 btst #(TIF_SYSCALL_TRACE%8),%a0@(TI_FLAGS+(31-TIF_SYSCALL_TRACE)/8) 80 btst #(TIF_SYSCALL_TRACE%8),%a0@(TINFO_FLAGS+(31-TIF_SYSCALL_TRACE)/8)
82 bnes 1f 81 bnes 1f
83 82
84 movel %d3,%a0 83 movel %d3,%a0
@@ -113,11 +112,11 @@ ret_from_exception:
113 movel %sp,%d1 /* get thread_info pointer */ 112 movel %sp,%d1 /* get thread_info pointer */
114 andl #-THREAD_SIZE,%d1 /* at base of kernel stack */ 113 andl #-THREAD_SIZE,%d1 /* at base of kernel stack */
115 movel %d1,%a0 114 movel %d1,%a0
116 movel %a0@(TI_FLAGS),%d1 /* get thread_info->flags */ 115 movel %a0@(TINFO_FLAGS),%d1 /* get thread_info->flags */
117 andl #(1<<TIF_NEED_RESCHED),%d1 116 andl #(1<<TIF_NEED_RESCHED),%d1
118 jeq Lkernel_return 117 jeq Lkernel_return
119 118
120 movel %a0@(TI_PREEMPTCOUNT),%d1 119 movel %a0@(TINFO_PREEMPT),%d1
121 cmpl #0,%d1 120 cmpl #0,%d1
122 jne Lkernel_return 121 jne Lkernel_return
123 122
@@ -137,14 +136,14 @@ Luser_return:
137 movel %sp,%d1 /* get thread_info pointer */ 136 movel %sp,%d1 /* get thread_info pointer */
138 andl #-THREAD_SIZE,%d1 /* at base of kernel stack */ 137 andl #-THREAD_SIZE,%d1 /* at base of kernel stack */
139 movel %d1,%a0 138 movel %d1,%a0
140 movel %a0@(TI_FLAGS),%d1 /* get thread_info->flags */ 139 movel %a0@(TINFO_FLAGS),%d1 /* get thread_info->flags */
141 jne Lwork_to_do /* still work to do */ 140 jne Lwork_to_do /* still work to do */
142 141
143Lreturn: 142Lreturn:
144 RESTORE_USER 143 RESTORE_USER
145 144
146Lwork_to_do: 145Lwork_to_do:
147 movel %a0@(TI_FLAGS),%d1 /* get thread_info->flags */ 146 movel %a0@(TINFO_FLAGS),%d1 /* get thread_info->flags */
148 move #0x2000,%sr /* enable intrs again */ 147 move #0x2000,%sr /* enable intrs again */
149 btst #TIF_NEED_RESCHED,%d1 148 btst #TIF_NEED_RESCHED,%d1
150 jne reschedule 149 jne reschedule
diff --git a/arch/m68k/platform/coldfire/head.S b/arch/m68k/platform/coldfire/head.S
index 6ae91a499184..c33483824a2e 100644
--- a/arch/m68k/platform/coldfire/head.S
+++ b/arch/m68k/platform/coldfire/head.S
@@ -8,7 +8,6 @@
8 8
9/*****************************************************************************/ 9/*****************************************************************************/
10 10
11#include <linux/sys.h>
12#include <linux/linkage.h> 11#include <linux/linkage.h>
13#include <linux/init.h> 12#include <linux/init.h>
14#include <asm/asm-offsets.h> 13#include <asm/asm-offsets.h>
diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S
index 01af3876cf90..a81176f44c74 100644
--- a/arch/mips/kernel/vmlinux.lds.S
+++ b/arch/mips/kernel/vmlinux.lds.S
@@ -118,7 +118,7 @@ SECTIONS
118 EXIT_DATA 118 EXIT_DATA
119 } 119 }
120 120
121 PERCPU(1 << CONFIG_MIPS_L1_CACHE_SHIFT, PAGE_SIZE) 121 PERCPU_SECTION(1 << CONFIG_MIPS_L1_CACHE_SHIFT)
122 . = ALIGN(PAGE_SIZE); 122 . = ALIGN(PAGE_SIZE);
123 __init_end = .; 123 __init_end = .;
124 /* freed after init ends here */ 124 /* freed after init ends here */
diff --git a/arch/mn10300/kernel/vmlinux.lds.S b/arch/mn10300/kernel/vmlinux.lds.S
index 968bcd2cb022..6f702a6ab395 100644
--- a/arch/mn10300/kernel/vmlinux.lds.S
+++ b/arch/mn10300/kernel/vmlinux.lds.S
@@ -70,7 +70,7 @@ SECTIONS
70 .exit.text : { EXIT_TEXT; } 70 .exit.text : { EXIT_TEXT; }
71 .exit.data : { EXIT_DATA; } 71 .exit.data : { EXIT_DATA; }
72 72
73 PERCPU(32, PAGE_SIZE) 73 PERCPU_SECTION(32)
74 . = ALIGN(PAGE_SIZE); 74 . = ALIGN(PAGE_SIZE);
75 __init_end = .; 75 __init_end = .;
76 /* freed after init ends here */ 76 /* freed after init ends here */
diff --git a/arch/parisc/kernel/vmlinux.lds.S b/arch/parisc/kernel/vmlinux.lds.S
index e1a55849bfa7..fa6f2b8163e0 100644
--- a/arch/parisc/kernel/vmlinux.lds.S
+++ b/arch/parisc/kernel/vmlinux.lds.S
@@ -149,7 +149,7 @@ SECTIONS
149 EXIT_DATA 149 EXIT_DATA
150 } 150 }
151 151
152 PERCPU(L1_CACHE_BYTES, PAGE_SIZE) 152 PERCPU_SECTION(L1_CACHE_BYTES)
153 . = ALIGN(PAGE_SIZE); 153 . = ALIGN(PAGE_SIZE);
154 __init_end = .; 154 __init_end = .;
155 /* freed after init ends here */ 155 /* freed after init ends here */
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index b9150f07d266..920276c0f6a1 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -160,7 +160,7 @@ SECTIONS
160 INIT_RAM_FS 160 INIT_RAM_FS
161 } 161 }
162 162
163 PERCPU(L1_CACHE_BYTES, PAGE_SIZE) 163 PERCPU_SECTION(L1_CACHE_BYTES)
164 164
165 . = ALIGN(8); 165 . = ALIGN(8);
166 .machine.desc : AT(ADDR(.machine.desc) - LOAD_OFFSET) { 166 .machine.desc : AT(ADDR(.machine.desc) - LOAD_OFFSET) {
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 4a7f14079e03..ff2d2371b2e9 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -230,17 +230,6 @@ config SYSVIPC_COMPAT
230config AUDIT_ARCH 230config AUDIT_ARCH
231 def_bool y 231 def_bool y
232 232
233config S390_EXEC_PROTECT
234 def_bool y
235 prompt "Data execute protection"
236 help
237 This option allows you to enable buffer overflow protection for user
238 space programs, and it also selects the addressing mode option above.
239 The kernel parameter noexec=on will enable this feature and also
240 switch the addressing modes; the default is disabled. Enabling this
241 (via the kernel parameter) on machines earlier than IBM System z9 will
242 reduce system performance.
243
244comment "Code generation options" 233comment "Code generation options"
245 234
246choice 235choice
diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c
index 5c91995b74e4..24bff4f1cc52 100644
--- a/arch/s390/appldata/appldata_base.c
+++ b/arch/s390/appldata/appldata_base.c
@@ -130,9 +130,7 @@ static void appldata_work_fn(struct work_struct *work)
130{ 130{
131 struct list_head *lh; 131 struct list_head *lh;
132 struct appldata_ops *ops; 132 struct appldata_ops *ops;
133 int i;
134 133
135 i = 0;
136 get_online_cpus(); 134 get_online_cpus();
137 mutex_lock(&appldata_ops_mutex); 135 mutex_lock(&appldata_ops_mutex);
138 list_for_each(lh, &appldata_ops_list) { 136 list_for_each(lh, &appldata_ops_list) {
diff --git a/arch/s390/include/asm/cmpxchg.h b/arch/s390/include/asm/cmpxchg.h
index 7488e52efa97..81d7908416cf 100644
--- a/arch/s390/include/asm/cmpxchg.h
+++ b/arch/s390/include/asm/cmpxchg.h
@@ -167,7 +167,6 @@ static inline unsigned long __cmpxchg(void *ptr, unsigned long old,
167#ifdef CONFIG_64BIT 167#ifdef CONFIG_64BIT
168#define cmpxchg64(ptr, o, n) \ 168#define cmpxchg64(ptr, o, n) \
169({ \ 169({ \
170 BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
171 cmpxchg((ptr), (o), (n)); \ 170 cmpxchg((ptr), (o), (n)); \
172}) 171})
173#else /* CONFIG_64BIT */ 172#else /* CONFIG_64BIT */
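
The hunk above drops a BUILD_BUG_ON(sizeof(*(ptr)) != 8) from the 64-bit cmpxchg64(), leaving operand-size dispatch to cmpxchg() itself. For reference, BUILD_BUG_ON() turns a predicate into a compile-time failure rather than a runtime check; a hypothetical use in the same spirit, not taken from this patch:

    #include <linux/bug.h>
    #include <linux/types.h>

    struct wire_hdr {               /* invented example structure */
            u32 magic;
            u32 len;
    };

    static inline void wire_hdr_check(void)
    {
            /* Fails the build if padding ever changes the on-wire size. */
            BUILD_BUG_ON(sizeof(struct wire_hdr) != 8);
    }
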
diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
index 10c029cfcc7d..64b61bf72e93 100644
--- a/arch/s390/include/asm/elf.h
+++ b/arch/s390/include/asm/elf.h
@@ -196,18 +196,6 @@ do { \
196} while (0) 196} while (0)
197#endif /* __s390x__ */ 197#endif /* __s390x__ */
198 198
199/*
200 * An executable for which elf_read_implies_exec() returns TRUE will
201 * have the READ_IMPLIES_EXEC personality flag set automatically.
202 */
203#define elf_read_implies_exec(ex, executable_stack) \
204({ \
205 if (current->mm->context.noexec && \
206 executable_stack != EXSTACK_DISABLE_X) \
207 disable_noexec(current->mm, current); \
208 current->mm->context.noexec == 0; \
209})
210
211#define STACK_RND_MASK 0x7ffUL 199#define STACK_RND_MASK 0x7ffUL
212 200
213#define ARCH_DLINFO \ 201#define ARCH_DLINFO \
diff --git a/arch/s390/include/asm/hugetlb.h b/arch/s390/include/asm/hugetlb.h
index b56403c2df28..799ed0f1643d 100644
--- a/arch/s390/include/asm/hugetlb.h
+++ b/arch/s390/include/asm/hugetlb.h
@@ -111,21 +111,10 @@ static inline void huge_ptep_invalidate(struct mm_struct *mm,
111{ 111{
112 pmd_t *pmdp = (pmd_t *) ptep; 112 pmd_t *pmdp = (pmd_t *) ptep;
113 113
114 if (!MACHINE_HAS_IDTE) { 114 if (MACHINE_HAS_IDTE)
115 __pmd_csp(pmdp);
116 if (mm->context.noexec) {
117 pmdp = get_shadow_table(pmdp);
118 __pmd_csp(pmdp);
119 }
120 return;
121 }
122
123 __pmd_idte(address, pmdp);
124 if (mm->context.noexec) {
125 pmdp = get_shadow_table(pmdp);
126 __pmd_idte(address, pmdp); 115 __pmd_idte(address, pmdp);
127 } 116 else
128 return; 117 __pmd_csp(pmdp);
129} 118}
130 119
131#define huge_ptep_set_access_flags(__vma, __addr, __ptep, __entry, __dirty) \ 120#define huge_ptep_set_access_flags(__vma, __addr, __ptep, __entry, __dirty) \
diff --git a/arch/s390/include/asm/irq.h b/arch/s390/include/asm/irq.h
index db14a311f1d2..1544b90bd6d6 100644
--- a/arch/s390/include/asm/irq.h
+++ b/arch/s390/include/asm/irq.h
@@ -15,6 +15,7 @@ enum interruption_class {
15 EXTINT_VRT, 15 EXTINT_VRT,
16 EXTINT_SCP, 16 EXTINT_SCP,
17 EXTINT_IUC, 17 EXTINT_IUC,
18 EXTINT_CPM,
18 IOINT_QAI, 19 IOINT_QAI,
19 IOINT_QDI, 20 IOINT_QDI,
20 IOINT_DAS, 21 IOINT_DAS,
diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
index 65e172f8209d..228cf0b295db 100644
--- a/arch/s390/include/asm/lowcore.h
+++ b/arch/s390/include/asm/lowcore.h
@@ -124,7 +124,7 @@ struct _lowcore {
124 /* Address space pointer. */ 124 /* Address space pointer. */
125 __u32 kernel_asce; /* 0x02ac */ 125 __u32 kernel_asce; /* 0x02ac */
126 __u32 user_asce; /* 0x02b0 */ 126 __u32 user_asce; /* 0x02b0 */
127 __u32 user_exec_asce; /* 0x02b4 */ 127 __u32 current_pid; /* 0x02b4 */
128 128
129 /* SMP info area */ 129 /* SMP info area */
130 __u32 cpu_nr; /* 0x02b8 */ 130 __u32 cpu_nr; /* 0x02b8 */
@@ -255,7 +255,7 @@ struct _lowcore {
255 /* Address space pointer. */ 255 /* Address space pointer. */
256 __u64 kernel_asce; /* 0x0310 */ 256 __u64 kernel_asce; /* 0x0310 */
257 __u64 user_asce; /* 0x0318 */ 257 __u64 user_asce; /* 0x0318 */
258 __u64 user_exec_asce; /* 0x0320 */ 258 __u64 current_pid; /* 0x0320 */
259 259
260 /* SMP info area */ 260 /* SMP info area */
261 __u32 cpu_nr; /* 0x0328 */ 261 __u32 cpu_nr; /* 0x0328 */
diff --git a/arch/s390/include/asm/mmu.h b/arch/s390/include/asm/mmu.h
index 78522cdefdd4..82d0847896a0 100644
--- a/arch/s390/include/asm/mmu.h
+++ b/arch/s390/include/asm/mmu.h
@@ -5,19 +5,18 @@ typedef struct {
5 atomic_t attach_count; 5 atomic_t attach_count;
6 unsigned int flush_mm; 6 unsigned int flush_mm;
7 spinlock_t list_lock; 7 spinlock_t list_lock;
8 struct list_head crst_list;
9 struct list_head pgtable_list; 8 struct list_head pgtable_list;
10 unsigned long asce_bits; 9 unsigned long asce_bits;
11 unsigned long asce_limit; 10 unsigned long asce_limit;
12 unsigned long vdso_base; 11 unsigned long vdso_base;
13 int noexec; 12 /* Cloned contexts will be created with extended page tables. */
14 int has_pgste; /* The mmu context has extended page tables */ 13 unsigned int alloc_pgste:1;
15 int alloc_pgste; /* cloned contexts will have extended page tables */ 14 /* The mmu context has extended page tables. */
15 unsigned int has_pgste:1;
16} mm_context_t; 16} mm_context_t;
17 17
18#define INIT_MM_CONTEXT(name) \ 18#define INIT_MM_CONTEXT(name) \
19 .context.list_lock = __SPIN_LOCK_UNLOCKED(name.context.list_lock), \ 19 .context.list_lock = __SPIN_LOCK_UNLOCKED(name.context.list_lock), \
20 .context.crst_list = LIST_HEAD_INIT(name.context.crst_list), \
21 .context.pgtable_list = LIST_HEAD_INIT(name.context.pgtable_list), 20 .context.pgtable_list = LIST_HEAD_INIT(name.context.pgtable_list),
22 21
23#endif 22#endif
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index 8c277caa8d3a..5682f160ff82 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -35,11 +35,9 @@ static inline int init_new_context(struct task_struct *tsk,
35 * and if has_pgste is set, it will create extended page 35 * and if has_pgste is set, it will create extended page
36 * tables. 36 * tables.
37 */ 37 */
38 mm->context.noexec = 0;
39 mm->context.has_pgste = 1; 38 mm->context.has_pgste = 1;
40 mm->context.alloc_pgste = 1; 39 mm->context.alloc_pgste = 1;
41 } else { 40 } else {
42 mm->context.noexec = (user_mode == SECONDARY_SPACE_MODE);
43 mm->context.has_pgste = 0; 41 mm->context.has_pgste = 0;
44 mm->context.alloc_pgste = 0; 42 mm->context.alloc_pgste = 0;
45 } 43 }
@@ -63,10 +61,8 @@ static inline void update_mm(struct mm_struct *mm, struct task_struct *tsk)
63 S390_lowcore.user_asce = mm->context.asce_bits | __pa(pgd); 61 S390_lowcore.user_asce = mm->context.asce_bits | __pa(pgd);
64 if (user_mode != HOME_SPACE_MODE) { 62 if (user_mode != HOME_SPACE_MODE) {
65 /* Load primary space page table origin. */ 63 /* Load primary space page table origin. */
66 pgd = mm->context.noexec ? get_shadow_table(pgd) : pgd;
67 S390_lowcore.user_exec_asce = mm->context.asce_bits | __pa(pgd);
68 asm volatile(LCTL_OPCODE" 1,1,%0\n" 64 asm volatile(LCTL_OPCODE" 1,1,%0\n"
69 : : "m" (S390_lowcore.user_exec_asce) ); 65 : : "m" (S390_lowcore.user_asce) );
70 } else 66 } else
71 /* Load home space page table origin. */ 67 /* Load home space page table origin. */
72 asm volatile(LCTL_OPCODE" 13,13,%0" 68 asm volatile(LCTL_OPCODE" 13,13,%0"
diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h
index 3c987e9ec8d6..accb372ddc7e 100644
--- a/arch/s390/include/asm/page.h
+++ b/arch/s390/include/asm/page.h
@@ -90,6 +90,7 @@ static inline void copy_page(void *to, void *from)
90 */ 90 */
91 91
92typedef struct { unsigned long pgprot; } pgprot_t; 92typedef struct { unsigned long pgprot; } pgprot_t;
93typedef struct { unsigned long pgste; } pgste_t;
93typedef struct { unsigned long pte; } pte_t; 94typedef struct { unsigned long pte; } pte_t;
94typedef struct { unsigned long pmd; } pmd_t; 95typedef struct { unsigned long pmd; } pmd_t;
95typedef struct { unsigned long pud; } pud_t; 96typedef struct { unsigned long pud; } pud_t;
@@ -97,18 +98,21 @@ typedef struct { unsigned long pgd; } pgd_t;
97typedef pte_t *pgtable_t; 98typedef pte_t *pgtable_t;
98 99
99#define pgprot_val(x) ((x).pgprot) 100#define pgprot_val(x) ((x).pgprot)
101#define pgste_val(x) ((x).pgste)
100#define pte_val(x) ((x).pte) 102#define pte_val(x) ((x).pte)
101#define pmd_val(x) ((x).pmd) 103#define pmd_val(x) ((x).pmd)
102#define pud_val(x) ((x).pud) 104#define pud_val(x) ((x).pud)
103#define pgd_val(x) ((x).pgd) 105#define pgd_val(x) ((x).pgd)
104 106
107#define __pgste(x) ((pgste_t) { (x) } )
105#define __pte(x) ((pte_t) { (x) } ) 108#define __pte(x) ((pte_t) { (x) } )
106#define __pmd(x) ((pmd_t) { (x) } ) 109#define __pmd(x) ((pmd_t) { (x) } )
110#define __pud(x) ((pud_t) { (x) } )
107#define __pgd(x) ((pgd_t) { (x) } ) 111#define __pgd(x) ((pgd_t) { (x) } )
108#define __pgprot(x) ((pgprot_t) { (x) } ) 112#define __pgprot(x) ((pgprot_t) { (x) } )
109 113
110static inline void 114static inline void page_set_storage_key(unsigned long addr,
111page_set_storage_key(unsigned long addr, unsigned int skey, int mapped) 115 unsigned char skey, int mapped)
112{ 116{
113 if (!mapped) 117 if (!mapped)
114 asm volatile(".insn rrf,0xb22b0000,%0,%1,8,0" 118 asm volatile(".insn rrf,0xb22b0000,%0,%1,8,0"
@@ -117,15 +121,59 @@ page_set_storage_key(unsigned long addr, unsigned int skey, int mapped)
117 asm volatile("sske %0,%1" : : "d" (skey), "a" (addr)); 121 asm volatile("sske %0,%1" : : "d" (skey), "a" (addr));
118} 122}
119 123
120static inline unsigned int 124static inline unsigned char page_get_storage_key(unsigned long addr)
121page_get_storage_key(unsigned long addr)
122{ 125{
123 unsigned int skey; 126 unsigned char skey;
124 127
125 asm volatile("iske %0,%1" : "=d" (skey) : "a" (addr), "0" (0)); 128 asm volatile("iske %0,%1" : "=d" (skey) : "a" (addr));
126 return skey; 129 return skey;
127} 130}
128 131
132static inline int page_reset_referenced(unsigned long addr)
133{
134 unsigned int ipm;
135
136 asm volatile(
137 " rrbe 0,%1\n"
138 " ipm %0\n"
139 : "=d" (ipm) : "a" (addr) : "cc");
140 return !!(ipm & 0x20000000);
141}
142
143/* Bits in the storage key */
144#define _PAGE_CHANGED 0x02 /* HW changed bit */
145#define _PAGE_REFERENCED 0x04 /* HW referenced bit */
146#define _PAGE_FP_BIT 0x08 /* HW fetch protection bit */
147#define _PAGE_ACC_BITS 0xf0 /* HW access control bits */
148
149/*
150 * Test and clear dirty bit in storage key.
151 * We can't clear the changed bit atomically. This is a potential
152 * race against modification of the referenced bit. This function
153 * should therefore only be called if the page is not mapped in any
154 * address space.
155 */
156#define __HAVE_ARCH_PAGE_TEST_AND_CLEAR_DIRTY
157static inline int page_test_and_clear_dirty(unsigned long pfn, int mapped)
158{
159 unsigned char skey;
160
161 skey = page_get_storage_key(pfn << PAGE_SHIFT);
162 if (!(skey & _PAGE_CHANGED))
163 return 0;
164 page_set_storage_key(pfn << PAGE_SHIFT, skey & ~_PAGE_CHANGED, mapped);
165 return 1;
166}
167
168/*
169 * Test and clear referenced bit in storage key.
170 */
171#define __HAVE_ARCH_PAGE_TEST_AND_CLEAR_YOUNG
172static inline int page_test_and_clear_young(unsigned long pfn)
173{
174 return page_reset_referenced(pfn << PAGE_SHIFT);
175}
176
129struct page; 177struct page;
130void arch_free_page(struct page *page, int order); 178void arch_free_page(struct page *page, int order);
131void arch_alloc_page(struct page *page, int order); 179void arch_alloc_page(struct page *page, int order);
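The helpers moved into this header encode the s390 convention that the
dirty and referenced state lives in the storage key rather than in the
pte. One detail worth spelling out is the condition-code test in
page_reset_referenced(): IPM deposits the condition code in bits 28-29
of the result register, and RRBE sets CC 2 or 3 exactly when the
reference bit was one before being reset (illustrative helper, not
part of the patch):

    /* Extract the condition code after "ipm": 0x30000000 covers the
     * two CC bits, so (ipm & 0x20000000) != 0 means CC is 2 or 3,
     * i.e. the page had been referenced. */
    static inline unsigned int cc_from_ipm(unsigned int ipm)
    {
            return (ipm & 0x30000000u) >> 28;
    }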
diff --git a/arch/s390/include/asm/percpu.h b/arch/s390/include/asm/percpu.h
index f7ad8719d02d..5325c89a5843 100644
--- a/arch/s390/include/asm/percpu.h
+++ b/arch/s390/include/asm/percpu.h
@@ -1,6 +1,9 @@
1#ifndef __ARCH_S390_PERCPU__ 1#ifndef __ARCH_S390_PERCPU__
2#define __ARCH_S390_PERCPU__ 2#define __ARCH_S390_PERCPU__
3 3
4#include <linux/preempt.h>
5#include <asm/cmpxchg.h>
6
4/* 7/*
5 * s390 uses its own implementation for per cpu data, the offset of 8 * s390 uses its own implementation for per cpu data, the offset of
6 * the cpu local data area is cached in the cpu's lowcore memory. 9 * the cpu local data area is cached in the cpu's lowcore memory.
@@ -16,6 +19,71 @@
16#define ARCH_NEEDS_WEAK_PER_CPU 19#define ARCH_NEEDS_WEAK_PER_CPU
17#endif 20#endif
18 21
22#define arch_irqsafe_cpu_to_op(pcp, val, op) \
23do { \
24 typedef typeof(pcp) pcp_op_T__; \
25 pcp_op_T__ old__, new__, prev__; \
26 pcp_op_T__ *ptr__; \
27 preempt_disable(); \
28 ptr__ = __this_cpu_ptr(&(pcp)); \
29 prev__ = *ptr__; \
30 do { \
31 old__ = prev__; \
32 new__ = old__ op (val); \
33 switch (sizeof(*ptr__)) { \
34 case 8: \
35 prev__ = cmpxchg64(ptr__, old__, new__); \
36 break; \
37 default: \
38 prev__ = cmpxchg(ptr__, old__, new__); \
39 } \
40 } while (prev__ != old__); \
41 preempt_enable(); \
42} while (0)
43
44#define irqsafe_cpu_add_1(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, +)
45#define irqsafe_cpu_add_2(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, +)
46#define irqsafe_cpu_add_4(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, +)
47#define irqsafe_cpu_add_8(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, +)
48
49#define irqsafe_cpu_and_1(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, &)
50#define irqsafe_cpu_and_2(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, &)
51#define irqsafe_cpu_and_4(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, &)
52#define irqsafe_cpu_and_8(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, &)
53
54#define irqsafe_cpu_or_1(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, |)
55#define irqsafe_cpu_or_2(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, |)
56#define irqsafe_cpu_or_4(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, |)
57#define irqsafe_cpu_or_8(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, |)
58
59#define irqsafe_cpu_xor_1(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, ^)
60#define irqsafe_cpu_xor_2(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, ^)
61#define irqsafe_cpu_xor_4(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, ^)
62#define irqsafe_cpu_xor_8(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, ^)
63
64#define arch_irqsafe_cpu_cmpxchg(pcp, oval, nval) \
65({ \
66 typedef typeof(pcp) pcp_op_T__; \
67 pcp_op_T__ ret__; \
68 pcp_op_T__ *ptr__; \
69 preempt_disable(); \
70 ptr__ = __this_cpu_ptr(&(pcp)); \
71 switch (sizeof(*ptr__)) { \
72 case 8: \
73 ret__ = cmpxchg64(ptr__, oval, nval); \
74 break; \
75 default: \
76 ret__ = cmpxchg(ptr__, oval, nval); \
77 } \
78 preempt_enable(); \
79 ret__; \
80})
81
82#define irqsafe_cpu_cmpxchg_1(pcp, oval, nval) arch_irqsafe_cpu_cmpxchg(pcp, oval, nval)
83#define irqsafe_cpu_cmpxchg_2(pcp, oval, nval) arch_irqsafe_cpu_cmpxchg(pcp, oval, nval)
84#define irqsafe_cpu_cmpxchg_4(pcp, oval, nval) arch_irqsafe_cpu_cmpxchg(pcp, oval, nval)
85#define irqsafe_cpu_cmpxchg_8(pcp, oval, nval) arch_irqsafe_cpu_cmpxchg(pcp, oval, nval)
86
19#include <asm-generic/percpu.h> 87#include <asm-generic/percpu.h>
20 88
21#endif /* __ARCH_S390_PERCPU__ */ 89#endif /* __ARCH_S390_PERCPU__ */
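arch_irqsafe_cpu_to_op() above is a standard compare-and-swap retry
loop. The same pattern in plain C, with GCC's __sync builtin standing
in for the kernel's cmpxchg (a sketch, not the kernel's code):

    /* Atomically apply "*ptr += val": reload and retry whenever
     * another CPU changed the value between our read and our swap. */
    static inline void cas_add(unsigned long *ptr, unsigned long val)
    {
            unsigned long old, prev = *ptr;

            do {
                    old = prev;
                    prev = __sync_val_compare_and_swap(ptr, old, old + val);
            } while (prev != old);
    }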
diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h
index 082eb4e50e8b..f6314af3b354 100644
--- a/arch/s390/include/asm/pgalloc.h
+++ b/arch/s390/include/asm/pgalloc.h
@@ -19,14 +19,13 @@
19 19
20#define check_pgt_cache() do {} while (0) 20#define check_pgt_cache() do {} while (0)
21 21
22unsigned long *crst_table_alloc(struct mm_struct *, int); 22unsigned long *crst_table_alloc(struct mm_struct *);
23void crst_table_free(struct mm_struct *, unsigned long *); 23void crst_table_free(struct mm_struct *, unsigned long *);
24void crst_table_free_rcu(struct mm_struct *, unsigned long *); 24void crst_table_free_rcu(struct mm_struct *, unsigned long *);
25 25
26unsigned long *page_table_alloc(struct mm_struct *); 26unsigned long *page_table_alloc(struct mm_struct *);
27void page_table_free(struct mm_struct *, unsigned long *); 27void page_table_free(struct mm_struct *, unsigned long *);
28void page_table_free_rcu(struct mm_struct *, unsigned long *); 28void page_table_free_rcu(struct mm_struct *, unsigned long *);
29void disable_noexec(struct mm_struct *, struct task_struct *);
30 29
31static inline void clear_table(unsigned long *s, unsigned long val, size_t n) 30static inline void clear_table(unsigned long *s, unsigned long val, size_t n)
32{ 31{
@@ -50,9 +49,6 @@ static inline void clear_table(unsigned long *s, unsigned long val, size_t n)
50static inline void crst_table_init(unsigned long *crst, unsigned long entry) 49static inline void crst_table_init(unsigned long *crst, unsigned long entry)
51{ 50{
52 clear_table(crst, entry, sizeof(unsigned long)*2048); 51 clear_table(crst, entry, sizeof(unsigned long)*2048);
53 crst = get_shadow_table(crst);
54 if (crst)
55 clear_table(crst, entry, sizeof(unsigned long)*2048);
56} 52}
57 53
58#ifndef __s390x__ 54#ifndef __s390x__
@@ -69,10 +65,7 @@ static inline unsigned long pgd_entry_type(struct mm_struct *mm)
69#define pmd_free(mm, x) do { } while (0) 65#define pmd_free(mm, x) do { } while (0)
70 66
71#define pgd_populate(mm, pgd, pud) BUG() 67#define pgd_populate(mm, pgd, pud) BUG()
72#define pgd_populate_kernel(mm, pgd, pud) BUG()
73
74#define pud_populate(mm, pud, pmd) BUG() 68#define pud_populate(mm, pud, pmd) BUG()
75#define pud_populate_kernel(mm, pud, pmd) BUG()
76 69
77#else /* __s390x__ */ 70#else /* __s390x__ */
78 71
@@ -90,7 +83,7 @@ void crst_table_downgrade(struct mm_struct *, unsigned long limit);
90 83
91static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address) 84static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address)
92{ 85{
93 unsigned long *table = crst_table_alloc(mm, mm->context.noexec); 86 unsigned long *table = crst_table_alloc(mm);
94 if (table) 87 if (table)
95 crst_table_init(table, _REGION3_ENTRY_EMPTY); 88 crst_table_init(table, _REGION3_ENTRY_EMPTY);
96 return (pud_t *) table; 89 return (pud_t *) table;
@@ -99,43 +92,21 @@ static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address)
99 92
100static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr) 93static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
101{ 94{
102 unsigned long *table = crst_table_alloc(mm, mm->context.noexec); 95 unsigned long *table = crst_table_alloc(mm);
103 if (table) 96 if (table)
104 crst_table_init(table, _SEGMENT_ENTRY_EMPTY); 97 crst_table_init(table, _SEGMENT_ENTRY_EMPTY);
105 return (pmd_t *) table; 98 return (pmd_t *) table;
106} 99}
107#define pmd_free(mm, pmd) crst_table_free(mm, (unsigned long *) pmd) 100#define pmd_free(mm, pmd) crst_table_free(mm, (unsigned long *) pmd)
108 101
109static inline void pgd_populate_kernel(struct mm_struct *mm,
110 pgd_t *pgd, pud_t *pud)
111{
112 pgd_val(*pgd) = _REGION2_ENTRY | __pa(pud);
113}
114
115static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud) 102static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
116{ 103{
117 pgd_populate_kernel(mm, pgd, pud); 104 pgd_val(*pgd) = _REGION2_ENTRY | __pa(pud);
118 if (mm->context.noexec) {
119 pgd = get_shadow_table(pgd);
120 pud = get_shadow_table(pud);
121 pgd_populate_kernel(mm, pgd, pud);
122 }
123}
124
125static inline void pud_populate_kernel(struct mm_struct *mm,
126 pud_t *pud, pmd_t *pmd)
127{
128 pud_val(*pud) = _REGION3_ENTRY | __pa(pmd);
129} 105}
130 106
131static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) 107static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
132{ 108{
133 pud_populate_kernel(mm, pud, pmd); 109 pud_val(*pud) = _REGION3_ENTRY | __pa(pmd);
134 if (mm->context.noexec) {
135 pud = get_shadow_table(pud);
136 pmd = get_shadow_table(pmd);
137 pud_populate_kernel(mm, pud, pmd);
138 }
139} 110}
140 111
141#endif /* __s390x__ */ 112#endif /* __s390x__ */
@@ -143,29 +114,19 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
143static inline pgd_t *pgd_alloc(struct mm_struct *mm) 114static inline pgd_t *pgd_alloc(struct mm_struct *mm)
144{ 115{
145 spin_lock_init(&mm->context.list_lock); 116 spin_lock_init(&mm->context.list_lock);
146 INIT_LIST_HEAD(&mm->context.crst_list);
147 INIT_LIST_HEAD(&mm->context.pgtable_list); 117 INIT_LIST_HEAD(&mm->context.pgtable_list);
148 return (pgd_t *) 118 return (pgd_t *) crst_table_alloc(mm);
149 crst_table_alloc(mm, user_mode == SECONDARY_SPACE_MODE);
150} 119}
151#define pgd_free(mm, pgd) crst_table_free(mm, (unsigned long *) pgd) 120#define pgd_free(mm, pgd) crst_table_free(mm, (unsigned long *) pgd)
152 121
153static inline void pmd_populate_kernel(struct mm_struct *mm,
154 pmd_t *pmd, pte_t *pte)
155{
156 pmd_val(*pmd) = _SEGMENT_ENTRY + __pa(pte);
157}
158
159static inline void pmd_populate(struct mm_struct *mm, 122static inline void pmd_populate(struct mm_struct *mm,
160 pmd_t *pmd, pgtable_t pte) 123 pmd_t *pmd, pgtable_t pte)
161{ 124{
162 pmd_populate_kernel(mm, pmd, pte); 125 pmd_val(*pmd) = _SEGMENT_ENTRY + __pa(pte);
163 if (mm->context.noexec) {
164 pmd = get_shadow_table(pmd);
165 pmd_populate_kernel(mm, pmd, pte + PTRS_PER_PTE);
166 }
167} 126}
168 127
128#define pmd_populate_kernel(mm, pmd, pte) pmd_populate(mm, pmd, pte)
129
169#define pmd_pgtable(pmd) \ 130#define pmd_pgtable(pmd) \
170 (pgtable_t)(pmd_val(pmd) & -sizeof(pte_t)*PTRS_PER_PTE) 131 (pgtable_t)(pmd_val(pmd) & -sizeof(pte_t)*PTRS_PER_PTE)
171 132
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 02ace3491c51..c4773a2ef3d3 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -31,9 +31,8 @@
31#ifndef __ASSEMBLY__ 31#ifndef __ASSEMBLY__
32#include <linux/sched.h> 32#include <linux/sched.h>
33#include <linux/mm_types.h> 33#include <linux/mm_types.h>
34#include <asm/bitops.h>
35#include <asm/bug.h> 34#include <asm/bug.h>
36#include <asm/processor.h> 35#include <asm/page.h>
37 36
38extern pgd_t swapper_pg_dir[] __attribute__ ((aligned (4096))); 37extern pgd_t swapper_pg_dir[] __attribute__ ((aligned (4096)));
39extern void paging_init(void); 38extern void paging_init(void);
@@ -243,11 +242,13 @@ extern unsigned long VMALLOC_START;
243/* Software bits in the page table entry */ 242/* Software bits in the page table entry */
244#define _PAGE_SWT 0x001 /* SW pte type bit t */ 243#define _PAGE_SWT 0x001 /* SW pte type bit t */
245#define _PAGE_SWX 0x002 /* SW pte type bit x */ 244#define _PAGE_SWX 0x002 /* SW pte type bit x */
246#define _PAGE_SPECIAL 0x004 /* SW associated with special page */ 245#define _PAGE_SWC 0x004 /* SW pte changed bit (for KVM) */
246#define _PAGE_SWR 0x008 /* SW pte referenced bit (for KVM) */
247#define _PAGE_SPECIAL 0x010 /* SW associated with special page */
247#define __HAVE_ARCH_PTE_SPECIAL 248#define __HAVE_ARCH_PTE_SPECIAL
248 249
249/* Set of bits not changed in pte_modify */ 250/* Set of bits not changed in pte_modify */
250#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_SPECIAL) 251#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_SPECIAL | _PAGE_SWC | _PAGE_SWR)
251 252
252/* Six different types of pages. */ 253/* Six different types of pages. */
253#define _PAGE_TYPE_EMPTY 0x400 254#define _PAGE_TYPE_EMPTY 0x400
@@ -256,8 +257,6 @@ extern unsigned long VMALLOC_START;
256#define _PAGE_TYPE_FILE 0x601 /* bit 0x002 is used for offset !! */ 257#define _PAGE_TYPE_FILE 0x601 /* bit 0x002 is used for offset !! */
257#define _PAGE_TYPE_RO 0x200 258#define _PAGE_TYPE_RO 0x200
258#define _PAGE_TYPE_RW 0x000 259#define _PAGE_TYPE_RW 0x000
259#define _PAGE_TYPE_EX_RO 0x202
260#define _PAGE_TYPE_EX_RW 0x002
261 260
262/* 261/*
263 * Only four types for huge pages, using the invalid bit and protection bit 262 * Only four types for huge pages, using the invalid bit and protection bit
@@ -287,8 +286,6 @@ extern unsigned long VMALLOC_START;
287 * _PAGE_TYPE_FILE 11?1 -> 11?1 286 * _PAGE_TYPE_FILE 11?1 -> 11?1
288 * _PAGE_TYPE_RO 0100 -> 1100 287 * _PAGE_TYPE_RO 0100 -> 1100
289 * _PAGE_TYPE_RW 0000 -> 1000 288 * _PAGE_TYPE_RW 0000 -> 1000
290 * _PAGE_TYPE_EX_RO 0110 -> 1110
291 * _PAGE_TYPE_EX_RW 0010 -> 1010
292 * 289 *
293 * pte_none is true for bits combinations 1000, 1010, 1100, 1110 290 * pte_none is true for bits combinations 1000, 1010, 1100, 1110
294 * pte_present is true for bits combinations 0000, 0010, 0100, 0110, 1001 291 * pte_present is true for bits combinations 0000, 0010, 0100, 0110, 1001
@@ -297,14 +294,17 @@ extern unsigned long VMALLOC_START;
297 */ 294 */
298 295
299/* Page status table bits for virtualization */ 296/* Page status table bits for virtualization */
300#define RCP_PCL_BIT 55 297#define RCP_ACC_BITS 0xf000000000000000UL
301#define RCP_HR_BIT 54 298#define RCP_FP_BIT 0x0800000000000000UL
302#define RCP_HC_BIT 53 299#define RCP_PCL_BIT 0x0080000000000000UL
303#define RCP_GR_BIT 50 300#define RCP_HR_BIT 0x0040000000000000UL
304#define RCP_GC_BIT 49 301#define RCP_HC_BIT 0x0020000000000000UL
305 302#define RCP_GR_BIT 0x0004000000000000UL
306/* User dirty bit for KVM's migration feature */ 303#define RCP_GC_BIT 0x0002000000000000UL
307#define KVM_UD_BIT 47 304
305/* User dirty / referenced bit for KVM's migration feature */
306#define KVM_UR_BIT 0x0000800000000000UL
307#define KVM_UC_BIT 0x0000400000000000UL
308 308
309#ifndef __s390x__ 309#ifndef __s390x__
310 310
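Note that the new RCP_*/KVM_* definitions are simply the old bit
numbers rewritten as single-bit masks (1UL << n), which lets the new
code manipulate the whole 64-bit pgste with compare-and-swap instead
of per-bit set_bit_simple() primitives. Spot checks (C11 asserts,
illustrative only):

    _Static_assert(1UL << 55 == 0x0080000000000000UL, "RCP_PCL_BIT");
    _Static_assert(1UL << 50 == 0x0004000000000000UL, "RCP_GR_BIT");
    _Static_assert(1UL << 47 == 0x0000800000000000UL, "KVM_UD_BIT -> KVM_UR_BIT");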
@@ -377,85 +377,54 @@ extern unsigned long VMALLOC_START;
377#define _ASCE_USER_BITS (_ASCE_SPACE_SWITCH | _ASCE_PRIVATE_SPACE | \ 377#define _ASCE_USER_BITS (_ASCE_SPACE_SWITCH | _ASCE_PRIVATE_SPACE | \
378 _ASCE_ALT_EVENT) 378 _ASCE_ALT_EVENT)
379 379
380/* Bits in the storage key */
381#define _PAGE_CHANGED 0x02 /* HW changed bit */
382#define _PAGE_REFERENCED 0x04 /* HW referenced bit */
383
384/* 380/*
385 * Page protection definitions. 381 * Page protection definitions.
386 */ 382 */
387#define PAGE_NONE __pgprot(_PAGE_TYPE_NONE) 383#define PAGE_NONE __pgprot(_PAGE_TYPE_NONE)
388#define PAGE_RO __pgprot(_PAGE_TYPE_RO) 384#define PAGE_RO __pgprot(_PAGE_TYPE_RO)
389#define PAGE_RW __pgprot(_PAGE_TYPE_RW) 385#define PAGE_RW __pgprot(_PAGE_TYPE_RW)
390#define PAGE_EX_RO __pgprot(_PAGE_TYPE_EX_RO)
391#define PAGE_EX_RW __pgprot(_PAGE_TYPE_EX_RW)
392 386
393#define PAGE_KERNEL PAGE_RW 387#define PAGE_KERNEL PAGE_RW
394#define PAGE_COPY PAGE_RO 388#define PAGE_COPY PAGE_RO
395 389
396/* 390/*
397 * Dependent on the EXEC_PROTECT option s390 can do execute protection. 391 * On s390 the page table entry has an invalid bit and a read-only bit.
398 * Write permission always implies read permission. In theory with a 392 * Read permission implies execute permission and write permission
399 * primary/secondary page table execute only can be implemented but 393 * implies read permission.
400 * it would cost an additional bit in the pte to distinguish all the
401 * different pte types. To avoid that execute permission currently
402 * implies read permission as well.
403 */ 394 */
404 /*xwr*/ 395 /*xwr*/
405#define __P000 PAGE_NONE 396#define __P000 PAGE_NONE
406#define __P001 PAGE_RO 397#define __P001 PAGE_RO
407#define __P010 PAGE_RO 398#define __P010 PAGE_RO
408#define __P011 PAGE_RO 399#define __P011 PAGE_RO
409#define __P100 PAGE_EX_RO 400#define __P100 PAGE_RO
410#define __P101 PAGE_EX_RO 401#define __P101 PAGE_RO
411#define __P110 PAGE_EX_RO 402#define __P110 PAGE_RO
412#define __P111 PAGE_EX_RO 403#define __P111 PAGE_RO
413 404
414#define __S000 PAGE_NONE 405#define __S000 PAGE_NONE
415#define __S001 PAGE_RO 406#define __S001 PAGE_RO
416#define __S010 PAGE_RW 407#define __S010 PAGE_RW
417#define __S011 PAGE_RW 408#define __S011 PAGE_RW
418#define __S100 PAGE_EX_RO 409#define __S100 PAGE_RO
419#define __S101 PAGE_EX_RO 410#define __S101 PAGE_RO
420#define __S110 PAGE_EX_RW 411#define __S110 PAGE_RW
421#define __S111 PAGE_EX_RW 412#define __S111 PAGE_RW
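With PAGE_EX_RO/PAGE_EX_RW gone, every execute request in the
protection tables above now degrades to plain read protection. The
generic mm code picks these values up through protection_map[] (cf.
vm_get_page_prot() in mm/mmap.c); a sketch of the lookup:

    /* A private r-x mapping indexes __P101 and now resolves to
     * PAGE_RO instead of the removed PAGE_EX_RO. */
    static pgprot_t lookup_prot(unsigned long vm_flags)
    {
            return protection_map[vm_flags &
                    (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)];
    }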
422
423#ifndef __s390x__
424# define PxD_SHADOW_SHIFT 1
425#else /* __s390x__ */
426# define PxD_SHADOW_SHIFT 2
427#endif /* __s390x__ */
428 413
429static inline void *get_shadow_table(void *table) 414static inline int mm_exclusive(struct mm_struct *mm)
430{ 415{
431 unsigned long addr, offset; 416 return likely(mm == current->active_mm &&
432 struct page *page; 417 atomic_read(&mm->context.attach_count) <= 1);
433
434 addr = (unsigned long) table;
435 offset = addr & ((PAGE_SIZE << PxD_SHADOW_SHIFT) - 1);
436 page = virt_to_page((void *)(addr ^ offset));
437 return (void *)(addr_t)(page->index ? (page->index | offset) : 0UL);
438} 418}
439 419
440/* 420static inline int mm_has_pgste(struct mm_struct *mm)
441 * Certain architectures need to do special things when PTEs
442 * within a page table are directly modified. Thus, the following
443 * hook is made available.
444 */
445static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
446 pte_t *ptep, pte_t entry)
447{ 421{
448 *ptep = entry; 422#ifdef CONFIG_PGSTE
449 if (mm->context.noexec) { 423 if (unlikely(mm->context.has_pgste))
450 if (!(pte_val(entry) & _PAGE_INVALID) && 424 return 1;
451 (pte_val(entry) & _PAGE_SWX)) 425#endif
452 pte_val(entry) |= _PAGE_RO; 426 return 0;
453 else
454 pte_val(entry) = _PAGE_TYPE_EMPTY;
455 ptep[PTRS_PER_PTE] = entry;
456 }
457} 427}
458
459/* 428/*
460 * pgd/pmd/pte query functions 429 * pgd/pmd/pte query functions
461 */ 430 */
@@ -568,52 +537,127 @@ static inline int pte_special(pte_t pte)
568} 537}
569 538
570#define __HAVE_ARCH_PTE_SAME 539#define __HAVE_ARCH_PTE_SAME
571#define pte_same(a,b) (pte_val(a) == pte_val(b)) 540static inline int pte_same(pte_t a, pte_t b)
541{
542 return pte_val(a) == pte_val(b);
543}
572 544
573static inline void rcp_lock(pte_t *ptep) 545static inline pgste_t pgste_get_lock(pte_t *ptep)
574{ 546{
547 unsigned long new = 0;
575#ifdef CONFIG_PGSTE 548#ifdef CONFIG_PGSTE
576 unsigned long *pgste = (unsigned long *) (ptep + PTRS_PER_PTE); 549 unsigned long old;
550
577 preempt_disable(); 551 preempt_disable();
578 while (test_and_set_bit(RCP_PCL_BIT, pgste)) 552 asm(
579 ; 553 " lg %0,%2\n"
554 "0: lgr %1,%0\n"
555 " nihh %0,0xff7f\n" /* clear RCP_PCL_BIT in old */
556 " oihh %1,0x0080\n" /* set RCP_PCL_BIT in new */
557 " csg %0,%1,%2\n"
558 " jl 0b\n"
559 : "=&d" (old), "=&d" (new), "=Q" (ptep[PTRS_PER_PTE])
560 : "Q" (ptep[PTRS_PER_PTE]) : "cc");
580#endif 561#endif
562 return __pgste(new);
581} 563}
582 564
583static inline void rcp_unlock(pte_t *ptep) 565static inline void pgste_set_unlock(pte_t *ptep, pgste_t pgste)
584{ 566{
585#ifdef CONFIG_PGSTE 567#ifdef CONFIG_PGSTE
586 unsigned long *pgste = (unsigned long *) (ptep + PTRS_PER_PTE); 568 asm(
587 clear_bit(RCP_PCL_BIT, pgste); 569 " nihh %1,0xff7f\n" /* clear RCP_PCL_BIT */
570 " stg %1,%0\n"
571 : "=Q" (ptep[PTRS_PER_PTE])
572 : "d" (pgste_val(pgste)), "Q" (ptep[PTRS_PER_PTE]) : "cc");
588 preempt_enable(); 573 preempt_enable();
589#endif 574#endif
590} 575}
591 576
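The csg loop in pgste_get_lock() implements a bit spinlock over the
64-bit pgste entry (preemption stays disabled while it is held). The
same logic in C, with a generic compare-and-swap standing in for csg
(a sketch):

    /* Spin until RCP_PCL_BIT could be set: expect it clear in the
     * old value, set it in the new one, retry if another CPU raced. */
    static inline unsigned long pgste_lock_c(unsigned long *entry)
    {
            unsigned long old, new;

            do {
                    old = *entry & ~RCP_PCL_BIT;    /* expected: unlocked */
                    new = old | RCP_PCL_BIT;        /* desired: locked */
            } while (__sync_val_compare_and_swap(entry, old, new) != old);
            return new;
    }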
592/* forward declaration for SetPageUptodate in page-flags.h*/ 577static inline pgste_t pgste_update_all(pte_t *ptep, pgste_t pgste)
593static inline void page_clear_dirty(struct page *page, int mapped);
594#include <linux/page-flags.h>
595
596static inline void ptep_rcp_copy(pte_t *ptep)
597{ 578{
598#ifdef CONFIG_PGSTE 579#ifdef CONFIG_PGSTE
599 struct page *page = virt_to_page(pte_val(*ptep)); 580 unsigned long pfn, bits;
600 unsigned int skey; 581 unsigned char skey;
601 unsigned long *pgste = (unsigned long *) (ptep + PTRS_PER_PTE); 582
602 583 pfn = pte_val(*ptep) >> PAGE_SHIFT;
603 skey = page_get_storage_key(page_to_phys(page)); 584 skey = page_get_storage_key(pfn);
604 if (skey & _PAGE_CHANGED) { 585 bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
605 set_bit_simple(RCP_GC_BIT, pgste); 586 /* Clear page changed & referenced bit in the storage key */
606 set_bit_simple(KVM_UD_BIT, pgste); 587 if (bits) {
588 skey ^= bits;
589 page_set_storage_key(pfn, skey, 1);
607 } 590 }
608 if (skey & _PAGE_REFERENCED) 591 /* Transfer page changed & referenced bit to guest bits in pgste */
609 set_bit_simple(RCP_GR_BIT, pgste); 592 pgste_val(pgste) |= bits << 48; /* RCP_GR_BIT & RCP_GC_BIT */
610 if (test_and_clear_bit_simple(RCP_HC_BIT, pgste)) { 593 /* Get host changed & referenced bits from pgste */
611 SetPageDirty(page); 594 bits |= (pgste_val(pgste) & (RCP_HR_BIT | RCP_HC_BIT)) >> 52;
612 set_bit_simple(KVM_UD_BIT, pgste); 595 /* Clear host bits in pgste. */
613 } 596 pgste_val(pgste) &= ~(RCP_HR_BIT | RCP_HC_BIT);
614 if (test_and_clear_bit_simple(RCP_HR_BIT, pgste)) 597 pgste_val(pgste) &= ~(RCP_ACC_BITS | RCP_FP_BIT);
615 SetPageReferenced(page); 598 /* Copy page access key and fetch protection bit to pgste */
599 pgste_val(pgste) |=
600 (unsigned long) (skey & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
601 /* Transfer changed and referenced to kvm user bits */
602 pgste_val(pgste) |= bits << 45; /* KVM_UR_BIT & KVM_UC_BIT */
603 /* Transfer changed & referenced to pte software bits */
604 pte_val(*ptep) |= bits << 1; /* _PAGE_SWR & _PAGE_SWC */
616#endif 605#endif
606 return pgste;
607
608}
609
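The shift constants in pgste_update_all() line up exactly with the bit
definitions: the hardware changed/referenced key bits (0x02/0x04)
shift by 48 onto the guest bits, by 45 onto the KVM user bits, and by
1 onto the pte software bits, while the host bits shift down by 52
back into storage-key form. Compile-time spot checks (illustrative):

    _Static_assert((unsigned long)_PAGE_CHANGED    << 48 == RCP_GC_BIT, "");
    _Static_assert((unsigned long)_PAGE_REFERENCED << 48 == RCP_GR_BIT, "");
    _Static_assert((unsigned long)_PAGE_CHANGED    << 45 == KVM_UC_BIT, "");
    _Static_assert((unsigned long)_PAGE_REFERENCED << 45 == KVM_UR_BIT, "");
    _Static_assert(_PAGE_CHANGED    << 1 == _PAGE_SWC, "");
    _Static_assert(_PAGE_REFERENCED << 1 == _PAGE_SWR, "");
    _Static_assert(RCP_HC_BIT >> 52 == _PAGE_CHANGED, "");
    _Static_assert(RCP_HR_BIT >> 52 == _PAGE_REFERENCED, "");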
610static inline pgste_t pgste_update_young(pte_t *ptep, pgste_t pgste)
611{
612#ifdef CONFIG_PGSTE
613 int young;
614
615 young = page_reset_referenced(pte_val(*ptep) & PAGE_MASK);
616 /* Transfer page referenced bit to pte software bit (host view) */
617 if (young || (pgste_val(pgste) & RCP_HR_BIT))
618 pte_val(*ptep) |= _PAGE_SWR;
619 /* Clear host referenced bit in pgste. */
620 pgste_val(pgste) &= ~RCP_HR_BIT;
621 /* Transfer page referenced bit to guest bit in pgste */
622 pgste_val(pgste) |= (unsigned long) young << 50; /* set RCP_GR_BIT */
623#endif
624 return pgste;
625
626}
627
628static inline void pgste_set_pte(pte_t *ptep, pgste_t pgste)
629{
630#ifdef CONFIG_PGSTE
631 unsigned long pfn;
632 unsigned long okey, nkey;
633
634 pfn = pte_val(*ptep) >> PAGE_SHIFT;
635 okey = nkey = page_get_storage_key(pfn);
636 nkey &= ~(_PAGE_ACC_BITS | _PAGE_FP_BIT);
637 /* Set page access key and fetch protection bit from pgste */
638 nkey |= (pgste_val(pgste) & (RCP_ACC_BITS | RCP_FP_BIT)) >> 56;
639 if (okey != nkey)
640 page_set_storage_key(pfn, nkey, 1);
641#endif
642}
643
644/*
645 * Certain architectures need to do special things when PTEs
646 * within a page table are directly modified. Thus, the following
647 * hook is made available.
648 */
649static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
650 pte_t *ptep, pte_t entry)
651{
652 pgste_t pgste;
653
654 if (mm_has_pgste(mm)) {
655 pgste = pgste_get_lock(ptep);
656 pgste_set_pte(ptep, pgste);
657 *ptep = entry;
658 pgste_set_unlock(ptep, pgste);
659 } else
660 *ptep = entry;
617} 661}
618 662
619/* 663/*
@@ -627,19 +671,19 @@ static inline int pte_write(pte_t pte)
627 671
628static inline int pte_dirty(pte_t pte) 672static inline int pte_dirty(pte_t pte)
629{ 673{
630 /* A pte is neither clean nor dirty on s/390. The dirty bit 674#ifdef CONFIG_PGSTE
631 * is in the storage key. See page_test_and_clear_dirty for 675 if (pte_val(pte) & _PAGE_SWC)
632 * details. 676 return 1;
633 */ 677#endif
634 return 0; 678 return 0;
635} 679}
636 680
637static inline int pte_young(pte_t pte) 681static inline int pte_young(pte_t pte)
638{ 682{
639 /* A pte is neither young nor old on s/390. The young bit 683#ifdef CONFIG_PGSTE
640 * is in the storage key. See page_test_and_clear_young for 684 if (pte_val(pte) & _PAGE_SWR)
641 * details. 685 return 1;
642 */ 686#endif
643 return 0; 687 return 0;
644} 688}
645 689
@@ -647,64 +691,30 @@ static inline int pte_young(pte_t pte)
647 * pgd/pmd/pte modification functions 691 * pgd/pmd/pte modification functions
648 */ 692 */
649 693
650#ifndef __s390x__ 694static inline void pgd_clear(pgd_t *pgd)
651
652#define pgd_clear(pgd) do { } while (0)
653#define pud_clear(pud) do { } while (0)
654
655#else /* __s390x__ */
656
657static inline void pgd_clear_kernel(pgd_t * pgd)
658{ 695{
696#ifdef __s390x__
659 if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2) 697 if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
660 pgd_val(*pgd) = _REGION2_ENTRY_EMPTY; 698 pgd_val(*pgd) = _REGION2_ENTRY_EMPTY;
699#endif
661} 700}
662 701
663static inline void pgd_clear(pgd_t * pgd) 702static inline void pud_clear(pud_t *pud)
664{
665 pgd_t *shadow = get_shadow_table(pgd);
666
667 pgd_clear_kernel(pgd);
668 if (shadow)
669 pgd_clear_kernel(shadow);
670}
671
672static inline void pud_clear_kernel(pud_t *pud)
673{ 703{
704#ifdef __s390x__
674 if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3) 705 if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
675 pud_val(*pud) = _REGION3_ENTRY_EMPTY; 706 pud_val(*pud) = _REGION3_ENTRY_EMPTY;
707#endif
676} 708}
677 709
678static inline void pud_clear(pud_t *pud) 710static inline void pmd_clear(pmd_t *pmdp)
679{
680 pud_t *shadow = get_shadow_table(pud);
681
682 pud_clear_kernel(pud);
683 if (shadow)
684 pud_clear_kernel(shadow);
685}
686
687#endif /* __s390x__ */
688
689static inline void pmd_clear_kernel(pmd_t * pmdp)
690{ 711{
691 pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY; 712 pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY;
692} 713}
693 714
694static inline void pmd_clear(pmd_t *pmd)
695{
696 pmd_t *shadow = get_shadow_table(pmd);
697
698 pmd_clear_kernel(pmd);
699 if (shadow)
700 pmd_clear_kernel(shadow);
701}
702
703static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) 715static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
704{ 716{
705 pte_val(*ptep) = _PAGE_TYPE_EMPTY; 717 pte_val(*ptep) = _PAGE_TYPE_EMPTY;
706 if (mm->context.noexec)
707 pte_val(ptep[PTRS_PER_PTE]) = _PAGE_TYPE_EMPTY;
708} 718}
709 719
710/* 720/*
@@ -734,35 +744,27 @@ static inline pte_t pte_mkwrite(pte_t pte)
734 744
735static inline pte_t pte_mkclean(pte_t pte) 745static inline pte_t pte_mkclean(pte_t pte)
736{ 746{
737 /* The only user of pte_mkclean is the fork() code. 747#ifdef CONFIG_PGSTE
738 We must *not* clear the *physical* page dirty bit 748 pte_val(pte) &= ~_PAGE_SWC;
739 just because fork() wants to clear the dirty bit in 749#endif
740 *one* of the page's mappings. So we just do nothing. */
741 return pte; 750 return pte;
742} 751}
743 752
744static inline pte_t pte_mkdirty(pte_t pte) 753static inline pte_t pte_mkdirty(pte_t pte)
745{ 754{
746 /* We do not explicitly set the dirty bit because the
747 * sske instruction is slow. It is faster to let the
748 * next instruction set the dirty bit.
749 */
750 return pte; 755 return pte;
751} 756}
752 757
753static inline pte_t pte_mkold(pte_t pte) 758static inline pte_t pte_mkold(pte_t pte)
754{ 759{
755 /* S/390 doesn't keep its dirty/referenced bit in the pte. 760#ifdef CONFIG_PGSTE
756 * There is no point in clearing the real referenced bit. 761 pte_val(pte) &= ~_PAGE_SWR;
757 */ 762#endif
758 return pte; 763 return pte;
759} 764}
760 765
761static inline pte_t pte_mkyoung(pte_t pte) 766static inline pte_t pte_mkyoung(pte_t pte)
762{ 767{
763 /* S/390 doesn't keep its dirty/referenced bit in the pte.
764 * There is no point in setting the real referenced bit.
765 */
766 return pte; 768 return pte;
767} 769}
768 770
@@ -800,62 +802,60 @@ static inline pte_t pte_mkhuge(pte_t pte)
800} 802}
801#endif 803#endif
802 804
803#ifdef CONFIG_PGSTE
804/* 805/*
805 * Get (and clear) the user dirty bit for a PTE. 806 * Get (and clear) the user dirty bit for a pte.
806 */ 807 */
807static inline int kvm_s390_test_and_clear_page_dirty(struct mm_struct *mm, 808static inline int ptep_test_and_clear_user_dirty(struct mm_struct *mm,
808 pte_t *ptep) 809 pte_t *ptep)
809{ 810{
810 int dirty; 811 pgste_t pgste;
811 unsigned long *pgste; 812 int dirty = 0;
812 struct page *page; 813
813 unsigned int skey; 814 if (mm_has_pgste(mm)) {
814 815 pgste = pgste_get_lock(ptep);
815 if (!mm->context.has_pgste) 816 pgste = pgste_update_all(ptep, pgste);
816 return -EINVAL; 817 dirty = !!(pgste_val(pgste) & KVM_UC_BIT);
817 rcp_lock(ptep); 818 pgste_val(pgste) &= ~KVM_UC_BIT;
818 pgste = (unsigned long *) (ptep + PTRS_PER_PTE); 819 pgste_set_unlock(ptep, pgste);
819 page = virt_to_page(pte_val(*ptep)); 820 return dirty;
820 skey = page_get_storage_key(page_to_phys(page));
821 if (skey & _PAGE_CHANGED) {
822 set_bit_simple(RCP_GC_BIT, pgste);
823 set_bit_simple(KVM_UD_BIT, pgste);
824 } 821 }
825 if (test_and_clear_bit_simple(RCP_HC_BIT, pgste)) {
826 SetPageDirty(page);
827 set_bit_simple(KVM_UD_BIT, pgste);
828 }
829 dirty = test_and_clear_bit_simple(KVM_UD_BIT, pgste);
830 if (skey & _PAGE_CHANGED)
831 page_clear_dirty(page, 1);
832 rcp_unlock(ptep);
833 return dirty; 822 return dirty;
834} 823}
835#endif 824
825/*
826 * Get (and clear) the user referenced bit for a pte.
827 */
828static inline int ptep_test_and_clear_user_young(struct mm_struct *mm,
829 pte_t *ptep)
830{
831 pgste_t pgste;
832 int young = 0;
833
834 if (mm_has_pgste(mm)) {
835 pgste = pgste_get_lock(ptep);
836 pgste = pgste_update_young(ptep, pgste);
837 young = !!(pgste_val(pgste) & KVM_UR_BIT);
838 pgste_val(pgste) &= ~KVM_UR_BIT;
839 pgste_set_unlock(ptep, pgste);
840 }
841 return young;
842}
836 843
837#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG 844#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
838static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, 845static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
839 unsigned long addr, pte_t *ptep) 846 unsigned long addr, pte_t *ptep)
840{ 847{
841#ifdef CONFIG_PGSTE 848 pgste_t pgste;
842 unsigned long physpage; 849 pte_t pte;
843 int young;
844 unsigned long *pgste;
845 850
846 if (!vma->vm_mm->context.has_pgste) 851 if (mm_has_pgste(vma->vm_mm)) {
847 return 0; 852 pgste = pgste_get_lock(ptep);
848 physpage = pte_val(*ptep) & PAGE_MASK; 853 pgste = pgste_update_young(ptep, pgste);
849 pgste = (unsigned long *) (ptep + PTRS_PER_PTE); 854 pte = *ptep;
850 855 *ptep = pte_mkold(pte);
851 young = ((page_get_storage_key(physpage) & _PAGE_REFERENCED) != 0); 856 pgste_set_unlock(ptep, pgste);
852 rcp_lock(ptep); 857 return pte_young(pte);
853 if (young) 858 }
854 set_bit_simple(RCP_GR_BIT, pgste);
855 young |= test_and_clear_bit_simple(RCP_HR_BIT, pgste);
856 rcp_unlock(ptep);
857 return young;
858#endif
859 return 0; 859 return 0;
860} 860}
861 861
@@ -867,10 +867,7 @@ static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
867 * On s390 reference bits are in storage key and never in TLB 867 * On s390 reference bits are in storage key and never in TLB
868 * With virtualization we handle the reference bit, without it 868 * With virtualization we handle the reference bit, without it
869 * we can simply return */ 869 * we can simply return */
870#ifdef CONFIG_PGSTE
871 return ptep_test_and_clear_young(vma, address, ptep); 870 return ptep_test_and_clear_young(vma, address, ptep);
872#endif
873 return 0;
874} 871}
875 872
876static inline void __ptep_ipte(unsigned long address, pte_t *ptep) 873static inline void __ptep_ipte(unsigned long address, pte_t *ptep)
@@ -890,25 +887,6 @@ static inline void __ptep_ipte(unsigned long address, pte_t *ptep)
890 } 887 }
891} 888}
892 889
893static inline void ptep_invalidate(struct mm_struct *mm,
894 unsigned long address, pte_t *ptep)
895{
896 if (mm->context.has_pgste) {
897 rcp_lock(ptep);
898 __ptep_ipte(address, ptep);
899 ptep_rcp_copy(ptep);
900 pte_val(*ptep) = _PAGE_TYPE_EMPTY;
901 rcp_unlock(ptep);
902 return;
903 }
904 __ptep_ipte(address, ptep);
905 pte_val(*ptep) = _PAGE_TYPE_EMPTY;
906 if (mm->context.noexec) {
907 __ptep_ipte(address, ptep + PTRS_PER_PTE);
908 pte_val(*(ptep + PTRS_PER_PTE)) = _PAGE_TYPE_EMPTY;
909 }
910}
911
912/* 890/*
913 * This is hard to understand. ptep_get_and_clear and ptep_clear_flush 891 * This is hard to understand. ptep_get_and_clear and ptep_clear_flush
914 * both clear the TLB for the unmapped pte. The reason is that 892 * both clear the TLB for the unmapped pte. The reason is that
@@ -923,24 +901,72 @@ static inline void ptep_invalidate(struct mm_struct *mm,
923 * is a nop. 901 * is a nop.
924 */ 902 */
925#define __HAVE_ARCH_PTEP_GET_AND_CLEAR 903#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
926#define ptep_get_and_clear(__mm, __address, __ptep) \ 904static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
927({ \ 905 unsigned long address, pte_t *ptep)
928 pte_t __pte = *(__ptep); \ 906{
929 (__mm)->context.flush_mm = 1; \ 907 pgste_t pgste;
930 if (atomic_read(&(__mm)->context.attach_count) > 1 || \ 908 pte_t pte;
931 (__mm) != current->active_mm) \ 909
932 ptep_invalidate(__mm, __address, __ptep); \ 910 mm->context.flush_mm = 1;
933 else \ 911 if (mm_has_pgste(mm))
934 pte_clear((__mm), (__address), (__ptep)); \ 912 pgste = pgste_get_lock(ptep);
935 __pte; \ 913
936}) 914 pte = *ptep;
915 if (!mm_exclusive(mm))
916 __ptep_ipte(address, ptep);
917 pte_val(*ptep) = _PAGE_TYPE_EMPTY;
918
919 if (mm_has_pgste(mm)) {
920 pgste = pgste_update_all(&pte, pgste);
921 pgste_set_unlock(ptep, pgste);
922 }
923 return pte;
924}
925
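The rewritten ptep_get_and_clear() makes the lazy-flush policy
explicit: mm_exclusive() (defined earlier in this header) is true when
the mm is attached only to the current CPU, so no other CPU can hold a
stale TLB entry; the IPTE is then skipped and context.flush_mm stays
set so a later conditional flush (__tlb_flush_mm_cond()) does one
batched flush. Condensed, without the pgste bookkeeping (sketch,
comments ours):

    static inline pte_t get_and_clear_sketch(struct mm_struct *mm,
                                             unsigned long address,
                                             pte_t *ptep)
    {
            pte_t pte = *ptep;

            mm->context.flush_mm = 1;           /* a flush is owed */
            if (!mm_exclusive(mm))              /* other CPUs may cache it */
                    __ptep_ipte(address, ptep); /* so purge the TLB now */
            pte_val(*ptep) = _PAGE_TYPE_EMPTY;  /* exclusive: clear only, */
            return pte;                         /* flush later in a batch */
    }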
926#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
927static inline pte_t ptep_modify_prot_start(struct mm_struct *mm,
928 unsigned long address,
929 pte_t *ptep)
930{
931 pte_t pte;
932
933 mm->context.flush_mm = 1;
934 if (mm_has_pgste(mm))
935 pgste_get_lock(ptep);
936
937 pte = *ptep;
938 if (!mm_exclusive(mm))
939 __ptep_ipte(address, ptep);
940 return pte;
941}
942
943static inline void ptep_modify_prot_commit(struct mm_struct *mm,
944 unsigned long address,
945 pte_t *ptep, pte_t pte)
946{
947 *ptep = pte;
948 if (mm_has_pgste(mm))
949 pgste_set_unlock(ptep, *(pgste_t *)(ptep + PTRS_PER_PTE));
950}
937 951
938#define __HAVE_ARCH_PTEP_CLEAR_FLUSH 952#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
939static inline pte_t ptep_clear_flush(struct vm_area_struct *vma, 953static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
940 unsigned long address, pte_t *ptep) 954 unsigned long address, pte_t *ptep)
941{ 955{
942 pte_t pte = *ptep; 956 pgste_t pgste;
943 ptep_invalidate(vma->vm_mm, address, ptep); 957 pte_t pte;
958
959 if (mm_has_pgste(vma->vm_mm))
960 pgste = pgste_get_lock(ptep);
961
962 pte = *ptep;
963 __ptep_ipte(address, ptep);
964 pte_val(*ptep) = _PAGE_TYPE_EMPTY;
965
966 if (mm_has_pgste(vma->vm_mm)) {
967 pgste = pgste_update_all(&pte, pgste);
968 pgste_set_unlock(ptep, pgste);
969 }
944 return pte; 970 return pte;
945} 971}
946 972
@@ -953,76 +979,67 @@ static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
953 */ 979 */
954#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL 980#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
955static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, 981static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
956 unsigned long addr, 982 unsigned long address,
957 pte_t *ptep, int full) 983 pte_t *ptep, int full)
958{ 984{
959 pte_t pte = *ptep; 985 pgste_t pgste;
986 pte_t pte;
987
988 if (mm_has_pgste(mm))
989 pgste = pgste_get_lock(ptep);
990
991 pte = *ptep;
992 if (!full)
993 __ptep_ipte(address, ptep);
994 pte_val(*ptep) = _PAGE_TYPE_EMPTY;
960 995
961 if (full) 996 if (mm_has_pgste(mm)) {
962 pte_clear(mm, addr, ptep); 997 pgste = pgste_update_all(&pte, pgste);
963 else 998 pgste_set_unlock(ptep, pgste);
964 ptep_invalidate(mm, addr, ptep); 999 }
965 return pte; 1000 return pte;
966} 1001}
967 1002
968#define __HAVE_ARCH_PTEP_SET_WRPROTECT 1003#define __HAVE_ARCH_PTEP_SET_WRPROTECT
969#define ptep_set_wrprotect(__mm, __addr, __ptep) \ 1004static inline pte_t ptep_set_wrprotect(struct mm_struct *mm,
970({ \ 1005 unsigned long address, pte_t *ptep)
971 pte_t __pte = *(__ptep); \ 1006{
972 if (pte_write(__pte)) { \ 1007 pgste_t pgste;
973 (__mm)->context.flush_mm = 1; \ 1008 pte_t pte = *ptep;
974 if (atomic_read(&(__mm)->context.attach_count) > 1 || \
975 (__mm) != current->active_mm) \
976 ptep_invalidate(__mm, __addr, __ptep); \
977 set_pte_at(__mm, __addr, __ptep, pte_wrprotect(__pte)); \
978 } \
979})
980 1009
981#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS 1010 if (pte_write(pte)) {
982#define ptep_set_access_flags(__vma, __addr, __ptep, __entry, __dirty) \ 1011 mm->context.flush_mm = 1;
983({ \ 1012 if (mm_has_pgste(mm))
984 int __changed = !pte_same(*(__ptep), __entry); \ 1013 pgste = pgste_get_lock(ptep);
985 if (__changed) { \
986 ptep_invalidate((__vma)->vm_mm, __addr, __ptep); \
987 set_pte_at((__vma)->vm_mm, __addr, __ptep, __entry); \
988 } \
989 __changed; \
990})
991 1014
992/* 1015 if (!mm_exclusive(mm))
993 * Test and clear dirty bit in storage key. 1016 __ptep_ipte(address, ptep);
994 * We can't clear the changed bit atomically. This is a potential 1017 *ptep = pte_wrprotect(pte);
995 * race against modification of the referenced bit. This function
996 * should therefore only be called if it is not mapped in any
997 * address space.
998 */
999#define __HAVE_ARCH_PAGE_TEST_DIRTY
1000static inline int page_test_dirty(struct page *page)
1001{
1002 return (page_get_storage_key(page_to_phys(page)) & _PAGE_CHANGED) != 0;
1003}
1004 1018
1005#define __HAVE_ARCH_PAGE_CLEAR_DIRTY 1019 if (mm_has_pgste(mm))
1006static inline void page_clear_dirty(struct page *page, int mapped) 1020 pgste_set_unlock(ptep, pgste);
1007{ 1021 }
1008 page_set_storage_key(page_to_phys(page), PAGE_DEFAULT_KEY, mapped); 1022 return pte;
1009} 1023}
1010 1024
1011/* 1025#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
1012 * Test and clear referenced bit in storage key. 1026static inline int ptep_set_access_flags(struct vm_area_struct *vma,
1013 */ 1027 unsigned long address, pte_t *ptep,
1014#define __HAVE_ARCH_PAGE_TEST_AND_CLEAR_YOUNG 1028 pte_t entry, int dirty)
1015static inline int page_test_and_clear_young(struct page *page)
1016{ 1029{
1017 unsigned long physpage = page_to_phys(page); 1030 pgste_t pgste;
1018 int ccode; 1031
1019 1032 if (pte_same(*ptep, entry))
1020 asm volatile( 1033 return 0;
1021 " rrbe 0,%1\n" 1034 if (mm_has_pgste(vma->vm_mm))
1022 " ipm %0\n" 1035 pgste = pgste_get_lock(ptep);
1023 " srl %0,28\n" 1036
1024 : "=d" (ccode) : "a" (physpage) : "cc" ); 1037 __ptep_ipte(address, ptep);
1025 return ccode & 2; 1038 *ptep = entry;
1039
1040 if (mm_has_pgste(vma->vm_mm))
1041 pgste_set_unlock(ptep, pgste);
1042 return 1;
1026} 1043}
1027 1044
1028/* 1045/*
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index 2c79b6416271..1300c3025334 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -84,6 +84,7 @@ struct thread_struct {
84 struct per_event per_event; /* Cause of the last PER trap */ 84 struct per_event per_event; /* Cause of the last PER trap */
85 /* pfault_wait is used to block the process on a pfault event */ 85 /* pfault_wait is used to block the process on a pfault event */
86 unsigned long pfault_wait; 86 unsigned long pfault_wait;
87 struct list_head list;
87}; 88};
88 89
89typedef struct thread_struct thread_struct; 90typedef struct thread_struct thread_struct;
diff --git a/arch/s390/include/asm/tlbflush.h b/arch/s390/include/asm/tlbflush.h
index 29d5d6d4becc..b7a4f2eb0057 100644
--- a/arch/s390/include/asm/tlbflush.h
+++ b/arch/s390/include/asm/tlbflush.h
@@ -50,7 +50,7 @@ static inline void __tlb_flush_full(struct mm_struct *mm)
50 /* 50 /*
51 * If the process only ran on the local cpu, do a local flush. 51 * If the process only ran on the local cpu, do a local flush.
52 */ 52 */
53 local_cpumask = cpumask_of_cpu(smp_processor_id()); 53 cpumask_copy(&local_cpumask, cpumask_of(smp_processor_id()));
54 if (cpumask_equal(mm_cpumask(mm), &local_cpumask)) 54 if (cpumask_equal(mm_cpumask(mm), &local_cpumask))
55 __tlb_flush_local(); 55 __tlb_flush_local();
56 else 56 else
@@ -80,16 +80,11 @@ static inline void __tlb_flush_mm(struct mm_struct * mm)
80 * on all cpus instead of doing a local flush if the mm 80 * on all cpus instead of doing a local flush if the mm
81 * only ran on the local cpu. 81 * only ran on the local cpu.
82 */ 82 */
83 if (MACHINE_HAS_IDTE) { 83 if (MACHINE_HAS_IDTE)
84 if (mm->context.noexec)
85 __tlb_flush_idte((unsigned long)
86 get_shadow_table(mm->pgd) |
87 mm->context.asce_bits);
88 __tlb_flush_idte((unsigned long) mm->pgd | 84 __tlb_flush_idte((unsigned long) mm->pgd |
89 mm->context.asce_bits); 85 mm->context.asce_bits);
90 return; 86 else
91 } 87 __tlb_flush_full(mm);
92 __tlb_flush_full(mm);
93} 88}
94 89
95static inline void __tlb_flush_mm_cond(struct mm_struct * mm) 90static inline void __tlb_flush_mm_cond(struct mm_struct * mm)
diff --git a/arch/s390/include/asm/unistd.h b/arch/s390/include/asm/unistd.h
index e82152572377..9208e69245a0 100644
--- a/arch/s390/include/asm/unistd.h
+++ b/arch/s390/include/asm/unistd.h
@@ -385,6 +385,7 @@
385 385
386/* Ignore system calls that are also reachable via sys_socket */ 386/* Ignore system calls that are also reachable via sys_socket */
387#define __IGNORE_recvmmsg 387#define __IGNORE_recvmmsg
388#define __IGNORE_sendmmsg
388 389
389#define __ARCH_WANT_IPC_PARSE_VERSION 390#define __ARCH_WANT_IPC_PARSE_VERSION
390#define __ARCH_WANT_OLD_READDIR 391#define __ARCH_WANT_OLD_READDIR
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index fe03c140002a..edfbd17d7082 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -124,13 +124,11 @@ int main(void)
124 DEFINE(__LC_LAST_UPDATE_TIMER, offsetof(struct _lowcore, last_update_timer)); 124 DEFINE(__LC_LAST_UPDATE_TIMER, offsetof(struct _lowcore, last_update_timer));
125 DEFINE(__LC_LAST_UPDATE_CLOCK, offsetof(struct _lowcore, last_update_clock)); 125 DEFINE(__LC_LAST_UPDATE_CLOCK, offsetof(struct _lowcore, last_update_clock));
126 DEFINE(__LC_CURRENT, offsetof(struct _lowcore, current_task)); 126 DEFINE(__LC_CURRENT, offsetof(struct _lowcore, current_task));
127 DEFINE(__LC_CURRENT_PID, offsetof(struct _lowcore, current_pid));
127 DEFINE(__LC_THREAD_INFO, offsetof(struct _lowcore, thread_info)); 128 DEFINE(__LC_THREAD_INFO, offsetof(struct _lowcore, thread_info));
128 DEFINE(__LC_KERNEL_STACK, offsetof(struct _lowcore, kernel_stack)); 129 DEFINE(__LC_KERNEL_STACK, offsetof(struct _lowcore, kernel_stack));
129 DEFINE(__LC_ASYNC_STACK, offsetof(struct _lowcore, async_stack)); 130 DEFINE(__LC_ASYNC_STACK, offsetof(struct _lowcore, async_stack));
130 DEFINE(__LC_PANIC_STACK, offsetof(struct _lowcore, panic_stack)); 131 DEFINE(__LC_PANIC_STACK, offsetof(struct _lowcore, panic_stack));
131 DEFINE(__LC_KERNEL_ASCE, offsetof(struct _lowcore, kernel_asce));
132 DEFINE(__LC_USER_ASCE, offsetof(struct _lowcore, user_asce));
133 DEFINE(__LC_USER_EXEC_ASCE, offsetof(struct _lowcore, user_exec_asce));
134 DEFINE(__LC_INT_CLOCK, offsetof(struct _lowcore, int_clock)); 132 DEFINE(__LC_INT_CLOCK, offsetof(struct _lowcore, int_clock));
135 DEFINE(__LC_MCCK_CLOCK, offsetof(struct _lowcore, mcck_clock)); 133 DEFINE(__LC_MCCK_CLOCK, offsetof(struct _lowcore, mcck_clock));
136 DEFINE(__LC_MACHINE_FLAGS, offsetof(struct _lowcore, machine_flags)); 134 DEFINE(__LC_MACHINE_FLAGS, offsetof(struct _lowcore, machine_flags));
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 1b67fc6ebdc2..0476174dfff5 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -212,6 +212,7 @@ __switch_to:
212 lctl %c4,%c4,__TASK_pid(%r3) # load pid to control reg. 4 212 lctl %c4,%c4,__TASK_pid(%r3) # load pid to control reg. 4
213 lm %r6,%r15,__SF_GPRS(%r15) # load gprs of next task 213 lm %r6,%r15,__SF_GPRS(%r15) # load gprs of next task
214 st %r3,__LC_CURRENT # store task struct of next 214 st %r3,__LC_CURRENT # store task struct of next
215 mvc __LC_CURRENT_PID(4,%r0),__TASK_pid(%r3) # store pid of next
215 st %r5,__LC_THREAD_INFO # store thread info of next 216 st %r5,__LC_THREAD_INFO # store thread info of next
216 ahi %r5,STACK_SIZE # end of kernel stack of next 217 ahi %r5,STACK_SIZE # end of kernel stack of next
217 st %r5,__LC_KERNEL_STACK # store end of kernel stack 218 st %r5,__LC_KERNEL_STACK # store end of kernel stack
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index 9fd864563499..d61967e2eab0 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -220,6 +220,7 @@ __switch_to:
220 lctl %c4,%c4,__TASK_pid(%r3) # load pid to control reg. 4 220 lctl %c4,%c4,__TASK_pid(%r3) # load pid to control reg. 4
221 lmg %r6,%r15,__SF_GPRS(%r15) # load gprs of next task 221 lmg %r6,%r15,__SF_GPRS(%r15) # load gprs of next task
222 stg %r3,__LC_CURRENT # store task struct of next 222 stg %r3,__LC_CURRENT # store task struct of next
223 mvc __LC_CURRENT_PID+4(4,%r0),__TASK_pid(%r3) # store pid of next
223 stg %r5,__LC_THREAD_INFO # store thread info of next 224 stg %r5,__LC_THREAD_INFO # store thread info of next
224 aghi %r5,STACK_SIZE # end of kernel stack of next 225 aghi %r5,STACK_SIZE # end of kernel stack of next
225 stg %r5,__LC_KERNEL_STACK # store end of kernel stack 226 stg %r5,__LC_KERNEL_STACK # store end of kernel stack
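A note on the two mvc variants in __switch_to: the lowcore field is
__u32 on 31-bit but __u64 on 64-bit, and s390 is big-endian, so
entry64.S writes the 4-byte pid at __LC_CURRENT_PID+4, the low-order
word of the 64-bit slot. In C terms (sketch, assuming the high word is
zero, as it is after lowcore initialization):

    static inline void store_current_pid(struct task_struct *next)
    {
            /* same effect as the mvc: pid lands in the low 32 bits */
            S390_lowcore.current_pid = (unsigned int)next->pid;
    }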
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c
index ea5099c9709c..e204f9597aaf 100644
--- a/arch/s390/kernel/irq.c
+++ b/arch/s390/kernel/irq.c
@@ -32,6 +32,7 @@ static const struct irq_class intrclass_names[] = {
32 {.name = "VRT", .desc = "[EXT] Virtio" }, 32 {.name = "VRT", .desc = "[EXT] Virtio" },
33 {.name = "SCP", .desc = "[EXT] Service Call" }, 33 {.name = "SCP", .desc = "[EXT] Service Call" },
34 {.name = "IUC", .desc = "[EXT] IUCV" }, 34 {.name = "IUC", .desc = "[EXT] IUCV" },
35 {.name = "CPM", .desc = "[EXT] CPU Measurement" },
35 {.name = "QAI", .desc = "[I/O] QDIO Adapter Interrupt" }, 36 {.name = "QAI", .desc = "[I/O] QDIO Adapter Interrupt" },
36 {.name = "QDI", .desc = "[I/O] QDIO Interrupt" }, 37 {.name = "QDI", .desc = "[I/O] QDIO Interrupt" },
37 {.name = "DAS", .desc = "[I/O] DASD" }, 38 {.name = "DAS", .desc = "[I/O] DASD" },
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index a895e69379f7..541a7509faeb 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -9,41 +9,26 @@
9 9
10#include <linux/compiler.h> 10#include <linux/compiler.h>
11#include <linux/cpu.h> 11#include <linux/cpu.h>
12#include <linux/errno.h>
13#include <linux/sched.h> 12#include <linux/sched.h>
14#include <linux/kernel.h> 13#include <linux/kernel.h>
15#include <linux/mm.h> 14#include <linux/mm.h>
16#include <linux/fs.h>
17#include <linux/smp.h> 15#include <linux/smp.h>
18#include <linux/stddef.h>
19#include <linux/slab.h> 16#include <linux/slab.h>
20#include <linux/unistd.h>
21#include <linux/ptrace.h>
22#include <linux/vmalloc.h>
23#include <linux/user.h>
24#include <linux/interrupt.h> 17#include <linux/interrupt.h>
25#include <linux/delay.h>
26#include <linux/reboot.h>
27#include <linux/init.h>
28#include <linux/module.h>
29#include <linux/notifier.h>
30#include <linux/tick.h> 18#include <linux/tick.h>
31#include <linux/elfcore.h>
32#include <linux/kernel_stat.h>
33#include <linux/personality.h> 19#include <linux/personality.h>
34#include <linux/syscalls.h> 20#include <linux/syscalls.h>
35#include <linux/compat.h> 21#include <linux/compat.h>
36#include <linux/kprobes.h> 22#include <linux/kprobes.h>
37#include <linux/random.h> 23#include <linux/random.h>
38#include <asm/compat.h> 24#include <linux/module.h>
39#include <asm/uaccess.h>
40#include <asm/pgtable.h>
41#include <asm/system.h> 25#include <asm/system.h>
42#include <asm/io.h> 26#include <asm/io.h>
43#include <asm/processor.h> 27#include <asm/processor.h>
44#include <asm/irq.h> 28#include <asm/irq.h>
45#include <asm/timer.h> 29#include <asm/timer.h>
46#include <asm/nmi.h> 30#include <asm/nmi.h>
31#include <asm/compat.h>
47#include <asm/smp.h> 32#include <asm/smp.h>
48#include "entry.h" 33#include "entry.h"
49 34
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index f5434d1ecb31..0c35dee10b00 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -305,8 +305,7 @@ static int set_amode_and_uaccess(unsigned long user_amode,
305 */ 305 */
306static int __init early_parse_switch_amode(char *p) 306static int __init early_parse_switch_amode(char *p)
307{ 307{
308 if (user_mode != SECONDARY_SPACE_MODE) 308 user_mode = PRIMARY_SPACE_MODE;
309 user_mode = PRIMARY_SPACE_MODE;
310 return 0; 309 return 0;
311} 310}
312early_param("switch_amode", early_parse_switch_amode); 311early_param("switch_amode", early_parse_switch_amode);
@@ -315,10 +314,6 @@ static int __init early_parse_user_mode(char *p)
315{ 314{
316 if (p && strcmp(p, "primary") == 0) 315 if (p && strcmp(p, "primary") == 0)
317 user_mode = PRIMARY_SPACE_MODE; 316 user_mode = PRIMARY_SPACE_MODE;
318#ifdef CONFIG_S390_EXEC_PROTECT
319 else if (p && strcmp(p, "secondary") == 0)
320 user_mode = SECONDARY_SPACE_MODE;
321#endif
322 else if (!p || strcmp(p, "home") == 0) 317 else if (!p || strcmp(p, "home") == 0)
323 user_mode = HOME_SPACE_MODE; 318 user_mode = HOME_SPACE_MODE;
324 else 319 else
@@ -327,31 +322,9 @@ static int __init early_parse_user_mode(char *p)
327} 322}
328early_param("user_mode", early_parse_user_mode); 323early_param("user_mode", early_parse_user_mode);
329 324
330#ifdef CONFIG_S390_EXEC_PROTECT
331/*
332 * Enable execute protection?
333 */
334static int __init early_parse_noexec(char *p)
335{
336 if (!strncmp(p, "off", 3))
337 return 0;
338 user_mode = SECONDARY_SPACE_MODE;
339 return 0;
340}
341early_param("noexec", early_parse_noexec);
342#endif /* CONFIG_S390_EXEC_PROTECT */
343
344static void setup_addressing_mode(void) 325static void setup_addressing_mode(void)
345{ 326{
346 if (user_mode == SECONDARY_SPACE_MODE) { 327 if (user_mode == PRIMARY_SPACE_MODE) {
347 if (set_amode_and_uaccess(PSW_ASC_SECONDARY,
348 PSW32_ASC_SECONDARY))
349 pr_info("Execute protection active, "
350 "mvcos available\n");
351 else
352 pr_info("Execute protection active, "
353 "mvcos not available\n");
354 } else if (user_mode == PRIMARY_SPACE_MODE) {
355 if (set_amode_and_uaccess(PSW_ASC_PRIMARY, PSW32_ASC_PRIMARY)) 328 if (set_amode_and_uaccess(PSW_ASC_PRIMARY, PSW32_ASC_PRIMARY))
356 pr_info("Address spaces switched, " 329 pr_info("Address spaces switched, "
357 "mvcos available\n"); 330 "mvcos available\n");
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 63c7d9ff220d..f8e85ecbc459 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -335,7 +335,7 @@ static int smp_rescan_cpus_sigp(cpumask_t avail)
335 smp_cpu_polarization[logical_cpu] = POLARIZATION_UNKNWN; 335 smp_cpu_polarization[logical_cpu] = POLARIZATION_UNKNWN;
336 if (!cpu_stopped(logical_cpu)) 336 if (!cpu_stopped(logical_cpu))
337 continue; 337 continue;
338 cpu_set(logical_cpu, cpu_present_map); 338 set_cpu_present(logical_cpu, true);
339 smp_cpu_state[logical_cpu] = CPU_STATE_CONFIGURED; 339 smp_cpu_state[logical_cpu] = CPU_STATE_CONFIGURED;
340 logical_cpu = cpumask_next(logical_cpu, &avail); 340 logical_cpu = cpumask_next(logical_cpu, &avail);
341 if (logical_cpu >= nr_cpu_ids) 341 if (logical_cpu >= nr_cpu_ids)
@@ -367,7 +367,7 @@ static int smp_rescan_cpus_sclp(cpumask_t avail)
367 continue; 367 continue;
368 __cpu_logical_map[logical_cpu] = cpu_id; 368 __cpu_logical_map[logical_cpu] = cpu_id;
369 smp_cpu_polarization[logical_cpu] = POLARIZATION_UNKNWN; 369 smp_cpu_polarization[logical_cpu] = POLARIZATION_UNKNWN;
370 cpu_set(logical_cpu, cpu_present_map); 370 set_cpu_present(logical_cpu, true);
371 if (cpu >= info->configured) 371 if (cpu >= info->configured)
372 smp_cpu_state[logical_cpu] = CPU_STATE_STANDBY; 372 smp_cpu_state[logical_cpu] = CPU_STATE_STANDBY;
373 else 373 else
@@ -385,7 +385,7 @@ static int __smp_rescan_cpus(void)
385{ 385{
386 cpumask_t avail; 386 cpumask_t avail;
387 387
388 cpus_xor(avail, cpu_possible_map, cpu_present_map); 388 cpumask_xor(&avail, cpu_possible_mask, cpu_present_mask);
389 if (smp_use_sigp_detection) 389 if (smp_use_sigp_detection)
390 return smp_rescan_cpus_sigp(avail); 390 return smp_rescan_cpus_sigp(avail);
391 else 391 else
@@ -467,7 +467,7 @@ int __cpuinit start_secondary(void *cpuvoid)
467 notify_cpu_starting(smp_processor_id()); 467 notify_cpu_starting(smp_processor_id());
468 /* Mark this cpu as online */ 468 /* Mark this cpu as online */
469 ipi_call_lock(); 469 ipi_call_lock();
470 cpu_set(smp_processor_id(), cpu_online_map); 470 set_cpu_online(smp_processor_id(), true);
471 ipi_call_unlock(); 471 ipi_call_unlock();
472 /* Switch on interrupts */ 472 /* Switch on interrupts */
473 local_irq_enable(); 473 local_irq_enable();
@@ -644,7 +644,7 @@ int __cpu_disable(void)
644 struct ec_creg_mask_parms cr_parms; 644 struct ec_creg_mask_parms cr_parms;
645 int cpu = smp_processor_id(); 645 int cpu = smp_processor_id();
646 646
647 cpu_clear(cpu, cpu_online_map); 647 set_cpu_online(cpu, false);
648 648
649 /* Disable pfault pseudo page faults on this cpu. */ 649 /* Disable pfault pseudo page faults on this cpu. */
650 pfault_fini(); 650 pfault_fini();
@@ -654,8 +654,8 @@ int __cpu_disable(void)
654 654
655 /* disable all external interrupts */ 655 /* disable all external interrupts */
656 cr_parms.orvals[0] = 0; 656 cr_parms.orvals[0] = 0;
657 cr_parms.andvals[0] = ~(1 << 15 | 1 << 14 | 1 << 13 | 1 << 12 | 657 cr_parms.andvals[0] = ~(1 << 15 | 1 << 14 | 1 << 13 | 1 << 11 |
658 1 << 11 | 1 << 10 | 1 << 6 | 1 << 4); 658 1 << 10 | 1 << 9 | 1 << 6 | 1 << 4);
659 /* disable all I/O interrupts */ 659 /* disable all I/O interrupts */
660 cr_parms.orvals[6] = 0; 660 cr_parms.orvals[6] = 0;
661 cr_parms.andvals[6] = ~(1 << 31 | 1 << 30 | 1 << 29 | 1 << 28 | 661 cr_parms.andvals[6] = ~(1 << 31 | 1 << 30 | 1 << 29 | 1 << 28 |
@@ -681,7 +681,7 @@ void __cpu_die(unsigned int cpu)
681 atomic_dec(&init_mm.context.attach_count); 681 atomic_dec(&init_mm.context.attach_count);
682} 682}
683 683
684void cpu_die(void) 684void __noreturn cpu_die(void)
685{ 685{
686 idle_task_exit(); 686 idle_task_exit();
687 while (sigp(smp_processor_id(), sigp_stop) == sigp_busy) 687 while (sigp(smp_processor_id(), sigp_stop) == sigp_busy)
@@ -738,8 +738,8 @@ void __init smp_prepare_boot_cpu(void)
738 BUG_ON(smp_processor_id() != 0); 738 BUG_ON(smp_processor_id() != 0);
739 739
740 current_thread_info()->cpu = 0; 740 current_thread_info()->cpu = 0;
741 cpu_set(0, cpu_present_map); 741 set_cpu_present(0, true);
742 cpu_set(0, cpu_online_map); 742 set_cpu_online(0, true);
743 S390_lowcore.percpu_offset = __per_cpu_offset[0]; 743 S390_lowcore.percpu_offset = __per_cpu_offset[0];
744 current_set[0] = current; 744 current_set[0] = current;
745 smp_cpu_state[0] = CPU_STATE_CONFIGURED; 745 smp_cpu_state[0] = CPU_STATE_CONFIGURED;
@@ -1016,21 +1016,21 @@ int __ref smp_rescan_cpus(void)
1016 1016
1017 get_online_cpus(); 1017 get_online_cpus();
1018 mutex_lock(&smp_cpu_state_mutex); 1018 mutex_lock(&smp_cpu_state_mutex);
1019 newcpus = cpu_present_map; 1019 cpumask_copy(&newcpus, cpu_present_mask);
1020 rc = __smp_rescan_cpus(); 1020 rc = __smp_rescan_cpus();
1021 if (rc) 1021 if (rc)
1022 goto out; 1022 goto out;
1023 cpus_andnot(newcpus, cpu_present_map, newcpus); 1023 cpumask_andnot(&newcpus, cpu_present_mask, &newcpus);
1024 for_each_cpu_mask(cpu, newcpus) { 1024 for_each_cpu(cpu, &newcpus) {
1025 rc = smp_add_present_cpu(cpu); 1025 rc = smp_add_present_cpu(cpu);
1026 if (rc) 1026 if (rc)
1027 cpu_clear(cpu, cpu_present_map); 1027 set_cpu_present(cpu, false);
1028 } 1028 }
1029 rc = 0; 1029 rc = 0;
1030out: 1030out:
1031 mutex_unlock(&smp_cpu_state_mutex); 1031 mutex_unlock(&smp_cpu_state_mutex);
1032 put_online_cpus(); 1032 put_online_cpus();
1033 if (!cpus_empty(newcpus)) 1033 if (!cpumask_empty(&newcpus))
1034 topology_schedule_update(); 1034 topology_schedule_update();
1035 return rc; 1035 return rc;
1036} 1036}
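Note: the smp.c hunks are a mechanical migration from the old by-value cpumask operations (cpu_set, cpus_xor, for_each_cpu_mask, ...) to the pointer-based accessors, which keep working once the core stops exporting the cpu_*_map arrays. A minimal sketch of the new-style idiom used in __smp_rescan_cpus():

#include <linux/cpumask.h>

/* sketch: mark every possible-but-not-yet-present CPU as present */
static void example_rescan(void)
{
	cpumask_t avail;
	int cpu;

	cpumask_xor(&avail, cpu_possible_mask, cpu_present_mask);
	for_each_cpu(cpu, &avail)
		set_cpu_present(cpu, true); /* was: cpu_set(cpu, cpu_present_map) */
}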
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index 87be655557aa..a59557f1fb5f 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -810,7 +810,7 @@ static int etr_sync_clock_stop(struct etr_aib *aib, int port)
810 etr_sync.etr_port = port; 810 etr_sync.etr_port = port;
811 get_online_cpus(); 811 get_online_cpus();
812 atomic_set(&etr_sync.cpus, num_online_cpus() - 1); 812 atomic_set(&etr_sync.cpus, num_online_cpus() - 1);
813 rc = stop_machine(etr_sync_clock, &etr_sync, &cpu_online_map); 813 rc = stop_machine(etr_sync_clock, &etr_sync, cpu_online_mask);
814 put_online_cpus(); 814 put_online_cpus();
815 return rc; 815 return rc;
816} 816}
@@ -1579,7 +1579,7 @@ static void stp_work_fn(struct work_struct *work)
1579 memset(&stp_sync, 0, sizeof(stp_sync)); 1579 memset(&stp_sync, 0, sizeof(stp_sync));
1580 get_online_cpus(); 1580 get_online_cpus();
1581 atomic_set(&stp_sync.cpus, num_online_cpus() - 1); 1581 atomic_set(&stp_sync.cpus, num_online_cpus() - 1);
1582 stop_machine(stp_sync_clock, &stp_sync, &cpu_online_map); 1582 stop_machine(stp_sync_clock, &stp_sync, cpu_online_mask);
1583 put_online_cpus(); 1583 put_online_cpus();
1584 1584
1585 if (!check_sync_clock()) 1585 if (!check_sync_clock())
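Note: same theme in time.c; cpu_online_mask is already a const struct cpumask *, so it is handed to stop_machine() directly instead of taking the address of the old cpu_online_map. A sketch of the call shape:

#include <linux/stop_machine.h>
#include <linux/cpumask.h>

static int example_sync(void *data)
{
	return 0;	/* runs with all online CPUs corralled */
}

static int example_sync_all(void *data)
{
	return stop_machine(example_sync, data, cpu_online_mask);
}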
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index 94b06c31fc8a..2eafb8c7a746 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -52,20 +52,20 @@ static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu)
52{ 52{
53 cpumask_t mask; 53 cpumask_t mask;
54 54
55 cpus_clear(mask); 55 cpumask_clear(&mask);
56 if (!topology_enabled || !MACHINE_HAS_TOPOLOGY) { 56 if (!topology_enabled || !MACHINE_HAS_TOPOLOGY) {
57 cpumask_copy(&mask, cpumask_of(cpu)); 57 cpumask_copy(&mask, cpumask_of(cpu));
58 return mask; 58 return mask;
59 } 59 }
60 while (info) { 60 while (info) {
61 if (cpu_isset(cpu, info->mask)) { 61 if (cpumask_test_cpu(cpu, &info->mask)) {
62 mask = info->mask; 62 mask = info->mask;
63 break; 63 break;
64 } 64 }
65 info = info->next; 65 info = info->next;
66 } 66 }
67 if (cpus_empty(mask)) 67 if (cpumask_empty(&mask))
68 mask = cpumask_of_cpu(cpu); 68 cpumask_copy(&mask, cpumask_of(cpu));
69 return mask; 69 return mask;
70} 70}
71 71
@@ -85,10 +85,10 @@ static void add_cpus_to_mask(struct topology_cpu *tl_cpu,
85 if (cpu_logical_map(lcpu) != rcpu) 85 if (cpu_logical_map(lcpu) != rcpu)
86 continue; 86 continue;
87#ifdef CONFIG_SCHED_BOOK 87#ifdef CONFIG_SCHED_BOOK
88 cpu_set(lcpu, book->mask); 88 cpumask_set_cpu(lcpu, &book->mask);
89 cpu_book_id[lcpu] = book->id; 89 cpu_book_id[lcpu] = book->id;
90#endif 90#endif
91 cpu_set(lcpu, core->mask); 91 cpumask_set_cpu(lcpu, &core->mask);
92 cpu_core_id[lcpu] = core->id; 92 cpu_core_id[lcpu] = core->id;
93 smp_cpu_polarization[lcpu] = tl_cpu->pp; 93 smp_cpu_polarization[lcpu] = tl_cpu->pp;
94 } 94 }
@@ -101,13 +101,13 @@ static void clear_masks(void)
101 101
102 info = &core_info; 102 info = &core_info;
103 while (info) { 103 while (info) {
104 cpus_clear(info->mask); 104 cpumask_clear(&info->mask);
105 info = info->next; 105 info = info->next;
106 } 106 }
107#ifdef CONFIG_SCHED_BOOK 107#ifdef CONFIG_SCHED_BOOK
108 info = &book_info; 108 info = &book_info;
109 while (info) { 109 while (info) {
110 cpus_clear(info->mask); 110 cpumask_clear(&info->mask);
111 info = info->next; 111 info = info->next;
112 } 112 }
113#endif 113#endif
diff --git a/arch/s390/kernel/vdso32/Makefile b/arch/s390/kernel/vdso32/Makefile
index d13e8755a8cc..8ad2b34ad151 100644
--- a/arch/s390/kernel/vdso32/Makefile
+++ b/arch/s390/kernel/vdso32/Makefile
@@ -22,6 +22,9 @@ obj-y += vdso32_wrapper.o
22extra-y += vdso32.lds 22extra-y += vdso32.lds
23CPPFLAGS_vdso32.lds += -P -C -U$(ARCH) 23CPPFLAGS_vdso32.lds += -P -C -U$(ARCH)
24 24
25# Disable gcov profiling for VDSO code
26GCOV_PROFILE := n
27
25# Force dependency (incbin is bad) 28# Force dependency (incbin is bad)
26$(obj)/vdso32_wrapper.o : $(obj)/vdso32.so 29$(obj)/vdso32_wrapper.o : $(obj)/vdso32.so
27 30
diff --git a/arch/s390/kernel/vdso64/Makefile b/arch/s390/kernel/vdso64/Makefile
index 449352dda9cd..2a8ddfd12a5b 100644
--- a/arch/s390/kernel/vdso64/Makefile
+++ b/arch/s390/kernel/vdso64/Makefile
@@ -22,6 +22,9 @@ obj-y += vdso64_wrapper.o
22extra-y += vdso64.lds 22extra-y += vdso64.lds
23CPPFLAGS_vdso64.lds += -P -C -U$(ARCH) 23CPPFLAGS_vdso64.lds += -P -C -U$(ARCH)
24 24
25# Disable gcov profiling for VDSO code
26GCOV_PROFILE := n
27
25# Force dependency (incbin is bad) 28# Force dependency (incbin is bad)
26$(obj)/vdso64_wrapper.o : $(obj)/vdso64.so 29$(obj)/vdso64_wrapper.o : $(obj)/vdso64.so
27 30
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index 1bc18cdb525b..56fe6bc81fee 100644
--- a/arch/s390/kernel/vmlinux.lds.S
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -77,7 +77,7 @@ SECTIONS
77 . = ALIGN(PAGE_SIZE); 77 . = ALIGN(PAGE_SIZE);
78 INIT_DATA_SECTION(0x100) 78 INIT_DATA_SECTION(0x100)
79 79
80 PERCPU(0x100, PAGE_SIZE) 80 PERCPU_SECTION(0x100)
81 . = ALIGN(PAGE_SIZE); 81 . = ALIGN(PAGE_SIZE);
82 __init_end = .; /* freed after init ends here */ 82 __init_end = .; /* freed after init ends here */
83 83
diff --git a/arch/s390/mm/extmem.c b/arch/s390/mm/extmem.c
index 3cc95dd0a3a6..075ddada4911 100644
--- a/arch/s390/mm/extmem.c
+++ b/arch/s390/mm/extmem.c
@@ -412,6 +412,7 @@ __segment_load (char *name, int do_nonshared, unsigned long *addr, unsigned long
412 struct dcss_segment *seg; 412 struct dcss_segment *seg;
413 int rc, diag_cc; 413 int rc, diag_cc;
414 414
415 start_addr = end_addr = 0;
415 seg = kmalloc(sizeof(*seg), GFP_KERNEL | GFP_DMA); 416 seg = kmalloc(sizeof(*seg), GFP_KERNEL | GFP_DMA);
416 if (seg == NULL) { 417 if (seg == NULL) {
417 rc = -ENOMEM; 418 rc = -ENOMEM;
@@ -573,6 +574,7 @@ segment_modify_shared (char *name, int do_nonshared)
573 unsigned long start_addr, end_addr, dummy; 574 unsigned long start_addr, end_addr, dummy;
574 int rc, diag_cc; 575 int rc, diag_cc;
575 576
577 start_addr = end_addr = 0;
576 mutex_lock(&dcss_lock); 578 mutex_lock(&dcss_lock);
577 seg = segment_by_name (name); 579 seg = segment_by_name (name);
578 if (seg == NULL) { 580 if (seg == NULL) {
@@ -681,8 +683,6 @@ void
681segment_save(char *name) 683segment_save(char *name)
682{ 684{
683 struct dcss_segment *seg; 685 struct dcss_segment *seg;
684 int startpfn = 0;
685 int endpfn = 0;
686 char cmd1[160]; 686 char cmd1[160];
687 char cmd2[80]; 687 char cmd2[80];
688 int i, response; 688 int i, response;
@@ -698,8 +698,6 @@ segment_save(char *name)
698 goto out; 698 goto out;
699 } 699 }
700 700
701 startpfn = seg->start_addr >> PAGE_SHIFT;
702 endpfn = (seg->end) >> PAGE_SHIFT;
703 sprintf(cmd1, "DEFSEG %s", name); 701 sprintf(cmd1, "DEFSEG %s", name);
704 for (i=0; i<seg->segcnt; i++) { 702 for (i=0; i<seg->segcnt; i++) {
705 sprintf(cmd1+strlen(cmd1), " %lX-%lX %s", 703 sprintf(cmd1+strlen(cmd1), " %lX-%lX %s",
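Note: start_addr and end_addr are output parameters of the DCSS query diagnose, so zero-initializing them keeps the error paths from reading uninitialized stack; the dead startpfn/endpfn computation in segment_save() is simply dropped. The defensive pattern, as a generic sketch (hypothetical helper name):

/* sketch: keep out-parameters defined even when the query fails early */
static int example_query_segment(unsigned long *out_start, unsigned long *out_end)
{
	unsigned long start_addr, end_addr;
	int cc;

	start_addr = end_addr = 0;
	cc = hypothetical_diag_query(&start_addr, &end_addr); /* may fail before writing */
	if (cc)
		return -EIO;
	*out_start = start_addr;
	*out_end = end_addr;
	return 0;
}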
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index ab988135e5c6..a0f9e730f26a 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -225,33 +225,6 @@ static noinline void do_sigbus(struct pt_regs *regs, long int_code,
225 force_sig_info(SIGBUS, &si, tsk); 225 force_sig_info(SIGBUS, &si, tsk);
226} 226}
227 227
228#ifdef CONFIG_S390_EXEC_PROTECT
229static noinline int signal_return(struct pt_regs *regs, long int_code,
230 unsigned long trans_exc_code)
231{
232 u16 instruction;
233 int rc;
234
235 rc = __get_user(instruction, (u16 __user *) regs->psw.addr);
236
237 if (!rc && instruction == 0x0a77) {
238 clear_tsk_thread_flag(current, TIF_PER_TRAP);
239 if (is_compat_task())
240 sys32_sigreturn();
241 else
242 sys_sigreturn();
243 } else if (!rc && instruction == 0x0aad) {
244 clear_tsk_thread_flag(current, TIF_PER_TRAP);
245 if (is_compat_task())
246 sys32_rt_sigreturn();
247 else
248 sys_rt_sigreturn();
249 } else
250 do_sigsegv(regs, int_code, SEGV_MAPERR, trans_exc_code);
251 return 0;
252}
253#endif /* CONFIG_S390_EXEC_PROTECT */
254
255static noinline void do_fault_error(struct pt_regs *regs, long int_code, 228static noinline void do_fault_error(struct pt_regs *regs, long int_code,
256 unsigned long trans_exc_code, int fault) 229 unsigned long trans_exc_code, int fault)
257{ 230{
@@ -259,13 +232,6 @@ static noinline void do_fault_error(struct pt_regs *regs, long int_code,
259 232
260 switch (fault) { 233 switch (fault) {
261 case VM_FAULT_BADACCESS: 234 case VM_FAULT_BADACCESS:
262#ifdef CONFIG_S390_EXEC_PROTECT
263 if ((regs->psw.mask & PSW_MASK_ASC) == PSW_ASC_SECONDARY &&
264 (trans_exc_code & 3) == 0) {
265 signal_return(regs, int_code, trans_exc_code);
266 break;
267 }
268#endif /* CONFIG_S390_EXEC_PROTECT */
269 case VM_FAULT_BADMAP: 235 case VM_FAULT_BADMAP:
270 /* Bad memory access. Check if it is kernel or user space. */ 236 /* Bad memory access. Check if it is kernel or user space. */
271 if (regs->psw.mask & PSW_MASK_PSTATE) { 237 if (regs->psw.mask & PSW_MASK_PSTATE) {
@@ -414,11 +380,6 @@ void __kprobes do_dat_exception(struct pt_regs *regs, long pgm_int_code,
414 int access, fault; 380 int access, fault;
415 381
416 access = VM_READ | VM_EXEC | VM_WRITE; 382 access = VM_READ | VM_EXEC | VM_WRITE;
417#ifdef CONFIG_S390_EXEC_PROTECT
418 if ((regs->psw.mask & PSW_MASK_ASC) == PSW_ASC_SECONDARY &&
419 (trans_exc_code & 3) == 0)
420 access = VM_EXEC;
421#endif
422 fault = do_exception(regs, access, trans_exc_code); 383 fault = do_exception(regs, access, trans_exc_code);
423 if (unlikely(fault)) 384 if (unlikely(fault))
424 do_fault_error(regs, pgm_int_code & 255, trans_exc_code, fault); 385 do_fault_error(regs, pgm_int_code & 255, trans_exc_code, fault);
@@ -491,22 +452,28 @@ static int __init nopfault(char *str)
491 452
492__setup("nopfault", nopfault); 453__setup("nopfault", nopfault);
493 454
494typedef struct { 455struct pfault_refbk {
495 __u16 refdiagc; 456 u16 refdiagc;
496 __u16 reffcode; 457 u16 reffcode;
497 __u16 refdwlen; 458 u16 refdwlen;
498 __u16 refversn; 459 u16 refversn;
499 __u64 refgaddr; 460 u64 refgaddr;
500 __u64 refselmk; 461 u64 refselmk;
501 __u64 refcmpmk; 462 u64 refcmpmk;
502 __u64 reserved; 463 u64 reserved;
503} __attribute__ ((packed, aligned(8))) pfault_refbk_t; 464} __attribute__ ((packed, aligned(8)));
504 465
505int pfault_init(void) 466int pfault_init(void)
506{ 467{
507 pfault_refbk_t refbk = 468 struct pfault_refbk refbk = {
508 { 0x258, 0, 5, 2, __LC_CURRENT, 1ULL << 48, 1ULL << 48, 469 .refdiagc = 0x258,
509 __PF_RES_FIELD }; 470 .reffcode = 0,
471 .refdwlen = 5,
472 .refversn = 2,
473 .refgaddr = __LC_CURRENT_PID,
474 .refselmk = 1ULL << 48,
475 .refcmpmk = 1ULL << 48,
476 .reserved = __PF_RES_FIELD };
510 int rc; 477 int rc;
511 478
512 if (!MACHINE_IS_VM || pfault_disable) 479 if (!MACHINE_IS_VM || pfault_disable)
@@ -524,8 +491,12 @@ int pfault_init(void)
524 491
525void pfault_fini(void) 492void pfault_fini(void)
526{ 493{
527 pfault_refbk_t refbk = 494 struct pfault_refbk refbk = {
528 { 0x258, 1, 5, 2, 0ULL, 0ULL, 0ULL, 0ULL }; 495 .refdiagc = 0x258,
496 .reffcode = 1,
497 .refdwlen = 5,
498 .refversn = 2,
499 };
529 500
530 if (!MACHINE_IS_VM || pfault_disable) 501 if (!MACHINE_IS_VM || pfault_disable)
531 return; 502 return;
@@ -537,11 +508,15 @@ void pfault_fini(void)
537 : : "a" (&refbk), "m" (refbk) : "cc"); 508 : : "a" (&refbk), "m" (refbk) : "cc");
538} 509}
539 510
511static DEFINE_SPINLOCK(pfault_lock);
512static LIST_HEAD(pfault_list);
513
540static void pfault_interrupt(unsigned int ext_int_code, 514static void pfault_interrupt(unsigned int ext_int_code,
541 unsigned int param32, unsigned long param64) 515 unsigned int param32, unsigned long param64)
542{ 516{
543 struct task_struct *tsk; 517 struct task_struct *tsk;
544 __u16 subcode; 518 __u16 subcode;
519 pid_t pid;
545 520
546 /* 521 /*
547 * Get the external interruption subcode & pfault 522 * Get the external interruption subcode & pfault
@@ -553,44 +528,79 @@ static void pfault_interrupt(unsigned int ext_int_code,
553 if ((subcode & 0xff00) != __SUBCODE_MASK) 528 if ((subcode & 0xff00) != __SUBCODE_MASK)
554 return; 529 return;
555 kstat_cpu(smp_processor_id()).irqs[EXTINT_PFL]++; 530 kstat_cpu(smp_processor_id()).irqs[EXTINT_PFL]++;
556 531 if (subcode & 0x0080) {
557 /* 532 /* Get the token (= pid of the affected task). */
558 * Get the token (= address of the task structure of the affected task). 533 pid = sizeof(void *) == 4 ? param32 : param64;
559 */ 534 rcu_read_lock();
560#ifdef CONFIG_64BIT 535 tsk = find_task_by_pid_ns(pid, &init_pid_ns);
561 tsk = (struct task_struct *) param64; 536 if (tsk)
562#else 537 get_task_struct(tsk);
563 tsk = (struct task_struct *) param32; 538 rcu_read_unlock();
564#endif 539 if (!tsk)
565 540 return;
541 } else {
542 tsk = current;
543 }
544 spin_lock(&pfault_lock);
566 if (subcode & 0x0080) { 545 if (subcode & 0x0080) {
567 /* signal bit is set -> a page has been swapped in by VM */ 546 /* signal bit is set -> a page has been swapped in by VM */
568 if (xchg(&tsk->thread.pfault_wait, -1) != 0) { 547 if (tsk->thread.pfault_wait == 1) {
569 /* Initial interrupt was faster than the completion 548 /* Initial interrupt was faster than the completion
570 * interrupt. pfault_wait is valid. Set pfault_wait 549 * interrupt. pfault_wait is valid. Set pfault_wait
571 * back to zero and wake up the process. This can 550 * back to zero and wake up the process. This can
572 * safely be done because the task is still sleeping 551 * safely be done because the task is still sleeping
573 * and can't produce new pfaults. */ 552 * and can't produce new pfaults. */
574 tsk->thread.pfault_wait = 0; 553 tsk->thread.pfault_wait = 0;
554 list_del(&tsk->thread.list);
575 wake_up_process(tsk); 555 wake_up_process(tsk);
576 put_task_struct(tsk); 556 } else {
557 /* Completion interrupt was faster than initial
558 * interrupt. Set pfault_wait to -1 so the initial
559 * interrupt doesn't put the task to sleep. */
560 tsk->thread.pfault_wait = -1;
577 } 561 }
562 put_task_struct(tsk);
578 } else { 563 } else {
579 /* signal bit not set -> a real page is missing. */ 564 /* signal bit not set -> a real page is missing. */
580 get_task_struct(tsk); 565 if (tsk->thread.pfault_wait == -1) {
581 set_task_state(tsk, TASK_UNINTERRUPTIBLE);
582 if (xchg(&tsk->thread.pfault_wait, 1) != 0) {
583 /* Completion interrupt was faster than the initial 566 /* Completion interrupt was faster than the initial
584 * interrupt (swapped in a -1 for pfault_wait). Set 567 * interrupt (pfault_wait == -1). Set pfault_wait
585 * pfault_wait back to zero and exit. This can be 568 * back to zero and exit. */
586 * done safely because tsk is running in kernel
587 * mode and can't produce new pfaults. */
588 tsk->thread.pfault_wait = 0; 569 tsk->thread.pfault_wait = 0;
589 set_task_state(tsk, TASK_RUNNING); 570 } else {
590 put_task_struct(tsk); 571 /* Initial interrupt arrived before completion
591 } else 572 * interrupt. Let the task sleep. */
573 tsk->thread.pfault_wait = 1;
574 list_add(&tsk->thread.list, &pfault_list);
575 set_task_state(tsk, TASK_UNINTERRUPTIBLE);
592 set_tsk_need_resched(tsk); 576 set_tsk_need_resched(tsk);
577 }
578 }
579 spin_unlock(&pfault_lock);
580}
581
582static int __cpuinit pfault_cpu_notify(struct notifier_block *self,
583 unsigned long action, void *hcpu)
584{
585 struct thread_struct *thread, *next;
586 struct task_struct *tsk;
587
588 switch (action) {
589 case CPU_DEAD:
590 case CPU_DEAD_FROZEN:
591 spin_lock_irq(&pfault_lock);
592 list_for_each_entry_safe(thread, next, &pfault_list, list) {
593 thread->pfault_wait = 0;
594 list_del(&thread->list);
595 tsk = container_of(thread, struct task_struct, thread);
596 wake_up_process(tsk);
597 }
598 spin_unlock_irq(&pfault_lock);
599 break;
600 default:
601 break;
593 } 602 }
603 return NOTIFY_OK;
594} 604}
595 605
596static int __init pfault_irq_init(void) 606static int __init pfault_irq_init(void)
@@ -599,22 +609,21 @@ static int __init pfault_irq_init(void)
599 609
600 if (!MACHINE_IS_VM) 610 if (!MACHINE_IS_VM)
601 return 0; 611 return 0;
602 /*
603 * Try to get pfault pseudo page faults going.
604 */
605 rc = register_external_interrupt(0x2603, pfault_interrupt); 612 rc = register_external_interrupt(0x2603, pfault_interrupt);
606 if (rc) { 613 if (rc)
607 pfault_disable = 1; 614 goto out_extint;
608 return rc; 615 rc = pfault_init() == 0 ? 0 : -EOPNOTSUPP;
609 } 616 if (rc)
610 if (pfault_init() == 0) 617 goto out_pfault;
611 return 0; 618 hotcpu_notifier(pfault_cpu_notify, 0);
619 return 0;
612 620
613 /* Tough luck, no pfault. */ 621out_pfault:
614 pfault_disable = 1;
615 unregister_external_interrupt(0x2603, pfault_interrupt); 622 unregister_external_interrupt(0x2603, pfault_interrupt);
616 return 0; 623out_extint:
624 pfault_disable = 1;
625 return rc;
617} 626}
618early_initcall(pfault_irq_init); 627early_initcall(pfault_irq_init);
619 628
620#endif 629#endif /* CONFIG_PFAULT */
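Note: the pfault completion interrupt now delivers the pid of the affected task (matching the __LC_CURRENT_PID lowcore field stored in the switch_to hunk and referenced by pfault_init above) instead of a raw task_struct pointer, so the handler resolves the pid under RCU and pins the task before using it. The lookup idiom, extracted as a sketch:

#include <linux/pid.h>
#include <linux/pid_namespace.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>

/* resolve the pid carried in the interrupt parameter to a task reference */
static struct task_struct *example_task_from_pid(pid_t pid)
{
	struct task_struct *tsk;

	rcu_read_lock();
	tsk = find_task_by_pid_ns(pid, &init_pid_ns);
	if (tsk)
		get_task_struct(tsk);	/* hold it past rcu_read_unlock() */
	rcu_read_unlock();
	return tsk;			/* NULL if the task already exited */
}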
diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c
index 639cd21f2218..a4d856db9154 100644
--- a/arch/s390/mm/hugetlbpage.c
+++ b/arch/s390/mm/hugetlbpage.c
@@ -13,7 +13,6 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
13 pte_t *pteptr, pte_t pteval) 13 pte_t *pteptr, pte_t pteval)
14{ 14{
15 pmd_t *pmdp = (pmd_t *) pteptr; 15 pmd_t *pmdp = (pmd_t *) pteptr;
16 pte_t shadow_pteval = pteval;
17 unsigned long mask; 16 unsigned long mask;
18 17
19 if (!MACHINE_HAS_HPAGE) { 18 if (!MACHINE_HAS_HPAGE) {
@@ -21,18 +20,9 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
21 mask = pte_val(pteval) & 20 mask = pte_val(pteval) &
22 (_SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO); 21 (_SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO);
23 pte_val(pteval) = (_SEGMENT_ENTRY + __pa(pteptr)) | mask; 22 pte_val(pteval) = (_SEGMENT_ENTRY + __pa(pteptr)) | mask;
24 if (mm->context.noexec) {
25 pteptr += PTRS_PER_PTE;
26 pte_val(shadow_pteval) =
27 (_SEGMENT_ENTRY + __pa(pteptr)) | mask;
28 }
29 } 23 }
30 24
31 pmd_val(*pmdp) = pte_val(pteval); 25 pmd_val(*pmdp) = pte_val(pteval);
32 if (mm->context.noexec) {
33 pmdp = get_shadow_table(pmdp);
34 pmd_val(*pmdp) = pte_val(shadow_pteval);
35 }
36} 26}
37 27
38int arch_prepare_hugepage(struct page *page) 28int arch_prepare_hugepage(struct page *page)
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index bb409332a484..dfefc2171691 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -175,7 +175,8 @@ void kernel_map_pages(struct page *page, int numpages, int enable)
175 pmd = pmd_offset(pud, address); 175 pmd = pmd_offset(pud, address);
176 pte = pte_offset_kernel(pmd, address); 176 pte = pte_offset_kernel(pmd, address);
177 if (!enable) { 177 if (!enable) {
178 ptep_invalidate(&init_mm, address, pte); 178 __ptep_ipte(address, pte);
179 pte_val(*pte) = _PAGE_TYPE_EMPTY;
179 continue; 180 continue;
180 } 181 }
181 *pte = mk_pte_phys(address, __pgprot(_PAGE_TYPE_RW)); 182 *pte = mk_pte_phys(address, __pgprot(_PAGE_TYPE_RW));
diff --git a/arch/s390/mm/pageattr.c b/arch/s390/mm/pageattr.c
index f05edcc3beff..d013ed39743b 100644
--- a/arch/s390/mm/pageattr.c
+++ b/arch/s390/mm/pageattr.c
@@ -28,7 +28,7 @@ static void change_page_attr(unsigned long addr, int numpages,
28 28
29 pte = *ptep; 29 pte = *ptep;
30 pte = set(pte); 30 pte = set(pte);
31 ptep_invalidate(&init_mm, addr, ptep); 31 __ptep_ipte(addr, ptep);
32 *ptep = pte; 32 *ptep = pte;
33 addr += PAGE_SIZE; 33 addr += PAGE_SIZE;
34 } 34 }
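Note: both init.c and pageattr.c swap ptep_invalidate() for the lower-level __ptep_ipte() plus an explicit pte rewrite; with the noexec shadow tables gone there is no second table for ptep_invalidate() to keep in sync. init.c then marks the pte empty while pageattr.c writes back the modified pte. The two-step pattern from init.c, as a sketch (hypothetical wrapper name):

/* sketch: invalidate a kernel pte, shadow-table logic removed */
static void example_invalidate_kernel_pte(unsigned long address, pte_t *pte)
{
	__ptep_ipte(address, pte);		/* IPTE flushes the TLB entry */
	pte_val(*pte) = _PAGE_TYPE_EMPTY;	/* then mark the pte invalid */
}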
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index e1850c28cd68..8d4330642512 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -40,7 +40,6 @@ DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
40static DEFINE_PER_CPU(struct rcu_table_freelist *, rcu_table_freelist); 40static DEFINE_PER_CPU(struct rcu_table_freelist *, rcu_table_freelist);
41 41
42static void __page_table_free(struct mm_struct *mm, unsigned long *table); 42static void __page_table_free(struct mm_struct *mm, unsigned long *table);
43static void __crst_table_free(struct mm_struct *mm, unsigned long *table);
44 43
45static struct rcu_table_freelist *rcu_table_freelist_get(struct mm_struct *mm) 44static struct rcu_table_freelist *rcu_table_freelist_get(struct mm_struct *mm)
46{ 45{
@@ -67,7 +66,7 @@ static void rcu_table_freelist_callback(struct rcu_head *head)
67 while (batch->pgt_index > 0) 66 while (batch->pgt_index > 0)
68 __page_table_free(batch->mm, batch->table[--batch->pgt_index]); 67 __page_table_free(batch->mm, batch->table[--batch->pgt_index]);
69 while (batch->crst_index < RCU_FREELIST_SIZE) 68 while (batch->crst_index < RCU_FREELIST_SIZE)
70 __crst_table_free(batch->mm, batch->table[batch->crst_index++]); 69 crst_table_free(batch->mm, batch->table[batch->crst_index++]);
71 free_page((unsigned long) batch); 70 free_page((unsigned long) batch);
72} 71}
73 72
@@ -125,63 +124,33 @@ static int __init parse_vmalloc(char *arg)
125} 124}
126early_param("vmalloc", parse_vmalloc); 125early_param("vmalloc", parse_vmalloc);
127 126
128unsigned long *crst_table_alloc(struct mm_struct *mm, int noexec) 127unsigned long *crst_table_alloc(struct mm_struct *mm)
129{ 128{
130 struct page *page = alloc_pages(GFP_KERNEL, ALLOC_ORDER); 129 struct page *page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
131 130
132 if (!page) 131 if (!page)
133 return NULL; 132 return NULL;
134 page->index = 0;
135 if (noexec) {
136 struct page *shadow = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
137 if (!shadow) {
138 __free_pages(page, ALLOC_ORDER);
139 return NULL;
140 }
141 page->index = page_to_phys(shadow);
142 }
143 spin_lock_bh(&mm->context.list_lock);
144 list_add(&page->lru, &mm->context.crst_list);
145 spin_unlock_bh(&mm->context.list_lock);
146 return (unsigned long *) page_to_phys(page); 133 return (unsigned long *) page_to_phys(page);
147} 134}
148 135
149static void __crst_table_free(struct mm_struct *mm, unsigned long *table)
150{
151 unsigned long *shadow = get_shadow_table(table);
152
153 if (shadow)
154 free_pages((unsigned long) shadow, ALLOC_ORDER);
155 free_pages((unsigned long) table, ALLOC_ORDER);
156}
157
158void crst_table_free(struct mm_struct *mm, unsigned long *table) 136void crst_table_free(struct mm_struct *mm, unsigned long *table)
159{ 137{
160 struct page *page = virt_to_page(table); 138 free_pages((unsigned long) table, ALLOC_ORDER);
161
162 spin_lock_bh(&mm->context.list_lock);
163 list_del(&page->lru);
164 spin_unlock_bh(&mm->context.list_lock);
165 __crst_table_free(mm, table);
166} 139}
167 140
168void crst_table_free_rcu(struct mm_struct *mm, unsigned long *table) 141void crst_table_free_rcu(struct mm_struct *mm, unsigned long *table)
169{ 142{
170 struct rcu_table_freelist *batch; 143 struct rcu_table_freelist *batch;
171 struct page *page = virt_to_page(table);
172 144
173 spin_lock_bh(&mm->context.list_lock);
174 list_del(&page->lru);
175 spin_unlock_bh(&mm->context.list_lock);
176 if (atomic_read(&mm->mm_users) < 2 && 145 if (atomic_read(&mm->mm_users) < 2 &&
177 cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) { 146 cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) {
178 __crst_table_free(mm, table); 147 crst_table_free(mm, table);
179 return; 148 return;
180 } 149 }
181 batch = rcu_table_freelist_get(mm); 150 batch = rcu_table_freelist_get(mm);
182 if (!batch) { 151 if (!batch) {
183 smp_call_function(smp_sync, NULL, 1); 152 smp_call_function(smp_sync, NULL, 1);
184 __crst_table_free(mm, table); 153 crst_table_free(mm, table);
185 return; 154 return;
186 } 155 }
187 batch->table[--batch->crst_index] = table; 156 batch->table[--batch->crst_index] = table;
@@ -197,7 +166,7 @@ int crst_table_upgrade(struct mm_struct *mm, unsigned long limit)
197 166
198 BUG_ON(limit > (1UL << 53)); 167 BUG_ON(limit > (1UL << 53));
199repeat: 168repeat:
200 table = crst_table_alloc(mm, mm->context.noexec); 169 table = crst_table_alloc(mm);
201 if (!table) 170 if (!table)
202 return -ENOMEM; 171 return -ENOMEM;
203 spin_lock_bh(&mm->page_table_lock); 172 spin_lock_bh(&mm->page_table_lock);
@@ -273,7 +242,7 @@ unsigned long *page_table_alloc(struct mm_struct *mm)
273 unsigned long *table; 242 unsigned long *table;
274 unsigned long bits; 243 unsigned long bits;
275 244
276 bits = (mm->context.noexec || mm->context.has_pgste) ? 3UL : 1UL; 245 bits = (mm->context.has_pgste) ? 3UL : 1UL;
277 spin_lock_bh(&mm->context.list_lock); 246 spin_lock_bh(&mm->context.list_lock);
278 page = NULL; 247 page = NULL;
279 if (!list_empty(&mm->context.pgtable_list)) { 248 if (!list_empty(&mm->context.pgtable_list)) {
@@ -329,7 +298,7 @@ void page_table_free(struct mm_struct *mm, unsigned long *table)
329 struct page *page; 298 struct page *page;
330 unsigned long bits; 299 unsigned long bits;
331 300
332 bits = (mm->context.noexec || mm->context.has_pgste) ? 3UL : 1UL; 301 bits = (mm->context.has_pgste) ? 3UL : 1UL;
333 bits <<= (__pa(table) & (PAGE_SIZE - 1)) / 256 / sizeof(unsigned long); 302 bits <<= (__pa(table) & (PAGE_SIZE - 1)) / 256 / sizeof(unsigned long);
334 page = pfn_to_page(__pa(table) >> PAGE_SHIFT); 303 page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
335 spin_lock_bh(&mm->context.list_lock); 304 spin_lock_bh(&mm->context.list_lock);
@@ -366,7 +335,7 @@ void page_table_free_rcu(struct mm_struct *mm, unsigned long *table)
366 page_table_free(mm, table); 335 page_table_free(mm, table);
367 return; 336 return;
368 } 337 }
369 bits = (mm->context.noexec || mm->context.has_pgste) ? 3UL : 1UL; 338 bits = (mm->context.has_pgste) ? 3UL : 1UL;
370 bits <<= (__pa(table) & (PAGE_SIZE - 1)) / 256 / sizeof(unsigned long); 339 bits <<= (__pa(table) & (PAGE_SIZE - 1)) / 256 / sizeof(unsigned long);
371 page = pfn_to_page(__pa(table) >> PAGE_SHIFT); 340 page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
372 spin_lock_bh(&mm->context.list_lock); 341 spin_lock_bh(&mm->context.list_lock);
@@ -379,25 +348,6 @@ void page_table_free_rcu(struct mm_struct *mm, unsigned long *table)
379 rcu_table_freelist_finish(); 348 rcu_table_freelist_finish();
380} 349}
381 350
382void disable_noexec(struct mm_struct *mm, struct task_struct *tsk)
383{
384 struct page *page;
385
386 spin_lock_bh(&mm->context.list_lock);
387 /* Free shadow region and segment tables. */
388 list_for_each_entry(page, &mm->context.crst_list, lru)
389 if (page->index) {
390 free_pages((unsigned long) page->index, ALLOC_ORDER);
391 page->index = 0;
392 }
393 /* "Free" second halves of page tables. */
394 list_for_each_entry(page, &mm->context.pgtable_list, lru)
395 page->flags &= ~SECOND_HALVES;
396 spin_unlock_bh(&mm->context.list_lock);
397 mm->context.noexec = 0;
398 update_mm(mm, tsk);
399}
400
401/* 351/*
402 * switch on pgstes for its userspace process (for kvm) 352 * switch on pgstes for its userspace process (for kvm)
403 */ 353 */
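Note: with the shadow tables gone, crst_table_alloc() loses its noexec argument together with the page->index bookkeeping and the crst_list manipulation, and crst_table_free() collapses to a plain free_pages(); the __crst_table_free() helper disappears entirely. Callers now pair up as in this sketch:

/* sketch of the simplified allocate/free pairing after this patch */
static int example_crst_cycle(struct mm_struct *mm)
{
	unsigned long *table = crst_table_alloc(mm);	/* no noexec argument */

	if (!table)
		return -ENOMEM;
	/* ... try to install the table ... */
	crst_table_free(mm, table);	/* now just free_pages(), no list_del() */
	return 0;
}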
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index 34c43f23b28c..8c1970d1dd91 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -95,7 +95,7 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
95 pu_dir = vmem_pud_alloc(); 95 pu_dir = vmem_pud_alloc();
96 if (!pu_dir) 96 if (!pu_dir)
97 goto out; 97 goto out;
98 pgd_populate_kernel(&init_mm, pg_dir, pu_dir); 98 pgd_populate(&init_mm, pg_dir, pu_dir);
99 } 99 }
100 100
101 pu_dir = pud_offset(pg_dir, address); 101 pu_dir = pud_offset(pg_dir, address);
@@ -103,7 +103,7 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
103 pm_dir = vmem_pmd_alloc(); 103 pm_dir = vmem_pmd_alloc();
104 if (!pm_dir) 104 if (!pm_dir)
105 goto out; 105 goto out;
106 pud_populate_kernel(&init_mm, pu_dir, pm_dir); 106 pud_populate(&init_mm, pu_dir, pm_dir);
107 } 107 }
108 108
109 pte = mk_pte_phys(address, __pgprot(ro ? _PAGE_RO : 0)); 109 pte = mk_pte_phys(address, __pgprot(ro ? _PAGE_RO : 0));
@@ -123,7 +123,7 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
123 pt_dir = vmem_pte_alloc(); 123 pt_dir = vmem_pte_alloc();
124 if (!pt_dir) 124 if (!pt_dir)
125 goto out; 125 goto out;
126 pmd_populate_kernel(&init_mm, pm_dir, pt_dir); 126 pmd_populate(&init_mm, pm_dir, pt_dir);
127 } 127 }
128 128
129 pt_dir = pte_offset_kernel(pm_dir, address); 129 pt_dir = pte_offset_kernel(pm_dir, address);
@@ -159,7 +159,7 @@ static void vmem_remove_range(unsigned long start, unsigned long size)
159 continue; 159 continue;
160 160
161 if (pmd_huge(*pm_dir)) { 161 if (pmd_huge(*pm_dir)) {
162 pmd_clear_kernel(pm_dir); 162 pmd_clear(pm_dir);
163 address += HPAGE_SIZE - PAGE_SIZE; 163 address += HPAGE_SIZE - PAGE_SIZE;
164 continue; 164 continue;
165 } 165 }
@@ -192,7 +192,7 @@ int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
192 pu_dir = vmem_pud_alloc(); 192 pu_dir = vmem_pud_alloc();
193 if (!pu_dir) 193 if (!pu_dir)
194 goto out; 194 goto out;
195 pgd_populate_kernel(&init_mm, pg_dir, pu_dir); 195 pgd_populate(&init_mm, pg_dir, pu_dir);
196 } 196 }
197 197
198 pu_dir = pud_offset(pg_dir, address); 198 pu_dir = pud_offset(pg_dir, address);
@@ -200,7 +200,7 @@ int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
200 pm_dir = vmem_pmd_alloc(); 200 pm_dir = vmem_pmd_alloc();
201 if (!pm_dir) 201 if (!pm_dir)
202 goto out; 202 goto out;
203 pud_populate_kernel(&init_mm, pu_dir, pm_dir); 203 pud_populate(&init_mm, pu_dir, pm_dir);
204 } 204 }
205 205
206 pm_dir = pmd_offset(pu_dir, address); 206 pm_dir = pmd_offset(pu_dir, address);
@@ -208,7 +208,7 @@ int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
208 pt_dir = vmem_pte_alloc(); 208 pt_dir = vmem_pte_alloc();
209 if (!pt_dir) 209 if (!pt_dir)
210 goto out; 210 goto out;
211 pmd_populate_kernel(&init_mm, pm_dir, pt_dir); 211 pmd_populate(&init_mm, pm_dir, pt_dir);
212 } 212 }
213 213
214 pt_dir = pte_offset_kernel(pm_dir, address); 214 pt_dir = pte_offset_kernel(pm_dir, address);
diff --git a/arch/s390/oprofile/hwsampler.c b/arch/s390/oprofile/hwsampler.c
index 33cbd373cce4..053caa0fd276 100644
--- a/arch/s390/oprofile/hwsampler.c
+++ b/arch/s390/oprofile/hwsampler.c
@@ -5,6 +5,7 @@
5 * Author: Heinz Graalfs <graalfs@de.ibm.com> 5 * Author: Heinz Graalfs <graalfs@de.ibm.com>
6 */ 6 */
7 7
8#include <linux/kernel_stat.h>
8#include <linux/kernel.h> 9#include <linux/kernel.h>
9#include <linux/module.h> 10#include <linux/module.h>
10#include <linux/smp.h> 11#include <linux/smp.h>
@@ -674,17 +675,11 @@ int hwsampler_activate(unsigned int cpu)
674static void hws_ext_handler(unsigned int ext_int_code, 675static void hws_ext_handler(unsigned int ext_int_code,
675 unsigned int param32, unsigned long param64) 676 unsigned int param32, unsigned long param64)
676{ 677{
677 int cpu;
678 struct hws_cpu_buffer *cb; 678 struct hws_cpu_buffer *cb;
679 679
680 cpu = smp_processor_id(); 680 kstat_cpu(smp_processor_id()).irqs[EXTINT_CPM]++;
681 cb = &per_cpu(sampler_cpu_buffer, cpu); 681 cb = &__get_cpu_var(sampler_cpu_buffer);
682 682 atomic_xchg(&cb->ext_params, atomic_read(&cb->ext_params) | param32);
683 atomic_xchg(
684 &cb->ext_params,
685 atomic_read(&cb->ext_params)
686 | S390_lowcore.ext_params);
687
688 if (hws_wq) 683 if (hws_wq)
689 queue_work(hws_wq, &cb->worker); 684 queue_work(hws_wq, &cb->worker);
690} 685}
@@ -764,7 +759,7 @@ static int worker_check_error(unsigned int cpu, int ext_params)
764 if (!sdbt || !*sdbt) 759 if (!sdbt || !*sdbt)
765 return -EINVAL; 760 return -EINVAL;
766 761
767 if (ext_params & EI_IEA) 762 if (ext_params & EI_PRA)
768 cb->req_alert++; 763 cb->req_alert++;
769 764
770 if (ext_params & EI_LSDA) 765 if (ext_params & EI_LSDA)
@@ -1009,7 +1004,7 @@ int hwsampler_deallocate()
1009 if (hws_state != HWS_STOPPED) 1004 if (hws_state != HWS_STOPPED)
1010 goto deallocate_exit; 1005 goto deallocate_exit;
1011 1006
1012 smp_ctl_clear_bit(0, 5); /* set bit 58 CR0 off */ 1007 ctl_clear_bit(0, 5); /* set bit 58 CR0 off */
1013 deallocate_sdbt(); 1008 deallocate_sdbt();
1014 1009
1015 hws_state = HWS_DEALLOCATED; 1010 hws_state = HWS_DEALLOCATED;
@@ -1123,7 +1118,7 @@ int hwsampler_shutdown()
1123 mutex_lock(&hws_sem); 1118 mutex_lock(&hws_sem);
1124 1119
1125 if (hws_state == HWS_STOPPED) { 1120 if (hws_state == HWS_STOPPED) {
1126 smp_ctl_clear_bit(0, 5); /* set bit 58 CR0 off */ 1121 ctl_clear_bit(0, 5); /* set bit 58 CR0 off */
1127 deallocate_sdbt(); 1122 deallocate_sdbt();
1128 } 1123 }
1129 if (hws_wq) { 1124 if (hws_wq) {
@@ -1198,7 +1193,7 @@ start_all_exit:
1198 hws_oom = 1; 1193 hws_oom = 1;
1199 hws_flush_all = 0; 1194 hws_flush_all = 0;
1200 /* now let them in, 1407 CPUMF external interrupts */ 1195 /* now let them in, 1407 CPUMF external interrupts */
1201 smp_ctl_set_bit(0, 5); /* set CR0 bit 58 */ 1196 ctl_set_bit(0, 5); /* set CR0 bit 58 */
1202 1197
1203 return 0; 1198 return 0;
1204} 1199}
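Note: several cleanups ride along in hwsampler.c: the request-alert condition is tested against EI_PRA instead of EI_IEA, the external-interrupt parameter comes from param32 rather than being re-read from the lowcore, the control-register bit is flipped with the local ctl_set_bit()/ctl_clear_bit() rather than the all-CPU smp_ctl_* variants, and the per-cpu buffer is fetched with __get_cpu_var(), the idiomatic accessor when code already runs on the CPU whose data it wants. A sketch of the accessor change with a hypothetical per-cpu structure:

#include <linux/percpu.h>

struct example_buf { int pending; };
static DEFINE_PER_CPU(struct example_buf, example_buf);

static void example_handler(void)
{
	struct example_buf *cb;

	/* old: cb = &per_cpu(example_buf, smp_processor_id()); */
	cb = &__get_cpu_var(example_buf);	/* this CPU's instance */
	cb->pending = 1;
}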
diff --git a/arch/sh/kernel/vmlinux.lds.S b/arch/sh/kernel/vmlinux.lds.S
index af4d46187a79..731c10ce67b5 100644
--- a/arch/sh/kernel/vmlinux.lds.S
+++ b/arch/sh/kernel/vmlinux.lds.S
@@ -66,7 +66,7 @@ SECTIONS
66 __machvec_end = .; 66 __machvec_end = .;
67 } 67 }
68 68
69 PERCPU(L1_CACHE_BYTES, PAGE_SIZE) 69 PERCPU_SECTION(L1_CACHE_BYTES)
70 70
71 /* 71 /*
72 * .exit.text is discarded at runtime, not link time, to deal with 72 * .exit.text is discarded at runtime, not link time, to deal with
diff --git a/arch/sparc/kernel/vmlinux.lds.S b/arch/sparc/kernel/vmlinux.lds.S
index 92b557afe535..c0220759003e 100644
--- a/arch/sparc/kernel/vmlinux.lds.S
+++ b/arch/sparc/kernel/vmlinux.lds.S
@@ -108,7 +108,7 @@ SECTIONS
108 __sun4v_2insn_patch_end = .; 108 __sun4v_2insn_patch_end = .;
109 } 109 }
110 110
111 PERCPU(SMP_CACHE_BYTES, PAGE_SIZE) 111 PERCPU_SECTION(SMP_CACHE_BYTES)
112 112
113 . = ALIGN(PAGE_SIZE); 113 . = ALIGN(PAGE_SIZE);
114 __init_end = .; 114 __init_end = .;
diff --git a/arch/tile/kernel/vmlinux.lds.S b/arch/tile/kernel/vmlinux.lds.S
index 38f64fafdc10..631f10de12fe 100644
--- a/arch/tile/kernel/vmlinux.lds.S
+++ b/arch/tile/kernel/vmlinux.lds.S
@@ -60,7 +60,7 @@ SECTIONS
60 . = ALIGN(PAGE_SIZE); 60 . = ALIGN(PAGE_SIZE);
61 VMLINUX_SYMBOL(_sinitdata) = .; 61 VMLINUX_SYMBOL(_sinitdata) = .;
62 INIT_DATA_SECTION(16) :data =0 62 INIT_DATA_SECTION(16) :data =0
63 PERCPU(L2_CACHE_BYTES, PAGE_SIZE) 63 PERCPU_SECTION(L2_CACHE_BYTES)
64 . = ALIGN(PAGE_SIZE); 64 . = ALIGN(PAGE_SIZE);
65 VMLINUX_SYMBOL(_einitdata) = .; 65 VMLINUX_SYMBOL(_einitdata) = .;
66 66
diff --git a/arch/um/Kconfig.x86 b/arch/um/Kconfig.x86
index a9da516a5274..795ea8e869f4 100644
--- a/arch/um/Kconfig.x86
+++ b/arch/um/Kconfig.x86
@@ -29,10 +29,10 @@ config X86_64
29 def_bool 64BIT 29 def_bool 64BIT
30 30
31config RWSEM_XCHGADD_ALGORITHM 31config RWSEM_XCHGADD_ALGORITHM
32 def_bool X86_XADD 32 def_bool X86_XADD && 64BIT
33 33
34config RWSEM_GENERIC_SPINLOCK 34config RWSEM_GENERIC_SPINLOCK
35 def_bool !X86_XADD 35 def_bool !RWSEM_XCHGADD_ALGORITHM
36 36
37config 3_LEVEL_PGTABLES 37config 3_LEVEL_PGTABLES
38 bool "Three-level pagetables (EXPERIMENTAL)" if !64BIT 38 bool "Three-level pagetables (EXPERIMENTAL)" if !64BIT
diff --git a/arch/um/include/asm/common.lds.S b/arch/um/include/asm/common.lds.S
index 34bede8aad4a..4938de5512d2 100644
--- a/arch/um/include/asm/common.lds.S
+++ b/arch/um/include/asm/common.lds.S
@@ -42,7 +42,7 @@
42 INIT_SETUP(0) 42 INIT_SETUP(0)
43 } 43 }
44 44
45 PERCPU(32, 32) 45 PERCPU_SECTION(32)
46 46
47 .initcall.init : { 47 .initcall.init : {
48 INIT_CALLS 48 INIT_CALLS
diff --git a/arch/x86/include/asm/linkage.h b/arch/x86/include/asm/linkage.h
index 12d55e773eb6..48142971b25d 100644
--- a/arch/x86/include/asm/linkage.h
+++ b/arch/x86/include/asm/linkage.h
@@ -8,11 +8,6 @@
8 8
9#ifdef CONFIG_X86_32 9#ifdef CONFIG_X86_32
10#define asmlinkage CPP_ASMLINKAGE __attribute__((regparm(0))) 10#define asmlinkage CPP_ASMLINKAGE __attribute__((regparm(0)))
11/*
12 * For 32-bit UML - mark functions implemented in assembly that use
13 * regparm input parameters:
14 */
15#define asmregparm __attribute__((regparm(3)))
16 11
17/* 12/*
18 * Make sure the compiler doesn't do anything stupid with the 13 * Make sure the compiler doesn't do anything stupid with the
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index 53278b0dfdf6..a0a9779084d1 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -509,6 +509,11 @@ do { \
509 * it in software. The address used in the cmpxchg16 instruction must be 509 * it in software. The address used in the cmpxchg16 instruction must be
510 * aligned to a 16 byte boundary. 510 * aligned to a 16 byte boundary.
511 */ 511 */
512#ifdef CONFIG_SMP
513#define CMPXCHG16B_EMU_CALL "call this_cpu_cmpxchg16b_emu\n\t" ASM_NOP3
514#else
515#define CMPXCHG16B_EMU_CALL "call this_cpu_cmpxchg16b_emu\n\t" ASM_NOP2
516#endif
512#define percpu_cmpxchg16b_double(pcp1, o1, o2, n1, n2) \ 517#define percpu_cmpxchg16b_double(pcp1, o1, o2, n1, n2) \
513({ \ 518({ \
514 char __ret; \ 519 char __ret; \
@@ -517,7 +522,7 @@ do { \
517 typeof(o2) __o2 = o2; \ 522 typeof(o2) __o2 = o2; \
518 typeof(o2) __n2 = n2; \ 523 typeof(o2) __n2 = n2; \
519 typeof(o2) __dummy; \ 524 typeof(o2) __dummy; \
520 alternative_io("call this_cpu_cmpxchg16b_emu\n\t" ASM_NOP4, \ 525 alternative_io(CMPXCHG16B_EMU_CALL, \
521 "cmpxchg16b " __percpu_prefix "(%%rsi)\n\tsetz %0\n\t", \ 526 "cmpxchg16b " __percpu_prefix "(%%rsi)\n\tsetz %0\n\t", \
522 X86_FEATURE_CX16, \ 527 X86_FEATURE_CX16, \
523 ASM_OUTPUT2("=a"(__ret), "=d"(__dummy)), \ 528 ASM_OUTPUT2("=a"(__ret), "=d"(__dummy)), \
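Note: alternative_io() patches the call-plus-nop sequence over with the real cmpxchg16b when X86_FEATURE_CX16 is available, so the call site is padded to the length of the replacement; on SMP the percpu access carries a one-byte %gs segment prefix, hence ASM_NOP3 there versus ASM_NOP2 on UP. A plausible byte accounting (my arithmetic, not from the patch):

/*
 * replacement:  [%gs:] cmpxchg16b (%rsi)  +  setz %al
 *     SMP:   1  +  4  +  3  =  8 bytes
 *     UP:         4  +  3  =  7 bytes
 * original:  call this_cpu_cmpxchg16b_emu (5 bytes) + ASM_NOP3 / ASM_NOP2
 *     SMP:   5 + 3 = 8 bytes          UP:   5 + 2 = 7 bytes
 */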
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 9488dcff7aec..e5293394b548 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -676,7 +676,7 @@ void mask_ioapic_entries(void)
676 int apic, pin; 676 int apic, pin;
677 677
678 for (apic = 0; apic < nr_ioapics; apic++) { 678 for (apic = 0; apic < nr_ioapics; apic++) {
679 if (ioapics[apic].saved_registers) 679 if (!ioapics[apic].saved_registers)
680 continue; 680 continue;
681 681
682 for (pin = 0; pin < ioapics[apic].nr_registers; pin++) { 682 for (pin = 0; pin < ioapics[apic].nr_registers; pin++) {
@@ -699,7 +699,7 @@ int restore_ioapic_entries(void)
699 int apic, pin; 699 int apic, pin;
700 700
701 for (apic = 0; apic < nr_ioapics; apic++) { 701 for (apic = 0; apic < nr_ioapics; apic++) {
702 if (ioapics[apic].saved_registers) 702 if (!ioapics[apic].saved_registers)
703 continue; 703 continue;
704 704
705 for (pin = 0; pin < ioapics[apic].nr_registers; pin++) 705 for (pin = 0; pin < ioapics[apic].nr_registers; pin++)
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
index f65e5b521dbd..807c2a2b80f1 100644
--- a/arch/x86/kernel/ptrace.c
+++ b/arch/x86/kernel/ptrace.c
@@ -1363,7 +1363,7 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
1363 * We must return the syscall number to actually look up in the table. 1363 * We must return the syscall number to actually look up in the table.
1364 * This can be -1L to skip running any syscall at all. 1364 * This can be -1L to skip running any syscall at all.
1365 */ 1365 */
1366asmregparm long syscall_trace_enter(struct pt_regs *regs) 1366long syscall_trace_enter(struct pt_regs *regs)
1367{ 1367{
1368 long ret = 0; 1368 long ret = 0;
1369 1369
@@ -1408,7 +1408,7 @@ asmregparm long syscall_trace_enter(struct pt_regs *regs)
1408 return ret ?: regs->orig_ax; 1408 return ret ?: regs->orig_ax;
1409} 1409}
1410 1410
1411asmregparm void syscall_trace_leave(struct pt_regs *regs) 1411void syscall_trace_leave(struct pt_regs *regs)
1412{ 1412{
1413 bool step; 1413 bool step;
1414 1414
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index 49927a863cc1..61682f0ac264 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -326,7 +326,7 @@ SECTIONS
326 } 326 }
327 327
328#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP) 328#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
329 PERCPU(INTERNODE_CACHE_BYTES, PAGE_SIZE) 329 PERCPU_SECTION(INTERNODE_CACHE_BYTES)
330#endif 330#endif
331 331
332 . = ALIGN(PAGE_SIZE); 332 . = ALIGN(PAGE_SIZE);
diff --git a/arch/xtensa/kernel/vmlinux.lds.S b/arch/xtensa/kernel/vmlinux.lds.S
index a2820065927e..88ecea3facb4 100644
--- a/arch/xtensa/kernel/vmlinux.lds.S
+++ b/arch/xtensa/kernel/vmlinux.lds.S
@@ -155,7 +155,7 @@ SECTIONS
155 INIT_RAM_FS 155 INIT_RAM_FS
156 } 156 }
157 157
158 PERCPU(XCHAL_ICACHE_LINESIZE, PAGE_SIZE) 158 PERCPU_SECTION(XCHAL_ICACHE_LINESIZE)
159 159
160 /* We need this dummy segment here */ 160 /* We need this dummy segment here */
161 161
diff --git a/drivers/ata/pata_pcmcia.c b/drivers/ata/pata_pcmcia.c
index 29af660d968b..021abe6d8527 100644
--- a/drivers/ata/pata_pcmcia.c
+++ b/drivers/ata/pata_pcmcia.c
@@ -309,7 +309,7 @@ static void pcmcia_remove_one(struct pcmcia_device *pdev)
309 pcmcia_disable_device(pdev); 309 pcmcia_disable_device(pdev);
310} 310}
311 311
312static struct pcmcia_device_id pcmcia_devices[] = { 312static const struct pcmcia_device_id pcmcia_devices[] = {
313 PCMCIA_DEVICE_FUNC_ID(4), 313 PCMCIA_DEVICE_FUNC_ID(4),
314 PCMCIA_DEVICE_MANF_CARD(0x0000, 0x0000), /* Corsair */ 314 PCMCIA_DEVICE_MANF_CARD(0x0000, 0x0000), /* Corsair */
315 PCMCIA_DEVICE_MANF_CARD(0x0007, 0x0000), /* Hitachi */ 315 PCMCIA_DEVICE_MANF_CARD(0x0007, 0x0000), /* Hitachi */
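Note: this and the matching bluetooth/char hunks below constify the PCMCIA ID tables; the pcmcia core only ever reads them, so they can live in read-only data. The pattern, with a hypothetical driver:

#include <pcmcia/ds.h>

/* sketch: a constified PCMCIA id table (hypothetical "example" driver) */
static const struct pcmcia_device_id example_ids[] = {
	PCMCIA_DEVICE_FUNC_ID(4),
	PCMCIA_DEVICE_NULL
};
MODULE_DEVICE_TABLE(pcmcia, example_ids);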
diff --git a/drivers/bluetooth/bluecard_cs.c b/drivers/bluetooth/bluecard_cs.c
index 4104b7feae67..aed1904ea67b 100644
--- a/drivers/bluetooth/bluecard_cs.c
+++ b/drivers/bluetooth/bluecard_cs.c
@@ -930,7 +930,7 @@ static void bluecard_release(struct pcmcia_device *link)
930 pcmcia_disable_device(link); 930 pcmcia_disable_device(link);
931} 931}
932 932
933static struct pcmcia_device_id bluecard_ids[] = { 933static const struct pcmcia_device_id bluecard_ids[] = {
934 PCMCIA_DEVICE_PROD_ID12("BlueCard", "LSE041", 0xbaf16fbf, 0x657cc15e), 934 PCMCIA_DEVICE_PROD_ID12("BlueCard", "LSE041", 0xbaf16fbf, 0x657cc15e),
935 PCMCIA_DEVICE_PROD_ID12("BTCFCARD", "LSE139", 0xe3987764, 0x2524b59c), 935 PCMCIA_DEVICE_PROD_ID12("BTCFCARD", "LSE139", 0xe3987764, 0x2524b59c),
936 PCMCIA_DEVICE_PROD_ID12("WSS", "LSE039", 0x0a0736ec, 0x24e6dfab), 936 PCMCIA_DEVICE_PROD_ID12("WSS", "LSE039", 0x0a0736ec, 0x24e6dfab),
diff --git a/drivers/bluetooth/bt3c_cs.c b/drivers/bluetooth/bt3c_cs.c
index 0c8a65587491..4fc01949d399 100644
--- a/drivers/bluetooth/bt3c_cs.c
+++ b/drivers/bluetooth/bt3c_cs.c
@@ -761,7 +761,7 @@ static void bt3c_release(struct pcmcia_device *link)
761} 761}
762 762
763 763
764static struct pcmcia_device_id bt3c_ids[] = { 764static const struct pcmcia_device_id bt3c_ids[] = {
765 PCMCIA_DEVICE_PROD_ID13("3COM", "Bluetooth PC Card", 0xefce0a31, 0xd4ce9b02), 765 PCMCIA_DEVICE_PROD_ID13("3COM", "Bluetooth PC Card", 0xefce0a31, 0xd4ce9b02),
766 PCMCIA_DEVICE_NULL 766 PCMCIA_DEVICE_NULL
767}; 767};
diff --git a/drivers/bluetooth/btuart_cs.c b/drivers/bluetooth/btuart_cs.c
index f8a0708e2311..526b61807d94 100644
--- a/drivers/bluetooth/btuart_cs.c
+++ b/drivers/bluetooth/btuart_cs.c
@@ -689,7 +689,7 @@ static void btuart_release(struct pcmcia_device *link)
689 pcmcia_disable_device(link); 689 pcmcia_disable_device(link);
690} 690}
691 691
692static struct pcmcia_device_id btuart_ids[] = { 692static const struct pcmcia_device_id btuart_ids[] = {
693 /* don't use this driver. Use serial_cs + hci_uart instead */ 693 /* don't use this driver. Use serial_cs + hci_uart instead */
694 PCMCIA_DEVICE_NULL 694 PCMCIA_DEVICE_NULL
695}; 695};
diff --git a/drivers/bluetooth/dtl1_cs.c b/drivers/bluetooth/dtl1_cs.c
index 26ee0cf88d20..5e4c2de9fc3f 100644
--- a/drivers/bluetooth/dtl1_cs.c
+++ b/drivers/bluetooth/dtl1_cs.c
@@ -636,7 +636,7 @@ static void dtl1_release(struct pcmcia_device *link)
636} 636}
637 637
638 638
639static struct pcmcia_device_id dtl1_ids[] = { 639static const struct pcmcia_device_id dtl1_ids[] = {
640 PCMCIA_DEVICE_PROD_ID12("Nokia Mobile Phones", "DTL-1", 0xe1bfdd64, 0xe168480d), 640 PCMCIA_DEVICE_PROD_ID12("Nokia Mobile Phones", "DTL-1", 0xe1bfdd64, 0xe168480d),
641 PCMCIA_DEVICE_PROD_ID12("Nokia Mobile Phones", "DTL-4", 0xe1bfdd64, 0x9102bc82), 641 PCMCIA_DEVICE_PROD_ID12("Nokia Mobile Phones", "DTL-4", 0xe1bfdd64, 0x9102bc82),
642 PCMCIA_DEVICE_PROD_ID12("Socket", "CF", 0xb38bcc2e, 0x44ebf863), 642 PCMCIA_DEVICE_PROD_ID12("Socket", "CF", 0xb38bcc2e, 0x44ebf863),
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index b0a0dccc98c1..b427711be4be 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -903,6 +903,9 @@ static struct pci_device_id agp_intel_pci_table[] = {
903 ID(PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB), 903 ID(PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB),
904 ID(PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB), 904 ID(PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB),
905 ID(PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB), 905 ID(PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB),
906 ID(PCI_DEVICE_ID_INTEL_IVYBRIDGE_HB),
907 ID(PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_HB),
908 ID(PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_HB),
906 { } 909 { }
907}; 910};
908 911
diff --git a/drivers/char/agp/intel-agp.h b/drivers/char/agp/intel-agp.h
index 5feebe2800e9..999803ce10dc 100644
--- a/drivers/char/agp/intel-agp.h
+++ b/drivers/char/agp/intel-agp.h
@@ -225,6 +225,14 @@
225#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_PLUS_IG 0x0126 225#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_PLUS_IG 0x0126
226#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB 0x0108 /* Server */ 226#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB 0x0108 /* Server */
227#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG 0x010A 227#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG 0x010A
228#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_HB 0x0150 /* Desktop */
229#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_GT1_IG 0x0152
230#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_GT2_IG 0x0162
231#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_HB 0x0154 /* Mobile */
232#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_GT1_IG 0x0156
233#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_GT2_IG 0x0166
234#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_HB 0x0158 /* Server */
235#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT1_IG 0x015A
228 236
229int intel_gmch_probe(struct pci_dev *pdev, 237int intel_gmch_probe(struct pci_dev *pdev,
230 struct agp_bridge_data *bridge); 238 struct agp_bridge_data *bridge);
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index 0d09b537bb9a..85151019dde1 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -1420,6 +1420,16 @@ static const struct intel_gtt_driver_description {
1420 "Sandybridge", &sandybridge_gtt_driver }, 1420 "Sandybridge", &sandybridge_gtt_driver },
1421 { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG, 1421 { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG,
1422 "Sandybridge", &sandybridge_gtt_driver }, 1422 "Sandybridge", &sandybridge_gtt_driver },
1423 { PCI_DEVICE_ID_INTEL_IVYBRIDGE_GT1_IG,
1424 "Ivybridge", &sandybridge_gtt_driver },
1425 { PCI_DEVICE_ID_INTEL_IVYBRIDGE_GT2_IG,
1426 "Ivybridge", &sandybridge_gtt_driver },
1427 { PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_GT1_IG,
1428 "Ivybridge", &sandybridge_gtt_driver },
1429 { PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_GT2_IG,
1430 "Ivybridge", &sandybridge_gtt_driver },
1431 { PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT1_IG,
1432 "Ivybridge", &sandybridge_gtt_driver },
1423 { 0, NULL, NULL } 1433 { 0, NULL, NULL }
1424}; 1434};
1425 1435
diff --git a/drivers/char/agp/uninorth-agp.c b/drivers/char/agp/uninorth-agp.c
index f845a8f718b3..a32c492baf5c 100644
--- a/drivers/char/agp/uninorth-agp.c
+++ b/drivers/char/agp/uninorth-agp.c
@@ -80,7 +80,7 @@ static void uninorth_tlbflush(struct agp_memory *mem)
80 ctrl | UNI_N_CFG_GART_INVAL); 80 ctrl | UNI_N_CFG_GART_INVAL);
81 pci_write_config_dword(agp_bridge->dev, UNI_N_CFG_GART_CTRL, ctrl); 81 pci_write_config_dword(agp_bridge->dev, UNI_N_CFG_GART_CTRL, ctrl);
82 82
83 if (uninorth_rev <= 0x30) { 83 if (!mem && uninorth_rev <= 0x30) {
84 pci_write_config_dword(agp_bridge->dev, UNI_N_CFG_GART_CTRL, 84 pci_write_config_dword(agp_bridge->dev, UNI_N_CFG_GART_CTRL,
85 ctrl | UNI_N_CFG_GART_2xRESET); 85 ctrl | UNI_N_CFG_GART_2xRESET);
86 pci_write_config_dword(agp_bridge->dev, UNI_N_CFG_GART_CTRL, 86 pci_write_config_dword(agp_bridge->dev, UNI_N_CFG_GART_CTRL,
diff --git a/drivers/char/pcmcia/cm4000_cs.c b/drivers/char/pcmcia/cm4000_cs.c
index 90bd01671c70..a7584860e9a7 100644
--- a/drivers/char/pcmcia/cm4000_cs.c
+++ b/drivers/char/pcmcia/cm4000_cs.c
@@ -1869,7 +1869,7 @@ static const struct file_operations cm4000_fops = {
1869 .llseek = no_llseek, 1869 .llseek = no_llseek,
1870}; 1870};
1871 1871
1872static struct pcmcia_device_id cm4000_ids[] = { 1872static const struct pcmcia_device_id cm4000_ids[] = {
1873 PCMCIA_DEVICE_MANF_CARD(0x0223, 0x0002), 1873 PCMCIA_DEVICE_MANF_CARD(0x0223, 0x0002),
1874 PCMCIA_DEVICE_PROD_ID12("CardMan", "4000", 0x2FB368CA, 0xA2BD8C39), 1874 PCMCIA_DEVICE_PROD_ID12("CardMan", "4000", 0x2FB368CA, 0xA2BD8C39),
1875 PCMCIA_DEVICE_NULL, 1875 PCMCIA_DEVICE_NULL,
diff --git a/drivers/char/pcmcia/cm4040_cs.c b/drivers/char/pcmcia/cm4040_cs.c
index 5d8d59e865f4..8dd48a2be911 100644
--- a/drivers/char/pcmcia/cm4040_cs.c
+++ b/drivers/char/pcmcia/cm4040_cs.c
@@ -633,7 +633,7 @@ static const struct file_operations reader_fops = {
633 .llseek = no_llseek, 633 .llseek = no_llseek,
634}; 634};
635 635
636static struct pcmcia_device_id cm4040_ids[] = { 636static const struct pcmcia_device_id cm4040_ids[] = {
637 PCMCIA_DEVICE_MANF_CARD(0x0223, 0x0200), 637 PCMCIA_DEVICE_MANF_CARD(0x0223, 0x0200),
638 PCMCIA_DEVICE_PROD_ID12("OMNIKEY", "CardMan 4040", 638 PCMCIA_DEVICE_PROD_ID12("OMNIKEY", "CardMan 4040",
639 0xE32CDD8C, 0x8F23318B), 639 0xE32CDD8C, 0x8F23318B),
diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
index b575411c69b2..15781396af25 100644
--- a/drivers/char/pcmcia/synclink_cs.c
+++ b/drivers/char/pcmcia/synclink_cs.c
@@ -2758,7 +2758,7 @@ static void mgslpc_remove_device(MGSLPC_INFO *remove_info)
2758 } 2758 }
2759} 2759}
2760 2760
2761static struct pcmcia_device_id mgslpc_ids[] = { 2761static const struct pcmcia_device_id mgslpc_ids[] = {
2762 PCMCIA_DEVICE_MANF_CARD(0x02c5, 0x0050), 2762 PCMCIA_DEVICE_MANF_CARD(0x02c5, 0x0050),
2763 PCMCIA_DEVICE_NULL 2763 PCMCIA_DEVICE_NULL
2764}; 2764};
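Note: the three PCMCIA hunks above are one mechanical change — the ID tables are only ever read for matching, so they can be const and placed in rodata. The pattern, with a hypothetical table name:

/* Hypothetical example of the constified table pattern; only the
 * const qualifier is new, the entries are unchanged. */
static const struct pcmcia_device_id example_ids[] = {
	PCMCIA_DEVICE_MANF_CARD(0x0223, 0x0002),
	PCMCIA_DEVICE_NULL,
};
MODULE_DEVICE_TABLE(pcmcia, example_ids);

This assumes the PCMCIA core's id_table pointer is const-qualified, which the matching core-side change in this series presumably provides.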
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index c64c3807f516..e0b25de1e339 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -74,6 +74,8 @@ config ZCRYPT
74 + PCI-X Cryptographic Coprocessor (PCIXCC) 74 + PCI-X Cryptographic Coprocessor (PCIXCC)
75 + Crypto Express2 Coprocessor (CEX2C) 75 + Crypto Express2 Coprocessor (CEX2C)
76 + Crypto Express2 Accelerator (CEX2A) 76 + Crypto Express2 Accelerator (CEX2A)
77 + Crypto Express3 Coprocessor (CEX3C)
78 + Crypto Express3 Accelerator (CEX3A)
77 79
78config ZCRYPT_MONOLITHIC 80config ZCRYPT_MONOLITHIC
79 bool "Monolithic zcrypt module" 81 bool "Monolithic zcrypt module"
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index adc9358c9bec..0a9357c66ff8 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -1413,6 +1413,64 @@ end:
1413EXPORT_SYMBOL(drm_detect_monitor_audio); 1413EXPORT_SYMBOL(drm_detect_monitor_audio);
1414 1414
1415/** 1415/**
1416 * drm_add_display_info - pull display info out if present
1417 * @edid: EDID data
1418 * @info: display info (attached to connector)
1419 *
1420 * Grab any available display info and stuff it into the drm_display_info
1421 * structure that's part of the connector. Useful for tracking bpp and
1422 * color spaces.
1423 */
1424static void drm_add_display_info(struct edid *edid,
1425 struct drm_display_info *info)
1426{
1427 info->width_mm = edid->width_cm * 10;
1428 info->height_mm = edid->height_cm * 10;
1429
1430 /* driver figures it out in this case */
1431 info->bpc = 0;
1432 info->color_formats = 0;
1433
1434 /* Only defined for 1.4 with digital displays */
1435 if (edid->revision < 4)
1436 return;
1437
1438 if (!(edid->input & DRM_EDID_INPUT_DIGITAL))
1439 return;
1440
1441 switch (edid->input & DRM_EDID_DIGITAL_DEPTH_MASK) {
1442 case DRM_EDID_DIGITAL_DEPTH_6:
1443 info->bpc = 6;
1444 break;
1445 case DRM_EDID_DIGITAL_DEPTH_8:
1446 info->bpc = 8;
1447 break;
1448 case DRM_EDID_DIGITAL_DEPTH_10:
1449 info->bpc = 10;
1450 break;
1451 case DRM_EDID_DIGITAL_DEPTH_12:
1452 info->bpc = 12;
1453 break;
1454 case DRM_EDID_DIGITAL_DEPTH_14:
1455 info->bpc = 14;
1456 break;
1457 case DRM_EDID_DIGITAL_DEPTH_16:
1458 info->bpc = 16;
1459 break;
1460 case DRM_EDID_DIGITAL_DEPTH_UNDEF:
1461 default:
1462 info->bpc = 0;
1463 break;
1464 }
1465
1466 info->color_formats = DRM_COLOR_FORMAT_RGB444;
1467 if (edid->features & DRM_EDID_FEATURE_RGB_YCRCB444)
1468 info->color_formats |= DRM_COLOR_FORMAT_YCRCB444;
1469 if (edid->features & DRM_EDID_FEATURE_RGB_YCRCB422)
1470 info->color_formats |= DRM_COLOR_FORMAT_YCRCB422;
1471}
1472
1473/**
1416 * drm_add_edid_modes - add modes from EDID data, if available 1474 * drm_add_edid_modes - add modes from EDID data, if available
1417 * @connector: connector we're probing 1475 * @connector: connector we're probing
1418 * @edid: edid data 1476 * @edid: edid data
@@ -1460,8 +1518,7 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
1460 if (quirks & (EDID_QUIRK_PREFER_LARGE_60 | EDID_QUIRK_PREFER_LARGE_75)) 1518 if (quirks & (EDID_QUIRK_PREFER_LARGE_60 | EDID_QUIRK_PREFER_LARGE_75))
1461 edid_fixup_preferred(connector, quirks); 1519 edid_fixup_preferred(connector, quirks);
1462 1520
1463 connector->display_info.width_mm = edid->width_cm * 10; 1521 drm_add_display_info(edid, &connector->display_info);
1464 connector->display_info.height_mm = edid->height_cm * 10;
1465 1522
1466 return num_modes; 1523 return num_modes;
1467} 1524}
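Note: drm_add_display_info() gives every connector a best-effort bpc (0 when the EDID predates 1.4 or the display is analog) and a color-format mask. A hedged sketch of how a driver might consume it; the helper name and the 8bpc fallback are illustrative, not part of this patch:

/* bpc == 0 means the EDID did not say, so the driver decides. */
static int pick_pipe_bpc(struct drm_connector *connector)
{
	struct drm_display_info *info = &connector->display_info;

	return info->bpc ? info->bpc : 8;	/* assumed default */
}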
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 140b9525b48a..802b61ac3139 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -70,174 +70,50 @@ fail:
70} 70}
71EXPORT_SYMBOL(drm_fb_helper_single_add_all_connectors); 71EXPORT_SYMBOL(drm_fb_helper_single_add_all_connectors);
72 72
73/**
74 * drm_fb_helper_connector_parse_command_line - parse command line for connector
75 * @connector - connector to parse line for
76 * @mode_option - per connector mode option
77 *
78 * This parses the connector specific then generic command lines for
79 * modes and options to configure the connector.
80 *
81 * This uses the same parameters as the fb modedb.c, except for extra
82 * <xres>x<yres>[M][R][-<bpp>][@<refresh>][i][m][eDd]
83 *
84 * enable/enable Digital/disable bit at the end
85 */
86static bool drm_fb_helper_connector_parse_command_line(struct drm_fb_helper_connector *fb_helper_conn,
87 const char *mode_option)
88{
89 const char *name;
90 unsigned int namelen;
91 int res_specified = 0, bpp_specified = 0, refresh_specified = 0;
92 unsigned int xres = 0, yres = 0, bpp = 32, refresh = 0;
93 int yres_specified = 0, cvt = 0, rb = 0, interlace = 0, margins = 0;
94 int i;
95 enum drm_connector_force force = DRM_FORCE_UNSPECIFIED;
96 struct drm_fb_helper_cmdline_mode *cmdline_mode;
97 struct drm_connector *connector;
98
99 if (!fb_helper_conn)
100 return false;
101 connector = fb_helper_conn->connector;
102
103 cmdline_mode = &fb_helper_conn->cmdline_mode;
104 if (!mode_option)
105 mode_option = fb_mode_option;
106
107 if (!mode_option) {
108 cmdline_mode->specified = false;
109 return false;
110 }
111
112 name = mode_option;
113 namelen = strlen(name);
114 for (i = namelen-1; i >= 0; i--) {
115 switch (name[i]) {
116 case '@':
117 namelen = i;
118 if (!refresh_specified && !bpp_specified &&
119 !yres_specified) {
120 refresh = simple_strtol(&name[i+1], NULL, 10);
121 refresh_specified = 1;
122 if (cvt || rb)
123 cvt = 0;
124 } else
125 goto done;
126 break;
127 case '-':
128 namelen = i;
129 if (!bpp_specified && !yres_specified) {
130 bpp = simple_strtol(&name[i+1], NULL, 10);
131 bpp_specified = 1;
132 if (cvt || rb)
133 cvt = 0;
134 } else
135 goto done;
136 break;
137 case 'x':
138 if (!yres_specified) {
139 yres = simple_strtol(&name[i+1], NULL, 10);
140 yres_specified = 1;
141 } else
142 goto done;
143 case '0' ... '9':
144 break;
145 case 'M':
146 if (!yres_specified)
147 cvt = 1;
148 break;
149 case 'R':
150 if (cvt)
151 rb = 1;
152 break;
153 case 'm':
154 if (!cvt)
155 margins = 1;
156 break;
157 case 'i':
158 if (!cvt)
159 interlace = 1;
160 break;
161 case 'e':
162 force = DRM_FORCE_ON;
163 break;
164 case 'D':
165 if ((connector->connector_type != DRM_MODE_CONNECTOR_DVII) &&
166 (connector->connector_type != DRM_MODE_CONNECTOR_HDMIB))
167 force = DRM_FORCE_ON;
168 else
169 force = DRM_FORCE_ON_DIGITAL;
170 break;
171 case 'd':
172 force = DRM_FORCE_OFF;
173 break;
174 default:
175 goto done;
176 }
177 }
178 if (i < 0 && yres_specified) {
179 xres = simple_strtol(name, NULL, 10);
180 res_specified = 1;
181 }
182done:
183
184 DRM_DEBUG_KMS("cmdline mode for connector %s %dx%d@%dHz%s%s%s\n",
185 drm_get_connector_name(connector), xres, yres,
186 (refresh) ? refresh : 60, (rb) ? " reduced blanking" :
187 "", (margins) ? " with margins" : "", (interlace) ?
188 " interlaced" : "");
189
190 if (force) {
191 const char *s;
192 switch (force) {
193 case DRM_FORCE_OFF: s = "OFF"; break;
194 case DRM_FORCE_ON_DIGITAL: s = "ON - dig"; break;
195 default:
196 case DRM_FORCE_ON: s = "ON"; break;
197 }
198
199 DRM_INFO("forcing %s connector %s\n",
200 drm_get_connector_name(connector), s);
201 connector->force = force;
202 }
203
204 if (res_specified) {
205 cmdline_mode->specified = true;
206 cmdline_mode->xres = xres;
207 cmdline_mode->yres = yres;
208 }
209
210 if (refresh_specified) {
211 cmdline_mode->refresh_specified = true;
212 cmdline_mode->refresh = refresh;
213 }
214
215 if (bpp_specified) {
216 cmdline_mode->bpp_specified = true;
217 cmdline_mode->bpp = bpp;
218 }
219 cmdline_mode->rb = rb ? true : false;
220 cmdline_mode->cvt = cvt ? true : false;
221 cmdline_mode->interlace = interlace ? true : false;
222
223 return true;
224}
225
226static int drm_fb_helper_parse_command_line(struct drm_fb_helper *fb_helper) 73static int drm_fb_helper_parse_command_line(struct drm_fb_helper *fb_helper)
227{ 74{
228 struct drm_fb_helper_connector *fb_helper_conn; 75 struct drm_fb_helper_connector *fb_helper_conn;
229 int i; 76 int i;
230 77
231 for (i = 0; i < fb_helper->connector_count; i++) { 78 for (i = 0; i < fb_helper->connector_count; i++) {
79 struct drm_cmdline_mode *mode;
80 struct drm_connector *connector;
232 char *option = NULL; 81 char *option = NULL;
233 82
234 fb_helper_conn = fb_helper->connector_info[i]; 83 fb_helper_conn = fb_helper->connector_info[i];
84 connector = fb_helper_conn->connector;
85 mode = &fb_helper_conn->cmdline_mode;
235 86
236 /* do something on return - turn off connector maybe */ 87 /* do something on return - turn off connector maybe */
237 if (fb_get_options(drm_get_connector_name(fb_helper_conn->connector), &option)) 88 if (fb_get_options(drm_get_connector_name(connector), &option))
238 continue; 89 continue;
239 90
240 drm_fb_helper_connector_parse_command_line(fb_helper_conn, option); 91 if (drm_mode_parse_command_line_for_connector(option,
92 connector,
93 mode)) {
94 if (mode->force) {
95 const char *s;
96 switch (mode->force) {
97 case DRM_FORCE_OFF: s = "OFF"; break;
98 case DRM_FORCE_ON_DIGITAL: s = "ON - dig"; break;
99 default:
100 case DRM_FORCE_ON: s = "ON"; break;
101 }
102
103 DRM_INFO("forcing %s connector %s\n",
104 drm_get_connector_name(connector), s);
105 connector->force = mode->force;
106 }
107
108 DRM_DEBUG_KMS("cmdline mode for connector %s %dx%d@%dHz%s%s%s\n",
109 drm_get_connector_name(connector),
110 mode->xres, mode->yres,
111 mode->refresh_specified ? mode->refresh : 60,
112 mode->rb ? " reduced blanking" : "",
113 mode->margins ? " with margins" : "",
114 mode->interlace ? " interlaced" : "");
115 }
116
241 } 117 }
242 return 0; 118 return 0;
243} 119}
@@ -901,7 +777,7 @@ int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
901 /* first up get a count of crtcs now in use and new min/maxes width/heights */ 777 /* first up get a count of crtcs now in use and new min/maxes width/heights */
902 for (i = 0; i < fb_helper->connector_count; i++) { 778 for (i = 0; i < fb_helper->connector_count; i++) {
903 struct drm_fb_helper_connector *fb_helper_conn = fb_helper->connector_info[i]; 779 struct drm_fb_helper_connector *fb_helper_conn = fb_helper->connector_info[i];
904 struct drm_fb_helper_cmdline_mode *cmdline_mode; 780 struct drm_cmdline_mode *cmdline_mode;
905 781
906 cmdline_mode = &fb_helper_conn->cmdline_mode; 782 cmdline_mode = &fb_helper_conn->cmdline_mode;
907 783
@@ -1123,7 +999,7 @@ static struct drm_display_mode *drm_has_preferred_mode(struct drm_fb_helper_conn
1123 999
1124static bool drm_has_cmdline_mode(struct drm_fb_helper_connector *fb_connector) 1000static bool drm_has_cmdline_mode(struct drm_fb_helper_connector *fb_connector)
1125{ 1001{
1126 struct drm_fb_helper_cmdline_mode *cmdline_mode; 1002 struct drm_cmdline_mode *cmdline_mode;
1127 cmdline_mode = &fb_connector->cmdline_mode; 1003 cmdline_mode = &fb_connector->cmdline_mode;
1128 return cmdline_mode->specified; 1004 return cmdline_mode->specified;
1129} 1005}
@@ -1131,7 +1007,7 @@ static bool drm_has_cmdline_mode(struct drm_fb_helper_connector *fb_connector)
1131static struct drm_display_mode *drm_pick_cmdline_mode(struct drm_fb_helper_connector *fb_helper_conn, 1007static struct drm_display_mode *drm_pick_cmdline_mode(struct drm_fb_helper_connector *fb_helper_conn,
1132 int width, int height) 1008 int width, int height)
1133{ 1009{
1134 struct drm_fb_helper_cmdline_mode *cmdline_mode; 1010 struct drm_cmdline_mode *cmdline_mode;
1135 struct drm_display_mode *mode = NULL; 1011 struct drm_display_mode *mode = NULL;
1136 1012
1137 cmdline_mode = &fb_helper_conn->cmdline_mode; 1013 cmdline_mode = &fb_helper_conn->cmdline_mode;
@@ -1163,19 +1039,8 @@ static struct drm_display_mode *drm_pick_cmdline_mode(struct drm_fb_helper_conne
1163 } 1039 }
1164 1040
1165create_mode: 1041create_mode:
1166 if (cmdline_mode->cvt) 1042 mode = drm_mode_create_from_cmdline_mode(fb_helper_conn->connector->dev,
1167 mode = drm_cvt_mode(fb_helper_conn->connector->dev, 1043 cmdline_mode);
1168 cmdline_mode->xres, cmdline_mode->yres,
1169 cmdline_mode->refresh_specified ? cmdline_mode->refresh : 60,
1170 cmdline_mode->rb, cmdline_mode->interlace,
1171 cmdline_mode->margins);
1172 else
1173 mode = drm_gtf_mode(fb_helper_conn->connector->dev,
1174 cmdline_mode->xres, cmdline_mode->yres,
1175 cmdline_mode->refresh_specified ? cmdline_mode->refresh : 60,
1176 cmdline_mode->interlace,
1177 cmdline_mode->margins);
1178 drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
1179 list_add(&mode->head, &fb_helper_conn->connector->modes); 1044 list_add(&mode->head, &fb_helper_conn->connector->modes);
1180 return mode; 1045 return mode;
1181} 1046}
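Note: after this refactor the fb helper keeps only the logging and connector-forcing policy; the actual parsing and mode construction move to drm_modes.c (see the drm_modes.c hunk below). For reference, a worked example of the shared grammar — with a hypothetical connector, video=DVI-I-1:1280x1024-24@60me would fill the struct roughly as:

/* Illustrative only: the result of parsing "1280x1024-24@60me".
 * Field names are the ones the code above reads back. */
struct drm_cmdline_mode m = {
	.specified = true, .xres = 1280, .yres = 1024,
	.bpp_specified = true, .bpp = 24,
	.refresh_specified = true, .refresh = 60,
	.margins = true,	/* 'm' */
	.force = DRM_FORCE_ON,	/* 'e' */
};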
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index a1f12cb043de..2022a5c966bb 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -684,10 +684,11 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
684 */ 684 */
685 *vblank_time = ns_to_timeval(timeval_to_ns(&raw_time) - delta_ns); 685 *vblank_time = ns_to_timeval(timeval_to_ns(&raw_time) - delta_ns);
686 686
687 DRM_DEBUG("crtc %d : v %d p(%d,%d)@ %d.%d -> %d.%d [e %d us, %d rep]\n", 687 DRM_DEBUG("crtc %d : v %d p(%d,%d)@ %ld.%ld -> %ld.%ld [e %d us, %d rep]\n",
688 crtc, (int) vbl_status, hpos, vpos, raw_time.tv_sec, 688 crtc, (int)vbl_status, hpos, vpos,
689 raw_time.tv_usec, vblank_time->tv_sec, vblank_time->tv_usec, 689 (long)raw_time.tv_sec, (long)raw_time.tv_usec,
690 (int) duration_ns/1000, i); 690 (long)vblank_time->tv_sec, (long)vblank_time->tv_usec,
691 (int)duration_ns/1000, i);
691 692
692 vbl_status = DRM_VBLANKTIME_SCANOUTPOS_METHOD; 693 vbl_status = DRM_VBLANKTIME_SCANOUTPOS_METHOD;
693 if (invbl) 694 if (invbl)
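Note: the format-string fix is the standard portability idiom — the widths of tv_sec and tv_usec differ across architectures, so the values are cast to long and printed with %ld. In isolation:

/* Minimal illustration of the idiom adopted above. */
struct timeval tv = { .tv_sec = 12, .tv_usec = 345678 };
printk(KERN_DEBUG "ts: %ld.%06ld\n", (long)tv.tv_sec, (long)tv.tv_usec);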
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index 25bf87390f53..c2d32f20e2fb 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -974,3 +974,159 @@ void drm_mode_connector_list_update(struct drm_connector *connector)
974 } 974 }
975} 975}
976EXPORT_SYMBOL(drm_mode_connector_list_update); 976EXPORT_SYMBOL(drm_mode_connector_list_update);
977
978/**
979 * drm_mode_parse_command_line_for_connector - parse command line for connector
980 * @mode_option: per connector mode option
981 * @connector: connector to parse line for
982 * @mode: drm_cmdline_mode to be filled with the parsed results
983 *
984 * This parses the connector-specific command line, then the generic
985 * video= option, for modes and options to configure the connector.
986 *
987 * The format is the same as fb modedb.c:
988 * <xres>x<yres>[M][R][-<bpp>][@<refresh>][i][m][eDd]
989 * where the extra [eDd] flag forces the connector on, on-digital, or off.
990 */
991bool drm_mode_parse_command_line_for_connector(const char *mode_option,
992 struct drm_connector *connector,
993 struct drm_cmdline_mode *mode)
994{
995 const char *name;
996 unsigned int namelen;
997 int res_specified = 0, bpp_specified = 0, refresh_specified = 0;
998 unsigned int xres = 0, yres = 0, bpp = 32, refresh = 0;
999 int yres_specified = 0, cvt = 0, rb = 0, interlace = 0, margins = 0;
1000 int i;
1001 enum drm_connector_force force = DRM_FORCE_UNSPECIFIED;
1002
1003#ifdef CONFIG_FB
1004 if (!mode_option)
1005 mode_option = fb_mode_option;
1006#endif
1007
1008 if (!mode_option) {
1009 mode->specified = false;
1010 return false;
1011 }
1012
1013 name = mode_option;
1014 namelen = strlen(name);
1015 for (i = namelen-1; i >= 0; i--) {
1016 switch (name[i]) {
1017 case '@':
1018 namelen = i;
1019 if (!refresh_specified && !bpp_specified &&
1020 !yres_specified) {
1021 refresh = simple_strtol(&name[i+1], NULL, 10);
1022 refresh_specified = 1;
1023 if (cvt || rb)
1024 cvt = 0;
1025 } else
1026 goto done;
1027 break;
1028 case '-':
1029 namelen = i;
1030 if (!bpp_specified && !yres_specified) {
1031 bpp = simple_strtol(&name[i+1], NULL, 10);
1032 bpp_specified = 1;
1033 if (cvt || rb)
1034 cvt = 0;
1035 } else
1036 goto done;
1037 break;
1038 case 'x':
1039 if (!yres_specified) {
1040 yres = simple_strtol(&name[i+1], NULL, 10);
1041 yres_specified = 1;
1042 } else
1043 goto done;
1044 case '0' ... '9':
1045 break;
1046 case 'M':
1047 if (!yres_specified)
1048 cvt = 1;
1049 break;
1050 case 'R':
1051 if (cvt)
1052 rb = 1;
1053 break;
1054 case 'm':
1055 if (!cvt)
1056 margins = 1;
1057 break;
1058 case 'i':
1059 if (!cvt)
1060 interlace = 1;
1061 break;
1062 case 'e':
1063 force = DRM_FORCE_ON;
1064 break;
1065 case 'D':
1066 if ((connector->connector_type != DRM_MODE_CONNECTOR_DVII) &&
1067 (connector->connector_type != DRM_MODE_CONNECTOR_HDMIB))
1068 force = DRM_FORCE_ON;
1069 else
1070 force = DRM_FORCE_ON_DIGITAL;
1071 break;
1072 case 'd':
1073 force = DRM_FORCE_OFF;
1074 break;
1075 default:
1076 goto done;
1077 }
1078 }
1079 if (i < 0 && yres_specified) {
1080 xres = simple_strtol(name, NULL, 10);
1081 res_specified = 1;
1082 }
1083done:
1084 if (res_specified) {
1085 mode->specified = true;
1086 mode->xres = xres;
1087 mode->yres = yres;
1088 }
1089
1090 if (refresh_specified) {
1091 mode->refresh_specified = true;
1092 mode->refresh = refresh;
1093 }
1094
1095 if (bpp_specified) {
1096 mode->bpp_specified = true;
1097 mode->bpp = bpp;
1098 }
1099 mode->rb = rb ? true : false;
1100 mode->cvt = cvt ? true : false;
1101 mode->interlace = interlace ? true : false;
1102 mode->margins = margins ? true : false;
1103 mode->force = force;
1104 return true;
1105}
1106EXPORT_SYMBOL(drm_mode_parse_command_line_for_connector);
1107
1108struct drm_display_mode *
1109drm_mode_create_from_cmdline_mode(struct drm_device *dev,
1110 struct drm_cmdline_mode *cmd)
1111{
1112 struct drm_display_mode *mode;
1113
1114 if (cmd->cvt)
1115 mode = drm_cvt_mode(dev,
1116 cmd->xres, cmd->yres,
1117 cmd->refresh_specified ? cmd->refresh : 60,
1118 cmd->rb, cmd->interlace,
1119 cmd->margins);
1120 else
1121 mode = drm_gtf_mode(dev,
1122 cmd->xres, cmd->yres,
1123 cmd->refresh_specified ? cmd->refresh : 60,
1124 cmd->interlace,
1125 cmd->margins);
1126 if (!mode)
1127 return NULL;
1128
1129 drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
1130 return mode;
1131}
1132EXPORT_SYMBOL(drm_mode_create_from_cmdline_mode);
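Note: a hedged sketch of the two new exports used together, the way a KMS driver or helper might call them; the literal mode string and the omitted error handling are illustrative:

struct drm_cmdline_mode cmdline = {};
struct drm_display_mode *mode;

if (drm_mode_parse_command_line_for_connector("1280x1024-24@60",
					      connector, &cmdline)) {
	mode = drm_mode_create_from_cmdline_mode(connector->dev, &cmdline);
	if (mode)
		list_add(&mode->head, &connector->modes);
}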
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
index 001273d57f2d..6d7b083c5b77 100644
--- a/drivers/gpu/drm/drm_stub.c
+++ b/drivers/gpu/drm/drm_stub.c
@@ -62,6 +62,26 @@ struct idr drm_minors_idr;
62struct class *drm_class; 62struct class *drm_class;
63struct proc_dir_entry *drm_proc_root; 63struct proc_dir_entry *drm_proc_root;
64struct dentry *drm_debugfs_root; 64struct dentry *drm_debugfs_root;
65
66int drm_err(const char *func, const char *format, ...)
67{
68 struct va_format vaf;
69 va_list args;
70 int r;
71
72 va_start(args, format);
73
74 vaf.fmt = format;
75 vaf.va = &args;
76
77 r = printk(KERN_ERR "[" DRM_NAME ":%s] *ERROR* %pV", func, &vaf);
78
79 va_end(args);
80
81 return r;
82}
83EXPORT_SYMBOL(drm_err);
84
65void drm_ut_debug_printk(unsigned int request_level, 85void drm_ut_debug_printk(unsigned int request_level,
66 const char *prefix, 86 const char *prefix,
67 const char *function_name, 87 const char *function_name,
@@ -78,6 +98,7 @@ void drm_ut_debug_printk(unsigned int request_level,
78 } 98 }
79} 99}
80EXPORT_SYMBOL(drm_ut_debug_printk); 100EXPORT_SYMBOL(drm_ut_debug_printk);
101
81static int drm_minor_get_id(struct drm_device *dev, int type) 102static int drm_minor_get_id(struct drm_device *dev, int type)
82{ 103{
83 int new_id; 104 int new_id;
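Note: drm_err() leans on printk's %pV extension — struct va_format bundles a format string with a live va_list so the caller's entire message is emitted by a single printk. The companion macro (its exact definition in drmP.h is assumed here) would forward the caller's function name:

/* Assumed shape of the macro that pairs with drm_err() above. */
#define DRM_ERROR(fmt, ...) \
	drm_err(__func__, fmt, ##__VA_ARGS__)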
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 87c8e29465e3..51c2257b11e6 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -106,11 +106,12 @@ static const char *get_tiling_flag(struct drm_i915_gem_object *obj)
106 } 106 }
107} 107}
108 108
109static const char *agp_type_str(int type) 109static const char *cache_level_str(int type)
110{ 110{
111 switch (type) { 111 switch (type) {
112 case 0: return " uncached"; 112 case I915_CACHE_NONE: return " uncached";
113 case 1: return " snooped"; 113 case I915_CACHE_LLC: return " snooped (LLC)";
114 case I915_CACHE_LLC_MLC: return " snooped (LLC+MLC)";
114 default: return ""; 115 default: return "";
115 } 116 }
116} 117}
@@ -127,7 +128,7 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
127 obj->base.write_domain, 128 obj->base.write_domain,
128 obj->last_rendering_seqno, 129 obj->last_rendering_seqno,
129 obj->last_fenced_seqno, 130 obj->last_fenced_seqno,
130 agp_type_str(obj->agp_type == AGP_USER_CACHED_MEMORY), 131 cache_level_str(obj->cache_level),
131 obj->dirty ? " dirty" : "", 132 obj->dirty ? " dirty" : "",
132 obj->madv == I915_MADV_DONTNEED ? " purgeable" : ""); 133 obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
133 if (obj->base.name) 134 if (obj->base.name)
@@ -714,7 +715,7 @@ static void print_error_buffers(struct seq_file *m,
714 dirty_flag(err->dirty), 715 dirty_flag(err->dirty),
715 purgeable_flag(err->purgeable), 716 purgeable_flag(err->purgeable),
716 ring_str(err->ring), 717 ring_str(err->ring),
717 agp_type_str(err->agp_type)); 718 cache_level_str(err->cache_level));
718 719
719 if (err->name) 720 if (err->name)
720 seq_printf(m, " (name: %d)", err->name); 721 seq_printf(m, " (name: %d)", err->name);
@@ -852,6 +853,7 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
852 struct drm_info_node *node = (struct drm_info_node *) m->private; 853 struct drm_info_node *node = (struct drm_info_node *) m->private;
853 struct drm_device *dev = node->minor->dev; 854 struct drm_device *dev = node->minor->dev;
854 drm_i915_private_t *dev_priv = dev->dev_private; 855 drm_i915_private_t *dev_priv = dev->dev_private;
856 int ret;
855 857
856 if (IS_GEN5(dev)) { 858 if (IS_GEN5(dev)) {
857 u16 rgvswctl = I915_READ16(MEMSWCTL); 859 u16 rgvswctl = I915_READ16(MEMSWCTL);
@@ -873,7 +875,11 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
873 int max_freq; 875 int max_freq;
874 876
875 /* RPSTAT1 is in the GT power well */ 877 /* RPSTAT1 is in the GT power well */
876 __gen6_gt_force_wake_get(dev_priv); 878 ret = mutex_lock_interruptible(&dev->struct_mutex);
879 if (ret)
880 return ret;
881
882 gen6_gt_force_wake_get(dev_priv);
877 883
878 rpstat = I915_READ(GEN6_RPSTAT1); 884 rpstat = I915_READ(GEN6_RPSTAT1);
879 rpupei = I915_READ(GEN6_RP_CUR_UP_EI); 885 rpupei = I915_READ(GEN6_RP_CUR_UP_EI);
@@ -883,6 +889,9 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
883 rpcurdown = I915_READ(GEN6_RP_CUR_DOWN); 889 rpcurdown = I915_READ(GEN6_RP_CUR_DOWN);
884 rpprevdown = I915_READ(GEN6_RP_PREV_DOWN); 890 rpprevdown = I915_READ(GEN6_RP_PREV_DOWN);
885 891
892 gen6_gt_force_wake_put(dev_priv);
893 mutex_unlock(&dev->struct_mutex);
894
886 seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status); 895 seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
887 seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat); 896 seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
888 seq_printf(m, "Render p-state ratio: %d\n", 897 seq_printf(m, "Render p-state ratio: %d\n",
@@ -917,8 +926,6 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
917 max_freq = rp_state_cap & 0xff; 926 max_freq = rp_state_cap & 0xff;
918 seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n", 927 seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
919 max_freq * 50); 928 max_freq * 50);
920
921 __gen6_gt_force_wake_put(dev_priv);
922 } else { 929 } else {
923 seq_printf(m, "no P-state info available\n"); 930 seq_printf(m, "no P-state info available\n");
924 } 931 }
@@ -1058,6 +1065,9 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
1058 case FBC_MULTIPLE_PIPES: 1065 case FBC_MULTIPLE_PIPES:
1059 seq_printf(m, "multiple pipes are enabled"); 1066 seq_printf(m, "multiple pipes are enabled");
1060 break; 1067 break;
1068 case FBC_MODULE_PARAM:
1069 seq_printf(m, "disabled per module param (default off)");
1070 break;
1061 default: 1071 default:
1062 seq_printf(m, "unknown reason"); 1072 seq_printf(m, "unknown reason");
1063 } 1073 }
@@ -1186,6 +1196,42 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
1186 return 0; 1196 return 0;
1187} 1197}
1188 1198
1199static int i915_context_status(struct seq_file *m, void *unused)
1200{
1201 struct drm_info_node *node = (struct drm_info_node *) m->private;
1202 struct drm_device *dev = node->minor->dev;
1203 drm_i915_private_t *dev_priv = dev->dev_private;
1204 int ret;
1205
1206 ret = mutex_lock_interruptible(&dev->mode_config.mutex);
1207 if (ret)
1208 return ret;
1209
1210 seq_printf(m, "power context ");
1211 describe_obj(m, dev_priv->pwrctx);
1212 seq_printf(m, "\n");
1213
1214 seq_printf(m, "render context ");
1215 describe_obj(m, dev_priv->renderctx);
1216 seq_printf(m, "\n");
1217
1218 mutex_unlock(&dev->mode_config.mutex);
1219
1220 return 0;
1221}
1222
1223static int i915_gen6_forcewake_count_info(struct seq_file *m, void *data)
1224{
1225 struct drm_info_node *node = (struct drm_info_node *) m->private;
1226 struct drm_device *dev = node->minor->dev;
1227 struct drm_i915_private *dev_priv = dev->dev_private;
1228
1229 seq_printf(m, "forcewake count = %d\n",
1230 atomic_read(&dev_priv->forcewake_count));
1231
1232 return 0;
1233}
1234
1189static int 1235static int
1190i915_wedged_open(struct inode *inode, 1236i915_wedged_open(struct inode *inode,
1191 struct file *filp) 1237 struct file *filp)
@@ -1288,6 +1334,67 @@ static int i915_wedged_create(struct dentry *root, struct drm_minor *minor)
1288 return drm_add_fake_info_node(minor, ent, &i915_wedged_fops); 1334 return drm_add_fake_info_node(minor, ent, &i915_wedged_fops);
1289} 1335}
1290 1336
1337static int i915_forcewake_open(struct inode *inode, struct file *file)
1338{
1339 struct drm_device *dev = inode->i_private;
1340 struct drm_i915_private *dev_priv = dev->dev_private;
1341 int ret;
1342
1343 if (!IS_GEN6(dev))
1344 return 0;
1345
1346 ret = mutex_lock_interruptible(&dev->struct_mutex);
1347 if (ret)
1348 return ret;
1349 gen6_gt_force_wake_get(dev_priv);
1350 mutex_unlock(&dev->struct_mutex);
1351
1352 return 0;
1353}
1354
1355int i915_forcewake_release(struct inode *inode, struct file *file)
1356{
1357 struct drm_device *dev = inode->i_private;
1358 struct drm_i915_private *dev_priv = dev->dev_private;
1359
1360 if (!IS_GEN6(dev))
1361 return 0;
1362
1363 /*
1364 * It's bad that we can potentially hang userspace if struct_mutex gets
1365 * forever stuck. However, if we cannot acquire this lock it means that
1366 * almost certainly the driver has hung and is not unloadable. Therefore
1367 * hanging here is probably a minor inconvenience that almost no user
1368 * will ever see.
1369 */
1370 mutex_lock(&dev->struct_mutex);
1371 gen6_gt_force_wake_put(dev_priv);
1372 mutex_unlock(&dev->struct_mutex);
1373
1374 return 0;
1375}
1376
1377static const struct file_operations i915_forcewake_fops = {
1378 .owner = THIS_MODULE,
1379 .open = i915_forcewake_open,
1380 .release = i915_forcewake_release,
1381};
1382
1383static int i915_forcewake_create(struct dentry *root, struct drm_minor *minor)
1384{
1385 struct drm_device *dev = minor->dev;
1386 struct dentry *ent;
1387
1388 ent = debugfs_create_file("i915_forcewake_user",
1389 S_IRUSR,
1390 root, dev,
1391 &i915_forcewake_fops);
1392 if (IS_ERR(ent))
1393 return PTR_ERR(ent);
1394
1395 return drm_add_fake_info_node(minor, ent, &i915_forcewake_fops);
1396}
1397
1291static struct drm_info_list i915_debugfs_list[] = { 1398static struct drm_info_list i915_debugfs_list[] = {
1292 {"i915_capabilities", i915_capabilities, 0}, 1399 {"i915_capabilities", i915_capabilities, 0},
1293 {"i915_gem_objects", i915_gem_object_info, 0}, 1400 {"i915_gem_objects", i915_gem_object_info, 0},
@@ -1324,6 +1431,8 @@ static struct drm_info_list i915_debugfs_list[] = {
1324 {"i915_sr_status", i915_sr_status, 0}, 1431 {"i915_sr_status", i915_sr_status, 0},
1325 {"i915_opregion", i915_opregion, 0}, 1432 {"i915_opregion", i915_opregion, 0},
1326 {"i915_gem_framebuffer", i915_gem_framebuffer_info, 0}, 1433 {"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
1434 {"i915_context_status", i915_context_status, 0},
1435 {"i915_gen6_forcewake_count", i915_gen6_forcewake_count_info, 0},
1327}; 1436};
1328#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list) 1437#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
1329 1438
@@ -1335,6 +1444,10 @@ int i915_debugfs_init(struct drm_minor *minor)
1335 if (ret) 1444 if (ret)
1336 return ret; 1445 return ret;
1337 1446
1447 ret = i915_forcewake_create(minor->debugfs_root, minor);
1448 if (ret)
1449 return ret;
1450
1338 return drm_debugfs_create_files(i915_debugfs_list, 1451 return drm_debugfs_create_files(i915_debugfs_list,
1339 I915_DEBUGFS_ENTRIES, 1452 I915_DEBUGFS_ENTRIES,
1340 minor->debugfs_root, minor); 1453 minor->debugfs_root, minor);
@@ -1344,6 +1457,8 @@ void i915_debugfs_cleanup(struct drm_minor *minor)
1344{ 1457{
1345 drm_debugfs_remove_files(i915_debugfs_list, 1458 drm_debugfs_remove_files(i915_debugfs_list,
1346 I915_DEBUGFS_ENTRIES, minor); 1459 I915_DEBUGFS_ENTRIES, minor);
1460 drm_debugfs_remove_files((struct drm_info_list *) &i915_forcewake_fops,
1461 1, minor);
1347 drm_debugfs_remove_files((struct drm_info_list *) &i915_wedged_fops, 1462 drm_debugfs_remove_files((struct drm_info_list *) &i915_wedged_fops,
1348 1, minor); 1463 1, minor);
1349} 1464}
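Note: the new i915_forcewake_user handle maps open/release to forcewake get/put, so a userspace tool can pin the GT awake for a whole debug session simply by holding the fd. Hypothetical userspace use (path assumes the default debugfs mount and DRM minor 0):

#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/sys/kernel/debug/dri/0/i915_forcewake_user",
		      O_RDONLY);
	if (fd < 0)
		return 1;
	/* ... sample GT registers via other debugfs files ... */
	close(fd);	/* drops the forcewake reference */
	return 0;
}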
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 12876f2795d2..0239e9974bf2 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -571,7 +571,7 @@ static int i915_quiescent(struct drm_device *dev)
571 struct intel_ring_buffer *ring = LP_RING(dev->dev_private); 571 struct intel_ring_buffer *ring = LP_RING(dev->dev_private);
572 572
573 i915_kernel_lost_context(dev); 573 i915_kernel_lost_context(dev);
574 return intel_wait_ring_buffer(ring, ring->size - 8); 574 return intel_wait_ring_idle(ring);
575} 575}
576 576
577static int i915_flush_ioctl(struct drm_device *dev, void *data, 577static int i915_flush_ioctl(struct drm_device *dev, void *data,
@@ -1176,11 +1176,11 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
1176 return can_switch; 1176 return can_switch;
1177} 1177}
1178 1178
1179static int i915_load_modeset_init(struct drm_device *dev) 1179static int i915_load_gem_init(struct drm_device *dev)
1180{ 1180{
1181 struct drm_i915_private *dev_priv = dev->dev_private; 1181 struct drm_i915_private *dev_priv = dev->dev_private;
1182 unsigned long prealloc_size, gtt_size, mappable_size; 1182 unsigned long prealloc_size, gtt_size, mappable_size;
1183 int ret = 0; 1183 int ret;
1184 1184
1185 prealloc_size = dev_priv->mm.gtt->stolen_size; 1185 prealloc_size = dev_priv->mm.gtt->stolen_size;
1186 gtt_size = dev_priv->mm.gtt->gtt_total_entries << PAGE_SHIFT; 1186 gtt_size = dev_priv->mm.gtt->gtt_total_entries << PAGE_SHIFT;
@@ -1204,7 +1204,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
1204 ret = i915_gem_init_ringbuffer(dev); 1204 ret = i915_gem_init_ringbuffer(dev);
1205 mutex_unlock(&dev->struct_mutex); 1205 mutex_unlock(&dev->struct_mutex);
1206 if (ret) 1206 if (ret)
1207 goto out; 1207 return ret;
1208 1208
1209 /* Try to set up FBC with a reasonable compressed buffer size */ 1209 /* Try to set up FBC with a reasonable compressed buffer size */
1210 if (I915_HAS_FBC(dev) && i915_powersave) { 1210 if (I915_HAS_FBC(dev) && i915_powersave) {
@@ -1222,6 +1222,13 @@ static int i915_load_modeset_init(struct drm_device *dev)
1222 1222
1223 /* Allow hardware batchbuffers unless told otherwise. */ 1223 /* Allow hardware batchbuffers unless told otherwise. */
1224 dev_priv->allow_batchbuffer = 1; 1224 dev_priv->allow_batchbuffer = 1;
1225 return 0;
1226}
1227
1228static int i915_load_modeset_init(struct drm_device *dev)
1229{
1230 struct drm_i915_private *dev_priv = dev->dev_private;
1231 int ret;
1225 1232
1226 ret = intel_parse_bios(dev); 1233 ret = intel_parse_bios(dev);
1227 if (ret) 1234 if (ret)
@@ -1236,7 +1243,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
1236 */ 1243 */
1237 ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode); 1244 ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode);
1238 if (ret && ret != -ENODEV) 1245 if (ret && ret != -ENODEV)
1239 goto cleanup_ringbuffer; 1246 goto out;
1240 1247
1241 intel_register_dsm_handler(); 1248 intel_register_dsm_handler();
1242 1249
@@ -1253,10 +1260,40 @@ static int i915_load_modeset_init(struct drm_device *dev)
1253 1260
1254 intel_modeset_init(dev); 1261 intel_modeset_init(dev);
1255 1262
1256 ret = drm_irq_install(dev); 1263 ret = i915_load_gem_init(dev);
1257 if (ret) 1264 if (ret)
1258 goto cleanup_vga_switcheroo; 1265 goto cleanup_vga_switcheroo;
1259 1266
1267 intel_modeset_gem_init(dev);
1268
1269 if (IS_IVYBRIDGE(dev)) {
1270 /* Share pre & uninstall handlers with ILK/SNB */
1271 dev->driver->irq_handler = ivybridge_irq_handler;
1272 dev->driver->irq_preinstall = ironlake_irq_preinstall;
1273 dev->driver->irq_postinstall = ivybridge_irq_postinstall;
1274 dev->driver->irq_uninstall = ironlake_irq_uninstall;
1275 dev->driver->enable_vblank = ivybridge_enable_vblank;
1276 dev->driver->disable_vblank = ivybridge_disable_vblank;
1277 } else if (HAS_PCH_SPLIT(dev)) {
1278 dev->driver->irq_handler = ironlake_irq_handler;
1279 dev->driver->irq_preinstall = ironlake_irq_preinstall;
1280 dev->driver->irq_postinstall = ironlake_irq_postinstall;
1281 dev->driver->irq_uninstall = ironlake_irq_uninstall;
1282 dev->driver->enable_vblank = ironlake_enable_vblank;
1283 dev->driver->disable_vblank = ironlake_disable_vblank;
1284 } else {
1285 dev->driver->irq_preinstall = i915_driver_irq_preinstall;
1286 dev->driver->irq_postinstall = i915_driver_irq_postinstall;
1287 dev->driver->irq_uninstall = i915_driver_irq_uninstall;
1288 dev->driver->irq_handler = i915_driver_irq_handler;
1289 dev->driver->enable_vblank = i915_enable_vblank;
1290 dev->driver->disable_vblank = i915_disable_vblank;
1291 }
1292
1293 ret = drm_irq_install(dev);
1294 if (ret)
1295 goto cleanup_gem;
1296
1260 /* Always safe in the mode setting case. */ 1297 /* Always safe in the mode setting case. */
1261 /* FIXME: do pre/post-mode set stuff in core KMS code */ 1298 /* FIXME: do pre/post-mode set stuff in core KMS code */
1262 dev->vblank_disable_allowed = 1; 1299 dev->vblank_disable_allowed = 1;
@@ -1274,14 +1311,14 @@ static int i915_load_modeset_init(struct drm_device *dev)
1274 1311
1275cleanup_irq: 1312cleanup_irq:
1276 drm_irq_uninstall(dev); 1313 drm_irq_uninstall(dev);
1314cleanup_gem:
1315 mutex_lock(&dev->struct_mutex);
1316 i915_gem_cleanup_ringbuffer(dev);
1317 mutex_unlock(&dev->struct_mutex);
1277cleanup_vga_switcheroo: 1318cleanup_vga_switcheroo:
1278 vga_switcheroo_unregister_client(dev->pdev); 1319 vga_switcheroo_unregister_client(dev->pdev);
1279cleanup_vga_client: 1320cleanup_vga_client:
1280 vga_client_register(dev->pdev, NULL, NULL, NULL); 1321 vga_client_register(dev->pdev, NULL, NULL, NULL);
1281cleanup_ringbuffer:
1282 mutex_lock(&dev->struct_mutex);
1283 i915_gem_cleanup_ringbuffer(dev);
1284 mutex_unlock(&dev->struct_mutex);
1285out: 1322out:
1286 return ret; 1323 return ret;
1287} 1324}
@@ -1982,7 +2019,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1982 2019
1983 dev->driver->get_vblank_counter = i915_get_vblank_counter; 2020 dev->driver->get_vblank_counter = i915_get_vblank_counter;
1984 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ 2021 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
1985 if (IS_G4X(dev) || IS_GEN5(dev) || IS_GEN6(dev)) { 2022 if (IS_G4X(dev) || IS_GEN5(dev) || IS_GEN6(dev) || IS_IVYBRIDGE(dev)) {
1986 dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */ 2023 dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
1987 dev->driver->get_vblank_counter = gm45_get_vblank_counter; 2024 dev->driver->get_vblank_counter = gm45_get_vblank_counter;
1988 } 2025 }
@@ -2025,6 +2062,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
2025 2062
2026 spin_lock_init(&dev_priv->irq_lock); 2063 spin_lock_init(&dev_priv->irq_lock);
2027 spin_lock_init(&dev_priv->error_lock); 2064 spin_lock_init(&dev_priv->error_lock);
2065 spin_lock_init(&dev_priv->rps_lock);
2028 2066
2029 if (IS_MOBILE(dev) || !IS_GEN2(dev)) 2067 if (IS_MOBILE(dev) || !IS_GEN2(dev))
2030 dev_priv->num_pipe = 2; 2068 dev_priv->num_pipe = 2;
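Note: rps_lock, pm_iir and rps_work are plumbing for deferring render p-state interrupt handling out of the IRQ path. A hedged sketch of the intended interplay — the real consumer is the gen6 PM interrupt code elsewhere in this series, and dev_priv->wq is assumed to be the driver's existing workqueue:

/* IRQ side: accumulate PM interrupt bits under the new spinlock,
 * then let rps_work do the heavy lifting in process context. */
static void queue_rps_update(drm_i915_private_t *dev_priv, u32 pm_iir)
{
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->rps_lock, flags);
	dev_priv->pm_iir |= pm_iir;
	spin_unlock_irqrestore(&dev_priv->rps_lock, flags);
	queue_work(dev_priv->wq, &dev_priv->rps_work);
}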
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 32d1b3e829c8..0defd4270594 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -52,9 +52,12 @@ module_param_named(powersave, i915_powersave, int, 0600);
52unsigned int i915_semaphores = 0; 52unsigned int i915_semaphores = 0;
53module_param_named(semaphores, i915_semaphores, int, 0600); 53module_param_named(semaphores, i915_semaphores, int, 0600);
54 54
55unsigned int i915_enable_rc6 = 0; 55unsigned int i915_enable_rc6 = 1;
56module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0600); 56module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0600);
57 57
58unsigned int i915_enable_fbc = 0;
59module_param_named(i915_enable_fbc, i915_enable_fbc, int, 0600);
60
58unsigned int i915_lvds_downclock = 0; 61unsigned int i915_lvds_downclock = 0;
59module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400); 62module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400);
60 63
@@ -169,7 +172,7 @@ static const struct intel_device_info intel_ironlake_d_info = {
169static const struct intel_device_info intel_ironlake_m_info = { 172static const struct intel_device_info intel_ironlake_m_info = {
170 .gen = 5, .is_mobile = 1, 173 .gen = 5, .is_mobile = 1,
171 .need_gfx_hws = 1, .has_hotplug = 1, 174 .need_gfx_hws = 1, .has_hotplug = 1,
172 .has_fbc = 0, /* disabled due to buggy hardware */ 175 .has_fbc = 1,
173 .has_bsd_ring = 1, 176 .has_bsd_ring = 1,
174}; 177};
175 178
@@ -188,6 +191,21 @@ static const struct intel_device_info intel_sandybridge_m_info = {
188 .has_blt_ring = 1, 191 .has_blt_ring = 1,
189}; 192};
190 193
194static const struct intel_device_info intel_ivybridge_d_info = {
195 .is_ivybridge = 1, .gen = 7,
196 .need_gfx_hws = 1, .has_hotplug = 1,
197 .has_bsd_ring = 1,
198 .has_blt_ring = 1,
199};
200
201static const struct intel_device_info intel_ivybridge_m_info = {
202 .is_ivybridge = 1, .gen = 7, .is_mobile = 1,
203 .need_gfx_hws = 1, .has_hotplug = 1,
204 .has_fbc = 0, /* FBC is not enabled on Ivybridge mobile yet */
205 .has_bsd_ring = 1,
206 .has_blt_ring = 1,
207};
208
191static const struct pci_device_id pciidlist[] = { /* aka */ 209static const struct pci_device_id pciidlist[] = { /* aka */
192 INTEL_VGA_DEVICE(0x3577, &intel_i830_info), /* I830_M */ 210 INTEL_VGA_DEVICE(0x3577, &intel_i830_info), /* I830_M */
193 INTEL_VGA_DEVICE(0x2562, &intel_845g_info), /* 845_G */ 211 INTEL_VGA_DEVICE(0x2562, &intel_845g_info), /* 845_G */
@@ -227,6 +245,11 @@ static const struct pci_device_id pciidlist[] = { /* aka */
227 INTEL_VGA_DEVICE(0x0116, &intel_sandybridge_m_info), 245 INTEL_VGA_DEVICE(0x0116, &intel_sandybridge_m_info),
228 INTEL_VGA_DEVICE(0x0126, &intel_sandybridge_m_info), 246 INTEL_VGA_DEVICE(0x0126, &intel_sandybridge_m_info),
229 INTEL_VGA_DEVICE(0x010A, &intel_sandybridge_d_info), 247 INTEL_VGA_DEVICE(0x010A, &intel_sandybridge_d_info),
248 INTEL_VGA_DEVICE(0x0156, &intel_ivybridge_m_info), /* GT1 mobile */
249 INTEL_VGA_DEVICE(0x0166, &intel_ivybridge_m_info), /* GT2 mobile */
250 INTEL_VGA_DEVICE(0x0152, &intel_ivybridge_d_info), /* GT1 desktop */
251 INTEL_VGA_DEVICE(0x0162, &intel_ivybridge_d_info), /* GT2 desktop */
252 INTEL_VGA_DEVICE(0x015a, &intel_ivybridge_d_info), /* GT1 server */
230 {0, 0, 0} 253 {0, 0, 0}
231}; 254};
232 255
@@ -235,7 +258,9 @@ MODULE_DEVICE_TABLE(pci, pciidlist);
235#endif 258#endif
236 259
237#define INTEL_PCH_DEVICE_ID_MASK 0xff00 260#define INTEL_PCH_DEVICE_ID_MASK 0xff00
261#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
238#define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00 262#define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00
263#define INTEL_PCH_PPT_DEVICE_ID_TYPE 0x1e00
239 264
240void intel_detect_pch (struct drm_device *dev) 265void intel_detect_pch (struct drm_device *dev)
241{ 266{
@@ -254,16 +279,23 @@ void intel_detect_pch (struct drm_device *dev)
254 int id; 279 int id;
255 id = pch->device & INTEL_PCH_DEVICE_ID_MASK; 280 id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
256 281
257 if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) { 282 if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
283 dev_priv->pch_type = PCH_IBX;
284 DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
285 } else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
258 dev_priv->pch_type = PCH_CPT; 286 dev_priv->pch_type = PCH_CPT;
259 DRM_DEBUG_KMS("Found CougarPoint PCH\n"); 287 DRM_DEBUG_KMS("Found CougarPoint PCH\n");
288 } else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
289 /* PantherPoint is CPT compatible */
290 dev_priv->pch_type = PCH_CPT;
291 DRM_DEBUG_KMS("Found PantherPoint PCH\n");
260 } 292 }
261 } 293 }
262 pci_dev_put(pch); 294 pci_dev_put(pch);
263 } 295 }
264} 296}
265 297
266void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv) 298static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
267{ 299{
268 int count; 300 int count;
269 301
@@ -279,12 +311,38 @@ void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
279 udelay(10); 311 udelay(10);
280} 312}
281 313
282void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv) 314/*
315 * Generally this is called implicitly by the register read function. However,
316 * if some sequence requires the GT to not power down then this function should
317 * be called at the beginning of the sequence followed by a call to
318 * gen6_gt_force_wake_put() at the end of the sequence.
319 */
320void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
321{
322 WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
323
324 /* Forcewake is atomic in case we get in here without the lock */
325 if (atomic_add_return(1, &dev_priv->forcewake_count) == 1)
326 __gen6_gt_force_wake_get(dev_priv);
327}
328
329static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
283{ 330{
284 I915_WRITE_NOTRACE(FORCEWAKE, 0); 331 I915_WRITE_NOTRACE(FORCEWAKE, 0);
285 POSTING_READ(FORCEWAKE); 332 POSTING_READ(FORCEWAKE);
286} 333}
287 334
335/*
336 * see gen6_gt_force_wake_get()
337 */
338void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
339{
340 WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
341
342 if (atomic_dec_and_test(&dev_priv->forcewake_count))
343 __gen6_gt_force_wake_put(dev_priv);
344}
345
288void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv) 346void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
289{ 347{
290 int loop = 500; 348 int loop = 500;
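Note: the reworked wrappers make forcewake refcounted (forcewake_count) and assert struct_mutex, so explicit sequences compose safely with the per-read fast path. The calling convention, mirroring the debugfs hunk earlier in this patch (helper name hypothetical):

static int read_gt_register(struct drm_device *dev, u32 *out)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	gen6_gt_force_wake_get(dev_priv);
	*out = I915_READ(GEN6_RPSTAT1);	/* lives in the GT power well */
	gen6_gt_force_wake_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);
	return 0;
}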
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 1c1b27c97e5c..ee660355ae68 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -188,7 +188,7 @@ struct drm_i915_error_state {
188 u32 dirty:1; 188 u32 dirty:1;
189 u32 purgeable:1; 189 u32 purgeable:1;
190 u32 ring:4; 190 u32 ring:4;
191 u32 agp_type:1; 191 u32 cache_level:2;
192 } *active_bo, *pinned_bo; 192 } *active_bo, *pinned_bo;
193 u32 active_bo_count, pinned_bo_count; 193 u32 active_bo_count, pinned_bo_count;
194 struct intel_overlay_error_state *overlay; 194 struct intel_overlay_error_state *overlay;
@@ -203,12 +203,19 @@ struct drm_i915_display_funcs {
203 int (*get_display_clock_speed)(struct drm_device *dev); 203 int (*get_display_clock_speed)(struct drm_device *dev);
204 int (*get_fifo_size)(struct drm_device *dev, int plane); 204 int (*get_fifo_size)(struct drm_device *dev, int plane);
205 void (*update_wm)(struct drm_device *dev); 205 void (*update_wm)(struct drm_device *dev);
206 int (*crtc_mode_set)(struct drm_crtc *crtc,
207 struct drm_display_mode *mode,
208 struct drm_display_mode *adjusted_mode,
209 int x, int y,
210 struct drm_framebuffer *old_fb);
211 void (*fdi_link_train)(struct drm_crtc *crtc);
212 void (*init_clock_gating)(struct drm_device *dev);
213 void (*init_pch_clock_gating)(struct drm_device *dev);
206 /* clock updates for mode set */ 214 /* clock updates for mode set */
207 /* cursor updates */ 215 /* cursor updates */
208 /* render clock increase/decrease */ 216 /* render clock increase/decrease */
209 /* display clock increase/decrease */ 217 /* display clock increase/decrease */
210 /* pll clock increase/decrease */ 218 /* pll clock increase/decrease */
211 /* clock gating init */
212}; 219};
213 220
214struct intel_device_info { 221struct intel_device_info {
@@ -223,6 +230,7 @@ struct intel_device_info {
223 u8 is_pineview : 1; 230 u8 is_pineview : 1;
224 u8 is_broadwater : 1; 231 u8 is_broadwater : 1;
225 u8 is_crestline : 1; 232 u8 is_crestline : 1;
233 u8 is_ivybridge : 1;
226 u8 has_fbc : 1; 234 u8 has_fbc : 1;
227 u8 has_pipe_cxsr : 1; 235 u8 has_pipe_cxsr : 1;
228 u8 has_hotplug : 1; 236 u8 has_hotplug : 1;
@@ -242,6 +250,7 @@ enum no_fbc_reason {
242 FBC_BAD_PLANE, /* fbc not supported on plane */ 250 FBC_BAD_PLANE, /* fbc not supported on plane */
243 FBC_NOT_TILED, /* buffer not tiled */ 251 FBC_NOT_TILED, /* buffer not tiled */
244 FBC_MULTIPLE_PIPES, /* more than one pipe active */ 252 FBC_MULTIPLE_PIPES, /* more than one pipe active */
253 FBC_MODULE_PARAM,
245}; 254};
246 255
247enum intel_pch { 256enum intel_pch {
@@ -676,6 +685,10 @@ typedef struct drm_i915_private {
676 685
677 bool mchbar_need_disable; 686 bool mchbar_need_disable;
678 687
688 struct work_struct rps_work;
689 spinlock_t rps_lock;
690 u32 pm_iir;
691
679 u8 cur_delay; 692 u8 cur_delay;
680 u8 min_delay; 693 u8 min_delay;
681 u8 max_delay; 694 u8 max_delay;
@@ -703,8 +716,16 @@ typedef struct drm_i915_private {
703 struct intel_fbdev *fbdev; 716 struct intel_fbdev *fbdev;
704 717
705 struct drm_property *broadcast_rgb_property; 718 struct drm_property *broadcast_rgb_property;
719
720 atomic_t forcewake_count;
706} drm_i915_private_t; 721} drm_i915_private_t;
707 722
723enum i915_cache_level {
724 I915_CACHE_NONE,
725 I915_CACHE_LLC,
726 I915_CACHE_LLC_MLC, /* gen6+ */
727};
728
708struct drm_i915_gem_object { 729struct drm_i915_gem_object {
709 struct drm_gem_object base; 730 struct drm_gem_object base;
710 731
@@ -791,6 +812,8 @@ struct drm_i915_gem_object {
791 unsigned int pending_fenced_gpu_access:1; 812 unsigned int pending_fenced_gpu_access:1;
792 unsigned int fenced_gpu_access:1; 813 unsigned int fenced_gpu_access:1;
793 814
815 unsigned int cache_level:2;
816
794 struct page **pages; 817 struct page **pages;
795 818
796 /** 819 /**
@@ -827,8 +850,6 @@ struct drm_i915_gem_object {
827 /** Record of address bit 17 of each page at last unbind. */ 850 /** Record of address bit 17 of each page at last unbind. */
828 unsigned long *bit_17; 851 unsigned long *bit_17;
829 852
830 /** AGP mapping type (AGP_USER_MEMORY or AGP_USER_CACHED_MEMORY */
831 uint32_t agp_type;
832 853
833 /** 854 /**
834 * If present, while GEM_DOMAIN_CPU is in the read domain this array 855 * If present, while GEM_DOMAIN_CPU is in the read domain this array
@@ -915,13 +936,21 @@ enum intel_chip_family {
915#define IS_G33(dev) (INTEL_INFO(dev)->is_g33) 936#define IS_G33(dev) (INTEL_INFO(dev)->is_g33)
916#define IS_IRONLAKE_D(dev) ((dev)->pci_device == 0x0042) 937#define IS_IRONLAKE_D(dev) ((dev)->pci_device == 0x0042)
917#define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046) 938#define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046)
939#define IS_IVYBRIDGE(dev) (INTEL_INFO(dev)->is_ivybridge)
918#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile) 940#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
919 941
942/*
943 * The genX designation typically refers to the render engine, so render
944 * capability related checks should use IS_GEN, while display and other checks
945 * have their own (e.g. HAS_PCH_SPLIT for ILK+ display, IS_foo for particular
946 * chips, etc.).
947 */
920#define IS_GEN2(dev) (INTEL_INFO(dev)->gen == 2) 948#define IS_GEN2(dev) (INTEL_INFO(dev)->gen == 2)
921#define IS_GEN3(dev) (INTEL_INFO(dev)->gen == 3) 949#define IS_GEN3(dev) (INTEL_INFO(dev)->gen == 3)
922#define IS_GEN4(dev) (INTEL_INFO(dev)->gen == 4) 950#define IS_GEN4(dev) (INTEL_INFO(dev)->gen == 4)
923#define IS_GEN5(dev) (INTEL_INFO(dev)->gen == 5) 951#define IS_GEN5(dev) (INTEL_INFO(dev)->gen == 5)
924#define IS_GEN6(dev) (INTEL_INFO(dev)->gen == 6) 952#define IS_GEN6(dev) (INTEL_INFO(dev)->gen == 6)
953#define IS_GEN7(dev) (INTEL_INFO(dev)->gen == 7)
925 954
926#define HAS_BSD(dev) (INTEL_INFO(dev)->has_bsd_ring) 955#define HAS_BSD(dev) (INTEL_INFO(dev)->has_bsd_ring)
927#define HAS_BLT(dev) (INTEL_INFO(dev)->has_blt_ring) 956#define HAS_BLT(dev) (INTEL_INFO(dev)->has_blt_ring)
@@ -948,8 +977,8 @@ enum intel_chip_family {
948#define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr) 977#define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
949#define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc) 978#define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
950 979
951#define HAS_PCH_SPLIT(dev) (IS_GEN5(dev) || IS_GEN6(dev)) 980#define HAS_PCH_SPLIT(dev) (IS_GEN5(dev) || IS_GEN6(dev) || IS_IVYBRIDGE(dev))
952#define HAS_PIPE_CONTROL(dev) (IS_GEN5(dev) || IS_GEN6(dev)) 981#define HAS_PIPE_CONTROL(dev) (INTEL_INFO(dev)->gen >= 5)
953 982
954#define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type) 983#define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type)
955#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT) 984#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
@@ -967,6 +996,7 @@ extern unsigned int i915_lvds_downclock;
967extern unsigned int i915_panel_use_ssc; 996extern unsigned int i915_panel_use_ssc;
968extern int i915_vbt_sdvo_panel_type; 997extern int i915_vbt_sdvo_panel_type;
969extern unsigned int i915_enable_rc6; 998extern unsigned int i915_enable_rc6;
999extern unsigned int i915_enable_fbc;
970 1000
971extern int i915_suspend(struct drm_device *dev, pm_message_t state); 1001extern int i915_suspend(struct drm_device *dev, pm_message_t state);
972extern int i915_resume(struct drm_device *dev); 1002extern int i915_resume(struct drm_device *dev);
@@ -1010,12 +1040,27 @@ extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS);
1010extern void i915_driver_irq_preinstall(struct drm_device * dev); 1040extern void i915_driver_irq_preinstall(struct drm_device * dev);
1011extern int i915_driver_irq_postinstall(struct drm_device *dev); 1041extern int i915_driver_irq_postinstall(struct drm_device *dev);
1012extern void i915_driver_irq_uninstall(struct drm_device * dev); 1042extern void i915_driver_irq_uninstall(struct drm_device * dev);
1043
1044extern irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS);
1045extern void ironlake_irq_preinstall(struct drm_device *dev);
1046extern int ironlake_irq_postinstall(struct drm_device *dev);
1047extern void ironlake_irq_uninstall(struct drm_device *dev);
1048
1049extern irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS);
1050extern void ivybridge_irq_preinstall(struct drm_device *dev);
1051extern int ivybridge_irq_postinstall(struct drm_device *dev);
1052extern void ivybridge_irq_uninstall(struct drm_device *dev);
1053
1013extern int i915_vblank_pipe_set(struct drm_device *dev, void *data, 1054extern int i915_vblank_pipe_set(struct drm_device *dev, void *data,
1014 struct drm_file *file_priv); 1055 struct drm_file *file_priv);
1015extern int i915_vblank_pipe_get(struct drm_device *dev, void *data, 1056extern int i915_vblank_pipe_get(struct drm_device *dev, void *data,
1016 struct drm_file *file_priv); 1057 struct drm_file *file_priv);
1017extern int i915_enable_vblank(struct drm_device *dev, int crtc); 1058extern int i915_enable_vblank(struct drm_device *dev, int crtc);
1018extern void i915_disable_vblank(struct drm_device *dev, int crtc); 1059extern void i915_disable_vblank(struct drm_device *dev, int crtc);
1060extern int ironlake_enable_vblank(struct drm_device *dev, int crtc);
1061extern void ironlake_disable_vblank(struct drm_device *dev, int crtc);
1062extern int ivybridge_enable_vblank(struct drm_device *dev, int crtc);
1063extern void ivybridge_disable_vblank(struct drm_device *dev, int crtc);
1019extern u32 i915_get_vblank_counter(struct drm_device *dev, int crtc); 1064extern u32 i915_get_vblank_counter(struct drm_device *dev, int crtc);
1020extern u32 gm45_get_vblank_counter(struct drm_device *dev, int crtc); 1065extern u32 gm45_get_vblank_counter(struct drm_device *dev, int crtc);
1021extern int i915_vblank_swap(struct drm_device *dev, void *data, 1066extern int i915_vblank_swap(struct drm_device *dev, void *data,
@@ -1265,6 +1310,7 @@ static inline void intel_unregister_dsm_handler(void) { return; }
1265 1310
1266/* modesetting */ 1311/* modesetting */
1267extern void intel_modeset_init(struct drm_device *dev); 1312extern void intel_modeset_init(struct drm_device *dev);
1313extern void intel_modeset_gem_init(struct drm_device *dev);
1268extern void intel_modeset_cleanup(struct drm_device *dev); 1314extern void intel_modeset_cleanup(struct drm_device *dev);
1269extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state); 1315extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
1270extern void i8xx_disable_fbc(struct drm_device *dev); 1316extern void i8xx_disable_fbc(struct drm_device *dev);
@@ -1312,13 +1358,34 @@ extern void intel_display_print_error_state(struct seq_file *m,
1312 LOCK_TEST_WITH_RETURN(dev, file); \ 1358 LOCK_TEST_WITH_RETURN(dev, file); \
1313} while (0) 1359} while (0)
1314 1360
1361/* On the SNB platform, before reading ring registers the forcewake bit
1362 * must be set to prevent the GT core from powering down and returning
1363 * stale values.
1364 */
1365void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv);
1366void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
1367void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv);
1368
1369/* We give fast paths for the really cool registers */
1370#define NEEDS_FORCE_WAKE(dev_priv, reg) \
1371 (((dev_priv)->info->gen >= 6) && \
1372 ((reg) < 0x40000) && \
1373 ((reg) != FORCEWAKE))
1315 1374
1316#define __i915_read(x, y) \ 1375#define __i915_read(x, y) \
1317static inline u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \ 1376static inline u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
1318 u##x val = read##y(dev_priv->regs + reg); \ 1377 u##x val = 0; \
1378 if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
1379 gen6_gt_force_wake_get(dev_priv); \
1380 val = read##y(dev_priv->regs + reg); \
1381 gen6_gt_force_wake_put(dev_priv); \
1382 } else { \
1383 val = read##y(dev_priv->regs + reg); \
1384 } \
1319 trace_i915_reg_rw(false, reg, val, sizeof(val)); \ 1385 trace_i915_reg_rw(false, reg, val, sizeof(val)); \
1320 return val; \ 1386 return val; \
1321} 1387}
1388
1322__i915_read(8, b) 1389__i915_read(8, b)
1323__i915_read(16, w) 1390__i915_read(16, w)
1324__i915_read(32, l) 1391__i915_read(32, l)
@@ -1328,6 +1395,9 @@ __i915_read(64, q)
1328#define __i915_write(x, y) \ 1395#define __i915_write(x, y) \
1329static inline void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \ 1396static inline void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \
1330 trace_i915_reg_rw(true, reg, val, sizeof(val)); \ 1397 trace_i915_reg_rw(true, reg, val, sizeof(val)); \
1398 if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
1399 __gen6_gt_wait_for_fifo(dev_priv); \
1400 } \
1331 write##y(val, dev_priv->regs + reg); \ 1401 write##y(val, dev_priv->regs + reg); \
1332} 1402}
1333__i915_write(8, b) 1403__i915_write(8, b)
@@ -1356,33 +1426,4 @@ __i915_write(64, q)
1356#define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg) 1426#define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg)
1357 1427
1358 1428
1359/* On SNB platform, before reading ring registers forcewake bit
1360 * must be set to prevent GT core from power down and stale values being
1361 * returned.
1362 */
1363void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv);
1364void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
1365void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv);
1366
1367static inline u32 i915_gt_read(struct drm_i915_private *dev_priv, u32 reg)
1368{
1369 u32 val;
1370
1371 if (dev_priv->info->gen >= 6) {
1372 __gen6_gt_force_wake_get(dev_priv);
1373 val = I915_READ(reg);
1374 __gen6_gt_force_wake_put(dev_priv);
1375 } else
1376 val = I915_READ(reg);
1377
1378 return val;
1379}
1380
1381static inline void i915_gt_write(struct drm_i915_private *dev_priv,
1382 u32 reg, u32 val)
1383{
1384 if (dev_priv->info->gen >= 6)
1385 __gen6_gt_wait_for_fifo(dev_priv);
1386 I915_WRITE(reg, val);
1387}
1388#endif 1429#endif
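The forcewake change above moves the gen6 bracketing out of the open-coded i915_gt_read()/i915_gt_write() helpers and into the __i915_read()/__i915_write() macros themselves, so every MMIO read below 0x40000 on gen6+ is wrapped automatically. A minimal user-space sketch of the read side, with a fake register file and stubbed wake hooks standing in for gen6_gt_force_wake_get()/put() (all names here are illustrative, not the driver's API):

#include <stdint.h>
#include <stdio.h>

static uint32_t regs[0x40000 / 4];	/* fake register file in place of MMIO */
static int forcewake_count;		/* models the FORCEWAKE handshake */

static void force_wake_get(void) { forcewake_count++; /* would set FORCEWAKE and poll the ACK */ }
static void force_wake_put(void) { forcewake_count--; /* would clear FORCEWAKE */ }

static int needs_force_wake(int gen, uint32_t reg)
{
	/* mirrors NEEDS_FORCE_WAKE(): gen6+, low register range,
	 * and never for the FORCEWAKE register itself */
	return gen >= 6 && reg < 0x40000;
}

static uint32_t i915_read32_sketch(int gen, uint32_t reg)
{
	uint32_t val;

	if (needs_force_wake(gen, reg)) {
		force_wake_get();
		val = regs[reg / 4];	/* read while the GT is held awake */
		force_wake_put();
	} else {
		val = regs[reg / 4];
	}
	return val;
}

int main(void)
{
	regs[0x2030 / 4] = 0x1234abcd;
	printf("reg 0x2030 = 0x%08x\n", i915_read32_sketch(6, 0x2030));
	return 0;
}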
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 7ce3f353af33..c6289034e29a 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2673,6 +2673,7 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
2673update: 2673update:
2674 obj->tiling_changed = false; 2674 obj->tiling_changed = false;
2675 switch (INTEL_INFO(dev)->gen) { 2675 switch (INTEL_INFO(dev)->gen) {
2676 case 7:
2676 case 6: 2677 case 6:
2677 ret = sandybridge_write_fence_reg(obj, pipelined); 2678 ret = sandybridge_write_fence_reg(obj, pipelined);
2678 break; 2679 break;
@@ -2706,6 +2707,7 @@ i915_gem_clear_fence_reg(struct drm_device *dev,
2706 uint32_t fence_reg = reg - dev_priv->fence_regs; 2707 uint32_t fence_reg = reg - dev_priv->fence_regs;
2707 2708
2708 switch (INTEL_INFO(dev)->gen) { 2709 switch (INTEL_INFO(dev)->gen) {
2710 case 7:
2709 case 6: 2711 case 6:
2710 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + fence_reg*8, 0); 2712 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + fence_reg*8, 0);
2711 break; 2713 break;
@@ -2878,6 +2880,17 @@ i915_gem_clflush_object(struct drm_i915_gem_object *obj)
2878 if (obj->pages == NULL) 2880 if (obj->pages == NULL)
2879 return; 2881 return;
2880 2882
2883 /* If the GPU is snooping the contents of the CPU cache,
2884 * we do not need to manually clear the CPU cache lines. However,
2885 * the caches are only snooped when the render cache is
2886 * flushed/invalidated. As we always have to emit invalidations
2887 * and flushes when moving into and out of the RENDER domain, correct
2888 * snooping behaviour occurs naturally as the result of our domain
2889 * tracking.
2890 */
2891 if (obj->cache_level != I915_CACHE_NONE)
2892 return;
2893
2881 trace_i915_gem_object_clflush(obj); 2894 trace_i915_gem_object_clflush(obj);
2882 2895
2883 drm_clflush_pages(obj->pages, obj->base.size / PAGE_SIZE); 2896 drm_clflush_pages(obj->pages, obj->base.size / PAGE_SIZE);
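The early return added to i915_gem_clflush_object() above keys entirely off the new cache_level field: snooped objects never need a manual cache flush. Reduced to a sketch with made-up types (the enum stands in for the driver's I915_CACHE_* values):

enum cache_level_sketch { SK_CACHE_NONE, SK_CACHE_LLC, SK_CACHE_LLC_MLC };

struct gem_object_sketch {
	enum cache_level_sketch cache_level;
	void *pages;
};

static void clflush_object_sketch(struct gem_object_sketch *obj)
{
	if (obj->pages == NULL)
		return;

	/* The GPU snoops the CPU cache for any level but SK_CACHE_NONE,
	 * so a manual clflush would only be redundant work. */
	if (obj->cache_level != SK_CACHE_NONE)
		return;

	/* ... the drm_clflush_pages() equivalent would run here ... */
}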
@@ -3569,7 +3582,7 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
3569 obj->base.write_domain = I915_GEM_DOMAIN_CPU; 3582 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3570 obj->base.read_domains = I915_GEM_DOMAIN_CPU; 3583 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3571 3584
3572 obj->agp_type = AGP_USER_MEMORY; 3585 obj->cache_level = I915_CACHE_NONE;
3573 obj->base.driver_private = NULL; 3586 obj->base.driver_private = NULL;
3574 obj->fence_reg = I915_FENCE_REG_NONE; 3587 obj->fence_reg = I915_FENCE_REG_NONE;
3575 INIT_LIST_HEAD(&obj->mm_list); 3588 INIT_LIST_HEAD(&obj->mm_list);
@@ -3845,25 +3858,10 @@ i915_gem_load(struct drm_device *dev)
3845 dev_priv->num_fence_regs = 8; 3858 dev_priv->num_fence_regs = 8;
3846 3859
3847 /* Initialize fence registers to zero */ 3860 /* Initialize fence registers to zero */
3848 switch (INTEL_INFO(dev)->gen) { 3861 for (i = 0; i < dev_priv->num_fence_regs; i++) {
3849 case 6: 3862 i915_gem_clear_fence_reg(dev, &dev_priv->fence_regs[i]);
3850 for (i = 0; i < 16; i++)
3851 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), 0);
3852 break;
3853 case 5:
3854 case 4:
3855 for (i = 0; i < 16; i++)
3856 I915_WRITE64(FENCE_REG_965_0 + (i * 8), 0);
3857 break;
3858 case 3:
3859 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
3860 for (i = 0; i < 8; i++)
3861 I915_WRITE(FENCE_REG_945_8 + (i * 4), 0);
3862 case 2:
3863 for (i = 0; i < 8; i++)
3864 I915_WRITE(FENCE_REG_830_0 + (i * 4), 0);
3865 break;
3866 } 3863 }
3864
3867 i915_gem_detect_bit_6_swizzle(dev); 3865 i915_gem_detect_bit_6_swizzle(dev);
3868 init_waitqueue_head(&dev_priv->pending_flip_queue); 3866 init_waitqueue_head(&dev_priv->pending_flip_queue);
3869 3867
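The fence-register initialisation above loses its hand-unrolled per-generation switch: instead of writing FENCE_REG_SANDYBRIDGE_0/FENCE_REG_965_0/... directly, i915_gem_load() now loops over num_fence_regs and delegates to i915_gem_clear_fence_reg(), which already carries the per-generation layout (including the new gen7 case). The shape of the refactor, with hypothetical names:

struct fence_reg_sketch { int gen; int index; };

/* one routine owns the per-generation register layout */
static void clear_fence_reg_sketch(struct fence_reg_sketch *reg)
{
	switch (reg->gen) {
	case 7:
	case 6:
		/* write the SNB-style 64-bit fence register */
		break;
	default:
		/* older layouts handled here */
		break;
	}
}

static void init_fences_sketch(struct fence_reg_sketch *regs, int num_fence_regs)
{
	for (int i = 0; i < num_fence_regs; i++)
		clear_fence_reg_sketch(&regs[i]);
}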
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index b0abdc64aa9f..e46b645773cf 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -29,6 +29,26 @@
29#include "i915_trace.h" 29#include "i915_trace.h"
30#include "intel_drv.h" 30#include "intel_drv.h"
31 31
32/* XXX kill agp_type! */
33static unsigned int cache_level_to_agp_type(struct drm_device *dev,
34 enum i915_cache_level cache_level)
35{
36 switch (cache_level) {
37 case I915_CACHE_LLC_MLC:
38 if (INTEL_INFO(dev)->gen >= 6)
39 return AGP_USER_CACHED_MEMORY_LLC_MLC;
40 /* Older chipsets do not have this extra level of CPU
 41 * caching, so fall through and request the PTE simply
42 * as cached.
43 */
44 case I915_CACHE_LLC:
45 return AGP_USER_CACHED_MEMORY;
46 default:
47 case I915_CACHE_NONE:
48 return AGP_USER_MEMORY;
49 }
50}
51
32void i915_gem_restore_gtt_mappings(struct drm_device *dev) 52void i915_gem_restore_gtt_mappings(struct drm_device *dev)
33{ 53{
34 struct drm_i915_private *dev_priv = dev->dev_private; 54 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -39,6 +59,9 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
39 (dev_priv->mm.gtt_end - dev_priv->mm.gtt_start) / PAGE_SIZE); 59 (dev_priv->mm.gtt_end - dev_priv->mm.gtt_start) / PAGE_SIZE);
40 60
41 list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) { 61 list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
62 unsigned int agp_type =
63 cache_level_to_agp_type(dev, obj->cache_level);
64
42 i915_gem_clflush_object(obj); 65 i915_gem_clflush_object(obj);
43 66
44 if (dev_priv->mm.gtt->needs_dmar) { 67 if (dev_priv->mm.gtt->needs_dmar) {
@@ -46,15 +69,14 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
46 69
47 intel_gtt_insert_sg_entries(obj->sg_list, 70 intel_gtt_insert_sg_entries(obj->sg_list,
48 obj->num_sg, 71 obj->num_sg,
49 obj->gtt_space->start 72 obj->gtt_space->start >> PAGE_SHIFT,
50 >> PAGE_SHIFT, 73 agp_type);
51 obj->agp_type);
52 } else 74 } else
53 intel_gtt_insert_pages(obj->gtt_space->start 75 intel_gtt_insert_pages(obj->gtt_space->start
54 >> PAGE_SHIFT, 76 >> PAGE_SHIFT,
55 obj->base.size >> PAGE_SHIFT, 77 obj->base.size >> PAGE_SHIFT,
56 obj->pages, 78 obj->pages,
57 obj->agp_type); 79 agp_type);
58 } 80 }
59 81
60 intel_gtt_chipset_flush(); 82 intel_gtt_chipset_flush();
@@ -64,6 +86,7 @@ int i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj)
64{ 86{
65 struct drm_device *dev = obj->base.dev; 87 struct drm_device *dev = obj->base.dev;
66 struct drm_i915_private *dev_priv = dev->dev_private; 88 struct drm_i915_private *dev_priv = dev->dev_private;
89 unsigned int agp_type = cache_level_to_agp_type(dev, obj->cache_level);
67 int ret; 90 int ret;
68 91
69 if (dev_priv->mm.gtt->needs_dmar) { 92 if (dev_priv->mm.gtt->needs_dmar) {
@@ -77,12 +100,12 @@ int i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj)
77 intel_gtt_insert_sg_entries(obj->sg_list, 100 intel_gtt_insert_sg_entries(obj->sg_list,
78 obj->num_sg, 101 obj->num_sg,
79 obj->gtt_space->start >> PAGE_SHIFT, 102 obj->gtt_space->start >> PAGE_SHIFT,
80 obj->agp_type); 103 agp_type);
81 } else 104 } else
82 intel_gtt_insert_pages(obj->gtt_space->start >> PAGE_SHIFT, 105 intel_gtt_insert_pages(obj->gtt_space->start >> PAGE_SHIFT,
83 obj->base.size >> PAGE_SHIFT, 106 obj->base.size >> PAGE_SHIFT,
84 obj->pages, 107 obj->pages,
85 obj->agp_type); 108 agp_type);
86 109
87 return 0; 110 return 0;
88} 111}
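cache_level_to_agp_type() above is a pure mapping with one deliberate fallthrough: LLC+MLC degrades to plain cached on parts older than gen6, which lack the extra cache level. A standalone model of the same shape (enums and names invented for the sketch):

#include <stdio.h>

enum cache_level_sketch { SK_CACHE_NONE, SK_CACHE_LLC, SK_CACHE_LLC_MLC };
enum pte_type_sketch { SK_PTE_UNCACHED, SK_PTE_CACHED, SK_PTE_CACHED_LLC_MLC };

static enum pte_type_sketch pte_type_for(int gen, enum cache_level_sketch level)
{
	switch (level) {
	case SK_CACHE_LLC_MLC:
		if (gen >= 6)
			return SK_PTE_CACHED_LLC_MLC;
		/* fall through: older chipsets only have one cached flavour */
	case SK_CACHE_LLC:
		return SK_PTE_CACHED;
	default:
	case SK_CACHE_NONE:
		return SK_PTE_UNCACHED;
	}
}

int main(void)
{
	printf("gen5: %d, gen6: %d\n",
	       pte_type_for(5, SK_CACHE_LLC_MLC),
	       pte_type_for(6, SK_CACHE_LLC_MLC));	/* prints 1, 2 */
	return 0;
}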
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 281ad3d6115d..82d70fd9e933 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -92,7 +92,7 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
92 uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN; 92 uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
93 uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN; 93 uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
94 94
95 if (IS_GEN5(dev) || IS_GEN6(dev)) { 95 if (INTEL_INFO(dev)->gen >= 5) {
 96 /* On Ironlake, whatever the DRAM config, the GPU always does 96 /* On Ironlake, whatever the DRAM config, the GPU always does
 97 * the same swizzling setup. 97 * the same swizzling setup.
 98 */ 98 */
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 188b497e5076..b79619a7b788 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -367,22 +367,30 @@ static void notify_ring(struct drm_device *dev,
367 jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)); 367 jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
368} 368}
369 369
370static void gen6_pm_irq_handler(struct drm_device *dev) 370static void gen6_pm_rps_work(struct work_struct *work)
371{ 371{
372 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 372 drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
373 rps_work);
373 u8 new_delay = dev_priv->cur_delay; 374 u8 new_delay = dev_priv->cur_delay;
374 u32 pm_iir; 375 u32 pm_iir, pm_imr;
376
377 spin_lock_irq(&dev_priv->rps_lock);
378 pm_iir = dev_priv->pm_iir;
379 dev_priv->pm_iir = 0;
380 pm_imr = I915_READ(GEN6_PMIMR);
381 spin_unlock_irq(&dev_priv->rps_lock);
375 382
376 pm_iir = I915_READ(GEN6_PMIIR);
377 if (!pm_iir) 383 if (!pm_iir)
378 return; 384 return;
379 385
386 mutex_lock(&dev_priv->dev->struct_mutex);
380 if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) { 387 if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
381 if (dev_priv->cur_delay != dev_priv->max_delay) 388 if (dev_priv->cur_delay != dev_priv->max_delay)
382 new_delay = dev_priv->cur_delay + 1; 389 new_delay = dev_priv->cur_delay + 1;
383 if (new_delay > dev_priv->max_delay) 390 if (new_delay > dev_priv->max_delay)
384 new_delay = dev_priv->max_delay; 391 new_delay = dev_priv->max_delay;
385 } else if (pm_iir & (GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT)) { 392 } else if (pm_iir & (GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT)) {
393 gen6_gt_force_wake_get(dev_priv);
386 if (dev_priv->cur_delay != dev_priv->min_delay) 394 if (dev_priv->cur_delay != dev_priv->min_delay)
387 new_delay = dev_priv->cur_delay - 1; 395 new_delay = dev_priv->cur_delay - 1;
388 if (new_delay < dev_priv->min_delay) { 396 if (new_delay < dev_priv->min_delay) {
@@ -396,13 +404,19 @@ static void gen6_pm_irq_handler(struct drm_device *dev)
396 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, 404 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
397 I915_READ(GEN6_RP_INTERRUPT_LIMITS) & ~0x3f0000); 405 I915_READ(GEN6_RP_INTERRUPT_LIMITS) & ~0x3f0000);
398 } 406 }
399 407 gen6_gt_force_wake_put(dev_priv);
400 } 408 }
401 409
402 gen6_set_rps(dev, new_delay); 410 gen6_set_rps(dev_priv->dev, new_delay);
403 dev_priv->cur_delay = new_delay; 411 dev_priv->cur_delay = new_delay;
404 412
405 I915_WRITE(GEN6_PMIIR, pm_iir); 413 /*
414 * rps_lock not held here because clearing is non-destructive. There is
415 * an *extremely* unlikely race with gen6_rps_enable() that is prevented
416 * by holding struct_mutex for the duration of the write.
417 */
418 I915_WRITE(GEN6_PMIMR, pm_imr & ~pm_iir);
419 mutex_unlock(&dev_priv->dev->struct_mutex);
406} 420}
407 421
408static void pch_irq_handler(struct drm_device *dev) 422static void pch_irq_handler(struct drm_device *dev)
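The rework above splits RPS handling into a top half and a bottom half: the interrupt handler masks the event in GEN6_PMIMR, accumulates the bits in dev_priv->pm_iir under rps_lock and queues gen6_pm_rps_work(), which later snapshots and clears the pending bits under the same lock and unmasks only what it consumed. A user-space model of that handoff, with a pthread mutex standing in for the spinlock (all names invented):

#include <pthread.h>
#include <stdint.h>

static pthread_mutex_t rps_lock = PTHREAD_MUTEX_INITIALIZER;
static uint32_t pm_iir_pending;	/* models dev_priv->pm_iir */
static uint32_t pm_imr_shadow;	/* models the GEN6_PMIMR register */

static void irq_top_half(uint32_t pm_iir)
{
	pthread_mutex_lock(&rps_lock);
	pm_imr_shadow |= pm_iir;	/* mask: no repeat until the worker runs */
	pm_iir_pending |= pm_iir;	/* hand the bits to the worker */
	pthread_mutex_unlock(&rps_lock);
	/* queue_work() would be called here */
}

static void rps_worker(void)
{
	uint32_t pm_iir, pm_imr;

	pthread_mutex_lock(&rps_lock);
	pm_iir = pm_iir_pending;
	pm_iir_pending = 0;
	pm_imr = pm_imr_shadow;
	pthread_mutex_unlock(&rps_lock);

	if (!pm_iir)
		return;

	/* ... adjust cur_delay here ... */

	/* unmask only what was consumed; the driver does this write
	 * under struct_mutex rather than rps_lock */
	pm_imr_shadow = pm_imr & ~pm_iir;
}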
@@ -448,8 +462,97 @@ static void pch_irq_handler(struct drm_device *dev)
448 DRM_DEBUG_DRIVER("PCH transcoder A underrun interrupt\n"); 462 DRM_DEBUG_DRIVER("PCH transcoder A underrun interrupt\n");
449} 463}
450 464
451static irqreturn_t ironlake_irq_handler(struct drm_device *dev) 465irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
466{
467 struct drm_device *dev = (struct drm_device *) arg;
468 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
469 int ret = IRQ_NONE;
470 u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
471 struct drm_i915_master_private *master_priv;
472
473 atomic_inc(&dev_priv->irq_received);
474
475 /* disable master interrupt before clearing iir */
476 de_ier = I915_READ(DEIER);
477 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
478 POSTING_READ(DEIER);
479
480 de_iir = I915_READ(DEIIR);
481 gt_iir = I915_READ(GTIIR);
482 pch_iir = I915_READ(SDEIIR);
483 pm_iir = I915_READ(GEN6_PMIIR);
484
485 if (de_iir == 0 && gt_iir == 0 && pch_iir == 0 && pm_iir == 0)
486 goto done;
487
488 ret = IRQ_HANDLED;
489
490 if (dev->primary->master) {
491 master_priv = dev->primary->master->driver_priv;
492 if (master_priv->sarea_priv)
493 master_priv->sarea_priv->last_dispatch =
494 READ_BREADCRUMB(dev_priv);
495 }
496
497 if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY))
498 notify_ring(dev, &dev_priv->ring[RCS]);
499 if (gt_iir & GT_GEN6_BSD_USER_INTERRUPT)
500 notify_ring(dev, &dev_priv->ring[VCS]);
501 if (gt_iir & GT_BLT_USER_INTERRUPT)
502 notify_ring(dev, &dev_priv->ring[BCS]);
503
504 if (de_iir & DE_GSE_IVB)
505 intel_opregion_gse_intr(dev);
506
507 if (de_iir & DE_PLANEA_FLIP_DONE_IVB) {
508 intel_prepare_page_flip(dev, 0);
509 intel_finish_page_flip_plane(dev, 0);
510 }
511
512 if (de_iir & DE_PLANEB_FLIP_DONE_IVB) {
513 intel_prepare_page_flip(dev, 1);
514 intel_finish_page_flip_plane(dev, 1);
515 }
516
517 if (de_iir & DE_PIPEA_VBLANK_IVB)
518 drm_handle_vblank(dev, 0);
519
 520 if (de_iir & DE_PIPEB_VBLANK_IVB)
521 drm_handle_vblank(dev, 1);
522
523 /* check event from PCH */
524 if (de_iir & DE_PCH_EVENT_IVB) {
525 if (pch_iir & SDE_HOTPLUG_MASK_CPT)
526 queue_work(dev_priv->wq, &dev_priv->hotplug_work);
527 pch_irq_handler(dev);
528 }
529
530 if (pm_iir & GEN6_PM_DEFERRED_EVENTS) {
531 unsigned long flags;
532 spin_lock_irqsave(&dev_priv->rps_lock, flags);
533 WARN(dev_priv->pm_iir & pm_iir, "Missed a PM interrupt\n");
534 I915_WRITE(GEN6_PMIMR, pm_iir);
535 dev_priv->pm_iir |= pm_iir;
536 spin_unlock_irqrestore(&dev_priv->rps_lock, flags);
537 queue_work(dev_priv->wq, &dev_priv->rps_work);
538 }
539
 540 /* clear the PCH hotplug event before clearing the CPU irq */
541 I915_WRITE(SDEIIR, pch_iir);
542 I915_WRITE(GTIIR, gt_iir);
543 I915_WRITE(DEIIR, de_iir);
544 I915_WRITE(GEN6_PMIIR, pm_iir);
545
546done:
547 I915_WRITE(DEIER, de_ier);
548 POSTING_READ(DEIER);
549
550 return ret;
551}
552
553irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
452{ 554{
555 struct drm_device *dev = (struct drm_device *) arg;
453 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 556 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
454 int ret = IRQ_NONE; 557 int ret = IRQ_NONE;
455 u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir; 558 u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
@@ -457,6 +560,8 @@ static irqreturn_t ironlake_irq_handler(struct drm_device *dev)
457 struct drm_i915_master_private *master_priv; 560 struct drm_i915_master_private *master_priv;
458 u32 bsd_usr_interrupt = GT_BSD_USER_INTERRUPT; 561 u32 bsd_usr_interrupt = GT_BSD_USER_INTERRUPT;
459 562
563 atomic_inc(&dev_priv->irq_received);
564
460 if (IS_GEN6(dev)) 565 if (IS_GEN6(dev))
461 bsd_usr_interrupt = GT_GEN6_BSD_USER_INTERRUPT; 566 bsd_usr_interrupt = GT_GEN6_BSD_USER_INTERRUPT;
462 567
@@ -526,13 +631,30 @@ static irqreturn_t ironlake_irq_handler(struct drm_device *dev)
526 i915_handle_rps_change(dev); 631 i915_handle_rps_change(dev);
527 } 632 }
528 633
529 if (IS_GEN6(dev)) 634 if (IS_GEN6(dev) && pm_iir & GEN6_PM_DEFERRED_EVENTS) {
530 gen6_pm_irq_handler(dev); 635 /*
636 * IIR bits should never already be set because IMR should
637 * prevent an interrupt from being shown in IIR. The warning
 638 * catches a case where we've unsafely cleared
 639 * dev_priv->pm_iir. Missing an interrupt of the same type is
 640 * not itself a problem, but it indicates a bug in the logic.
641 *
642 * The mask bit in IMR is cleared by rps_work.
643 */
644 unsigned long flags;
645 spin_lock_irqsave(&dev_priv->rps_lock, flags);
646 WARN(dev_priv->pm_iir & pm_iir, "Missed a PM interrupt\n");
647 I915_WRITE(GEN6_PMIMR, pm_iir);
648 dev_priv->pm_iir |= pm_iir;
649 spin_unlock_irqrestore(&dev_priv->rps_lock, flags);
650 queue_work(dev_priv->wq, &dev_priv->rps_work);
651 }
531 652
 532 /* clear the PCH hotplug event before clearing the CPU irq */ 653 /* clear the PCH hotplug event before clearing the CPU irq */
533 I915_WRITE(SDEIIR, pch_iir); 654 I915_WRITE(SDEIIR, pch_iir);
534 I915_WRITE(GTIIR, gt_iir); 655 I915_WRITE(GTIIR, gt_iir);
535 I915_WRITE(DEIIR, de_iir); 656 I915_WRITE(DEIIR, de_iir);
657 I915_WRITE(GEN6_PMIIR, pm_iir);
536 658
537done: 659done:
538 I915_WRITE(DEIER, de_ier); 660 I915_WRITE(DEIER, de_ier);
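Both handlers above follow the same fixed ordering: gate DE_MASTER_IRQ_CONTROL in DEIER so no new interrupt fires mid-handler, snapshot every IIR, dispatch, clear the PCH IIR before the CPU-side ones, then restore DEIER with a posting read. A skeleton of that shape with stubbed register accessors (offsets and the master bit below are illustrative, not taken from the header):

#include <stdint.h>

/* stand-ins for readl()/writel() on the mapped register BAR */
extern uint32_t mmio_read(uint32_t reg);
extern void mmio_write(uint32_t reg, uint32_t val);

#define SK_DEIER	0x4400c		/* illustrative offsets */
#define SK_DEIIR	0x44008
#define SK_SDEIIR	0xc4008
#define SK_MASTER_IRQ	(1u << 31)	/* illustrative master-enable bit */

static int irq_handler_sketch(void)
{
	uint32_t de_ier = mmio_read(SK_DEIER);
	uint32_t de_iir, pch_iir;
	int handled = 0;

	mmio_write(SK_DEIER, de_ier & ~SK_MASTER_IRQ);
	(void)mmio_read(SK_DEIER);		/* posting read */

	de_iir = mmio_read(SK_DEIIR);
	pch_iir = mmio_read(SK_SDEIIR);

	if (de_iir || pch_iir) {
		handled = 1;
		/* ... dispatch ring, flip, vblank and PCH events ... */
		mmio_write(SK_SDEIIR, pch_iir);	/* PCH first */
		mmio_write(SK_DEIIR, de_iir);	/* then the CPU IIR */
	}

	mmio_write(SK_DEIER, de_ier);		/* re-enable the master bit */
	(void)mmio_read(SK_DEIER);
	return handled;
}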
@@ -676,7 +798,7 @@ static u32 capture_bo_list(struct drm_i915_error_buffer *err,
676 err->dirty = obj->dirty; 798 err->dirty = obj->dirty;
677 err->purgeable = obj->madv != I915_MADV_WILLNEED; 799 err->purgeable = obj->madv != I915_MADV_WILLNEED;
678 err->ring = obj->ring ? obj->ring->id : 0; 800 err->ring = obj->ring ? obj->ring->id : 0;
679 err->agp_type = obj->agp_type == AGP_USER_CACHED_MEMORY; 801 err->cache_level = obj->cache_level;
680 802
681 if (++i == count) 803 if (++i == count)
682 break; 804 break;
@@ -1103,9 +1225,6 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
1103 1225
1104 atomic_inc(&dev_priv->irq_received); 1226 atomic_inc(&dev_priv->irq_received);
1105 1227
1106 if (HAS_PCH_SPLIT(dev))
1107 return ironlake_irq_handler(dev);
1108
1109 iir = I915_READ(IIR); 1228 iir = I915_READ(IIR);
1110 1229
1111 if (INTEL_INFO(dev)->gen >= 4) 1230 if (INTEL_INFO(dev)->gen >= 4)
@@ -1344,10 +1463,7 @@ int i915_enable_vblank(struct drm_device *dev, int pipe)
1344 return -EINVAL; 1463 return -EINVAL;
1345 1464
1346 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 1465 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1347 if (HAS_PCH_SPLIT(dev)) 1466 if (INTEL_INFO(dev)->gen >= 4)
1348 ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
1349 DE_PIPEA_VBLANK: DE_PIPEB_VBLANK);
1350 else if (INTEL_INFO(dev)->gen >= 4)
1351 i915_enable_pipestat(dev_priv, pipe, 1467 i915_enable_pipestat(dev_priv, pipe,
1352 PIPE_START_VBLANK_INTERRUPT_ENABLE); 1468 PIPE_START_VBLANK_INTERRUPT_ENABLE);
1353 else 1469 else
@@ -1362,6 +1478,38 @@ int i915_enable_vblank(struct drm_device *dev, int pipe)
1362 return 0; 1478 return 0;
1363} 1479}
1364 1480
1481int ironlake_enable_vblank(struct drm_device *dev, int pipe)
1482{
1483 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1484 unsigned long irqflags;
1485
1486 if (!i915_pipe_enabled(dev, pipe))
1487 return -EINVAL;
1488
1489 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1490 ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
1491 DE_PIPEA_VBLANK: DE_PIPEB_VBLANK);
1492 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1493
1494 return 0;
1495}
1496
1497int ivybridge_enable_vblank(struct drm_device *dev, int pipe)
1498{
1499 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1500 unsigned long irqflags;
1501
1502 if (!i915_pipe_enabled(dev, pipe))
1503 return -EINVAL;
1504
1505 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1506 ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
1507 DE_PIPEA_VBLANK_IVB : DE_PIPEB_VBLANK_IVB);
1508 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1509
1510 return 0;
1511}
1512
1365/* Called from drm generic code, passed 'crtc' which 1513/* Called from drm generic code, passed 'crtc' which
1366 * we use as a pipe index 1514 * we use as a pipe index
1367 */ 1515 */
@@ -1375,13 +1523,31 @@ void i915_disable_vblank(struct drm_device *dev, int pipe)
1375 I915_WRITE(INSTPM, 1523 I915_WRITE(INSTPM,
1376 INSTPM_AGPBUSY_DIS << 16 | INSTPM_AGPBUSY_DIS); 1524 INSTPM_AGPBUSY_DIS << 16 | INSTPM_AGPBUSY_DIS);
1377 1525
1378 if (HAS_PCH_SPLIT(dev)) 1526 i915_disable_pipestat(dev_priv, pipe,
1379 ironlake_disable_display_irq(dev_priv, (pipe == 0) ? 1527 PIPE_VBLANK_INTERRUPT_ENABLE |
1380 DE_PIPEA_VBLANK: DE_PIPEB_VBLANK); 1528 PIPE_START_VBLANK_INTERRUPT_ENABLE);
1381 else 1529 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1382 i915_disable_pipestat(dev_priv, pipe, 1530}
1383 PIPE_VBLANK_INTERRUPT_ENABLE | 1531
1384 PIPE_START_VBLANK_INTERRUPT_ENABLE); 1532void ironlake_disable_vblank(struct drm_device *dev, int pipe)
1533{
1534 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1535 unsigned long irqflags;
1536
1537 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1538 ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
1539 DE_PIPEA_VBLANK: DE_PIPEB_VBLANK);
1540 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1541}
1542
1543void ivybridge_disable_vblank(struct drm_device *dev, int pipe)
1544{
1545 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1546 unsigned long irqflags;
1547
1548 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1549 ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
1550 DE_PIPEA_VBLANK_IVB : DE_PIPEB_VBLANK_IVB);
1385 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 1551 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1386} 1552}
1387 1553
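With vblank enable/disable split out per platform above, the natural wiring is a per-generation table of function pointers chosen once at load time; in the driver this goes through the drm_driver hooks, but the dispatch shape reduces to (sketch only):

struct drm_device;	/* opaque here */

struct vblank_ops_sketch {
	int  (*enable)(struct drm_device *dev, int pipe);
	void (*disable)(struct drm_device *dev, int pipe);
};

extern int  ironlake_enable_vblank(struct drm_device *dev, int pipe);
extern void ironlake_disable_vblank(struct drm_device *dev, int pipe);
extern int  ivybridge_enable_vblank(struct drm_device *dev, int pipe);
extern void ivybridge_disable_vblank(struct drm_device *dev, int pipe);

static const struct vblank_ops_sketch ilk_vblank_ops = {
	.enable  = ironlake_enable_vblank,
	.disable = ironlake_disable_vblank,
};

static const struct vblank_ops_sketch ivb_vblank_ops = {
	.enable  = ivybridge_enable_vblank,
	.disable = ivybridge_disable_vblank,
};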
@@ -1562,10 +1728,17 @@ repeat:
1562 1728
1563/* drm_dma.h hooks 1729/* drm_dma.h hooks
1564*/ 1730*/
1565static void ironlake_irq_preinstall(struct drm_device *dev) 1731void ironlake_irq_preinstall(struct drm_device *dev)
1566{ 1732{
1567 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1733 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1568 1734
1735 atomic_set(&dev_priv->irq_received, 0);
1736
1737 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
1738 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
1739 if (IS_GEN6(dev) || IS_IVYBRIDGE(dev))
1740 INIT_WORK(&dev_priv->rps_work, gen6_pm_rps_work);
1741
1569 I915_WRITE(HWSTAM, 0xeffe); 1742 I915_WRITE(HWSTAM, 0xeffe);
1570 1743
1571 /* XXX hotplug from PCH */ 1744 /* XXX hotplug from PCH */
@@ -1585,7 +1758,7 @@ static void ironlake_irq_preinstall(struct drm_device *dev)
1585 POSTING_READ(SDEIER); 1758 POSTING_READ(SDEIER);
1586} 1759}
1587 1760
1588static int ironlake_irq_postinstall(struct drm_device *dev) 1761int ironlake_irq_postinstall(struct drm_device *dev)
1589{ 1762{
1590 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1763 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1591 /* enable kind of interrupts always enabled */ 1764 /* enable kind of interrupts always enabled */
@@ -1594,6 +1767,13 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
1594 u32 render_irqs; 1767 u32 render_irqs;
1595 u32 hotplug_mask; 1768 u32 hotplug_mask;
1596 1769
1770 DRM_INIT_WAITQUEUE(&dev_priv->ring[RCS].irq_queue);
1771 if (HAS_BSD(dev))
1772 DRM_INIT_WAITQUEUE(&dev_priv->ring[VCS].irq_queue);
1773 if (HAS_BLT(dev))
1774 DRM_INIT_WAITQUEUE(&dev_priv->ring[BCS].irq_queue);
1775
1776 dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
1597 dev_priv->irq_mask = ~display_mask; 1777 dev_priv->irq_mask = ~display_mask;
1598 1778
 1599 /* should always be able to generate an irq */ 1779 /* should always be able to generate an irq */
@@ -1650,6 +1830,56 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
1650 return 0; 1830 return 0;
1651} 1831}
1652 1832
1833int ivybridge_irq_postinstall(struct drm_device *dev)
1834{
1835 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1836 /* enable kind of interrupts always enabled */
1837 u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
1838 DE_PCH_EVENT_IVB | DE_PLANEA_FLIP_DONE_IVB |
1839 DE_PLANEB_FLIP_DONE_IVB;
1840 u32 render_irqs;
1841 u32 hotplug_mask;
1842
1843 DRM_INIT_WAITQUEUE(&dev_priv->ring[RCS].irq_queue);
1844 if (HAS_BSD(dev))
1845 DRM_INIT_WAITQUEUE(&dev_priv->ring[VCS].irq_queue);
1846 if (HAS_BLT(dev))
1847 DRM_INIT_WAITQUEUE(&dev_priv->ring[BCS].irq_queue);
1848
1849 dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
1850 dev_priv->irq_mask = ~display_mask;
1851
 1852 /* should always be able to generate an irq */
1853 I915_WRITE(DEIIR, I915_READ(DEIIR));
1854 I915_WRITE(DEIMR, dev_priv->irq_mask);
1855 I915_WRITE(DEIER, display_mask | DE_PIPEA_VBLANK_IVB |
1856 DE_PIPEB_VBLANK_IVB);
1857 POSTING_READ(DEIER);
1858
1859 dev_priv->gt_irq_mask = ~0;
1860
1861 I915_WRITE(GTIIR, I915_READ(GTIIR));
1862 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
1863
1864 render_irqs = GT_USER_INTERRUPT | GT_GEN6_BSD_USER_INTERRUPT |
1865 GT_BLT_USER_INTERRUPT;
1866 I915_WRITE(GTIER, render_irqs);
1867 POSTING_READ(GTIER);
1868
1869 hotplug_mask = (SDE_CRT_HOTPLUG_CPT |
1870 SDE_PORTB_HOTPLUG_CPT |
1871 SDE_PORTC_HOTPLUG_CPT |
1872 SDE_PORTD_HOTPLUG_CPT);
1873 dev_priv->pch_irq_mask = ~hotplug_mask;
1874
1875 I915_WRITE(SDEIIR, I915_READ(SDEIIR));
1876 I915_WRITE(SDEIMR, dev_priv->pch_irq_mask);
1877 I915_WRITE(SDEIER, hotplug_mask);
1878 POSTING_READ(SDEIER);
1879
1880 return 0;
1881}
1882
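ivybridge_irq_postinstall() above repeats one bring-up recipe per interrupt block (display, GT, PCH): ack any stale IIR bits, program the mask register, enable the wanted sources in IER, then issue a posting read so the writes reach the hardware before the first interrupt can arrive. The recipe factored out as a sketch (stubbed accessors; the real code is not perfectly symmetric, e.g. DEIER also carries the vblank bits):

#include <stdint.h>

extern uint32_t mmio_read(uint32_t reg);
extern void mmio_write(uint32_t reg, uint32_t val);

static void irq_block_bringup(uint32_t iir, uint32_t imr, uint32_t ier,
			      uint32_t wanted)
{
	mmio_write(iir, mmio_read(iir));	/* ack anything stale */
	mmio_write(imr, ~wanted);		/* unmask the wanted sources */
	mmio_write(ier, wanted);		/* enable them */
	(void)mmio_read(ier);			/* posting read */
}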
1653void i915_driver_irq_preinstall(struct drm_device * dev) 1883void i915_driver_irq_preinstall(struct drm_device * dev)
1654{ 1884{
1655 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1885 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -1660,11 +1890,6 @@ void i915_driver_irq_preinstall(struct drm_device * dev)
1660 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func); 1890 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
1661 INIT_WORK(&dev_priv->error_work, i915_error_work_func); 1891 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
1662 1892
1663 if (HAS_PCH_SPLIT(dev)) {
1664 ironlake_irq_preinstall(dev);
1665 return;
1666 }
1667
1668 if (I915_HAS_HOTPLUG(dev)) { 1893 if (I915_HAS_HOTPLUG(dev)) {
1669 I915_WRITE(PORT_HOTPLUG_EN, 0); 1894 I915_WRITE(PORT_HOTPLUG_EN, 0);
1670 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 1895 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
@@ -1688,17 +1913,8 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
1688 u32 enable_mask = I915_INTERRUPT_ENABLE_FIX | I915_INTERRUPT_ENABLE_VAR; 1913 u32 enable_mask = I915_INTERRUPT_ENABLE_FIX | I915_INTERRUPT_ENABLE_VAR;
1689 u32 error_mask; 1914 u32 error_mask;
1690 1915
1691 DRM_INIT_WAITQUEUE(&dev_priv->ring[RCS].irq_queue);
1692 if (HAS_BSD(dev))
1693 DRM_INIT_WAITQUEUE(&dev_priv->ring[VCS].irq_queue);
1694 if (HAS_BLT(dev))
1695 DRM_INIT_WAITQUEUE(&dev_priv->ring[BCS].irq_queue);
1696
1697 dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B; 1916 dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
1698 1917
1699 if (HAS_PCH_SPLIT(dev))
1700 return ironlake_irq_postinstall(dev);
1701
1702 /* Unmask the interrupts that we always want on. */ 1918 /* Unmask the interrupts that we always want on. */
1703 dev_priv->irq_mask = ~I915_INTERRUPT_ENABLE_FIX; 1919 dev_priv->irq_mask = ~I915_INTERRUPT_ENABLE_FIX;
1704 1920
@@ -1767,9 +1983,15 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
1767 return 0; 1983 return 0;
1768} 1984}
1769 1985
1770static void ironlake_irq_uninstall(struct drm_device *dev) 1986void ironlake_irq_uninstall(struct drm_device *dev)
1771{ 1987{
1772 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1988 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1989
1990 if (!dev_priv)
1991 return;
1992
1993 dev_priv->vblank_pipe = 0;
1994
1773 I915_WRITE(HWSTAM, 0xffffffff); 1995 I915_WRITE(HWSTAM, 0xffffffff);
1774 1996
1775 I915_WRITE(DEIMR, 0xffffffff); 1997 I915_WRITE(DEIMR, 0xffffffff);
@@ -1791,11 +2013,6 @@ void i915_driver_irq_uninstall(struct drm_device * dev)
1791 2013
1792 dev_priv->vblank_pipe = 0; 2014 dev_priv->vblank_pipe = 0;
1793 2015
1794 if (HAS_PCH_SPLIT(dev)) {
1795 ironlake_irq_uninstall(dev);
1796 return;
1797 }
1798
1799 if (I915_HAS_HOTPLUG(dev)) { 2016 if (I915_HAS_HOTPLUG(dev)) {
1800 I915_WRITE(PORT_HOTPLUG_EN, 0); 2017 I915_WRITE(PORT_HOTPLUG_EN, 0);
1801 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 2018 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index f39ac3a0fa93..2f967af8e62e 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -291,6 +291,9 @@
291#define RING_MAX_IDLE(base) ((base)+0x54) 291#define RING_MAX_IDLE(base) ((base)+0x54)
292#define RING_HWS_PGA(base) ((base)+0x80) 292#define RING_HWS_PGA(base) ((base)+0x80)
293#define RING_HWS_PGA_GEN6(base) ((base)+0x2080) 293#define RING_HWS_PGA_GEN6(base) ((base)+0x2080)
294#define RENDER_HWS_PGA_GEN7 (0x04080)
295#define BSD_HWS_PGA_GEN7 (0x04180)
296#define BLT_HWS_PGA_GEN7 (0x04280)
294#define RING_ACTHD(base) ((base)+0x74) 297#define RING_ACTHD(base) ((base)+0x74)
295#define RING_NOPID(base) ((base)+0x94) 298#define RING_NOPID(base) ((base)+0x94)
296#define RING_IMR(base) ((base)+0xa8) 299#define RING_IMR(base) ((base)+0xa8)
@@ -2778,6 +2781,19 @@
2778#define DE_PIPEA_VSYNC (1 << 3) 2781#define DE_PIPEA_VSYNC (1 << 3)
2779#define DE_PIPEA_FIFO_UNDERRUN (1 << 0) 2782#define DE_PIPEA_FIFO_UNDERRUN (1 << 0)
2780 2783
2784/* More Ivybridge lolz */
2785#define DE_ERR_DEBUG_IVB (1<<30)
2786#define DE_GSE_IVB (1<<29)
2787#define DE_PCH_EVENT_IVB (1<<28)
2788#define DE_DP_A_HOTPLUG_IVB (1<<27)
2789#define DE_AUX_CHANNEL_A_IVB (1<<26)
2790#define DE_SPRITEB_FLIP_DONE_IVB (1<<9)
2791#define DE_SPRITEA_FLIP_DONE_IVB (1<<4)
2792#define DE_PLANEB_FLIP_DONE_IVB (1<<8)
2793#define DE_PLANEA_FLIP_DONE_IVB (1<<3)
2794#define DE_PIPEB_VBLANK_IVB (1<<5)
2795#define DE_PIPEA_VBLANK_IVB (1<<0)
2796
2781#define DEISR 0x44000 2797#define DEISR 0x44000
2782#define DEIMR 0x44004 2798#define DEIMR 0x44004
2783#define DEIIR 0x44008 2799#define DEIIR 0x44008
@@ -2809,6 +2825,7 @@
2809#define ILK_eDP_A_DISABLE (1<<24) 2825#define ILK_eDP_A_DISABLE (1<<24)
2810#define ILK_DESKTOP (1<<23) 2826#define ILK_DESKTOP (1<<23)
2811#define ILK_DSPCLK_GATE 0x42020 2827#define ILK_DSPCLK_GATE 0x42020
2828#define IVB_VRHUNIT_CLK_GATE (1<<28)
2812#define ILK_DPARB_CLK_GATE (1<<5) 2829#define ILK_DPARB_CLK_GATE (1<<5)
2813#define ILK_DPFD_CLK_GATE (1<<7) 2830#define ILK_DPFD_CLK_GATE (1<<7)
2814 2831
@@ -3057,6 +3074,9 @@
3057#define TRANS_6BPC (2<<5) 3074#define TRANS_6BPC (2<<5)
3058#define TRANS_12BPC (3<<5) 3075#define TRANS_12BPC (3<<5)
3059 3076
3077#define SOUTH_CHICKEN2 0xc2004
3078#define DPLS_EDP_PPS_FIX_DIS (1<<0)
3079
3060#define _FDI_RXA_CHICKEN 0xc200c 3080#define _FDI_RXA_CHICKEN 0xc200c
3061#define _FDI_RXB_CHICKEN 0xc2010 3081#define _FDI_RXB_CHICKEN 0xc2010
3062#define FDI_RX_PHASE_SYNC_POINTER_OVR (1<<1) 3082#define FDI_RX_PHASE_SYNC_POINTER_OVR (1<<1)
@@ -3104,7 +3124,15 @@
3104#define FDI_TX_ENHANCE_FRAME_ENABLE (1<<18) 3124#define FDI_TX_ENHANCE_FRAME_ENABLE (1<<18)
3105/* Ironlake: hardwired to 1 */ 3125/* Ironlake: hardwired to 1 */
3106#define FDI_TX_PLL_ENABLE (1<<14) 3126#define FDI_TX_PLL_ENABLE (1<<14)
3127
3128/* Ivybridge has different bits for lolz */
3129#define FDI_LINK_TRAIN_PATTERN_1_IVB (0<<8)
3130#define FDI_LINK_TRAIN_PATTERN_2_IVB (1<<8)
3131#define FDI_LINK_TRAIN_PATTERN_IDLE_IVB (2<<8)
3132#define FDI_LINK_TRAIN_NONE_IVB (3<<8)
3133
3107/* both Tx and Rx */ 3134/* both Tx and Rx */
3135#define FDI_LINK_TRAIN_AUTO (1<<10)
3108#define FDI_SCRAMBLING_ENABLE (0<<7) 3136#define FDI_SCRAMBLING_ENABLE (0<<7)
3109#define FDI_SCRAMBLING_DISABLE (1<<7) 3137#define FDI_SCRAMBLING_DISABLE (1<<7)
3110 3138
@@ -3114,6 +3142,8 @@
3114#define FDI_RX_CTL(pipe) _PIPE(pipe, _FDI_RXA_CTL, _FDI_RXB_CTL) 3142#define FDI_RX_CTL(pipe) _PIPE(pipe, _FDI_RXA_CTL, _FDI_RXB_CTL)
3115#define FDI_RX_ENABLE (1<<31) 3143#define FDI_RX_ENABLE (1<<31)
3116/* train, dp width same as FDI_TX */ 3144/* train, dp width same as FDI_TX */
3145#define FDI_FS_ERRC_ENABLE (1<<27)
3146#define FDI_FE_ERRC_ENABLE (1<<26)
3117#define FDI_DP_PORT_WIDTH_X8 (7<<19) 3147#define FDI_DP_PORT_WIDTH_X8 (7<<19)
3118#define FDI_8BPC (0<<16) 3148#define FDI_8BPC (0<<16)
3119#define FDI_10BPC (1<<16) 3149#define FDI_10BPC (1<<16)
@@ -3386,7 +3416,7 @@
3386#define GEN6_PMINTRMSK 0xA168 3416#define GEN6_PMINTRMSK 0xA168
3387 3417
3388#define GEN6_PMISR 0x44020 3418#define GEN6_PMISR 0x44020
3389#define GEN6_PMIMR 0x44024 3419#define GEN6_PMIMR 0x44024 /* rps_lock */
3390#define GEN6_PMIIR 0x44028 3420#define GEN6_PMIIR 0x44028
3391#define GEN6_PMIER 0x4402C 3421#define GEN6_PMIER 0x4402C
3392#define GEN6_PM_MBOX_EVENT (1<<25) 3422#define GEN6_PM_MBOX_EVENT (1<<25)
@@ -3396,6 +3426,9 @@
3396#define GEN6_PM_RP_DOWN_THRESHOLD (1<<4) 3426#define GEN6_PM_RP_DOWN_THRESHOLD (1<<4)
3397#define GEN6_PM_RP_UP_EI_EXPIRED (1<<2) 3427#define GEN6_PM_RP_UP_EI_EXPIRED (1<<2)
3398#define GEN6_PM_RP_DOWN_EI_EXPIRED (1<<1) 3428#define GEN6_PM_RP_DOWN_EI_EXPIRED (1<<1)
3429#define GEN6_PM_DEFERRED_EVENTS (GEN6_PM_RP_UP_THRESHOLD | \
3430 GEN6_PM_RP_DOWN_THRESHOLD | \
3431 GEN6_PM_RP_DOWN_TIMEOUT)
3399 3432
3400#define GEN6_PCODE_MAILBOX 0x138124 3433#define GEN6_PCODE_MAILBOX 0x138124
3401#define GEN6_PCODE_READY (1<<31) 3434#define GEN6_PCODE_READY (1<<31)
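GEN6_PM_DEFERRED_EVENTS simply ORs the three RPS threshold/timeout bits together so the interrupt handlers can test and defer them in one step. A tiny self-check of the grouping (bit positions here are illustrative, except DOWN_THRESHOLD which the header defines as 1<<4):

#include <assert.h>
#include <stdint.h>

#define SK_RP_UP_THRESHOLD	(1u << 5)	/* illustrative position */
#define SK_RP_DOWN_THRESHOLD	(1u << 4)
#define SK_RP_DOWN_TIMEOUT	(1u << 6)	/* illustrative position */
#define SK_DEFERRED_EVENTS	(SK_RP_UP_THRESHOLD | \
				 SK_RP_DOWN_THRESHOLD | \
				 SK_RP_DOWN_TIMEOUT)

int main(void)
{
	uint32_t pm_iir = SK_RP_DOWN_TIMEOUT;

	assert(pm_iir & SK_DEFERRED_EVENTS);		/* any of the three qualifies */
	assert((pm_iir & ~SK_DEFERRED_EVENTS) == 0);	/* nothing outside the group */
	return 0;
}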
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index da474153a0a2..60a94d2b5264 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -863,8 +863,7 @@ int i915_restore_state(struct drm_device *dev)
863 I915_WRITE(IMR, dev_priv->saveIMR); 863 I915_WRITE(IMR, dev_priv->saveIMR);
864 } 864 }
865 865
866 /* Clock gating state */ 866 intel_init_clock_gating(dev);
867 intel_enable_clock_gating(dev);
868 867
869 if (IS_IRONLAKE_M(dev)) { 868 if (IS_IRONLAKE_M(dev)) {
870 ironlake_enable_drps(dev); 869 ironlake_enable_drps(dev);
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index fb5b4d426ae0..927442a11925 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -214,9 +214,9 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
214 i915_lvds_downclock) { 214 i915_lvds_downclock) {
215 dev_priv->lvds_downclock_avail = 1; 215 dev_priv->lvds_downclock_avail = 1;
216 dev_priv->lvds_downclock = temp_downclock; 216 dev_priv->lvds_downclock = temp_downclock;
217 DRM_DEBUG_KMS("LVDS downclock is found in VBT. ", 217 DRM_DEBUG_KMS("LVDS downclock is found in VBT. "
218 "Normal Clock %dKHz, downclock %dKHz\n", 218 "Normal Clock %dKHz, downclock %dKHz\n",
219 temp_downclock, panel_fixed_mode->clock); 219 temp_downclock, panel_fixed_mode->clock);
220 } 220 }
221 return; 221 return;
222} 222}
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index d03fc05b39c0..e93f93cc7e78 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -305,13 +305,11 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector)
305} 305}
306 306
307static enum drm_connector_status 307static enum drm_connector_status
308intel_crt_load_detect(struct drm_crtc *crtc, struct intel_crt *crt) 308intel_crt_load_detect(struct intel_crt *crt)
309{ 309{
310 struct drm_encoder *encoder = &crt->base.base; 310 struct drm_device *dev = crt->base.base.dev;
311 struct drm_device *dev = encoder->dev;
312 struct drm_i915_private *dev_priv = dev->dev_private; 311 struct drm_i915_private *dev_priv = dev->dev_private;
313 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 312 uint32_t pipe = to_intel_crtc(crt->base.base.crtc)->pipe;
314 uint32_t pipe = intel_crtc->pipe;
315 uint32_t save_bclrpat; 313 uint32_t save_bclrpat;
316 uint32_t save_vtotal; 314 uint32_t save_vtotal;
317 uint32_t vtotal, vactive; 315 uint32_t vtotal, vactive;
@@ -432,7 +430,6 @@ intel_crt_detect(struct drm_connector *connector, bool force)
432 struct drm_device *dev = connector->dev; 430 struct drm_device *dev = connector->dev;
433 struct intel_crt *crt = intel_attached_crt(connector); 431 struct intel_crt *crt = intel_attached_crt(connector);
434 struct drm_crtc *crtc; 432 struct drm_crtc *crtc;
435 int dpms_mode;
436 enum drm_connector_status status; 433 enum drm_connector_status status;
437 434
438 if (I915_HAS_HOTPLUG(dev)) { 435 if (I915_HAS_HOTPLUG(dev)) {
@@ -454,17 +451,18 @@ intel_crt_detect(struct drm_connector *connector, bool force)
454 /* for pre-945g platforms use load detect */ 451 /* for pre-945g platforms use load detect */
455 crtc = crt->base.base.crtc; 452 crtc = crt->base.base.crtc;
456 if (crtc && crtc->enabled) { 453 if (crtc && crtc->enabled) {
457 status = intel_crt_load_detect(crtc, crt); 454 status = intel_crt_load_detect(crt);
458 } else { 455 } else {
459 crtc = intel_get_load_detect_pipe(&crt->base, connector, 456 struct intel_load_detect_pipe tmp;
460 NULL, &dpms_mode); 457
461 if (crtc) { 458 if (intel_get_load_detect_pipe(&crt->base, connector, NULL,
459 &tmp)) {
462 if (intel_crt_detect_ddc(connector)) 460 if (intel_crt_detect_ddc(connector))
463 status = connector_status_connected; 461 status = connector_status_connected;
464 else 462 else
465 status = intel_crt_load_detect(crtc, crt); 463 status = intel_crt_load_detect(crt);
466 intel_release_load_detect_pipe(&crt->base, 464 intel_release_load_detect_pipe(&crt->base, connector,
467 connector, dpms_mode); 465 &tmp);
468 } else 466 } else
469 status = connector_status_unknown; 467 status = connector_status_unknown;
470 } 468 }
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 2166ee071ddb..f553ddfdc168 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -76,255 +76,6 @@ struct intel_limit {
76 int, int, intel_clock_t *); 76 int, int, intel_clock_t *);
77}; 77};
78 78
79#define I8XX_DOT_MIN 25000
80#define I8XX_DOT_MAX 350000
81#define I8XX_VCO_MIN 930000
82#define I8XX_VCO_MAX 1400000
83#define I8XX_N_MIN 3
84#define I8XX_N_MAX 16
85#define I8XX_M_MIN 96
86#define I8XX_M_MAX 140
87#define I8XX_M1_MIN 18
88#define I8XX_M1_MAX 26
89#define I8XX_M2_MIN 6
90#define I8XX_M2_MAX 16
91#define I8XX_P_MIN 4
92#define I8XX_P_MAX 128
93#define I8XX_P1_MIN 2
94#define I8XX_P1_MAX 33
95#define I8XX_P1_LVDS_MIN 1
96#define I8XX_P1_LVDS_MAX 6
97#define I8XX_P2_SLOW 4
98#define I8XX_P2_FAST 2
99#define I8XX_P2_LVDS_SLOW 14
100#define I8XX_P2_LVDS_FAST 7
101#define I8XX_P2_SLOW_LIMIT 165000
102
103#define I9XX_DOT_MIN 20000
104#define I9XX_DOT_MAX 400000
105#define I9XX_VCO_MIN 1400000
106#define I9XX_VCO_MAX 2800000
107#define PINEVIEW_VCO_MIN 1700000
108#define PINEVIEW_VCO_MAX 3500000
109#define I9XX_N_MIN 1
110#define I9XX_N_MAX 6
111/* Pineview's Ncounter is a ring counter */
112#define PINEVIEW_N_MIN 3
113#define PINEVIEW_N_MAX 6
114#define I9XX_M_MIN 70
115#define I9XX_M_MAX 120
116#define PINEVIEW_M_MIN 2
117#define PINEVIEW_M_MAX 256
118#define I9XX_M1_MIN 10
119#define I9XX_M1_MAX 22
120#define I9XX_M2_MIN 5
121#define I9XX_M2_MAX 9
122/* Pineview M1 is reserved, and must be 0 */
123#define PINEVIEW_M1_MIN 0
124#define PINEVIEW_M1_MAX 0
125#define PINEVIEW_M2_MIN 0
126#define PINEVIEW_M2_MAX 254
127#define I9XX_P_SDVO_DAC_MIN 5
128#define I9XX_P_SDVO_DAC_MAX 80
129#define I9XX_P_LVDS_MIN 7
130#define I9XX_P_LVDS_MAX 98
131#define PINEVIEW_P_LVDS_MIN 7
132#define PINEVIEW_P_LVDS_MAX 112
133#define I9XX_P1_MIN 1
134#define I9XX_P1_MAX 8
135#define I9XX_P2_SDVO_DAC_SLOW 10
136#define I9XX_P2_SDVO_DAC_FAST 5
137#define I9XX_P2_SDVO_DAC_SLOW_LIMIT 200000
138#define I9XX_P2_LVDS_SLOW 14
139#define I9XX_P2_LVDS_FAST 7
140#define I9XX_P2_LVDS_SLOW_LIMIT 112000
141
142/*The parameter is for SDVO on G4x platform*/
143#define G4X_DOT_SDVO_MIN 25000
144#define G4X_DOT_SDVO_MAX 270000
145#define G4X_VCO_MIN 1750000
146#define G4X_VCO_MAX 3500000
147#define G4X_N_SDVO_MIN 1
148#define G4X_N_SDVO_MAX 4
149#define G4X_M_SDVO_MIN 104
150#define G4X_M_SDVO_MAX 138
151#define G4X_M1_SDVO_MIN 17
152#define G4X_M1_SDVO_MAX 23
153#define G4X_M2_SDVO_MIN 5
154#define G4X_M2_SDVO_MAX 11
155#define G4X_P_SDVO_MIN 10
156#define G4X_P_SDVO_MAX 30
157#define G4X_P1_SDVO_MIN 1
158#define G4X_P1_SDVO_MAX 3
159#define G4X_P2_SDVO_SLOW 10
160#define G4X_P2_SDVO_FAST 10
161#define G4X_P2_SDVO_LIMIT 270000
162
163/*The parameter is for HDMI_DAC on G4x platform*/
164#define G4X_DOT_HDMI_DAC_MIN 22000
165#define G4X_DOT_HDMI_DAC_MAX 400000
166#define G4X_N_HDMI_DAC_MIN 1
167#define G4X_N_HDMI_DAC_MAX 4
168#define G4X_M_HDMI_DAC_MIN 104
169#define G4X_M_HDMI_DAC_MAX 138
170#define G4X_M1_HDMI_DAC_MIN 16
171#define G4X_M1_HDMI_DAC_MAX 23
172#define G4X_M2_HDMI_DAC_MIN 5
173#define G4X_M2_HDMI_DAC_MAX 11
174#define G4X_P_HDMI_DAC_MIN 5
175#define G4X_P_HDMI_DAC_MAX 80
176#define G4X_P1_HDMI_DAC_MIN 1
177#define G4X_P1_HDMI_DAC_MAX 8
178#define G4X_P2_HDMI_DAC_SLOW 10
179#define G4X_P2_HDMI_DAC_FAST 5
180#define G4X_P2_HDMI_DAC_LIMIT 165000
181
182/*The parameter is for SINGLE_CHANNEL_LVDS on G4x platform*/
183#define G4X_DOT_SINGLE_CHANNEL_LVDS_MIN 20000
184#define G4X_DOT_SINGLE_CHANNEL_LVDS_MAX 115000
185#define G4X_N_SINGLE_CHANNEL_LVDS_MIN 1
186#define G4X_N_SINGLE_CHANNEL_LVDS_MAX 3
187#define G4X_M_SINGLE_CHANNEL_LVDS_MIN 104
188#define G4X_M_SINGLE_CHANNEL_LVDS_MAX 138
189#define G4X_M1_SINGLE_CHANNEL_LVDS_MIN 17
190#define G4X_M1_SINGLE_CHANNEL_LVDS_MAX 23
191#define G4X_M2_SINGLE_CHANNEL_LVDS_MIN 5
192#define G4X_M2_SINGLE_CHANNEL_LVDS_MAX 11
193#define G4X_P_SINGLE_CHANNEL_LVDS_MIN 28
194#define G4X_P_SINGLE_CHANNEL_LVDS_MAX 112
195#define G4X_P1_SINGLE_CHANNEL_LVDS_MIN 2
196#define G4X_P1_SINGLE_CHANNEL_LVDS_MAX 8
197#define G4X_P2_SINGLE_CHANNEL_LVDS_SLOW 14
198#define G4X_P2_SINGLE_CHANNEL_LVDS_FAST 14
199#define G4X_P2_SINGLE_CHANNEL_LVDS_LIMIT 0
200
201/*The parameter is for DUAL_CHANNEL_LVDS on G4x platform*/
202#define G4X_DOT_DUAL_CHANNEL_LVDS_MIN 80000
203#define G4X_DOT_DUAL_CHANNEL_LVDS_MAX 224000
204#define G4X_N_DUAL_CHANNEL_LVDS_MIN 1
205#define G4X_N_DUAL_CHANNEL_LVDS_MAX 3
206#define G4X_M_DUAL_CHANNEL_LVDS_MIN 104
207#define G4X_M_DUAL_CHANNEL_LVDS_MAX 138
208#define G4X_M1_DUAL_CHANNEL_LVDS_MIN 17
209#define G4X_M1_DUAL_CHANNEL_LVDS_MAX 23
210#define G4X_M2_DUAL_CHANNEL_LVDS_MIN 5
211#define G4X_M2_DUAL_CHANNEL_LVDS_MAX 11
212#define G4X_P_DUAL_CHANNEL_LVDS_MIN 14
213#define G4X_P_DUAL_CHANNEL_LVDS_MAX 42
214#define G4X_P1_DUAL_CHANNEL_LVDS_MIN 2
215#define G4X_P1_DUAL_CHANNEL_LVDS_MAX 6
216#define G4X_P2_DUAL_CHANNEL_LVDS_SLOW 7
217#define G4X_P2_DUAL_CHANNEL_LVDS_FAST 7
218#define G4X_P2_DUAL_CHANNEL_LVDS_LIMIT 0
219
220/*The parameter is for DISPLAY PORT on G4x platform*/
221#define G4X_DOT_DISPLAY_PORT_MIN 161670
222#define G4X_DOT_DISPLAY_PORT_MAX 227000
223#define G4X_N_DISPLAY_PORT_MIN 1
224#define G4X_N_DISPLAY_PORT_MAX 2
225#define G4X_M_DISPLAY_PORT_MIN 97
226#define G4X_M_DISPLAY_PORT_MAX 108
227#define G4X_M1_DISPLAY_PORT_MIN 0x10
228#define G4X_M1_DISPLAY_PORT_MAX 0x12
229#define G4X_M2_DISPLAY_PORT_MIN 0x05
230#define G4X_M2_DISPLAY_PORT_MAX 0x06
231#define G4X_P_DISPLAY_PORT_MIN 10
232#define G4X_P_DISPLAY_PORT_MAX 20
233#define G4X_P1_DISPLAY_PORT_MIN 1
234#define G4X_P1_DISPLAY_PORT_MAX 2
235#define G4X_P2_DISPLAY_PORT_SLOW 10
236#define G4X_P2_DISPLAY_PORT_FAST 10
237#define G4X_P2_DISPLAY_PORT_LIMIT 0
238
239/* Ironlake / Sandybridge */
240/* as we calculate clock using (register_value + 2) for
241 N/M1/M2, so here the range value for them is (actual_value-2).
242 */
243#define IRONLAKE_DOT_MIN 25000
244#define IRONLAKE_DOT_MAX 350000
245#define IRONLAKE_VCO_MIN 1760000
246#define IRONLAKE_VCO_MAX 3510000
247#define IRONLAKE_M1_MIN 12
248#define IRONLAKE_M1_MAX 22
249#define IRONLAKE_M2_MIN 5
250#define IRONLAKE_M2_MAX 9
251#define IRONLAKE_P2_DOT_LIMIT 225000 /* 225Mhz */
252
253/* We have parameter ranges for different type of outputs. */
254
255/* DAC & HDMI Refclk 120Mhz */
256#define IRONLAKE_DAC_N_MIN 1
257#define IRONLAKE_DAC_N_MAX 5
258#define IRONLAKE_DAC_M_MIN 79
259#define IRONLAKE_DAC_M_MAX 127
260#define IRONLAKE_DAC_P_MIN 5
261#define IRONLAKE_DAC_P_MAX 80
262#define IRONLAKE_DAC_P1_MIN 1
263#define IRONLAKE_DAC_P1_MAX 8
264#define IRONLAKE_DAC_P2_SLOW 10
265#define IRONLAKE_DAC_P2_FAST 5
266
267/* LVDS single-channel 120Mhz refclk */
268#define IRONLAKE_LVDS_S_N_MIN 1
269#define IRONLAKE_LVDS_S_N_MAX 3
270#define IRONLAKE_LVDS_S_M_MIN 79
271#define IRONLAKE_LVDS_S_M_MAX 118
272#define IRONLAKE_LVDS_S_P_MIN 28
273#define IRONLAKE_LVDS_S_P_MAX 112
274#define IRONLAKE_LVDS_S_P1_MIN 2
275#define IRONLAKE_LVDS_S_P1_MAX 8
276#define IRONLAKE_LVDS_S_P2_SLOW 14
277#define IRONLAKE_LVDS_S_P2_FAST 14
278
279/* LVDS dual-channel 120Mhz refclk */
280#define IRONLAKE_LVDS_D_N_MIN 1
281#define IRONLAKE_LVDS_D_N_MAX 3
282#define IRONLAKE_LVDS_D_M_MIN 79
283#define IRONLAKE_LVDS_D_M_MAX 127
284#define IRONLAKE_LVDS_D_P_MIN 14
285#define IRONLAKE_LVDS_D_P_MAX 56
286#define IRONLAKE_LVDS_D_P1_MIN 2
287#define IRONLAKE_LVDS_D_P1_MAX 8
288#define IRONLAKE_LVDS_D_P2_SLOW 7
289#define IRONLAKE_LVDS_D_P2_FAST 7
290
291/* LVDS single-channel 100Mhz refclk */
292#define IRONLAKE_LVDS_S_SSC_N_MIN 1
293#define IRONLAKE_LVDS_S_SSC_N_MAX 2
294#define IRONLAKE_LVDS_S_SSC_M_MIN 79
295#define IRONLAKE_LVDS_S_SSC_M_MAX 126
296#define IRONLAKE_LVDS_S_SSC_P_MIN 28
297#define IRONLAKE_LVDS_S_SSC_P_MAX 112
298#define IRONLAKE_LVDS_S_SSC_P1_MIN 2
299#define IRONLAKE_LVDS_S_SSC_P1_MAX 8
300#define IRONLAKE_LVDS_S_SSC_P2_SLOW 14
301#define IRONLAKE_LVDS_S_SSC_P2_FAST 14
302
303/* LVDS dual-channel 100Mhz refclk */
304#define IRONLAKE_LVDS_D_SSC_N_MIN 1
305#define IRONLAKE_LVDS_D_SSC_N_MAX 3
306#define IRONLAKE_LVDS_D_SSC_M_MIN 79
307#define IRONLAKE_LVDS_D_SSC_M_MAX 126
308#define IRONLAKE_LVDS_D_SSC_P_MIN 14
309#define IRONLAKE_LVDS_D_SSC_P_MAX 42
310#define IRONLAKE_LVDS_D_SSC_P1_MIN 2
311#define IRONLAKE_LVDS_D_SSC_P1_MAX 6
312#define IRONLAKE_LVDS_D_SSC_P2_SLOW 7
313#define IRONLAKE_LVDS_D_SSC_P2_FAST 7
314
315/* DisplayPort */
316#define IRONLAKE_DP_N_MIN 1
317#define IRONLAKE_DP_N_MAX 2
318#define IRONLAKE_DP_M_MIN 81
319#define IRONLAKE_DP_M_MAX 90
320#define IRONLAKE_DP_P_MIN 10
321#define IRONLAKE_DP_P_MAX 20
322#define IRONLAKE_DP_P2_FAST 10
323#define IRONLAKE_DP_P2_SLOW 10
324#define IRONLAKE_DP_P2_LIMIT 0
325#define IRONLAKE_DP_P1_MIN 1
326#define IRONLAKE_DP_P1_MAX 2
327
328/* FDI */ 79/* FDI */
329#define IRONLAKE_FDI_FREQ 2700000 /* in kHz for mode->clock */ 80#define IRONLAKE_FDI_FREQ 2700000 /* in kHz for mode->clock */
330 81
@@ -353,292 +104,253 @@ intel_fdi_link_freq(struct drm_device *dev)
353} 104}
354 105
355static const intel_limit_t intel_limits_i8xx_dvo = { 106static const intel_limit_t intel_limits_i8xx_dvo = {
356 .dot = { .min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX }, 107 .dot = { .min = 25000, .max = 350000 },
357 .vco = { .min = I8XX_VCO_MIN, .max = I8XX_VCO_MAX }, 108 .vco = { .min = 930000, .max = 1400000 },
358 .n = { .min = I8XX_N_MIN, .max = I8XX_N_MAX }, 109 .n = { .min = 3, .max = 16 },
359 .m = { .min = I8XX_M_MIN, .max = I8XX_M_MAX }, 110 .m = { .min = 96, .max = 140 },
360 .m1 = { .min = I8XX_M1_MIN, .max = I8XX_M1_MAX }, 111 .m1 = { .min = 18, .max = 26 },
361 .m2 = { .min = I8XX_M2_MIN, .max = I8XX_M2_MAX }, 112 .m2 = { .min = 6, .max = 16 },
362 .p = { .min = I8XX_P_MIN, .max = I8XX_P_MAX }, 113 .p = { .min = 4, .max = 128 },
363 .p1 = { .min = I8XX_P1_MIN, .max = I8XX_P1_MAX }, 114 .p1 = { .min = 2, .max = 33 },
364 .p2 = { .dot_limit = I8XX_P2_SLOW_LIMIT, 115 .p2 = { .dot_limit = 165000,
365 .p2_slow = I8XX_P2_SLOW, .p2_fast = I8XX_P2_FAST }, 116 .p2_slow = 4, .p2_fast = 2 },
366 .find_pll = intel_find_best_PLL, 117 .find_pll = intel_find_best_PLL,
367}; 118};
368 119
369static const intel_limit_t intel_limits_i8xx_lvds = { 120static const intel_limit_t intel_limits_i8xx_lvds = {
370 .dot = { .min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX }, 121 .dot = { .min = 25000, .max = 350000 },
371 .vco = { .min = I8XX_VCO_MIN, .max = I8XX_VCO_MAX }, 122 .vco = { .min = 930000, .max = 1400000 },
372 .n = { .min = I8XX_N_MIN, .max = I8XX_N_MAX }, 123 .n = { .min = 3, .max = 16 },
373 .m = { .min = I8XX_M_MIN, .max = I8XX_M_MAX }, 124 .m = { .min = 96, .max = 140 },
374 .m1 = { .min = I8XX_M1_MIN, .max = I8XX_M1_MAX }, 125 .m1 = { .min = 18, .max = 26 },
375 .m2 = { .min = I8XX_M2_MIN, .max = I8XX_M2_MAX }, 126 .m2 = { .min = 6, .max = 16 },
376 .p = { .min = I8XX_P_MIN, .max = I8XX_P_MAX }, 127 .p = { .min = 4, .max = 128 },
377 .p1 = { .min = I8XX_P1_LVDS_MIN, .max = I8XX_P1_LVDS_MAX }, 128 .p1 = { .min = 1, .max = 6 },
378 .p2 = { .dot_limit = I8XX_P2_SLOW_LIMIT, 129 .p2 = { .dot_limit = 165000,
379 .p2_slow = I8XX_P2_LVDS_SLOW, .p2_fast = I8XX_P2_LVDS_FAST }, 130 .p2_slow = 14, .p2_fast = 7 },
380 .find_pll = intel_find_best_PLL, 131 .find_pll = intel_find_best_PLL,
381}; 132};
382 133
383static const intel_limit_t intel_limits_i9xx_sdvo = { 134static const intel_limit_t intel_limits_i9xx_sdvo = {
384 .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX }, 135 .dot = { .min = 20000, .max = 400000 },
385 .vco = { .min = I9XX_VCO_MIN, .max = I9XX_VCO_MAX }, 136 .vco = { .min = 1400000, .max = 2800000 },
386 .n = { .min = I9XX_N_MIN, .max = I9XX_N_MAX }, 137 .n = { .min = 1, .max = 6 },
387 .m = { .min = I9XX_M_MIN, .max = I9XX_M_MAX }, 138 .m = { .min = 70, .max = 120 },
388 .m1 = { .min = I9XX_M1_MIN, .max = I9XX_M1_MAX }, 139 .m1 = { .min = 10, .max = 22 },
389 .m2 = { .min = I9XX_M2_MIN, .max = I9XX_M2_MAX }, 140 .m2 = { .min = 5, .max = 9 },
390 .p = { .min = I9XX_P_SDVO_DAC_MIN, .max = I9XX_P_SDVO_DAC_MAX }, 141 .p = { .min = 5, .max = 80 },
391 .p1 = { .min = I9XX_P1_MIN, .max = I9XX_P1_MAX }, 142 .p1 = { .min = 1, .max = 8 },
392 .p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT, 143 .p2 = { .dot_limit = 200000,
393 .p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = I9XX_P2_SDVO_DAC_FAST }, 144 .p2_slow = 10, .p2_fast = 5 },
394 .find_pll = intel_find_best_PLL, 145 .find_pll = intel_find_best_PLL,
395}; 146};
396 147
397static const intel_limit_t intel_limits_i9xx_lvds = { 148static const intel_limit_t intel_limits_i9xx_lvds = {
398 .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX }, 149 .dot = { .min = 20000, .max = 400000 },
399 .vco = { .min = I9XX_VCO_MIN, .max = I9XX_VCO_MAX }, 150 .vco = { .min = 1400000, .max = 2800000 },
400 .n = { .min = I9XX_N_MIN, .max = I9XX_N_MAX }, 151 .n = { .min = 1, .max = 6 },
401 .m = { .min = I9XX_M_MIN, .max = I9XX_M_MAX }, 152 .m = { .min = 70, .max = 120 },
402 .m1 = { .min = I9XX_M1_MIN, .max = I9XX_M1_MAX }, 153 .m1 = { .min = 10, .max = 22 },
403 .m2 = { .min = I9XX_M2_MIN, .max = I9XX_M2_MAX }, 154 .m2 = { .min = 5, .max = 9 },
404 .p = { .min = I9XX_P_LVDS_MIN, .max = I9XX_P_LVDS_MAX }, 155 .p = { .min = 7, .max = 98 },
405 .p1 = { .min = I9XX_P1_MIN, .max = I9XX_P1_MAX }, 156 .p1 = { .min = 1, .max = 8 },
406 /* The single-channel range is 25-112Mhz, and dual-channel 157 .p2 = { .dot_limit = 112000,
407 * is 80-224Mhz. Prefer single channel as much as possible. 158 .p2_slow = 14, .p2_fast = 7 },
408 */
409 .p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT,
410 .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_FAST },
411 .find_pll = intel_find_best_PLL, 159 .find_pll = intel_find_best_PLL,
412}; 160};
413 161
414 /* below parameter and function is for G4X Chipset Family*/ 162
415static const intel_limit_t intel_limits_g4x_sdvo = { 163static const intel_limit_t intel_limits_g4x_sdvo = {
416 .dot = { .min = G4X_DOT_SDVO_MIN, .max = G4X_DOT_SDVO_MAX }, 164 .dot = { .min = 25000, .max = 270000 },
417 .vco = { .min = G4X_VCO_MIN, .max = G4X_VCO_MAX}, 165 .vco = { .min = 1750000, .max = 3500000},
418 .n = { .min = G4X_N_SDVO_MIN, .max = G4X_N_SDVO_MAX }, 166 .n = { .min = 1, .max = 4 },
419 .m = { .min = G4X_M_SDVO_MIN, .max = G4X_M_SDVO_MAX }, 167 .m = { .min = 104, .max = 138 },
420 .m1 = { .min = G4X_M1_SDVO_MIN, .max = G4X_M1_SDVO_MAX }, 168 .m1 = { .min = 17, .max = 23 },
421 .m2 = { .min = G4X_M2_SDVO_MIN, .max = G4X_M2_SDVO_MAX }, 169 .m2 = { .min = 5, .max = 11 },
422 .p = { .min = G4X_P_SDVO_MIN, .max = G4X_P_SDVO_MAX }, 170 .p = { .min = 10, .max = 30 },
423 .p1 = { .min = G4X_P1_SDVO_MIN, .max = G4X_P1_SDVO_MAX}, 171 .p1 = { .min = 1, .max = 3},
424 .p2 = { .dot_limit = G4X_P2_SDVO_LIMIT, 172 .p2 = { .dot_limit = 270000,
425 .p2_slow = G4X_P2_SDVO_SLOW, 173 .p2_slow = 10,
426 .p2_fast = G4X_P2_SDVO_FAST 174 .p2_fast = 10
427 }, 175 },
428 .find_pll = intel_g4x_find_best_PLL, 176 .find_pll = intel_g4x_find_best_PLL,
429}; 177};
430 178
431static const intel_limit_t intel_limits_g4x_hdmi = { 179static const intel_limit_t intel_limits_g4x_hdmi = {
432 .dot = { .min = G4X_DOT_HDMI_DAC_MIN, .max = G4X_DOT_HDMI_DAC_MAX }, 180 .dot = { .min = 22000, .max = 400000 },
433 .vco = { .min = G4X_VCO_MIN, .max = G4X_VCO_MAX}, 181 .vco = { .min = 1750000, .max = 3500000},
434 .n = { .min = G4X_N_HDMI_DAC_MIN, .max = G4X_N_HDMI_DAC_MAX }, 182 .n = { .min = 1, .max = 4 },
435 .m = { .min = G4X_M_HDMI_DAC_MIN, .max = G4X_M_HDMI_DAC_MAX }, 183 .m = { .min = 104, .max = 138 },
-	.m1 = { .min = G4X_M1_HDMI_DAC_MIN, .max = G4X_M1_HDMI_DAC_MAX },
-	.m2 = { .min = G4X_M2_HDMI_DAC_MIN, .max = G4X_M2_HDMI_DAC_MAX },
-	.p = { .min = G4X_P_HDMI_DAC_MIN, .max = G4X_P_HDMI_DAC_MAX },
-	.p1 = { .min = G4X_P1_HDMI_DAC_MIN, .max = G4X_P1_HDMI_DAC_MAX},
-	.p2 = { .dot_limit = G4X_P2_HDMI_DAC_LIMIT,
-		.p2_slow = G4X_P2_HDMI_DAC_SLOW,
-		.p2_fast = G4X_P2_HDMI_DAC_FAST
-	},
+	.m1 = { .min = 16, .max = 23 },
+	.m2 = { .min = 5, .max = 11 },
+	.p = { .min = 5, .max = 80 },
+	.p1 = { .min = 1, .max = 8},
+	.p2 = { .dot_limit = 165000,
+		.p2_slow = 10, .p2_fast = 5 },
 	.find_pll = intel_g4x_find_best_PLL,
 };
 
 static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
-	.dot = { .min = G4X_DOT_SINGLE_CHANNEL_LVDS_MIN,
-		 .max = G4X_DOT_SINGLE_CHANNEL_LVDS_MAX },
-	.vco = { .min = G4X_VCO_MIN,
-		 .max = G4X_VCO_MAX },
-	.n = { .min = G4X_N_SINGLE_CHANNEL_LVDS_MIN,
-	       .max = G4X_N_SINGLE_CHANNEL_LVDS_MAX },
-	.m = { .min = G4X_M_SINGLE_CHANNEL_LVDS_MIN,
-	       .max = G4X_M_SINGLE_CHANNEL_LVDS_MAX },
-	.m1 = { .min = G4X_M1_SINGLE_CHANNEL_LVDS_MIN,
-		.max = G4X_M1_SINGLE_CHANNEL_LVDS_MAX },
-	.m2 = { .min = G4X_M2_SINGLE_CHANNEL_LVDS_MIN,
-		.max = G4X_M2_SINGLE_CHANNEL_LVDS_MAX },
-	.p = { .min = G4X_P_SINGLE_CHANNEL_LVDS_MIN,
-	       .max = G4X_P_SINGLE_CHANNEL_LVDS_MAX },
-	.p1 = { .min = G4X_P1_SINGLE_CHANNEL_LVDS_MIN,
-		.max = G4X_P1_SINGLE_CHANNEL_LVDS_MAX },
-	.p2 = { .dot_limit = G4X_P2_SINGLE_CHANNEL_LVDS_LIMIT,
-		.p2_slow = G4X_P2_SINGLE_CHANNEL_LVDS_SLOW,
-		.p2_fast = G4X_P2_SINGLE_CHANNEL_LVDS_FAST
+	.dot = { .min = 20000, .max = 115000 },
+	.vco = { .min = 1750000, .max = 3500000 },
+	.n = { .min = 1, .max = 3 },
+	.m = { .min = 104, .max = 138 },
+	.m1 = { .min = 17, .max = 23 },
+	.m2 = { .min = 5, .max = 11 },
+	.p = { .min = 28, .max = 112 },
+	.p1 = { .min = 2, .max = 8 },
+	.p2 = { .dot_limit = 0,
+		.p2_slow = 14, .p2_fast = 14
 	},
 	.find_pll = intel_g4x_find_best_PLL,
 };
 
 static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
-	.dot = { .min = G4X_DOT_DUAL_CHANNEL_LVDS_MIN,
-		 .max = G4X_DOT_DUAL_CHANNEL_LVDS_MAX },
-	.vco = { .min = G4X_VCO_MIN,
-		 .max = G4X_VCO_MAX },
-	.n = { .min = G4X_N_DUAL_CHANNEL_LVDS_MIN,
-	       .max = G4X_N_DUAL_CHANNEL_LVDS_MAX },
-	.m = { .min = G4X_M_DUAL_CHANNEL_LVDS_MIN,
-	       .max = G4X_M_DUAL_CHANNEL_LVDS_MAX },
-	.m1 = { .min = G4X_M1_DUAL_CHANNEL_LVDS_MIN,
-		.max = G4X_M1_DUAL_CHANNEL_LVDS_MAX },
-	.m2 = { .min = G4X_M2_DUAL_CHANNEL_LVDS_MIN,
-		.max = G4X_M2_DUAL_CHANNEL_LVDS_MAX },
-	.p = { .min = G4X_P_DUAL_CHANNEL_LVDS_MIN,
-	       .max = G4X_P_DUAL_CHANNEL_LVDS_MAX },
-	.p1 = { .min = G4X_P1_DUAL_CHANNEL_LVDS_MIN,
-		.max = G4X_P1_DUAL_CHANNEL_LVDS_MAX },
-	.p2 = { .dot_limit = G4X_P2_DUAL_CHANNEL_LVDS_LIMIT,
-		.p2_slow = G4X_P2_DUAL_CHANNEL_LVDS_SLOW,
-		.p2_fast = G4X_P2_DUAL_CHANNEL_LVDS_FAST
+	.dot = { .min = 80000, .max = 224000 },
+	.vco = { .min = 1750000, .max = 3500000 },
+	.n = { .min = 1, .max = 3 },
+	.m = { .min = 104, .max = 138 },
+	.m1 = { .min = 17, .max = 23 },
+	.m2 = { .min = 5, .max = 11 },
+	.p = { .min = 14, .max = 42 },
+	.p1 = { .min = 2, .max = 6 },
+	.p2 = { .dot_limit = 0,
+		.p2_slow = 7, .p2_fast = 7
 	},
 	.find_pll = intel_g4x_find_best_PLL,
 };
 
 static const intel_limit_t intel_limits_g4x_display_port = {
-	.dot = { .min = G4X_DOT_DISPLAY_PORT_MIN,
-		 .max = G4X_DOT_DISPLAY_PORT_MAX },
-	.vco = { .min = G4X_VCO_MIN,
-		 .max = G4X_VCO_MAX},
-	.n = { .min = G4X_N_DISPLAY_PORT_MIN,
-	       .max = G4X_N_DISPLAY_PORT_MAX },
-	.m = { .min = G4X_M_DISPLAY_PORT_MIN,
-	       .max = G4X_M_DISPLAY_PORT_MAX },
-	.m1 = { .min = G4X_M1_DISPLAY_PORT_MIN,
-		.max = G4X_M1_DISPLAY_PORT_MAX },
-	.m2 = { .min = G4X_M2_DISPLAY_PORT_MIN,
-		.max = G4X_M2_DISPLAY_PORT_MAX },
-	.p = { .min = G4X_P_DISPLAY_PORT_MIN,
-	       .max = G4X_P_DISPLAY_PORT_MAX },
-	.p1 = { .min = G4X_P1_DISPLAY_PORT_MIN,
-		.max = G4X_P1_DISPLAY_PORT_MAX},
-	.p2 = { .dot_limit = G4X_P2_DISPLAY_PORT_LIMIT,
-		.p2_slow = G4X_P2_DISPLAY_PORT_SLOW,
-		.p2_fast = G4X_P2_DISPLAY_PORT_FAST },
+	.dot = { .min = 161670, .max = 227000 },
+	.vco = { .min = 1750000, .max = 3500000},
+	.n = { .min = 1, .max = 2 },
+	.m = { .min = 97, .max = 108 },
+	.m1 = { .min = 0x10, .max = 0x12 },
+	.m2 = { .min = 0x05, .max = 0x06 },
+	.p = { .min = 10, .max = 20 },
+	.p1 = { .min = 1, .max = 2},
+	.p2 = { .dot_limit = 0,
+		.p2_slow = 10, .p2_fast = 10 },
 	.find_pll = intel_find_pll_g4x_dp,
 };
 
 static const intel_limit_t intel_limits_pineview_sdvo = {
-	.dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX},
-	.vco = { .min = PINEVIEW_VCO_MIN, .max = PINEVIEW_VCO_MAX },
-	.n = { .min = PINEVIEW_N_MIN, .max = PINEVIEW_N_MAX },
-	.m = { .min = PINEVIEW_M_MIN, .max = PINEVIEW_M_MAX },
-	.m1 = { .min = PINEVIEW_M1_MIN, .max = PINEVIEW_M1_MAX },
-	.m2 = { .min = PINEVIEW_M2_MIN, .max = PINEVIEW_M2_MAX },
-	.p = { .min = I9XX_P_SDVO_DAC_MIN, .max = I9XX_P_SDVO_DAC_MAX },
-	.p1 = { .min = I9XX_P1_MIN, .max = I9XX_P1_MAX },
-	.p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT,
-		.p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = I9XX_P2_SDVO_DAC_FAST },
+	.dot = { .min = 20000, .max = 400000},
+	.vco = { .min = 1700000, .max = 3500000 },
+	/* Pineview's Ncounter is a ring counter */
+	.n = { .min = 3, .max = 6 },
+	.m = { .min = 2, .max = 256 },
+	/* Pineview only has one combined m divider, which we treat as m2. */
+	.m1 = { .min = 0, .max = 0 },
+	.m2 = { .min = 0, .max = 254 },
+	.p = { .min = 5, .max = 80 },
+	.p1 = { .min = 1, .max = 8 },
+	.p2 = { .dot_limit = 200000,
+		.p2_slow = 10, .p2_fast = 5 },
 	.find_pll = intel_find_best_PLL,
 };
 
 static const intel_limit_t intel_limits_pineview_lvds = {
-	.dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX },
-	.vco = { .min = PINEVIEW_VCO_MIN, .max = PINEVIEW_VCO_MAX },
-	.n = { .min = PINEVIEW_N_MIN, .max = PINEVIEW_N_MAX },
-	.m = { .min = PINEVIEW_M_MIN, .max = PINEVIEW_M_MAX },
-	.m1 = { .min = PINEVIEW_M1_MIN, .max = PINEVIEW_M1_MAX },
-	.m2 = { .min = PINEVIEW_M2_MIN, .max = PINEVIEW_M2_MAX },
-	.p = { .min = PINEVIEW_P_LVDS_MIN, .max = PINEVIEW_P_LVDS_MAX },
-	.p1 = { .min = I9XX_P1_MIN, .max = I9XX_P1_MAX },
-	/* Pineview only supports single-channel mode. */
-	.p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT,
-		.p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_SLOW },
+	.dot = { .min = 20000, .max = 400000 },
+	.vco = { .min = 1700000, .max = 3500000 },
+	.n = { .min = 3, .max = 6 },
+	.m = { .min = 2, .max = 256 },
+	.m1 = { .min = 0, .max = 0 },
+	.m2 = { .min = 0, .max = 254 },
+	.p = { .min = 7, .max = 112 },
+	.p1 = { .min = 1, .max = 8 },
+	.p2 = { .dot_limit = 112000,
+		.p2_slow = 14, .p2_fast = 14 },
 	.find_pll = intel_find_best_PLL,
 };
 
+/* Ironlake / Sandybridge
+ *
+ * We calculate clock using (register_value + 2) for N/M1/M2, so here
+ * the range value for them is (actual_value - 2).
+ */
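To make the quoted convention concrete: given the equation this file uses elsewhere (refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2), a register-to-frequency helper would look roughly as below. This is an orientation sketch only, not part of the patch; the helper name is invented, and it assumes intel_clock_t carries the raw n/m1/m2/p1/p2 field values.

/* Sketch only; not driver code. */
static int sketch_dot_clock_khz(int refclk_khz, const intel_clock_t *clock)
{
	/* effective M term: 5 * (m1 + 2) + (m2 + 2) */
	int m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
	/* the VCO runs at refclk * M / (N + 2) */
	int vco_khz = refclk_khz * m / (clock->n + 2);

	/* post dividers p1 and p2 bring the VCO down to the dot clock */
	return vco_khz / (clock->p1 * clock->p2);
}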
 static const intel_limit_t intel_limits_ironlake_dac = {
-	.dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX },
-	.vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX },
-	.n = { .min = IRONLAKE_DAC_N_MIN, .max = IRONLAKE_DAC_N_MAX },
-	.m = { .min = IRONLAKE_DAC_M_MIN, .max = IRONLAKE_DAC_M_MAX },
-	.m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX },
-	.m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX },
-	.p = { .min = IRONLAKE_DAC_P_MIN, .max = IRONLAKE_DAC_P_MAX },
-	.p1 = { .min = IRONLAKE_DAC_P1_MIN, .max = IRONLAKE_DAC_P1_MAX },
-	.p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
-		.p2_slow = IRONLAKE_DAC_P2_SLOW,
-		.p2_fast = IRONLAKE_DAC_P2_FAST },
+	.dot = { .min = 25000, .max = 350000 },
+	.vco = { .min = 1760000, .max = 3510000 },
+	.n = { .min = 1, .max = 5 },
+	.m = { .min = 79, .max = 127 },
+	.m1 = { .min = 12, .max = 22 },
+	.m2 = { .min = 5, .max = 9 },
+	.p = { .min = 5, .max = 80 },
+	.p1 = { .min = 1, .max = 8 },
+	.p2 = { .dot_limit = 225000,
+		.p2_slow = 10, .p2_fast = 5 },
 	.find_pll = intel_g4x_find_best_PLL,
 };
 
 static const intel_limit_t intel_limits_ironlake_single_lvds = {
-	.dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX },
-	.vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX },
-	.n = { .min = IRONLAKE_LVDS_S_N_MIN, .max = IRONLAKE_LVDS_S_N_MAX },
-	.m = { .min = IRONLAKE_LVDS_S_M_MIN, .max = IRONLAKE_LVDS_S_M_MAX },
-	.m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX },
-	.m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX },
-	.p = { .min = IRONLAKE_LVDS_S_P_MIN, .max = IRONLAKE_LVDS_S_P_MAX },
-	.p1 = { .min = IRONLAKE_LVDS_S_P1_MIN, .max = IRONLAKE_LVDS_S_P1_MAX },
-	.p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
-		.p2_slow = IRONLAKE_LVDS_S_P2_SLOW,
-		.p2_fast = IRONLAKE_LVDS_S_P2_FAST },
+	.dot = { .min = 25000, .max = 350000 },
+	.vco = { .min = 1760000, .max = 3510000 },
+	.n = { .min = 1, .max = 3 },
+	.m = { .min = 79, .max = 118 },
+	.m1 = { .min = 12, .max = 22 },
+	.m2 = { .min = 5, .max = 9 },
+	.p = { .min = 28, .max = 112 },
+	.p1 = { .min = 2, .max = 8 },
+	.p2 = { .dot_limit = 225000,
+		.p2_slow = 14, .p2_fast = 14 },
 	.find_pll = intel_g4x_find_best_PLL,
 };
 
 static const intel_limit_t intel_limits_ironlake_dual_lvds = {
-	.dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX },
-	.vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX },
-	.n = { .min = IRONLAKE_LVDS_D_N_MIN, .max = IRONLAKE_LVDS_D_N_MAX },
-	.m = { .min = IRONLAKE_LVDS_D_M_MIN, .max = IRONLAKE_LVDS_D_M_MAX },
-	.m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX },
-	.m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX },
-	.p = { .min = IRONLAKE_LVDS_D_P_MIN, .max = IRONLAKE_LVDS_D_P_MAX },
-	.p1 = { .min = IRONLAKE_LVDS_D_P1_MIN, .max = IRONLAKE_LVDS_D_P1_MAX },
-	.p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
-		.p2_slow = IRONLAKE_LVDS_D_P2_SLOW,
-		.p2_fast = IRONLAKE_LVDS_D_P2_FAST },
+	.dot = { .min = 25000, .max = 350000 },
+	.vco = { .min = 1760000, .max = 3510000 },
+	.n = { .min = 1, .max = 3 },
+	.m = { .min = 79, .max = 127 },
+	.m1 = { .min = 12, .max = 22 },
+	.m2 = { .min = 5, .max = 9 },
+	.p = { .min = 14, .max = 56 },
+	.p1 = { .min = 2, .max = 8 },
+	.p2 = { .dot_limit = 225000,
+		.p2_slow = 7, .p2_fast = 7 },
 	.find_pll = intel_g4x_find_best_PLL,
 };
 
+/* LVDS 100mhz refclk limits. */
 static const intel_limit_t intel_limits_ironlake_single_lvds_100m = {
-	.dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX },
-	.vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX },
-	.n = { .min = IRONLAKE_LVDS_S_SSC_N_MIN, .max = IRONLAKE_LVDS_S_SSC_N_MAX },
-	.m = { .min = IRONLAKE_LVDS_S_SSC_M_MIN, .max = IRONLAKE_LVDS_S_SSC_M_MAX },
-	.m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX },
-	.m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX },
-	.p = { .min = IRONLAKE_LVDS_S_SSC_P_MIN, .max = IRONLAKE_LVDS_S_SSC_P_MAX },
-	.p1 = { .min = IRONLAKE_LVDS_S_SSC_P1_MIN,.max = IRONLAKE_LVDS_S_SSC_P1_MAX },
-	.p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
-		.p2_slow = IRONLAKE_LVDS_S_SSC_P2_SLOW,
-		.p2_fast = IRONLAKE_LVDS_S_SSC_P2_FAST },
+	.dot = { .min = 25000, .max = 350000 },
+	.vco = { .min = 1760000, .max = 3510000 },
+	.n = { .min = 1, .max = 2 },
+	.m = { .min = 79, .max = 126 },
+	.m1 = { .min = 12, .max = 22 },
+	.m2 = { .min = 5, .max = 9 },
+	.p = { .min = 28, .max = 112 },
+	.p1 = { .min = 2,.max = 8 },
+	.p2 = { .dot_limit = 225000,
+		.p2_slow = 14, .p2_fast = 14 },
 	.find_pll = intel_g4x_find_best_PLL,
 };
 
 static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
-	.dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX },
-	.vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX },
-	.n = { .min = IRONLAKE_LVDS_D_SSC_N_MIN, .max = IRONLAKE_LVDS_D_SSC_N_MAX },
-	.m = { .min = IRONLAKE_LVDS_D_SSC_M_MIN, .max = IRONLAKE_LVDS_D_SSC_M_MAX },
-	.m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX },
-	.m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX },
-	.p = { .min = IRONLAKE_LVDS_D_SSC_P_MIN, .max = IRONLAKE_LVDS_D_SSC_P_MAX },
-	.p1 = { .min = IRONLAKE_LVDS_D_SSC_P1_MIN,.max = IRONLAKE_LVDS_D_SSC_P1_MAX },
-	.p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
-		.p2_slow = IRONLAKE_LVDS_D_SSC_P2_SLOW,
-		.p2_fast = IRONLAKE_LVDS_D_SSC_P2_FAST },
+	.dot = { .min = 25000, .max = 350000 },
+	.vco = { .min = 1760000, .max = 3510000 },
+	.n = { .min = 1, .max = 3 },
+	.m = { .min = 79, .max = 126 },
+	.m1 = { .min = 12, .max = 22 },
+	.m2 = { .min = 5, .max = 9 },
+	.p = { .min = 14, .max = 42 },
+	.p1 = { .min = 2,.max = 6 },
+	.p2 = { .dot_limit = 225000,
+		.p2_slow = 7, .p2_fast = 7 },
 	.find_pll = intel_g4x_find_best_PLL,
 };
 
 static const intel_limit_t intel_limits_ironlake_display_port = {
-	.dot = { .min = IRONLAKE_DOT_MIN,
-		 .max = IRONLAKE_DOT_MAX },
-	.vco = { .min = IRONLAKE_VCO_MIN,
-		 .max = IRONLAKE_VCO_MAX},
-	.n = { .min = IRONLAKE_DP_N_MIN,
-	       .max = IRONLAKE_DP_N_MAX },
-	.m = { .min = IRONLAKE_DP_M_MIN,
-	       .max = IRONLAKE_DP_M_MAX },
-	.m1 = { .min = IRONLAKE_M1_MIN,
-		.max = IRONLAKE_M1_MAX },
-	.m2 = { .min = IRONLAKE_M2_MIN,
-		.max = IRONLAKE_M2_MAX },
-	.p = { .min = IRONLAKE_DP_P_MIN,
-	       .max = IRONLAKE_DP_P_MAX },
-	.p1 = { .min = IRONLAKE_DP_P1_MIN,
-		.max = IRONLAKE_DP_P1_MAX},
-	.p2 = { .dot_limit = IRONLAKE_DP_P2_LIMIT,
-		.p2_slow = IRONLAKE_DP_P2_SLOW,
-		.p2_fast = IRONLAKE_DP_P2_FAST },
+	.dot = { .min = 25000, .max = 350000 },
+	.vco = { .min = 1760000, .max = 3510000},
+	.n = { .min = 1, .max = 2 },
+	.m = { .min = 81, .max = 90 },
+	.m1 = { .min = 12, .max = 22 },
+	.m2 = { .min = 5, .max = 9 },
+	.p = { .min = 10, .max = 20 },
+	.p1 = { .min = 1, .max = 2},
+	.p2 = { .dot_limit = 0,
+		.p2_slow = 10, .p2_fast = 10 },
 	.find_pll = intel_find_pll_ironlake_dp,
 };
 
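All of these tables feed the find_pll callbacks, which in essence brute-force the divisor space and keep the candidate whose dot clock lands closest to the request. A minimal sketch of that search follows; it is mine, not the driver's, and it deliberately omits the per-field validity checks and the LVDS dual-channel p2 selection that intel_find_best_PLL and intel_g4x_find_best_PLL actually perform. It reuses sketch_dot_clock_khz from above and assumes INT_MAX and abs() via <linux/kernel.h>.

/* Sketch only: exhaustive divisor search in the spirit of find_pll. */
static bool sketch_find_pll(const intel_limit_t *limit, int target_khz,
			    int refclk_khz, intel_clock_t *best)
{
	int best_err = INT_MAX;
	intel_clock_t c;

	/* slow vs fast p2 is chosen from the table's dot_limit */
	c.p2 = target_khz < limit->p2.dot_limit ?
		limit->p2.p2_slow : limit->p2.p2_fast;

	for (c.n = limit->n.min; c.n <= limit->n.max; c.n++)
	for (c.m1 = limit->m1.min; c.m1 <= limit->m1.max; c.m1++)
	for (c.m2 = limit->m2.min; c.m2 <= limit->m2.max; c.m2++)
	for (c.p1 = limit->p1.min; c.p1 <= limit->p1.max; c.p1++) {
		int dot = sketch_dot_clock_khz(refclk_khz, &c);

		/* reject candidates outside the table's dot clock range */
		if (dot < limit->dot.min || dot > limit->dot.max)
			continue;
		if (abs(dot - target_khz) < best_err) {
			best_err = abs(dot - target_khz);
			*best = c;
		}
	}
	return best_err != INT_MAX;
}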
@@ -1828,7 +1540,7 @@ static void sandybridge_blit_fbc_update(struct drm_device *dev)
 	u32 blt_ecoskpd;
 
 	/* Make sure blitter notifies FBC of writes */
-	__gen6_gt_force_wake_get(dev_priv);
+	gen6_gt_force_wake_get(dev_priv);
 	blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
 	blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
 		GEN6_BLITTER_LOCK_SHIFT;
@@ -1839,7 +1551,7 @@ static void sandybridge_blit_fbc_update(struct drm_device *dev)
 		 GEN6_BLITTER_LOCK_SHIFT);
 	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
 	POSTING_READ(GEN6_BLITTER_ECOSKPD);
-	__gen6_gt_force_wake_put(dev_priv);
+	gen6_gt_force_wake_put(dev_priv);
 }
 
 static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
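The only functional change in the two hunks above is the switch from the double-underscore force-wake helpers to gen6_gt_force_wake_get/put. My reading, not stated by the patch itself, is that the underscored variants are the raw implementations and the plain ones add the locking and reference counting that make them safe to call here. Either way the calling pattern is the same: bracket every GT register access.

/* Pattern sketch only; GT_REG and GT_BIT are placeholders. */
gen6_gt_force_wake_get(dev_priv);	/* keep the GT power well awake */
temp = I915_READ(GT_REG);
I915_WRITE(GT_REG, temp | GT_BIT);
POSTING_READ(GT_REG);			/* flush the write before releasing */
gen6_gt_force_wake_put(dev_priv);	/* balance the get */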
@@ -2019,6 +1731,11 @@ static void intel_update_fbc(struct drm_device *dev)
 	intel_fb = to_intel_framebuffer(fb);
 	obj = intel_fb->obj;
 
+	if (!i915_enable_fbc) {
+		DRM_DEBUG_KMS("fbc disabled per module param (default off)\n");
+		dev_priv->no_fbc_reason = FBC_MODULE_PARAM;
+		goto out_disable;
+	}
 	if (intel_fb->obj->base.size > dev_priv->cfb_size) {
 		DRM_DEBUG_KMS("framebuffer too large, disabling "
 			      "compression\n");
@@ -2339,8 +2056,13 @@ static void intel_fdi_normal_train(struct drm_crtc *crtc)
 	/* enable normal train */
 	reg = FDI_TX_CTL(pipe);
 	temp = I915_READ(reg);
-	temp &= ~FDI_LINK_TRAIN_NONE;
-	temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
+	if (IS_IVYBRIDGE(dev)) {
+		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
+		temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
+	} else {
+		temp &= ~FDI_LINK_TRAIN_NONE;
+		temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
+	}
 	I915_WRITE(reg, temp);
 
 	reg = FDI_RX_CTL(pipe);
@@ -2357,6 +2079,11 @@ static void intel_fdi_normal_train(struct drm_crtc *crtc)
 	/* wait one idle pattern time */
 	POSTING_READ(reg);
 	udelay(1000);
+
+	/* IVB wants error correction enabled */
+	if (IS_IVYBRIDGE(dev))
+		I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
+			   FDI_FE_ERRC_ENABLE);
 }
 
 /* The FDI link training functions for ILK/Ibexpeak. */
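As the hunks around here show, FDI link training is a two-phase handshake: drive training pattern 1 until the receiver reports bit lock, then pattern 2 until it reports symbol lock, then switch to the normal (idle) pattern. Schematically, with helpers invented purely for illustration:

/* Illustrative pseudo-helpers, not driver functions. */
fdi_tx_set_pattern(pipe, TRAIN_PATTERN_1);	/* clock recovery */
fdi_wait_for_bit(FDI_RX_IIR(pipe), FDI_RX_BIT_LOCK);
fdi_tx_set_pattern(pipe, TRAIN_PATTERN_2);	/* channel equalization */
fdi_wait_for_bit(FDI_RX_IIR(pipe), FDI_RX_SYMBOL_LOCK);
fdi_tx_set_pattern(pipe, TRAIN_NONE);		/* normal pixel traffic */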
@@ -2584,7 +2311,116 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
 	DRM_DEBUG_KMS("FDI train done.\n");
 }
 
-static void ironlake_fdi_enable(struct drm_crtc *crtc)
+/* Manual link training for Ivy Bridge A0 parts */
2315static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
2316{
2317 struct drm_device *dev = crtc->dev;
2318 struct drm_i915_private *dev_priv = dev->dev_private;
2319 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2320 int pipe = intel_crtc->pipe;
2321 u32 reg, temp, i;
2322
2323 /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
2324 for train result */
2325 reg = FDI_RX_IMR(pipe);
2326 temp = I915_READ(reg);
2327 temp &= ~FDI_RX_SYMBOL_LOCK;
2328 temp &= ~FDI_RX_BIT_LOCK;
2329 I915_WRITE(reg, temp);
2330
2331 POSTING_READ(reg);
2332 udelay(150);
2333
2334 /* enable CPU FDI TX and PCH FDI RX */
2335 reg = FDI_TX_CTL(pipe);
2336 temp = I915_READ(reg);
2337 temp &= ~(7 << 19);
2338 temp |= (intel_crtc->fdi_lanes - 1) << 19;
2339 temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
2340 temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
2341 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2342 temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
2343 I915_WRITE(reg, temp | FDI_TX_ENABLE);
2344
2345 reg = FDI_RX_CTL(pipe);
2346 temp = I915_READ(reg);
2347 temp &= ~FDI_LINK_TRAIN_AUTO;
2348 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2349 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
2350 I915_WRITE(reg, temp | FDI_RX_ENABLE);
2351
2352 POSTING_READ(reg);
2353 udelay(150);
2354
2355 for (i = 0; i < 4; i++ ) {
2356 reg = FDI_TX_CTL(pipe);
2357 temp = I915_READ(reg);
2358 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2359 temp |= snb_b_fdi_train_param[i];
2360 I915_WRITE(reg, temp);
2361
2362 POSTING_READ(reg);
2363 udelay(500);
2364
2365 reg = FDI_RX_IIR(pipe);
2366 temp = I915_READ(reg);
2367 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2368
2369 if (temp & FDI_RX_BIT_LOCK ||
2370 (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
2371 I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
2372 DRM_DEBUG_KMS("FDI train 1 done.\n");
2373 break;
2374 }
2375 }
2376 if (i == 4)
2377 DRM_ERROR("FDI train 1 fail!\n");
2378
2379 /* Train 2 */
2380 reg = FDI_TX_CTL(pipe);
2381 temp = I915_READ(reg);
2382 temp &= ~FDI_LINK_TRAIN_NONE_IVB;
2383 temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
2384 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2385 temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
2386 I915_WRITE(reg, temp);
2387
2388 reg = FDI_RX_CTL(pipe);
2389 temp = I915_READ(reg);
2390 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2391 temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
2392 I915_WRITE(reg, temp);
2393
2394 POSTING_READ(reg);
2395 udelay(150);
2396
2397 for (i = 0; i < 4; i++ ) {
2398 reg = FDI_TX_CTL(pipe);
2399 temp = I915_READ(reg);
2400 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2401 temp |= snb_b_fdi_train_param[i];
2402 I915_WRITE(reg, temp);
2403
2404 POSTING_READ(reg);
2405 udelay(500);
2406
2407 reg = FDI_RX_IIR(pipe);
2408 temp = I915_READ(reg);
2409 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2410
2411 if (temp & FDI_RX_SYMBOL_LOCK) {
2412 I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
2413 DRM_DEBUG_KMS("FDI train 2 done.\n");
2414 break;
2415 }
2416 }
2417 if (i == 4)
2418 DRM_ERROR("FDI train 2 fail!\n");
2419
2420 DRM_DEBUG_KMS("FDI train done.\n");
2421}
+
+static void ironlake_fdi_pll_enable(struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -2757,10 +2593,7 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
 	u32 reg, temp;
 
 	/* For PCH output, training FDI link */
-	if (IS_GEN6(dev))
-		gen6_fdi_link_train(crtc);
-	else
-		ironlake_fdi_link_train(crtc);
+	dev_priv->display.fdi_link_train(crtc);
 
 	intel_enable_pch_pll(dev_priv, pipe);
 
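Routing training through dev_priv->display.fdi_link_train lets each generation (including the new ivb_manual_fdi_link_train above) plug in its own trainer without growing an if/else chain at every call site; the same patch does the equivalent for crtc_mode_set further down. The assignment site is not in this excerpt; presumably it sits in the display-function setup and looks something like:

/* Assumed wiring at init time; the real site isn't shown in this diff. */
if (IS_GEN5(dev))
	dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
else if (IS_GEN6(dev))
	dev_priv->display.fdi_link_train = gen6_fdi_link_train;
else if (IS_IVYBRIDGE(dev))
	dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;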
@@ -2850,7 +2683,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
 	is_pch_port = intel_crtc_driving_pch(crtc);
 
 	if (is_pch_port)
-		ironlake_fdi_enable(crtc);
+		ironlake_fdi_pll_enable(crtc);
 	else
 		ironlake_fdi_disable(crtc);
 
@@ -2873,7 +2706,11 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
 		ironlake_pch_enable(crtc);
 
 	intel_crtc_load_lut(crtc);
+
+	mutex_lock(&dev->struct_mutex);
 	intel_update_fbc(dev);
+	mutex_unlock(&dev->struct_mutex);
+
 	intel_crtc_update_cursor(crtc, true);
 }
 
@@ -2969,8 +2806,11 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
 
 	intel_crtc->active = false;
 	intel_update_watermarks(dev);
+
+	mutex_lock(&dev->struct_mutex);
 	intel_update_fbc(dev);
 	intel_clear_scanline_wait(dev);
+	mutex_unlock(&dev->struct_mutex);
 }
 
 static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
@@ -3497,11 +3337,11 @@ static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
 		1000;
 	entries_required = DIV_ROUND_UP(entries_required, wm->cacheline_size);
 
-	DRM_DEBUG_KMS("FIFO entries required for mode: %d\n", entries_required);
+	DRM_DEBUG_KMS("FIFO entries required for mode: %ld\n", entries_required);
 
 	wm_size = fifo_size - (entries_required + wm->guard_size);
 
-	DRM_DEBUG_KMS("FIFO watermark level: %d\n", wm_size);
+	DRM_DEBUG_KMS("FIFO watermark level: %ld\n", wm_size);
 
 	/* Don't promote wm_size to unsigned... */
 	if (wm_size > (long)wm->max_wm)
@@ -3823,13 +3663,13 @@ static bool g4x_check_srwm(struct drm_device *dev,
 		      display_wm, cursor_wm);
 
 	if (display_wm > display->max_wm) {
-		DRM_DEBUG_KMS("display watermark is too large(%d), disabling\n",
+		DRM_DEBUG_KMS("display watermark is too large(%d/%ld), disabling\n",
 			      display_wm, display->max_wm);
 		return false;
 	}
 
 	if (cursor_wm > cursor->max_wm) {
-		DRM_DEBUG_KMS("cursor watermark is too large(%d), disabling\n",
+		DRM_DEBUG_KMS("cursor watermark is too large(%d/%ld), disabling\n",
 			      cursor_wm, cursor->max_wm);
 		return false;
 	}
@@ -4516,34 +4356,28 @@ static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
 	return dev_priv->lvds_use_ssc && i915_panel_use_ssc;
 }
 
-static int intel_crtc_mode_set(struct drm_crtc *crtc,
-			       struct drm_display_mode *mode,
-			       struct drm_display_mode *adjusted_mode,
-			       int x, int y,
-			       struct drm_framebuffer *old_fb)
+static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
+			      struct drm_display_mode *mode,
+			      struct drm_display_mode *adjusted_mode,
+			      int x, int y,
+			      struct drm_framebuffer *old_fb)
 {
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	int pipe = intel_crtc->pipe;
 	int plane = intel_crtc->plane;
-	u32 fp_reg, dpll_reg;
 	int refclk, num_connectors = 0;
 	intel_clock_t clock, reduced_clock;
 	u32 dpll, fp = 0, fp2 = 0, dspcntr, pipeconf;
 	bool ok, has_reduced_clock = false, is_sdvo = false, is_dvo = false;
 	bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
-	struct intel_encoder *has_edp_encoder = NULL;
 	struct drm_mode_config *mode_config = &dev->mode_config;
 	struct intel_encoder *encoder;
 	const intel_limit_t *limit;
 	int ret;
-	struct fdi_m_n m_n = {0};
-	u32 reg, temp;
+	u32 temp;
 	u32 lvds_sync = 0;
-	int target_clock;
-
-	drm_vblank_pre_modeset(dev, pipe);
 
 	list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
 		if (encoder->base.crtc != crtc)
@@ -4571,9 +4405,6 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
 		case INTEL_OUTPUT_DISPLAYPORT:
 			is_dp = true;
 			break;
-		case INTEL_OUTPUT_EDP:
-			has_edp_encoder = encoder;
-			break;
 		}
 
 		num_connectors++;
@@ -4585,9 +4416,6 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
 			     refclk / 1000);
 	} else if (!IS_GEN2(dev)) {
 		refclk = 96000;
-		if (HAS_PCH_SPLIT(dev) &&
-		    (!has_edp_encoder || intel_encoder_is_pch_edp(&has_edp_encoder->base)))
-			refclk = 120000; /* 120Mhz refclk */
 	} else {
 		refclk = 48000;
 	}
@@ -4601,7 +4429,6 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
 	ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, &clock);
 	if (!ok) {
 		DRM_ERROR("Couldn't find PLL settings for mode!\n");
-		drm_vblank_post_modeset(dev, pipe);
 		return -EINVAL;
 	}
 
@@ -4645,143 +4472,6 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
 		}
 	}
 
4648 /* FDI link */
4649 if (HAS_PCH_SPLIT(dev)) {
4650 int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
4651 int lane = 0, link_bw, bpp;
4652 /* CPU eDP doesn't require FDI link, so just set DP M/N
4653 according to current link config */
4654 if (has_edp_encoder && !intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
4655 target_clock = mode->clock;
4656 intel_edp_link_config(has_edp_encoder,
4657 &lane, &link_bw);
4658 } else {
4659 /* [e]DP over FDI requires target mode clock
4660 instead of link clock */
4661 if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base))
4662 target_clock = mode->clock;
4663 else
4664 target_clock = adjusted_mode->clock;
4665
4666 /* FDI is a binary signal running at ~2.7GHz, encoding
4667 * each output octet as 10 bits. The actual frequency
4668 * is stored as a divider into a 100MHz clock, and the
4669 * mode pixel clock is stored in units of 1KHz.
4670 * Hence the bw of each lane in terms of the mode signal
4671 * is:
4672 */
4673 link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10;
4674 }
4675
4676 /* determine panel color depth */
4677 temp = I915_READ(PIPECONF(pipe));
4678 temp &= ~PIPE_BPC_MASK;
4679 if (is_lvds) {
4680 /* the BPC will be 6 if it is 18-bit LVDS panel */
4681 if ((I915_READ(PCH_LVDS) & LVDS_A3_POWER_MASK) == LVDS_A3_POWER_UP)
4682 temp |= PIPE_8BPC;
4683 else
4684 temp |= PIPE_6BPC;
4685 } else if (has_edp_encoder) {
4686 switch (dev_priv->edp.bpp/3) {
4687 case 8:
4688 temp |= PIPE_8BPC;
4689 break;
4690 case 10:
4691 temp |= PIPE_10BPC;
4692 break;
4693 case 6:
4694 temp |= PIPE_6BPC;
4695 break;
4696 case 12:
4697 temp |= PIPE_12BPC;
4698 break;
4699 }
4700 } else
4701 temp |= PIPE_8BPC;
4702 I915_WRITE(PIPECONF(pipe), temp);
4703
4704 switch (temp & PIPE_BPC_MASK) {
4705 case PIPE_8BPC:
4706 bpp = 24;
4707 break;
4708 case PIPE_10BPC:
4709 bpp = 30;
4710 break;
4711 case PIPE_6BPC:
4712 bpp = 18;
4713 break;
4714 case PIPE_12BPC:
4715 bpp = 36;
4716 break;
4717 default:
4718 DRM_ERROR("unknown pipe bpc value\n");
4719 bpp = 24;
4720 }
4721
4722 if (!lane) {
4723 /*
4724 * Account for spread spectrum to avoid
4725 * oversubscribing the link. Max center spread
4726 * is 2.5%; use 5% for safety's sake.
4727 */
4728 u32 bps = target_clock * bpp * 21 / 20;
4729 lane = bps / (link_bw * 8) + 1;
4730 }
4731
4732 intel_crtc->fdi_lanes = lane;
4733
4734 if (pixel_multiplier > 1)
4735 link_bw *= pixel_multiplier;
4736 ironlake_compute_m_n(bpp, lane, target_clock, link_bw, &m_n);
4737 }
4738
4739 /* Ironlake: try to setup display ref clock before DPLL
4740 * enabling. This is only under driver's control after
4741 * PCH B stepping, previous chipset stepping should be
4742 * ignoring this setting.
4743 */
4744 if (HAS_PCH_SPLIT(dev)) {
4745 temp = I915_READ(PCH_DREF_CONTROL);
4746 /* Always enable nonspread source */
4747 temp &= ~DREF_NONSPREAD_SOURCE_MASK;
4748 temp |= DREF_NONSPREAD_SOURCE_ENABLE;
4749 temp &= ~DREF_SSC_SOURCE_MASK;
4750 temp |= DREF_SSC_SOURCE_ENABLE;
4751 I915_WRITE(PCH_DREF_CONTROL, temp);
4752
4753 POSTING_READ(PCH_DREF_CONTROL);
4754 udelay(200);
4755
4756 if (has_edp_encoder) {
4757 if (intel_panel_use_ssc(dev_priv)) {
4758 temp |= DREF_SSC1_ENABLE;
4759 I915_WRITE(PCH_DREF_CONTROL, temp);
4760
4761 POSTING_READ(PCH_DREF_CONTROL);
4762 udelay(200);
4763 }
4764 temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
4765
4766 /* Enable CPU source on CPU attached eDP */
4767 if (!intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
4768 if (intel_panel_use_ssc(dev_priv))
4769 temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
4770 else
4771 temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
4772 } else {
4773 /* Enable SSC on PCH eDP if needed */
4774 if (intel_panel_use_ssc(dev_priv)) {
4775 DRM_ERROR("enabling SSC on PCH\n");
4776 temp |= DREF_SUPERSPREAD_SOURCE_ENABLE;
4777 }
4778 }
4779 I915_WRITE(PCH_DREF_CONTROL, temp);
4780 POSTING_READ(PCH_DREF_CONTROL);
4781 udelay(200);
4782 }
4783 }
4784
 	if (IS_PINEVIEW(dev)) {
 		fp = (1 << clock.n) << 16 | clock.m1 << 8 | clock.m2;
 		if (has_reduced_clock)
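The lane-count heuristic in the block removed above reappears verbatim in the new ironlake_crtc_mode_set below, and it is easier to check with concrete numbers. This worked example is mine, not from the patch: a 1080p target of 148,500 kHz at 24 bpp, with an FDI divider of 27.

/*
 * link_bw = 27 * MHz(100)/KHz(1) / 10   = 270000  kHz per lane
 * bps     = 148500 * 24 * 21 / 20       = 3742200 kbit/s (incl. 5% spread margin)
 * lane    = 3742200 / (270000 * 8) + 1  = 2       FDI lanes
 */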
@@ -4794,25 +4484,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
 			reduced_clock.m2;
 	}
 
-	/* Enable autotuning of the PLL clock (if permissible) */
-	if (HAS_PCH_SPLIT(dev)) {
-		int factor = 21;
-
-		if (is_lvds) {
-			if ((intel_panel_use_ssc(dev_priv) &&
-			     dev_priv->lvds_ssc_freq == 100) ||
-			    (I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP)
-				factor = 25;
-		} else if (is_sdvo && is_tv)
-			factor = 20;
-
-		if (clock.m1 < factor * clock.n)
-			fp |= FP_CB_TUNE;
-	}
-
-	dpll = 0;
-	if (!HAS_PCH_SPLIT(dev))
-		dpll = DPLL_VGA_MODE_DIS;
+	dpll = DPLL_VGA_MODE_DIS;
 
 	if (!IS_GEN2(dev)) {
 		if (is_lvds)
@@ -4824,12 +4496,10 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
 		if (pixel_multiplier > 1) {
 			if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
 				dpll |= (pixel_multiplier - 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
-			else if (HAS_PCH_SPLIT(dev))
-				dpll |= (pixel_multiplier - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
 		}
 		dpll |= DPLL_DVO_HIGH_SPEED;
 	}
-	if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base))
+	if (is_dp)
 		dpll |= DPLL_DVO_HIGH_SPEED;
 
 	/* compute bitmask from p1 value */
@@ -4837,9 +4507,6 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
 		dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
 	else {
 		dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
-		/* also FPA1 */
-		if (HAS_PCH_SPLIT(dev))
-			dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
 		if (IS_G4X(dev) && has_reduced_clock)
 			dpll |= (1 << (reduced_clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
 	}
@@ -4857,7 +4524,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
 			dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
 			break;
 		}
-		if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
+		if (INTEL_INFO(dev)->gen >= 4)
 			dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
 	} else {
 		if (is_lvds) {
@@ -4891,12 +4558,10 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
 
 	/* Ironlake's plane is forced to pipe, bit 24 is to
 	   enable color space conversion */
-	if (!HAS_PCH_SPLIT(dev)) {
-		if (pipe == 0)
-			dspcntr &= ~DISPPLANE_SEL_PIPE_MASK;
-		else
-			dspcntr |= DISPPLANE_SEL_PIPE_B;
-	}
+	if (pipe == 0)
+		dspcntr &= ~DISPPLANE_SEL_PIPE_MASK;
+	else
+		dspcntr |= DISPPLANE_SEL_PIPE_B;
 
 	if (pipe == 0 && INTEL_INFO(dev)->gen < 4) {
 		/* Enable pixel doubling when the dot clock is > 90% of the (display)
@@ -4912,27 +4577,506 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
 		pipeconf &= ~PIPECONF_DOUBLE_WIDE;
 	}
 
-	if (!HAS_PCH_SPLIT(dev))
-		dpll |= DPLL_VCO_ENABLE;
+	dpll |= DPLL_VCO_ENABLE;
 
 	DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
 	drm_mode_debug_printmodeline(mode);
 
-	/* assign to Ironlake registers */
-	if (HAS_PCH_SPLIT(dev)) {
-		fp_reg = PCH_FP0(pipe);
-		dpll_reg = PCH_DPLL(pipe);
+	I915_WRITE(FP0(pipe), fp);
+	I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
+
+	POSTING_READ(DPLL(pipe));
4589 udelay(150);
4590
4591 /* The LVDS pin pair needs to be on before the DPLLs are enabled.
4592 * This is an exception to the general rule that mode_set doesn't turn
4593 * things on.
4594 */
4595 if (is_lvds) {
4596 temp = I915_READ(LVDS);
4597 temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
4598 if (pipe == 1) {
4599 temp |= LVDS_PIPEB_SELECT;
4600 } else {
4601 temp &= ~LVDS_PIPEB_SELECT;
4602 }
4603 /* set the corresponsding LVDS_BORDER bit */
4604 temp |= dev_priv->lvds_border_bits;
4605 /* Set the B0-B3 data pairs corresponding to whether we're going to
4606 * set the DPLLs for dual-channel mode or not.
4607 */
4608 if (clock.p2 == 7)
4609 temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
4610 else
4611 temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
4612
4613 /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
4614 * appropriately here, but we need to look more thoroughly into how
4615 * panels behave in the two modes.
4616 */
4617 /* set the dithering flag on LVDS as needed */
4618 if (INTEL_INFO(dev)->gen >= 4) {
4619 if (dev_priv->lvds_dither)
4620 temp |= LVDS_ENABLE_DITHER;
4621 else
4622 temp &= ~LVDS_ENABLE_DITHER;
4623 }
4624 if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
4625 lvds_sync |= LVDS_HSYNC_POLARITY;
4626 if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
4627 lvds_sync |= LVDS_VSYNC_POLARITY;
4628 if ((temp & (LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY))
4629 != lvds_sync) {
4630 char flags[2] = "-+";
4631 DRM_INFO("Changing LVDS panel from "
4632 "(%chsync, %cvsync) to (%chsync, %cvsync)\n",
4633 flags[!(temp & LVDS_HSYNC_POLARITY)],
4634 flags[!(temp & LVDS_VSYNC_POLARITY)],
4635 flags[!(lvds_sync & LVDS_HSYNC_POLARITY)],
4636 flags[!(lvds_sync & LVDS_VSYNC_POLARITY)]);
4637 temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
4638 temp |= lvds_sync;
4639 }
4640 I915_WRITE(LVDS, temp);
4641 }
4642
4643 if (is_dp) {
4644 intel_dp_set_m_n(crtc, mode, adjusted_mode);
4645 }
4646
4647 I915_WRITE(DPLL(pipe), dpll);
4648
4649 /* Wait for the clocks to stabilize. */
4650 POSTING_READ(DPLL(pipe));
4651 udelay(150);
4652
4653 if (INTEL_INFO(dev)->gen >= 4) {
4654 temp = 0;
4655 if (is_sdvo) {
4656 temp = intel_mode_get_pixel_multiplier(adjusted_mode);
4657 if (temp > 1)
4658 temp = (temp - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
4659 else
4660 temp = 0;
4661 }
4662 I915_WRITE(DPLL_MD(pipe), temp);
4663 } else {
4664 /* The pixel multiplier can only be updated once the
4665 * DPLL is enabled and the clocks are stable.
4666 *
4667 * So write it again.
4668 */
4669 I915_WRITE(DPLL(pipe), dpll);
4670 }
4671
4672 intel_crtc->lowfreq_avail = false;
4673 if (is_lvds && has_reduced_clock && i915_powersave) {
4674 I915_WRITE(FP1(pipe), fp2);
4675 intel_crtc->lowfreq_avail = true;
4676 if (HAS_PIPE_CXSR(dev)) {
4677 DRM_DEBUG_KMS("enabling CxSR downclocking\n");
4678 pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
4679 }
4680 } else {
4681 I915_WRITE(FP1(pipe), fp);
4682 if (HAS_PIPE_CXSR(dev)) {
4683 DRM_DEBUG_KMS("disabling CxSR downclocking\n");
4684 pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;
4685 }
4686 }
4687
4688 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
4689 pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
4690 /* the chip adds 2 halflines automatically */
4691 adjusted_mode->crtc_vdisplay -= 1;
4692 adjusted_mode->crtc_vtotal -= 1;
4693 adjusted_mode->crtc_vblank_start -= 1;
4694 adjusted_mode->crtc_vblank_end -= 1;
4695 adjusted_mode->crtc_vsync_end -= 1;
4696 adjusted_mode->crtc_vsync_start -= 1;
4697 } else
4698 pipeconf &= ~PIPECONF_INTERLACE_W_FIELD_INDICATION; /* progressive */
4699
4700 I915_WRITE(HTOTAL(pipe),
4701 (adjusted_mode->crtc_hdisplay - 1) |
4702 ((adjusted_mode->crtc_htotal - 1) << 16));
4703 I915_WRITE(HBLANK(pipe),
4704 (adjusted_mode->crtc_hblank_start - 1) |
4705 ((adjusted_mode->crtc_hblank_end - 1) << 16));
4706 I915_WRITE(HSYNC(pipe),
4707 (adjusted_mode->crtc_hsync_start - 1) |
4708 ((adjusted_mode->crtc_hsync_end - 1) << 16));
4709
4710 I915_WRITE(VTOTAL(pipe),
4711 (adjusted_mode->crtc_vdisplay - 1) |
4712 ((adjusted_mode->crtc_vtotal - 1) << 16));
4713 I915_WRITE(VBLANK(pipe),
4714 (adjusted_mode->crtc_vblank_start - 1) |
4715 ((adjusted_mode->crtc_vblank_end - 1) << 16));
4716 I915_WRITE(VSYNC(pipe),
4717 (adjusted_mode->crtc_vsync_start - 1) |
4718 ((adjusted_mode->crtc_vsync_end - 1) << 16));
4719
4720 /* pipesrc and dspsize control the size that is scaled from,
4721 * which should always be the user's requested size.
4722 */
4723 I915_WRITE(DSPSIZE(plane),
4724 ((mode->vdisplay - 1) << 16) |
4725 (mode->hdisplay - 1));
4726 I915_WRITE(DSPPOS(plane), 0);
4727 I915_WRITE(PIPESRC(pipe),
4728 ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
4729
4730 I915_WRITE(PIPECONF(pipe), pipeconf);
4731 POSTING_READ(PIPECONF(pipe));
4732 intel_enable_pipe(dev_priv, pipe, false);
4733
4734 intel_wait_for_vblank(dev, pipe);
4735
4736 I915_WRITE(DSPCNTR(plane), dspcntr);
4737 POSTING_READ(DSPCNTR(plane));
4738
4739 ret = intel_pipe_set_base(crtc, x, y, old_fb);
4740
4741 intel_update_watermarks(dev);
4742
4743 return ret;
4744}
+
+static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
4747 struct drm_display_mode *mode,
4748 struct drm_display_mode *adjusted_mode,
4749 int x, int y,
4750 struct drm_framebuffer *old_fb)
4751{
4752 struct drm_device *dev = crtc->dev;
4753 struct drm_i915_private *dev_priv = dev->dev_private;
4754 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4755 int pipe = intel_crtc->pipe;
4756 int plane = intel_crtc->plane;
4757 int refclk, num_connectors = 0;
4758 intel_clock_t clock, reduced_clock;
4759 u32 dpll, fp = 0, fp2 = 0, dspcntr, pipeconf;
4760 bool ok, has_reduced_clock = false, is_sdvo = false;
4761 bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
4762 struct intel_encoder *has_edp_encoder = NULL;
4763 struct drm_mode_config *mode_config = &dev->mode_config;
4764 struct intel_encoder *encoder;
4765 const intel_limit_t *limit;
4766 int ret;
4767 struct fdi_m_n m_n = {0};
4768 u32 temp;
4769 u32 lvds_sync = 0;
4770 int target_clock, pixel_multiplier, lane, link_bw, bpp, factor;
4771
4772 list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
4773 if (encoder->base.crtc != crtc)
4774 continue;
4775
4776 switch (encoder->type) {
4777 case INTEL_OUTPUT_LVDS:
4778 is_lvds = true;
4779 break;
4780 case INTEL_OUTPUT_SDVO:
4781 case INTEL_OUTPUT_HDMI:
4782 is_sdvo = true;
4783 if (encoder->needs_tv_clock)
4784 is_tv = true;
4785 break;
4786 case INTEL_OUTPUT_TVOUT:
4787 is_tv = true;
4788 break;
4789 case INTEL_OUTPUT_ANALOG:
4790 is_crt = true;
4791 break;
4792 case INTEL_OUTPUT_DISPLAYPORT:
4793 is_dp = true;
4794 break;
4795 case INTEL_OUTPUT_EDP:
4796 has_edp_encoder = encoder;
4797 break;
4798 }
4799
4800 num_connectors++;
4801 }
4802
4803 if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
4804 refclk = dev_priv->lvds_ssc_freq * 1000;
4805 DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
4806 refclk / 1000);
4807 } else {
4808 refclk = 96000;
4809 if (!has_edp_encoder ||
4810 intel_encoder_is_pch_edp(&has_edp_encoder->base))
4811 refclk = 120000; /* 120Mhz refclk */
4812 }
4813
4814 /*
4815 * Returns a set of divisors for the desired target clock with the given
4816 * refclk, or FALSE. The returned values represent the clock equation:
4817 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
4818 */
4819 limit = intel_limit(crtc, refclk);
4820 ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, &clock);
4821 if (!ok) {
4822 DRM_ERROR("Couldn't find PLL settings for mode!\n");
4823 return -EINVAL;
4824 }
4825
4826 /* Ensure that the cursor is valid for the new mode before changing... */
4827 intel_crtc_update_cursor(crtc, true);
4828
4829 if (is_lvds && dev_priv->lvds_downclock_avail) {
4830 has_reduced_clock = limit->find_pll(limit, crtc,
4831 dev_priv->lvds_downclock,
4832 refclk,
4833 &reduced_clock);
4834 if (has_reduced_clock && (clock.p != reduced_clock.p)) {
4835 /*
4836 * If the different P is found, it means that we can't
4837 * switch the display clock by using the FP0/FP1.
4838 * In such case we will disable the LVDS downclock
4839 * feature.
4840 */
4841 DRM_DEBUG_KMS("Different P is found for "
4842 "LVDS clock/downclock\n");
4843 has_reduced_clock = 0;
4844 }
4845 }
4846 /* SDVO TV has fixed PLL values depend on its clock range,
4847 this mirrors vbios setting. */
4848 if (is_sdvo && is_tv) {
4849 if (adjusted_mode->clock >= 100000
4850 && adjusted_mode->clock < 140500) {
4851 clock.p1 = 2;
4852 clock.p2 = 10;
4853 clock.n = 3;
4854 clock.m1 = 16;
4855 clock.m2 = 8;
4856 } else if (adjusted_mode->clock >= 140500
4857 && adjusted_mode->clock <= 200000) {
4858 clock.p1 = 1;
4859 clock.p2 = 10;
4860 clock.n = 6;
4861 clock.m1 = 12;
4862 clock.m2 = 8;
4863 }
4864 }
4865
4866 /* FDI link */
4867 pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
4868 lane = 0;
4869 /* CPU eDP doesn't require FDI link, so just set DP M/N
4870 according to current link config */
4871 if (has_edp_encoder &&
4872 !intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
4873 target_clock = mode->clock;
4874 intel_edp_link_config(has_edp_encoder,
4875 &lane, &link_bw);
 	} else {
-		fp_reg = FP0(pipe);
-		dpll_reg = DPLL(pipe);
+		/* [e]DP over FDI requires target mode clock
+		   instead of link clock */
4879 if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base))
4880 target_clock = mode->clock;
4881 else
4882 target_clock = adjusted_mode->clock;
4883
4884 /* FDI is a binary signal running at ~2.7GHz, encoding
4885 * each output octet as 10 bits. The actual frequency
4886 * is stored as a divider into a 100MHz clock, and the
4887 * mode pixel clock is stored in units of 1KHz.
4888 * Hence the bw of each lane in terms of the mode signal
4889 * is:
4890 */
4891 link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10;
4892 }
4893
4894 /* determine panel color depth */
4895 temp = I915_READ(PIPECONF(pipe));
4896 temp &= ~PIPE_BPC_MASK;
4897 if (is_lvds) {
4898 /* the BPC will be 6 if it is 18-bit LVDS panel */
4899 if ((I915_READ(PCH_LVDS) & LVDS_A3_POWER_MASK) == LVDS_A3_POWER_UP)
4900 temp |= PIPE_8BPC;
4901 else
4902 temp |= PIPE_6BPC;
4903 } else if (has_edp_encoder) {
4904 switch (dev_priv->edp.bpp/3) {
4905 case 8:
4906 temp |= PIPE_8BPC;
4907 break;
4908 case 10:
4909 temp |= PIPE_10BPC;
4910 break;
4911 case 6:
4912 temp |= PIPE_6BPC;
4913 break;
4914 case 12:
4915 temp |= PIPE_12BPC;
4916 break;
4917 }
4918 } else
4919 temp |= PIPE_8BPC;
4920 I915_WRITE(PIPECONF(pipe), temp);
4921
4922 switch (temp & PIPE_BPC_MASK) {
4923 case PIPE_8BPC:
4924 bpp = 24;
4925 break;
4926 case PIPE_10BPC:
4927 bpp = 30;
4928 break;
4929 case PIPE_6BPC:
4930 bpp = 18;
4931 break;
4932 case PIPE_12BPC:
4933 bpp = 36;
4934 break;
4935 default:
4936 DRM_ERROR("unknown pipe bpc value\n");
4937 bpp = 24;
4938 }
4939
4940 if (!lane) {
4941 /*
4942 * Account for spread spectrum to avoid
4943 * oversubscribing the link. Max center spread
4944 * is 2.5%; use 5% for safety's sake.
4945 */
4946 u32 bps = target_clock * bpp * 21 / 20;
4947 lane = bps / (link_bw * 8) + 1;
 	}
 
4950 intel_crtc->fdi_lanes = lane;
4951
4952 if (pixel_multiplier > 1)
4953 link_bw *= pixel_multiplier;
4954 ironlake_compute_m_n(bpp, lane, target_clock, link_bw, &m_n);
4955
4956 /* Ironlake: try to setup display ref clock before DPLL
4957 * enabling. This is only under driver's control after
4958 * PCH B stepping, previous chipset stepping should be
4959 * ignoring this setting.
4960 */
4961 temp = I915_READ(PCH_DREF_CONTROL);
4962 /* Always enable nonspread source */
4963 temp &= ~DREF_NONSPREAD_SOURCE_MASK;
4964 temp |= DREF_NONSPREAD_SOURCE_ENABLE;
4965 temp &= ~DREF_SSC_SOURCE_MASK;
4966 temp |= DREF_SSC_SOURCE_ENABLE;
4967 I915_WRITE(PCH_DREF_CONTROL, temp);
4968
4969 POSTING_READ(PCH_DREF_CONTROL);
4970 udelay(200);
4971
4972 if (has_edp_encoder) {
4973 if (intel_panel_use_ssc(dev_priv)) {
4974 temp |= DREF_SSC1_ENABLE;
4975 I915_WRITE(PCH_DREF_CONTROL, temp);
4976
4977 POSTING_READ(PCH_DREF_CONTROL);
4978 udelay(200);
4979 }
4980 temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
4981
4982 /* Enable CPU source on CPU attached eDP */
4983 if (!intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
4984 if (intel_panel_use_ssc(dev_priv))
4985 temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
4986 else
4987 temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
4988 } else {
4989 /* Enable SSC on PCH eDP if needed */
4990 if (intel_panel_use_ssc(dev_priv)) {
4991 DRM_ERROR("enabling SSC on PCH\n");
4992 temp |= DREF_SUPERSPREAD_SOURCE_ENABLE;
4993 }
4994 }
4995 I915_WRITE(PCH_DREF_CONTROL, temp);
4996 POSTING_READ(PCH_DREF_CONTROL);
4997 udelay(200);
4998 }
4999
5000 fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
5001 if (has_reduced_clock)
5002 fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 |
5003 reduced_clock.m2;
5004
5005 /* Enable autotuning of the PLL clock (if permissible) */
5006 factor = 21;
5007 if (is_lvds) {
5008 if ((intel_panel_use_ssc(dev_priv) &&
5009 dev_priv->lvds_ssc_freq == 100) ||
5010 (I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP)
5011 factor = 25;
5012 } else if (is_sdvo && is_tv)
5013 factor = 20;
5014
5015 if (clock.m1 < factor * clock.n)
5016 fp |= FP_CB_TUNE;
5017
5018 dpll = 0;
5019
5020 if (is_lvds)
5021 dpll |= DPLLB_MODE_LVDS;
5022 else
5023 dpll |= DPLLB_MODE_DAC_SERIAL;
5024 if (is_sdvo) {
5025 int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
5026 if (pixel_multiplier > 1) {
5027 dpll |= (pixel_multiplier - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
5028 }
5029 dpll |= DPLL_DVO_HIGH_SPEED;
5030 }
5031 if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base))
5032 dpll |= DPLL_DVO_HIGH_SPEED;
5033
5034 /* compute bitmask from p1 value */
5035 dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
5036 /* also FPA1 */
5037 dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
5038
5039 switch (clock.p2) {
5040 case 5:
5041 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
5042 break;
5043 case 7:
5044 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
5045 break;
5046 case 10:
5047 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
5048 break;
5049 case 14:
5050 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
5051 break;
5052 }
5053
5054 if (is_sdvo && is_tv)
5055 dpll |= PLL_REF_INPUT_TVCLKINBC;
5056 else if (is_tv)
5057 /* XXX: just matching BIOS for now */
5058 /* dpll |= PLL_REF_INPUT_TVCLKINBC; */
5059 dpll |= 3;
5060 else if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2)
5061 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
5062 else
5063 dpll |= PLL_REF_INPUT_DREFCLK;
5064
5065 /* setup pipeconf */
5066 pipeconf = I915_READ(PIPECONF(pipe));
5067
5068 /* Set up the display plane register */
5069 dspcntr = DISPPLANE_GAMMA_ENABLE;
5070
5071 DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
5072 drm_mode_debug_printmodeline(mode);
5073
 	/* PCH eDP needs FDI, but CPU eDP does not */
 	if (!has_edp_encoder || intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
-		I915_WRITE(fp_reg, fp);
-		I915_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE);
+		I915_WRITE(PCH_FP0(pipe), fp);
+		I915_WRITE(PCH_DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
 
-		POSTING_READ(dpll_reg);
+		POSTING_READ(PCH_DPLL(pipe));
 		udelay(150);
 	}
 
@@ -4964,11 +5108,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
 	 * things on.
 	 */
 	if (is_lvds) {
-		reg = LVDS;
-		if (HAS_PCH_SPLIT(dev))
-			reg = PCH_LVDS;
-
-		temp = I915_READ(reg);
+		temp = I915_READ(PCH_LVDS);
 		temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
 		if (pipe == 1) {
 			if (HAS_PCH_CPT(dev))
@@ -4995,13 +5135,6 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
 	 * appropriately here, but we need to look more thoroughly into how
 	 * panels behave in the two modes.
 	 */
-	/* set the dithering flag on non-PCH LVDS as needed */
-	if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
-		if (dev_priv->lvds_dither)
-			temp |= LVDS_ENABLE_DITHER;
-		else
-			temp &= ~LVDS_ENABLE_DITHER;
-	}
 	if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
 		lvds_sync |= LVDS_HSYNC_POLARITY;
 	if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
@@ -5018,22 +5151,20 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
 		temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
 		temp |= lvds_sync;
 	}
-	I915_WRITE(reg, temp);
+	I915_WRITE(PCH_LVDS, temp);
 	}
 
 	/* set the dithering flag and clear for anything other than a panel. */
-	if (HAS_PCH_SPLIT(dev)) {
-		pipeconf &= ~PIPECONF_DITHER_EN;
-		pipeconf &= ~PIPECONF_DITHER_TYPE_MASK;
-		if (dev_priv->lvds_dither && (is_lvds || has_edp_encoder)) {
-			pipeconf |= PIPECONF_DITHER_EN;
-			pipeconf |= PIPECONF_DITHER_TYPE_ST1;
-		}
-	}
+	pipeconf &= ~PIPECONF_DITHER_EN;
+	pipeconf &= ~PIPECONF_DITHER_TYPE_MASK;
+	if (dev_priv->lvds_dither && (is_lvds || has_edp_encoder)) {
+		pipeconf |= PIPECONF_DITHER_EN;
+		pipeconf |= PIPECONF_DITHER_TYPE_ST1;
+	}
 
 	if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
 		intel_dp_set_m_n(crtc, mode, adjusted_mode);
-	} else if (HAS_PCH_SPLIT(dev)) {
+	} else {
 		/* For non-DP output, clear any trans DP clock recovery setting.*/
 		I915_WRITE(TRANSDATA_M1(pipe), 0);
 		I915_WRITE(TRANSDATA_N1(pipe), 0);
@@ -5041,43 +5172,32 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
 		I915_WRITE(TRANSDPLINK_N1(pipe), 0);
 	}
 
-	if (!has_edp_encoder || intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
-		I915_WRITE(dpll_reg, dpll);
+	if (!has_edp_encoder ||
+	    intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
+		I915_WRITE(PCH_DPLL(pipe), dpll);
 
 		/* Wait for the clocks to stabilize. */
-		POSTING_READ(dpll_reg);
+		POSTING_READ(PCH_DPLL(pipe));
 		udelay(150);
 
-		if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
-			temp = 0;
-			if (is_sdvo) {
-				temp = intel_mode_get_pixel_multiplier(adjusted_mode);
-				if (temp > 1)
-					temp = (temp - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
-				else
-					temp = 0;
-			}
-			I915_WRITE(DPLL_MD(pipe), temp);
-		} else {
-			/* The pixel multiplier can only be updated once the
-			 * DPLL is enabled and the clocks are stable.
-			 *
-			 * So write it again.
-			 */
-			I915_WRITE(dpll_reg, dpll);
-		}
+		/* The pixel multiplier can only be updated once the
+		 * DPLL is enabled and the clocks are stable.
+		 *
+		 * So write it again.
+		 */
+		I915_WRITE(PCH_DPLL(pipe), dpll);
 	}
 
 	intel_crtc->lowfreq_avail = false;
 	if (is_lvds && has_reduced_clock && i915_powersave) {
-		I915_WRITE(fp_reg + 4, fp2);
+		I915_WRITE(PCH_FP1(pipe), fp2);
 		intel_crtc->lowfreq_avail = true;
 		if (HAS_PIPE_CXSR(dev)) {
 			DRM_DEBUG_KMS("enabling CxSR downclocking\n");
 			pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
 		}
 	} else {
-		I915_WRITE(fp_reg + 4, fp);
+		I915_WRITE(PCH_FP1(pipe), fp);
 		if (HAS_PIPE_CXSR(dev)) {
 			DRM_DEBUG_KMS("disabling CxSR downclocking\n");
 			pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;
@@ -5116,33 +5236,24 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
 		   (adjusted_mode->crtc_vsync_start - 1) |
 		   ((adjusted_mode->crtc_vsync_end - 1) << 16));
 
-	/* pipesrc and dspsize control the size that is scaled from,
-	 * which should always be the user's requested size.
+	/* pipesrc controls the size that is scaled from, which should
+	 * always be the user's requested size.
 	 */
-	if (!HAS_PCH_SPLIT(dev)) {
-		I915_WRITE(DSPSIZE(plane),
-			   ((mode->vdisplay - 1) << 16) |
-			   (mode->hdisplay - 1));
-		I915_WRITE(DSPPOS(plane), 0);
-	}
 	I915_WRITE(PIPESRC(pipe),
 		   ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
 
-	if (HAS_PCH_SPLIT(dev)) {
-		I915_WRITE(PIPE_DATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m);
-		I915_WRITE(PIPE_DATA_N1(pipe), m_n.gmch_n);
-		I915_WRITE(PIPE_LINK_M1(pipe), m_n.link_m);
-		I915_WRITE(PIPE_LINK_N1(pipe), m_n.link_n);
+	I915_WRITE(PIPE_DATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m);
+	I915_WRITE(PIPE_DATA_N1(pipe), m_n.gmch_n);
+	I915_WRITE(PIPE_LINK_M1(pipe), m_n.link_m);
+	I915_WRITE(PIPE_LINK_N1(pipe), m_n.link_n);
 
-		if (has_edp_encoder && !intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
-			ironlake_set_pll_edp(crtc, adjusted_mode->clock);
-		}
+	if (has_edp_encoder &&
+	    !intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
+		ironlake_set_pll_edp(crtc, adjusted_mode->clock);
 	}
 
 	I915_WRITE(PIPECONF(pipe), pipeconf);
 	POSTING_READ(PIPECONF(pipe));
-	if (!HAS_PCH_SPLIT(dev))
-		intel_enable_pipe(dev_priv, pipe, false);
 
 	intel_wait_for_vblank(dev, pipe);
 
@@ -5161,6 +5272,26 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
 
 	intel_update_watermarks(dev);
 
+	return ret;
+}
+
+static int intel_crtc_mode_set(struct drm_crtc *crtc,
+			       struct drm_display_mode *mode,
+			       struct drm_display_mode *adjusted_mode,
+			       int x, int y,
+			       struct drm_framebuffer *old_fb)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	int pipe = intel_crtc->pipe;
+	int ret;
+
+	drm_vblank_pre_modeset(dev, pipe);
+
+	ret = dev_priv->display.crtc_mode_set(crtc, mode, adjusted_mode,
+					      x, y, old_fb);
+
 	drm_vblank_post_modeset(dev, pipe);
 
 	return ret;
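
The hunk above is the heart of this split: intel_crtc_mode_set() becomes a thin wrapper that brackets a per-chipset hook between drm_vblank_pre_modeset() and drm_vblank_post_modeset(), reaching the chipset-specific body through dev_priv->display.crtc_mode_set. What follows is a minimal standalone sketch of that dispatch pattern only; the struct and function names are illustrative stubs, not the driver's real types.

    /* Sketch of the per-chipset dispatch introduced above: an ops struct
     * holds a crtc_mode_set function pointer, filled in once at init time
     * and then called from a generic wrapper that does the common work.
     */
    #include <stdio.h>

    struct fake_crtc { int pipe; };

    struct display_ops {
        int (*crtc_mode_set)(struct fake_crtc *crtc, int clock);
    };

    static int i9xx_mode_set(struct fake_crtc *crtc, int clock)
    {
        printf("i9xx path: pipe %d at %d kHz\n", crtc->pipe, clock);
        return 0;
    }

    static int ironlake_mode_set(struct fake_crtc *crtc, int clock)
    {
        printf("ironlake path: pipe %d at %d kHz\n", crtc->pipe, clock);
        return 0;
    }

    /* Generic wrapper: bracket the chipset-specific call with common
     * bookkeeping, as intel_crtc_mode_set brackets its hook with the
     * drm_vblank_{pre,post}_modeset pair. */
    static int generic_mode_set(struct display_ops *ops,
                                struct fake_crtc *crtc, int clock)
    {
        /* ... pre-modeset bookkeeping would go here ... */
        int ret = ops->crtc_mode_set(crtc, clock);
        /* ... post-modeset bookkeeping would go here ... */
        return ret;
    }

    int main(void)
    {
        /* init code would pick one hook or the other per chipset */
        struct display_ops ops = { .crtc_mode_set = ironlake_mode_set };
        struct fake_crtc crtc = { .pipe = 0 };

        (void)i9xx_mode_set;
        return generic_mode_set(&ops, &crtc, 148500);
    }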
@@ -5483,43 +5614,140 @@ static struct drm_display_mode load_detect_mode = {
 		 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
 };
 
-struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
-					    struct drm_connector *connector,
-					    struct drm_display_mode *mode,
-					    int *dpms_mode)
+static struct drm_framebuffer *
+intel_framebuffer_create(struct drm_device *dev,
+			 struct drm_mode_fb_cmd *mode_cmd,
+			 struct drm_i915_gem_object *obj)
+{
+	struct intel_framebuffer *intel_fb;
+	int ret;
+
+	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
+	if (!intel_fb) {
+		drm_gem_object_unreference_unlocked(&obj->base);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj);
+	if (ret) {
+		drm_gem_object_unreference_unlocked(&obj->base);
+		kfree(intel_fb);
+		return ERR_PTR(ret);
+	}
+
+	return &intel_fb->base;
+}
+
+static u32
+intel_framebuffer_pitch_for_width(int width, int bpp)
+{
+	u32 pitch = DIV_ROUND_UP(width * bpp, 8);
+	return ALIGN(pitch, 64);
+}
+
+static u32
+intel_framebuffer_size_for_mode(struct drm_display_mode *mode, int bpp)
+{
+	u32 pitch = intel_framebuffer_pitch_for_width(mode->hdisplay, bpp);
+	return ALIGN(pitch * mode->vdisplay, PAGE_SIZE);
+}
+
+static struct drm_framebuffer *
+intel_framebuffer_create_for_mode(struct drm_device *dev,
+				  struct drm_display_mode *mode,
+				  int depth, int bpp)
+{
+	struct drm_i915_gem_object *obj;
+	struct drm_mode_fb_cmd mode_cmd;
+
+	obj = i915_gem_alloc_object(dev,
+				    intel_framebuffer_size_for_mode(mode, bpp));
+	if (obj == NULL)
+		return ERR_PTR(-ENOMEM);
+
+	mode_cmd.width = mode->hdisplay;
+	mode_cmd.height = mode->vdisplay;
+	mode_cmd.depth = depth;
+	mode_cmd.bpp = bpp;
+	mode_cmd.pitch = intel_framebuffer_pitch_for_width(mode_cmd.width, bpp);
+
+	return intel_framebuffer_create(dev, &mode_cmd, obj);
+}
+
+static struct drm_framebuffer *
+mode_fits_in_fbdev(struct drm_device *dev,
+		   struct drm_display_mode *mode)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_gem_object *obj;
+	struct drm_framebuffer *fb;
+
+	if (dev_priv->fbdev == NULL)
+		return NULL;
+
+	obj = dev_priv->fbdev->ifb.obj;
+	if (obj == NULL)
+		return NULL;
+
+	fb = &dev_priv->fbdev->ifb.base;
+	if (fb->pitch < intel_framebuffer_pitch_for_width(mode->hdisplay,
+							  fb->bits_per_pixel))
+		return NULL;
+
+	if (obj->base.size < mode->vdisplay * fb->pitch)
+		return NULL;
+
+	return fb;
+}
+
+bool intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
+				struct drm_connector *connector,
+				struct drm_display_mode *mode,
+				struct intel_load_detect_pipe *old)
 {
 	struct intel_crtc *intel_crtc;
 	struct drm_crtc *possible_crtc;
-	struct drm_crtc *supported_crtc =NULL;
 	struct drm_encoder *encoder = &intel_encoder->base;
 	struct drm_crtc *crtc = NULL;
 	struct drm_device *dev = encoder->dev;
-	struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
-	struct drm_crtc_helper_funcs *crtc_funcs;
+	struct drm_framebuffer *old_fb;
 	int i = -1;
 
+	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
+		      connector->base.id, drm_get_connector_name(connector),
+		      encoder->base.id, drm_get_encoder_name(encoder));
+
 	/*
 	 * Algorithm gets a little messy:
+	 *
 	 * - if the connector already has an assigned crtc, use it (but make
 	 *   sure it's on first)
+	 *
 	 * - try to find the first unused crtc that can drive this connector,
 	 *   and use that if we find one
-	 * - if there are no unused crtcs available, try to use the first
-	 *   one we found that supports the connector
 	 */
 
 	/* See if we already have a CRTC for this connector */
 	if (encoder->crtc) {
 		crtc = encoder->crtc;
-		/* Make sure the crtc and connector are running */
+
 		intel_crtc = to_intel_crtc(crtc);
-		*dpms_mode = intel_crtc->dpms_mode;
+		old->dpms_mode = intel_crtc->dpms_mode;
+		old->load_detect_temp = false;
+
+		/* Make sure the crtc and connector are running */
 		if (intel_crtc->dpms_mode != DRM_MODE_DPMS_ON) {
+			struct drm_encoder_helper_funcs *encoder_funcs;
+			struct drm_crtc_helper_funcs *crtc_funcs;
+
 			crtc_funcs = crtc->helper_private;
 			crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
+
+			encoder_funcs = encoder->helper_private;
 			encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
 		}
-		return crtc;
+
+		return true;
 	}
 
 	/* Find an unused one (if possible) */
@@ -5531,46 +5759,66 @@ struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
 			crtc = possible_crtc;
 			break;
 		}
-		if (!supported_crtc)
-			supported_crtc = possible_crtc;
 	}
 
 	/*
 	 * If we didn't find an unused CRTC, don't use any.
 	 */
 	if (!crtc) {
-		return NULL;
+		DRM_DEBUG_KMS("no pipe available for load-detect\n");
+		return false;
 	}
 
 	encoder->crtc = crtc;
 	connector->encoder = encoder;
-	intel_encoder->load_detect_temp = true;
 
 	intel_crtc = to_intel_crtc(crtc);
-	*dpms_mode = intel_crtc->dpms_mode;
+	old->dpms_mode = intel_crtc->dpms_mode;
+	old->load_detect_temp = true;
+	old->release_fb = NULL;
 
-	if (!crtc->enabled) {
-		if (!mode)
-			mode = &load_detect_mode;
-		drm_crtc_helper_set_mode(crtc, mode, 0, 0, crtc->fb);
-	} else {
-		if (intel_crtc->dpms_mode != DRM_MODE_DPMS_ON) {
-			crtc_funcs = crtc->helper_private;
-			crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
-		}
+	if (!mode)
+		mode = &load_detect_mode;
 
-		/* Add this connector to the crtc */
-		encoder_funcs->mode_set(encoder, &crtc->mode, &crtc->mode);
-		encoder_funcs->commit(encoder);
+	old_fb = crtc->fb;
+
+	/* We need a framebuffer large enough to accommodate all accesses
+	 * that the plane may generate whilst we perform load detection.
+	 * We can not rely on the fbcon either being present (we get called
+	 * during its initialisation to detect all boot displays, or it may
+	 * not even exist) or that it is large enough to satisfy the
+	 * requested mode.
+	 */
+	crtc->fb = mode_fits_in_fbdev(dev, mode);
+	if (crtc->fb == NULL) {
+		DRM_DEBUG_KMS("creating tmp fb for load-detection\n");
+		crtc->fb = intel_framebuffer_create_for_mode(dev, mode, 24, 32);
+		old->release_fb = crtc->fb;
+	} else
+		DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
+	if (IS_ERR(crtc->fb)) {
+		DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n");
+		crtc->fb = old_fb;
+		return false;
+	}
+
+	if (!drm_crtc_helper_set_mode(crtc, mode, 0, 0, old_fb)) {
+		DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
+		if (old->release_fb)
+			old->release_fb->funcs->destroy(old->release_fb);
+		crtc->fb = old_fb;
+		return false;
 	}
+
 	/* let the connector get through one full cycle before testing */
 	intel_wait_for_vblank(dev, intel_crtc->pipe);
 
-	return crtc;
+	return true;
 }
 
 void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder,
-				    struct drm_connector *connector, int dpms_mode)
+				    struct drm_connector *connector,
+				    struct intel_load_detect_pipe *old)
 {
 	struct drm_encoder *encoder = &intel_encoder->base;
 	struct drm_device *dev = encoder->dev;
@@ -5578,19 +5826,24 @@ void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder,
 	struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
 	struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
 
-	if (intel_encoder->load_detect_temp) {
-		encoder->crtc = NULL;
+	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
+		      connector->base.id, drm_get_connector_name(connector),
+		      encoder->base.id, drm_get_encoder_name(encoder));
+
+	if (old->load_detect_temp) {
 		connector->encoder = NULL;
-		intel_encoder->load_detect_temp = false;
-		crtc->enabled = drm_helper_crtc_in_use(crtc);
 		drm_helper_disable_unused_functions(dev);
+
+		if (old->release_fb)
+			old->release_fb->funcs->destroy(old->release_fb);
+
+		return;
 	}
 
 	/* Switch crtc and encoder back off if necessary */
-	if (crtc->enabled && dpms_mode != DRM_MODE_DPMS_ON) {
-		if (encoder->crtc == crtc)
-			encoder_funcs->dpms(encoder, dpms_mode);
-		crtc_funcs->dpms(crtc, dpms_mode);
+	if (old->dpms_mode != DRM_MODE_DPMS_ON) {
+		encoder_funcs->dpms(encoder, old->dpms_mode);
+		crtc_funcs->dpms(crtc, old->dpms_mode);
 	}
 }
 
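Taken together, the load-detect hunks above change the API contract: the temporary state (saved DPMS mode, the borrowed-pipe flag, and any scratch framebuffer) now lives in a caller-provided struct intel_load_detect_pipe instead of in the encoder, so every successful "get" must be paired with a "release" that receives the same struct. A simplified sketch of that caller-owned-state pattern follows, with stand-in types rather than the driver's real ones.

    /* Sketch of the get/release pairing used above: the "get" call fills
     * a small state struct that the caller must hand back to "release"
     * so it can undo exactly what was done.
     */
    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct load_detect_state {
        void *release_fb;     /* scratch fb to destroy, if one was made */
        bool temp;            /* did we borrow an otherwise unused crtc? */
        int old_dpms_mode;    /* mode to restore on release */
    };

    static bool get_pipe(struct load_detect_state *st)
    {
        st->old_dpms_mode = 3;          /* pretend it was DPMS off */
        st->temp = true;
        st->release_fb = malloc(16);    /* pretend scratch framebuffer */
        return st->release_fb != NULL;
    }

    static void release_pipe(struct load_detect_state *st)
    {
        if (st->temp) {
            free(st->release_fb);       /* destroy only what we created */
            return;
        }
        if (st->old_dpms_mode != 0)
            printf("restoring dpms mode %d\n", st->old_dpms_mode);
    }

    int main(void)
    {
        struct load_detect_state st;

        if (get_pipe(&st)) {
            /* ... probe the connector here ... */
            release_pipe(&st);
        }
        return 0;
    }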
@@ -6185,6 +6438,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 		break;
 
 	case 6:
+	case 7:
 		OUT_RING(MI_DISPLAY_FLIP |
 			 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
 		OUT_RING(fb->pitch | obj->tiling_mode);
@@ -6504,6 +6758,9 @@ static void intel_setup_outputs(struct drm_device *dev)
 	}
 
 	intel_panel_setup_backlight(dev);
+
+	/* disable all the possible outputs/crtcs before entering KMS mode */
+	drm_helper_disable_unused_functions(dev);
 }
 
 static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
@@ -6571,27 +6828,12 @@ intel_user_framebuffer_create(struct drm_device *dev,
 			      struct drm_mode_fb_cmd *mode_cmd)
 {
 	struct drm_i915_gem_object *obj;
-	struct intel_framebuffer *intel_fb;
-	int ret;
 
 	obj = to_intel_bo(drm_gem_object_lookup(dev, filp, mode_cmd->handle));
 	if (&obj->base == NULL)
 		return ERR_PTR(-ENOENT);
 
-	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
-	if (!intel_fb) {
-		drm_gem_object_unreference_unlocked(&obj->base);
-		return ERR_PTR(-ENOMEM);
-	}
-
-	ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj);
-	if (ret) {
-		drm_gem_object_unreference_unlocked(&obj->base);
-		kfree(intel_fb);
-		return ERR_PTR(ret);
-	}
-
-	return &intel_fb->base;
+	return intel_framebuffer_create(dev, mode_cmd, obj);
 }
 
 static const struct drm_mode_config_funcs intel_mode_funcs = {
@@ -6605,13 +6847,14 @@ intel_alloc_context_page(struct drm_device *dev)
 	struct drm_i915_gem_object *ctx;
 	int ret;
 
+	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
 	ctx = i915_gem_alloc_object(dev, 4096);
 	if (!ctx) {
 		DRM_DEBUG("failed to alloc power context, RC6 disabled\n");
 		return NULL;
 	}
 
-	mutex_lock(&dev->struct_mutex);
 	ret = i915_gem_object_pin(ctx, 4096, true);
 	if (ret) {
 		DRM_ERROR("failed to pin power context: %d\n", ret);
@@ -6623,7 +6866,6 @@ intel_alloc_context_page(struct drm_device *dev)
 		DRM_ERROR("failed to set-domain on power context: %d\n", ret);
 		goto err_unpin;
 	}
-	mutex_unlock(&dev->struct_mutex);
 
 	return ctx;
 
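These two hunks move lock ownership out of intel_alloc_context_page(): instead of taking and dropping struct_mutex itself, the helper now asserts that the caller already holds it (the WARN_ON above). Below is a user-space sketch of that "caller holds the lock" contract, with a pthread mutex standing in for the kernel mutex; it assumes the platform's default mutex is non-recursive, so trylock by the current holder fails with EBUSY rather than deadlocking (true of glibc's default).

    /* Sketch of the lock-ownership contract made explicit above: the
     * helper asserts the lock is already held instead of taking it.
     */
    #include <assert.h>
    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t struct_mutex = PTHREAD_MUTEX_INITIALIZER;

    static int alloc_context_page(void)
    {
        /* analogue of WARN_ON(!mutex_is_locked(...)): trylock must fail */
        assert(pthread_mutex_trylock(&struct_mutex) != 0);
        puts("allocating under the caller's lock");
        return 0;
    }

    int main(void)
    {
        pthread_mutex_lock(&struct_mutex);
        alloc_context_page();
        pthread_mutex_unlock(&struct_mutex);
        return 0;
    }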
@@ -6758,6 +7000,11 @@ void gen6_disable_rps(struct drm_device *dev)
 	I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
 	I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
 	I915_WRITE(GEN6_PMIER, 0);
+
+	spin_lock_irq(&dev_priv->rps_lock);
+	dev_priv->pm_iir = 0;
+	spin_unlock_irq(&dev_priv->rps_lock);
+
 	I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
 }
 
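dev_priv->pm_iir is also updated from the PM interrupt path, so the hunk above clears it under the new rps_lock rather than with a bare store; the matching gen6_enable_rps() change later in this diff checks that it is still zero under the same lock before unmasking. A user-space sketch of that shared-flag-under-one-lock pattern, using pthreads in place of the kernel spinlock and IRQ context:

    /* Sketch: a flag word shared with an asynchronous context must be
     * cleared and checked under the same lock the async side takes.
     */
    #include <assert.h>
    #include <pthread.h>
    #include <stdint.h>

    static pthread_mutex_t rps_lock = PTHREAD_MUTEX_INITIALIZER;
    static uint32_t pm_iir;    /* pending PM interrupt bits, shared */

    static void *irq_handler(void *arg)
    {
        (void)arg;
        pthread_mutex_lock(&rps_lock);
        pm_iir |= 0x10;        /* pretend a threshold event arrived */
        pthread_mutex_unlock(&rps_lock);
        return NULL;
    }

    static void disable_rps(void)
    {
        /* mirror of gen6_disable_rps: drop stale bits under the lock */
        pthread_mutex_lock(&rps_lock);
        pm_iir = 0;
        pthread_mutex_unlock(&rps_lock);
    }

    static void enable_rps(void)
    {
        /* mirror of gen6_enable_rps: nothing may be pending when we
         * unmask, so the check sits inside the lock */
        pthread_mutex_lock(&rps_lock);
        assert(pm_iir == 0);
        /* ... the PMIMR unmask write would go here ... */
        pthread_mutex_unlock(&rps_lock);
    }

    int main(void)
    {
        pthread_t t;

        pthread_create(&t, NULL, irq_handler, NULL);
        pthread_join(t, NULL);
        disable_rps();
        enable_rps();
        return 0;
    }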
@@ -6851,7 +7098,7 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
 {
 	u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
 	u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
-	u32 pcu_mbox;
+	u32 pcu_mbox, rc6_mask = 0;
 	int cur_freq, min_freq, max_freq;
 	int i;
 
@@ -6862,7 +7109,8 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
 	 * userspace...
 	 */
 	I915_WRITE(GEN6_RC_STATE, 0);
-	__gen6_gt_force_wake_get(dev_priv);
+	mutex_lock(&dev_priv->dev->struct_mutex);
+	gen6_gt_force_wake_get(dev_priv);
 
 	/* disable the counters and set deterministic thresholds */
 	I915_WRITE(GEN6_RC_CONTROL, 0);
@@ -6882,9 +7130,12 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
 	I915_WRITE(GEN6_RC6p_THRESHOLD, 100000);
 	I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
 
+	if (i915_enable_rc6)
+		rc6_mask = GEN6_RC_CTL_RC6p_ENABLE |
+			GEN6_RC_CTL_RC6_ENABLE;
+
 	I915_WRITE(GEN6_RC_CONTROL,
-		   GEN6_RC_CTL_RC6p_ENABLE |
-		   GEN6_RC_CTL_RC6_ENABLE |
+		   rc6_mask |
 		   GEN6_RC_CTL_EI_MODE(1) |
 		   GEN6_RC_CTL_HW_ENABLE);
 
@@ -6956,168 +7207,237 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
 		   GEN6_PM_RP_DOWN_THRESHOLD |
 		   GEN6_PM_RP_UP_EI_EXPIRED |
 		   GEN6_PM_RP_DOWN_EI_EXPIRED);
+	spin_lock_irq(&dev_priv->rps_lock);
+	WARN_ON(dev_priv->pm_iir != 0);
 	I915_WRITE(GEN6_PMIMR, 0);
+	spin_unlock_irq(&dev_priv->rps_lock);
 	/* enable all PM interrupts */
 	I915_WRITE(GEN6_PMINTRMSK, 0);
 
-	__gen6_gt_force_wake_put(dev_priv);
+	gen6_gt_force_wake_put(dev_priv);
+	mutex_unlock(&dev_priv->dev->struct_mutex);
 }
 
-void intel_enable_clock_gating(struct drm_device *dev)
+static void ironlake_init_clock_gating(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
+
+	/* Required for FBC */
+	dspclk_gate |= DPFCUNIT_CLOCK_GATE_DISABLE |
+		DPFCRUNIT_CLOCK_GATE_DISABLE |
+		DPFDUNIT_CLOCK_GATE_DISABLE;
+	/* Required for CxSR */
+	dspclk_gate |= DPARBUNIT_CLOCK_GATE_DISABLE;
+
+	I915_WRITE(PCH_3DCGDIS0,
+		   MARIUNIT_CLOCK_GATE_DISABLE |
+		   SVSMUNIT_CLOCK_GATE_DISABLE);
+	I915_WRITE(PCH_3DCGDIS1,
+		   VFMUNIT_CLOCK_GATE_DISABLE);
+
+	I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
+
+	/*
+	 * According to the spec the following bits should be set in
+	 * order to enable memory self-refresh
+	 * The bit 22/21 of 0x42004
+	 * The bit 5 of 0x42020
+	 * The bit 15 of 0x45000
+	 */
+	I915_WRITE(ILK_DISPLAY_CHICKEN2,
+		   (I915_READ(ILK_DISPLAY_CHICKEN2) |
+		    ILK_DPARB_GATE | ILK_VSDPFD_FULL));
+	I915_WRITE(ILK_DSPCLK_GATE,
+		   (I915_READ(ILK_DSPCLK_GATE) |
+		    ILK_DPARB_CLK_GATE));
+	I915_WRITE(DISP_ARB_CTL,
+		   (I915_READ(DISP_ARB_CTL) |
+		    DISP_FBC_WM_DIS));
+	I915_WRITE(WM3_LP_ILK, 0);
+	I915_WRITE(WM2_LP_ILK, 0);
+	I915_WRITE(WM1_LP_ILK, 0);
+
+	/*
+	 * Based on the document from hardware guys the following bits
+	 * should be set unconditionally in order to enable FBC.
+	 * The bit 22 of 0x42000
+	 * The bit 22 of 0x42004
+	 * The bit 7,8,9 of 0x42020.
+	 */
+	if (IS_IRONLAKE_M(dev)) {
+		I915_WRITE(ILK_DISPLAY_CHICKEN1,
+			   I915_READ(ILK_DISPLAY_CHICKEN1) |
+			   ILK_FBCQ_DIS);
+		I915_WRITE(ILK_DISPLAY_CHICKEN2,
+			   I915_READ(ILK_DISPLAY_CHICKEN2) |
+			   ILK_DPARB_GATE);
+		I915_WRITE(ILK_DSPCLK_GATE,
+			   I915_READ(ILK_DSPCLK_GATE) |
+			   ILK_DPFC_DIS1 |
+			   ILK_DPFC_DIS2 |
+			   ILK_CLK_FBC);
+	}
+
+	I915_WRITE(ILK_DISPLAY_CHICKEN2,
+		   I915_READ(ILK_DISPLAY_CHICKEN2) |
+		   ILK_ELPIN_409_SELECT);
+	I915_WRITE(_3D_CHICKEN2,
+		   _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
+		   _3D_CHICKEN2_WM_READ_PIPELINED);
+}
+
+static void gen6_init_clock_gating(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int pipe;
+	uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
+
+	I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
+
+	I915_WRITE(ILK_DISPLAY_CHICKEN2,
+		   I915_READ(ILK_DISPLAY_CHICKEN2) |
+		   ILK_ELPIN_409_SELECT);
+
+	I915_WRITE(WM3_LP_ILK, 0);
+	I915_WRITE(WM2_LP_ILK, 0);
+	I915_WRITE(WM1_LP_ILK, 0);
 
 	/*
-	 * Disable clock gating reported to work incorrectly according to the
-	 * specs, but enable as much else as we can.
+	 * According to the spec the following bits should be
+	 * set in order to enable memory self-refresh and fbc:
+	 * The bit21 and bit22 of 0x42000
+	 * The bit21 and bit22 of 0x42004
+	 * The bit5 and bit7 of 0x42020
+	 * The bit14 of 0x70180
+	 * The bit14 of 0x71180
 	 */
-	if (HAS_PCH_SPLIT(dev)) {
-		uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
+	I915_WRITE(ILK_DISPLAY_CHICKEN1,
+		   I915_READ(ILK_DISPLAY_CHICKEN1) |
+		   ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
+	I915_WRITE(ILK_DISPLAY_CHICKEN2,
+		   I915_READ(ILK_DISPLAY_CHICKEN2) |
+		   ILK_DPARB_GATE | ILK_VSDPFD_FULL);
+	I915_WRITE(ILK_DSPCLK_GATE,
+		   I915_READ(ILK_DSPCLK_GATE) |
+		   ILK_DPARB_CLK_GATE |
+		   ILK_DPFD_CLK_GATE);
 
-		if (IS_GEN5(dev)) {
-			/* Required for FBC */
-			dspclk_gate |= DPFCUNIT_CLOCK_GATE_DISABLE |
-				DPFCRUNIT_CLOCK_GATE_DISABLE |
-				DPFDUNIT_CLOCK_GATE_DISABLE;
-			/* Required for CxSR */
-			dspclk_gate |= DPARBUNIT_CLOCK_GATE_DISABLE;
-
-			I915_WRITE(PCH_3DCGDIS0,
-				   MARIUNIT_CLOCK_GATE_DISABLE |
-				   SVSMUNIT_CLOCK_GATE_DISABLE);
-			I915_WRITE(PCH_3DCGDIS1,
-				   VFMUNIT_CLOCK_GATE_DISABLE);
-		}
+	for_each_pipe(pipe)
+		I915_WRITE(DSPCNTR(pipe),
+			   I915_READ(DSPCNTR(pipe)) |
+			   DISPPLANE_TRICKLE_FEED_DISABLE);
+}
 
-		I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
+static void ivybridge_init_clock_gating(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int pipe;
+	uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
 
-		/*
-		 * On Ibex Peak and Cougar Point, we need to disable clock
-		 * gating for the panel power sequencer or it will fail to
-		 * start up when no ports are active.
-		 */
-		I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
+	I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
 
-		/*
-		 * According to the spec the following bits should be set in
-		 * order to enable memory self-refresh
-		 * The bit 22/21 of 0x42004
-		 * The bit 5 of 0x42020
-		 * The bit 15 of 0x45000
-		 */
-		if (IS_GEN5(dev)) {
-			I915_WRITE(ILK_DISPLAY_CHICKEN2,
-				   (I915_READ(ILK_DISPLAY_CHICKEN2) |
-				    ILK_DPARB_GATE | ILK_VSDPFD_FULL));
-			I915_WRITE(ILK_DSPCLK_GATE,
-				   (I915_READ(ILK_DSPCLK_GATE) |
-				    ILK_DPARB_CLK_GATE));
-			I915_WRITE(DISP_ARB_CTL,
-				   (I915_READ(DISP_ARB_CTL) |
-				    DISP_FBC_WM_DIS));
-			I915_WRITE(WM3_LP_ILK, 0);
-			I915_WRITE(WM2_LP_ILK, 0);
-			I915_WRITE(WM1_LP_ILK, 0);
-		}
-		/*
-		 * Based on the document from hardware guys the following bits
-		 * should be set unconditionally in order to enable FBC.
-		 * The bit 22 of 0x42000
-		 * The bit 22 of 0x42004
-		 * The bit 7,8,9 of 0x42020.
-		 */
-		if (IS_IRONLAKE_M(dev)) {
-			I915_WRITE(ILK_DISPLAY_CHICKEN1,
-				   I915_READ(ILK_DISPLAY_CHICKEN1) |
-				   ILK_FBCQ_DIS);
-			I915_WRITE(ILK_DISPLAY_CHICKEN2,
-				   I915_READ(ILK_DISPLAY_CHICKEN2) |
-				   ILK_DPARB_GATE);
-			I915_WRITE(ILK_DSPCLK_GATE,
-				   I915_READ(ILK_DSPCLK_GATE) |
-				   ILK_DPFC_DIS1 |
-				   ILK_DPFC_DIS2 |
-				   ILK_CLK_FBC);
-		}
+	I915_WRITE(WM3_LP_ILK, 0);
+	I915_WRITE(WM2_LP_ILK, 0);
+	I915_WRITE(WM1_LP_ILK, 0);
 
-		I915_WRITE(ILK_DISPLAY_CHICKEN2,
-			   I915_READ(ILK_DISPLAY_CHICKEN2) |
-			   ILK_ELPIN_409_SELECT);
+	I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE);
 
-		if (IS_GEN5(dev)) {
-			I915_WRITE(_3D_CHICKEN2,
-				   _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
-				   _3D_CHICKEN2_WM_READ_PIPELINED);
-		}
+	for_each_pipe(pipe)
+		I915_WRITE(DSPCNTR(pipe),
+			   I915_READ(DSPCNTR(pipe)) |
+			   DISPPLANE_TRICKLE_FEED_DISABLE);
+}
 
-		if (IS_GEN6(dev)) {
-			I915_WRITE(WM3_LP_ILK, 0);
-			I915_WRITE(WM2_LP_ILK, 0);
-			I915_WRITE(WM1_LP_ILK, 0);
-
-			/*
-			 * According to the spec the following bits should be
-			 * set in order to enable memory self-refresh and fbc:
-			 * The bit21 and bit22 of 0x42000
-			 * The bit21 and bit22 of 0x42004
-			 * The bit5 and bit7 of 0x42020
-			 * The bit14 of 0x70180
-			 * The bit14 of 0x71180
-			 */
-			I915_WRITE(ILK_DISPLAY_CHICKEN1,
-				   I915_READ(ILK_DISPLAY_CHICKEN1) |
-				   ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
-			I915_WRITE(ILK_DISPLAY_CHICKEN2,
-				   I915_READ(ILK_DISPLAY_CHICKEN2) |
-				   ILK_DPARB_GATE | ILK_VSDPFD_FULL);
-			I915_WRITE(ILK_DSPCLK_GATE,
-				   I915_READ(ILK_DSPCLK_GATE) |
-				   ILK_DPARB_CLK_GATE |
-				   ILK_DPFD_CLK_GATE);
-
-			for_each_pipe(pipe)
-				I915_WRITE(DSPCNTR(pipe),
-					   I915_READ(DSPCNTR(pipe)) |
-					   DISPPLANE_TRICKLE_FEED_DISABLE);
-		}
-	} else if (IS_G4X(dev)) {
-		uint32_t dspclk_gate;
-		I915_WRITE(RENCLK_GATE_D1, 0);
-		I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
-			   GS_UNIT_CLOCK_GATE_DISABLE |
-			   CL_UNIT_CLOCK_GATE_DISABLE);
-		I915_WRITE(RAMCLK_GATE_D, 0);
-		dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
-			OVRUNIT_CLOCK_GATE_DISABLE |
-			OVCUNIT_CLOCK_GATE_DISABLE;
-		if (IS_GM45(dev))
-			dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
-		I915_WRITE(DSPCLK_GATE_D, dspclk_gate);
-	} else if (IS_CRESTLINE(dev)) {
-		I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
-		I915_WRITE(RENCLK_GATE_D2, 0);
-		I915_WRITE(DSPCLK_GATE_D, 0);
-		I915_WRITE(RAMCLK_GATE_D, 0);
-		I915_WRITE16(DEUC, 0);
-	} else if (IS_BROADWATER(dev)) {
-		I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
-			   I965_RCC_CLOCK_GATE_DISABLE |
-			   I965_RCPB_CLOCK_GATE_DISABLE |
-			   I965_ISC_CLOCK_GATE_DISABLE |
-			   I965_FBC_CLOCK_GATE_DISABLE);
-		I915_WRITE(RENCLK_GATE_D2, 0);
-	} else if (IS_GEN3(dev)) {
-		u32 dstate = I915_READ(D_STATE);
+static void g4x_init_clock_gating(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	uint32_t dspclk_gate;
 
-		dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
-			DSTATE_DOT_CLOCK_GATING;
-		I915_WRITE(D_STATE, dstate);
-	} else if (IS_I85X(dev) || IS_I865G(dev)) {
-		I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
-	} else if (IS_I830(dev)) {
-		I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
-	}
+	I915_WRITE(RENCLK_GATE_D1, 0);
+	I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
+		   GS_UNIT_CLOCK_GATE_DISABLE |
+		   CL_UNIT_CLOCK_GATE_DISABLE);
+	I915_WRITE(RAMCLK_GATE_D, 0);
+	dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
+		OVRUNIT_CLOCK_GATE_DISABLE |
+		OVCUNIT_CLOCK_GATE_DISABLE;
+	if (IS_GM45(dev))
+		dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
+	I915_WRITE(DSPCLK_GATE_D, dspclk_gate);
+}
+
+static void crestline_init_clock_gating(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
+	I915_WRITE(RENCLK_GATE_D2, 0);
+	I915_WRITE(DSPCLK_GATE_D, 0);
+	I915_WRITE(RAMCLK_GATE_D, 0);
+	I915_WRITE16(DEUC, 0);
+}
+
+static void broadwater_init_clock_gating(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
+		   I965_RCC_CLOCK_GATE_DISABLE |
+		   I965_RCPB_CLOCK_GATE_DISABLE |
+		   I965_ISC_CLOCK_GATE_DISABLE |
+		   I965_FBC_CLOCK_GATE_DISABLE);
+	I915_WRITE(RENCLK_GATE_D2, 0);
+}
+
+static void gen3_init_clock_gating(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 dstate = I915_READ(D_STATE);
+
+	dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
+		DSTATE_DOT_CLOCK_GATING;
+	I915_WRITE(D_STATE, dstate);
+}
+
+static void i85x_init_clock_gating(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
+}
+
+static void i830_init_clock_gating(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
+}
+
+static void ibx_init_clock_gating(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	/*
+	 * On Ibex Peak and Cougar Point, we need to disable clock
+	 * gating for the panel power sequencer or it will fail to
+	 * start up when no ports are active.
+	 */
+	I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
+}
+
+static void cpt_init_clock_gating(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	/*
+	 * On Ibex Peak and Cougar Point, we need to disable clock
+	 * gating for the panel power sequencer or it will fail to
+	 * start up when no ports are active.
+	 */
+	I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
+	I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
+		   DPLS_EDP_PPS_FIX_DIS);
+}
 
 static void ironlake_teardown_rc6(struct drm_device *dev)
@@ -7187,9 +7507,12 @@ void ironlake_enable_rc6(struct drm_device *dev)
 	if (!i915_enable_rc6)
 		return;
 
+	mutex_lock(&dev->struct_mutex);
 	ret = ironlake_setup_rc6(dev);
-	if (ret)
+	if (ret) {
+		mutex_unlock(&dev->struct_mutex);
 		return;
+	}
 
 	/*
 	 * GPU can automatically power down the render unit if given a page
@@ -7198,6 +7521,7 @@ void ironlake_enable_rc6(struct drm_device *dev)
 	ret = BEGIN_LP_RING(6);
 	if (ret) {
 		ironlake_teardown_rc6(dev);
+		mutex_unlock(&dev->struct_mutex);
 		return;
 	}
 
@@ -7213,10 +7537,33 @@ void ironlake_enable_rc6(struct drm_device *dev)
 	OUT_RING(MI_FLUSH);
 	ADVANCE_LP_RING();
 
+	/*
+	 * Wait for the command parser to advance past MI_SET_CONTEXT. The HW
+	 * does an implicit flush, combined with MI_FLUSH above, it should be
+	 * safe to assume that renderctx is valid
+	 */
+	ret = intel_wait_ring_idle(LP_RING(dev_priv));
+	if (ret) {
+		DRM_ERROR("failed to enable ironlake power power savings\n");
+		ironlake_teardown_rc6(dev);
+		mutex_unlock(&dev->struct_mutex);
+		return;
+	}
+
 	I915_WRITE(PWRCTXA, dev_priv->pwrctx->gtt_offset | PWRCTX_EN);
 	I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
+	mutex_unlock(&dev->struct_mutex);
 }
 
+void intel_init_clock_gating(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	dev_priv->display.init_clock_gating(dev);
+
+	if (dev_priv->display.init_pch_clock_gating)
+		dev_priv->display.init_pch_clock_gating(dev);
+}
 
 /* Set up chip specific display functions */
 static void intel_init_display(struct drm_device *dev)
@@ -7224,10 +7571,13 @@ static void intel_init_display(struct drm_device *dev)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
 	/* We always want a DPMS function */
-	if (HAS_PCH_SPLIT(dev))
+	if (HAS_PCH_SPLIT(dev)) {
 		dev_priv->display.dpms = ironlake_crtc_dpms;
-	else
+		dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set;
+	} else {
 		dev_priv->display.dpms = i9xx_crtc_dpms;
+		dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
+	}
 
 	if (I915_HAS_FBC(dev)) {
 		if (HAS_PCH_SPLIT(dev)) {
@@ -7271,6 +7621,11 @@ static void intel_init_display(struct drm_device *dev)
 
 	/* For FIFO watermark updates */
 	if (HAS_PCH_SPLIT(dev)) {
+		if (HAS_PCH_IBX(dev))
+			dev_priv->display.init_pch_clock_gating = ibx_init_clock_gating;
+		else if (HAS_PCH_CPT(dev))
+			dev_priv->display.init_pch_clock_gating = cpt_init_clock_gating;
+
 		if (IS_GEN5(dev)) {
 			if (I915_READ(MLTR_ILK) & ILK_SRLT_MASK)
 				dev_priv->display.update_wm = ironlake_update_wm;
@@ -7279,6 +7634,8 @@ static void intel_init_display(struct drm_device *dev)
 					      "Disable CxSR\n");
 				dev_priv->display.update_wm = NULL;
 			}
+			dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
+			dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
 		} else if (IS_GEN6(dev)) {
 			if (SNB_READ_WM0_LATENCY()) {
 				dev_priv->display.update_wm = sandybridge_update_wm;
@@ -7287,6 +7644,20 @@ static void intel_init_display(struct drm_device *dev)
 					      "Disable CxSR\n");
 				dev_priv->display.update_wm = NULL;
 			}
+			dev_priv->display.fdi_link_train = gen6_fdi_link_train;
+			dev_priv->display.init_clock_gating = gen6_init_clock_gating;
+		} else if (IS_IVYBRIDGE(dev)) {
+			/* FIXME: detect B0+ stepping and use auto training */
+			dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
+			if (SNB_READ_WM0_LATENCY()) {
+				dev_priv->display.update_wm = sandybridge_update_wm;
+			} else {
+				DRM_DEBUG_KMS("Failed to read display plane latency. "
+					      "Disable CxSR\n");
+				dev_priv->display.update_wm = NULL;
+			}
+			dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
+
 		} else
 			dev_priv->display.update_wm = NULL;
 	} else if (IS_PINEVIEW(dev)) {
@@ -7304,18 +7675,30 @@ static void intel_init_display(struct drm_device *dev)
 			dev_priv->display.update_wm = NULL;
 		} else
 			dev_priv->display.update_wm = pineview_update_wm;
-	} else if (IS_G4X(dev))
+	} else if (IS_G4X(dev)) {
 		dev_priv->display.update_wm = g4x_update_wm;
-	else if (IS_GEN4(dev))
+		dev_priv->display.init_clock_gating = g4x_init_clock_gating;
+	} else if (IS_GEN4(dev)) {
 		dev_priv->display.update_wm = i965_update_wm;
-	else if (IS_GEN3(dev)) {
+		if (IS_CRESTLINE(dev))
+			dev_priv->display.init_clock_gating = crestline_init_clock_gating;
+		else if (IS_BROADWATER(dev))
+			dev_priv->display.init_clock_gating = broadwater_init_clock_gating;
+	} else if (IS_GEN3(dev)) {
 		dev_priv->display.update_wm = i9xx_update_wm;
 		dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
+		dev_priv->display.init_clock_gating = gen3_init_clock_gating;
+	} else if (IS_I865G(dev)) {
+		dev_priv->display.update_wm = i830_update_wm;
+		dev_priv->display.init_clock_gating = i85x_init_clock_gating;
+		dev_priv->display.get_fifo_size = i830_get_fifo_size;
 	} else if (IS_I85X(dev)) {
 		dev_priv->display.update_wm = i9xx_update_wm;
 		dev_priv->display.get_fifo_size = i85x_get_fifo_size;
+		dev_priv->display.init_clock_gating = i85x_init_clock_gating;
 	} else {
 		dev_priv->display.update_wm = i830_update_wm;
+		dev_priv->display.init_clock_gating = i830_init_clock_gating;
 		if (IS_845G(dev))
 			dev_priv->display.get_fifo_size = i845_get_fifo_size;
 		else
@@ -7441,12 +7824,11 @@ void intel_modeset_init(struct drm_device *dev)
 		intel_crtc_init(dev, i);
 	}
 
-	intel_setup_outputs(dev);
-
-	intel_enable_clock_gating(dev);
-
 	/* Just disable it once at startup */
 	i915_disable_vga(dev);
+	intel_setup_outputs(dev);
+
+	intel_init_clock_gating(dev);
 
 	if (IS_IRONLAKE_M(dev)) {
 		ironlake_enable_drps(dev);
@@ -7456,12 +7838,15 @@ void intel_modeset_init(struct drm_device *dev)
 	if (IS_GEN6(dev))
 		gen6_enable_rps(dev_priv);
 
-	if (IS_IRONLAKE_M(dev))
-		ironlake_enable_rc6(dev);
-
 	INIT_WORK(&dev_priv->idle_work, intel_idle_update);
 	setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer,
 		    (unsigned long)dev);
+}
+
+void intel_modeset_gem_init(struct drm_device *dev)
+{
+	if (IS_IRONLAKE_M(dev))
+		ironlake_enable_rc6(dev);
 
 	intel_setup_overlay(dev);
 }
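
With intel_display.c done, the clock-gating rework reads as one move: the single intel_enable_clock_gating() with its chipset if/else ladder becomes small per-platform init_clock_gating hooks, plus an optional init_pch_clock_gating hook for PCH systems, both chosen once in intel_init_display() and invoked through intel_init_clock_gating(). A compilable sketch of that two-level hook arrangement follows; the stub functions stand in for the real register writes.

    /* Sketch of the two-level hook: every platform supplies the first
     * hook, and only PCH systems supply the second, which the generic
     * entry point calls when present.
     */
    #include <stdio.h>

    struct display_hooks {
        void (*init_clock_gating)(void);
        void (*init_pch_clock_gating)(void);   /* optional, may be NULL */
    };

    static void gen6_clock_gating(void)    { puts("gen6 gpu-side gating"); }
    static void cpt_pch_clock_gating(void) { puts("CougarPoint PCH gating"); }

    static void init_clock_gating(const struct display_hooks *hooks)
    {
        hooks->init_clock_gating();
        if (hooks->init_pch_clock_gating)
            hooks->init_pch_clock_gating();
    }

    int main(void)
    {
        /* chosen once at init time, per chipset */
        struct display_hooks hooks = {
            .init_clock_gating     = gen6_clock_gating,
            .init_pch_clock_gating = cpt_pch_clock_gating,
        };

        init_clock_gating(&hooks);
        return 0;
    }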
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 1d20712d527f..831d7a4a0d18 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -140,7 +140,6 @@ struct intel_fbdev {
 struct intel_encoder {
 	struct drm_encoder base;
 	int type;
-	bool load_detect_temp;
 	bool needs_tv_clock;
 	void (*hot_plug)(struct intel_encoder *);
 	int crtc_mask;
@@ -291,13 +290,19 @@ int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
 				     struct drm_file *file_priv);
 extern void intel_wait_for_vblank(struct drm_device *dev, int pipe);
 extern void intel_wait_for_pipe_off(struct drm_device *dev, int pipe);
-extern struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
-						   struct drm_connector *connector,
-						   struct drm_display_mode *mode,
-						   int *dpms_mode);
+
+struct intel_load_detect_pipe {
+	struct drm_framebuffer *release_fb;
+	bool load_detect_temp;
+	int dpms_mode;
+};
+extern bool intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
+				       struct drm_connector *connector,
+				       struct drm_display_mode *mode,
+				       struct intel_load_detect_pipe *old);
 extern void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder,
 					   struct drm_connector *connector,
-					   int dpms_mode);
+					   struct intel_load_detect_pipe *old);
 
 extern struct drm_connector* intel_sdvo_find(struct drm_device *dev, int sdvoB);
 extern int intel_sdvo_supports_hotplug(struct drm_connector *connector);
@@ -339,4 +344,6 @@ extern int intel_overlay_attrs(struct drm_device *dev, void *data,
 
 extern void intel_fb_output_poll_changed(struct drm_device *dev);
 extern void intel_fb_restore_mode(struct drm_device *dev);
+
+extern void intel_init_clock_gating(struct drm_device *dev);
 #endif /* __INTEL_DRV_H__ */
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index e9e6f71418a4..95c4b1429935 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -236,7 +236,7 @@ init_pipe_control(struct intel_ring_buffer *ring)
 		ret = -ENOMEM;
 		goto err;
 	}
-	obj->agp_type = AGP_USER_CACHED_MEMORY;
+	obj->cache_level = I915_CACHE_LLC;
 
 	ret = i915_gem_object_pin(obj, 4096, true);
 	if (ret)
@@ -286,7 +286,7 @@ static int init_render_ring(struct intel_ring_buffer *ring)
 
 	if (INTEL_INFO(dev)->gen > 3) {
 		int mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH;
-		if (IS_GEN6(dev))
+		if (IS_GEN6(dev) || IS_GEN7(dev))
 			mode |= MI_FLUSH_ENABLE << 16 | MI_FLUSH_ENABLE;
 		I915_WRITE(MI_MODE, mode);
 	}
@@ -551,10 +551,31 @@ render_ring_put_irq(struct intel_ring_buffer *ring)
 
 void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
 {
+	struct drm_device *dev = ring->dev;
 	drm_i915_private_t *dev_priv = ring->dev->dev_private;
-	u32 mmio = IS_GEN6(ring->dev) ?
-		RING_HWS_PGA_GEN6(ring->mmio_base) :
-		RING_HWS_PGA(ring->mmio_base);
+	u32 mmio = 0;
+
+	/* The ring status page addresses are no longer next to the rest of
+	 * the ring registers as of gen7.
+	 */
+	if (IS_GEN7(dev)) {
+		switch (ring->id) {
+		case RING_RENDER:
+			mmio = RENDER_HWS_PGA_GEN7;
+			break;
+		case RING_BLT:
+			mmio = BLT_HWS_PGA_GEN7;
+			break;
+		case RING_BSD:
+			mmio = BSD_HWS_PGA_GEN7;
+			break;
+		}
+	} else if (IS_GEN6(ring->dev)) {
+		mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
+	} else {
+		mmio = RING_HWS_PGA(ring->mmio_base);
+	}
+
 	I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
 	POSTING_READ(mmio);
 }
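
The rewritten helper above reflects a hardware layout change: through gen6 the status-page register sits at a fixed offset from each ring's mmio_base, while on gen7 each engine has its own dedicated register, so the code selects by ring id first and falls back to the offset scheme. A standalone sketch of that selection logic; the numeric register values below are placeholders, not the real offsets.

    /* Sketch: choose the status-page register by engine id on gen7, by
     * a fixed offset from the ring's mmio base on earlier generations.
     */
    #include <stdint.h>
    #include <stdio.h>

    enum ring_id { RENDER, BSD, BLT };

    static uint32_t hws_pga_reg(int gen, enum ring_id id, uint32_t mmio_base)
    {
        if (gen >= 7) {
            switch (id) {
            case RENDER: return 0x04080;   /* placeholder addresses */
            case BSD:    return 0x04180;
            case BLT:    return 0x04280;
            }
        }
        return mmio_base + 0x80;   /* pre-gen7: offset from ring base */
    }

    int main(void)
    {
        printf("gen7 blt: %#x\n", hws_pga_reg(7, BLT, 0x22000));
        printf("gen6 blt: %#x\n", hws_pga_reg(6, BLT, 0x22000));
        return 0;
    }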
@@ -600,7 +621,7 @@ ring_add_request(struct intel_ring_buffer *ring,
 }
 
 static bool
-ring_get_irq(struct intel_ring_buffer *ring, u32 flag)
+gen6_ring_get_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
 {
 	struct drm_device *dev = ring->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
@@ -609,71 +630,67 @@ ring_get_irq(struct intel_ring_buffer *ring, u32 flag)
 		return false;
 
 	spin_lock(&ring->irq_lock);
-	if (ring->irq_refcount++ == 0)
-		ironlake_enable_irq(dev_priv, flag);
+	if (ring->irq_refcount++ == 0) {
+		ring->irq_mask &= ~rflag;
+		I915_WRITE_IMR(ring, ring->irq_mask);
+		ironlake_enable_irq(dev_priv, gflag);
+	}
 	spin_unlock(&ring->irq_lock);
 
 	return true;
 }
 
 static void
-ring_put_irq(struct intel_ring_buffer *ring, u32 flag)
+gen6_ring_put_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
 {
 	struct drm_device *dev = ring->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 
 	spin_lock(&ring->irq_lock);
-	if (--ring->irq_refcount == 0)
-		ironlake_disable_irq(dev_priv, flag);
+	if (--ring->irq_refcount == 0) {
+		ring->irq_mask |= rflag;
+		I915_WRITE_IMR(ring, ring->irq_mask);
+		ironlake_disable_irq(dev_priv, gflag);
+	}
 	spin_unlock(&ring->irq_lock);
 }
 
 static bool
-gen6_ring_get_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
+bsd_ring_get_irq(struct intel_ring_buffer *ring)
 {
 	struct drm_device *dev = ring->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 
 	if (!dev->irq_enabled)
 		return false;
 
 	spin_lock(&ring->irq_lock);
 	if (ring->irq_refcount++ == 0) {
-		ring->irq_mask &= ~rflag;
-		I915_WRITE_IMR(ring, ring->irq_mask);
-		ironlake_enable_irq(dev_priv, gflag);
+		if (IS_G4X(dev))
+			i915_enable_irq(dev_priv, I915_BSD_USER_INTERRUPT);
+		else
+			ironlake_enable_irq(dev_priv, GT_BSD_USER_INTERRUPT);
 	}
 	spin_unlock(&ring->irq_lock);
 
 	return true;
 }
-
 static void
-gen6_ring_put_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
+bsd_ring_put_irq(struct intel_ring_buffer *ring)
 {
 	struct drm_device *dev = ring->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 
 	spin_lock(&ring->irq_lock);
 	if (--ring->irq_refcount == 0) {
-		ring->irq_mask |= rflag;
-		I915_WRITE_IMR(ring, ring->irq_mask);
-		ironlake_disable_irq(dev_priv, gflag);
+		if (IS_G4X(dev))
+			i915_disable_irq(dev_priv, I915_BSD_USER_INTERRUPT);
+		else
+			ironlake_disable_irq(dev_priv, GT_BSD_USER_INTERRUPT);
 	}
 	spin_unlock(&ring->irq_lock);
 }
 
-static bool
-bsd_ring_get_irq(struct intel_ring_buffer *ring)
-{
-	return ring_get_irq(ring, GT_BSD_USER_INTERRUPT);
-}
-static void
-bsd_ring_put_irq(struct intel_ring_buffer *ring)
-{
-	ring_put_irq(ring, GT_BSD_USER_INTERRUPT);
-}
-
 static int
 ring_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length)
 {
@@ -759,7 +776,7 @@ static int init_status_page(struct intel_ring_buffer *ring)
 		ret = -ENOMEM;
 		goto err;
 	}
-	obj->agp_type = AGP_USER_CACHED_MEMORY;
+	obj->cache_level = I915_CACHE_LLC;
 
 	ret = i915_gem_object_pin(obj, 4096, true);
 	if (ret != 0) {
@@ -800,6 +817,7 @@ int intel_init_ring_buffer(struct drm_device *dev,
 	INIT_LIST_HEAD(&ring->request_list);
 	INIT_LIST_HEAD(&ring->gpu_write_list);
 
+	init_waitqueue_head(&ring->irq_queue);
 	spin_lock_init(&ring->irq_lock);
 	ring->irq_mask = ~0;
 
@@ -872,7 +890,7 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
 
 	/* Disable the ring buffer. The ring must be idle at this point */
 	dev_priv = ring->dev->dev_private;
-	ret = intel_wait_ring_buffer(ring, ring->size - 8);
+	ret = intel_wait_ring_idle(ring);
 	if (ret)
 		DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
 			  ring->name, ret);
@@ -1333,7 +1351,7 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct intel_ring_buffer *ring = &dev_priv->ring[VCS];
 
-	if (IS_GEN6(dev))
+	if (IS_GEN6(dev) || IS_GEN7(dev))
 		*ring = gen6_bsd_ring;
 	else
 		*ring = bsd_ring;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index f23cc5f037a6..c0e0ee63fbf4 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -14,27 +14,24 @@ struct intel_hw_status_page {
 	struct drm_i915_gem_object *obj;
 };
 
-#define I915_RING_READ(reg) i915_gt_read(dev_priv, reg)
-#define I915_RING_WRITE(reg, val) i915_gt_write(dev_priv, reg, val)
+#define I915_READ_TAIL(ring) I915_READ(RING_TAIL((ring)->mmio_base))
+#define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL((ring)->mmio_base), val)
 
-#define I915_READ_TAIL(ring) I915_RING_READ(RING_TAIL((ring)->mmio_base))
-#define I915_WRITE_TAIL(ring, val) I915_RING_WRITE(RING_TAIL((ring)->mmio_base), val)
+#define I915_READ_START(ring) I915_READ(RING_START((ring)->mmio_base))
+#define I915_WRITE_START(ring, val) I915_WRITE(RING_START((ring)->mmio_base), val)
 
-#define I915_READ_START(ring) I915_RING_READ(RING_START((ring)->mmio_base))
-#define I915_WRITE_START(ring, val) I915_RING_WRITE(RING_START((ring)->mmio_base), val)
+#define I915_READ_HEAD(ring)  I915_READ(RING_HEAD((ring)->mmio_base))
+#define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD((ring)->mmio_base), val)
 
-#define I915_READ_HEAD(ring)  I915_RING_READ(RING_HEAD((ring)->mmio_base))
-#define I915_WRITE_HEAD(ring, val) I915_RING_WRITE(RING_HEAD((ring)->mmio_base), val)
+#define I915_READ_CTL(ring) I915_READ(RING_CTL((ring)->mmio_base))
+#define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL((ring)->mmio_base), val)
 
-#define I915_READ_CTL(ring) I915_RING_READ(RING_CTL((ring)->mmio_base))
-#define I915_WRITE_CTL(ring, val) I915_RING_WRITE(RING_CTL((ring)->mmio_base), val)
+#define I915_READ_IMR(ring) I915_READ(RING_IMR((ring)->mmio_base))
+#define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val)
 
-#define I915_READ_IMR(ring) I915_RING_READ(RING_IMR((ring)->mmio_base))
-#define I915_WRITE_IMR(ring, val) I915_RING_WRITE(RING_IMR((ring)->mmio_base), val)
-
-#define I915_READ_NOPID(ring) I915_RING_READ(RING_NOPID((ring)->mmio_base))
-#define I915_READ_SYNC_0(ring) I915_RING_READ(RING_SYNC_0((ring)->mmio_base))
-#define I915_READ_SYNC_1(ring) I915_RING_READ(RING_SYNC_1((ring)->mmio_base))
+#define I915_READ_NOPID(ring) I915_READ(RING_NOPID((ring)->mmio_base))
+#define I915_READ_SYNC_0(ring) I915_READ(RING_SYNC_0((ring)->mmio_base))
+#define I915_READ_SYNC_1(ring) I915_READ(RING_SYNC_1((ring)->mmio_base))
 
 struct intel_ring_buffer {
 	const char *name;
@@ -164,7 +161,13 @@ intel_read_status_page(struct intel_ring_buffer *ring,
 #define I915_BREADCRUMB_INDEX 0x21
 
 void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring);
+
 int __must_check intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n);
+static inline int intel_wait_ring_idle(struct intel_ring_buffer *ring)
+{
+	return intel_wait_ring_buffer(ring, ring->space - 8);
+}
+
 int __must_check intel_ring_begin(struct intel_ring_buffer *ring, int n);
 
 static inline void intel_ring_emit(struct intel_ring_buffer *ring,
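
The new intel_wait_ring_idle() inline expresses "idle" in terms of free space. In ring buffers of this style, 8 bytes are kept permanently unused so that head == tail can only ever mean empty, which makes size - 8 the largest free-space value a ring can report. A worked sketch of that arithmetic, with made-up sizes:

    /* Sketch of the ring-space bookkeeping behind an "idle" wait: the
     * reserved 8 bytes let a full ring be told apart from an empty one.
     */
    #include <stdio.h>

    static int ring_space(int head, int tail, int size)
    {
        int space = head - (tail + 8);
        if (space < 0)
            space += size;
        return space;
    }

    int main(void)
    {
        int size = 4096;

        /* empty ring: head has caught up with tail -> size - 8 free */
        printf("empty: %d free of %d\n", ring_space(128, 128, size), size);
        /* busy ring: 256 bytes of commands still outstanding */
        printf("busy:  %d free of %d\n", ring_space(128, 384, size), size);
        return 0;
    }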
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 4324f33212d6..754086f83941 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -2544,21 +2544,19 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
 	if (!intel_sdvo)
 		return false;
 
+	intel_sdvo->sdvo_reg = sdvo_reg;
+	intel_sdvo->slave_addr = intel_sdvo_get_slave_addr(dev, sdvo_reg) >> 1;
+	intel_sdvo_select_i2c_bus(dev_priv, intel_sdvo, sdvo_reg);
 	if (!intel_sdvo_init_ddc_proxy(intel_sdvo, dev)) {
 		kfree(intel_sdvo);
 		return false;
 	}
 
-	intel_sdvo->sdvo_reg = sdvo_reg;
-
+	/* encoder type will be decided later */
 	intel_encoder = &intel_sdvo->base;
 	intel_encoder->type = INTEL_OUTPUT_SDVO;
-	/* encoder type will be decided later */
 	drm_encoder_init(dev, &intel_encoder->base, &intel_sdvo_enc_funcs, 0);
 
-	intel_sdvo->slave_addr = intel_sdvo_get_slave_addr(dev, sdvo_reg) >> 1;
-	intel_sdvo_select_i2c_bus(dev_priv, intel_sdvo, sdvo_reg);
-
 	/* Read the regs to test if we can talk to the device */
 	for (i = 0; i < 0x40; i++) {
 		u8 byte;
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 6b22c1dcc015..113e4e7264cd 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1361,15 +1361,14 @@ intel_tv_detect(struct drm_connector *connector, bool force)
 	if (intel_tv->base.base.crtc && intel_tv->base.base.crtc->enabled) {
 		type = intel_tv_detect_type(intel_tv, connector);
 	} else if (force) {
-		struct drm_crtc *crtc;
-		int dpms_mode;
+		struct intel_load_detect_pipe tmp;
 
-		crtc = intel_get_load_detect_pipe(&intel_tv->base, connector,
-						  &mode, &dpms_mode);
-		if (crtc) {
+		if (intel_get_load_detect_pipe(&intel_tv->base, connector,
+					       &mode, &tmp)) {
 			type = intel_tv_detect_type(intel_tv, connector);
-			intel_release_load_detect_pipe(&intel_tv->base, connector,
-						       dpms_mode);
+			intel_release_load_detect_pipe(&intel_tv->base,
+						       connector,
+						       &tmp);
 		} else
 			return connector_status_unknown;
 	} else
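
The load-detect rework above replaces two loose out-values (the borrowed crtc and the saved dpms mode) with a single intel_load_detect_pipe context passed to both the acquire and release calls. An illustrative stand-alone sketch of that pattern (types invented, not the real i915 definitions):

/* Invented types: the point is the symmetric acquire/release pair
 * sharing one context object instead of separate out-parameters. */
#include <stdbool.h>
#include <stdio.h>

struct load_detect_ctx {
	int crtc_id;   /* which pipe was borrowed */
	int old_dpms;  /* state to restore on release */
};

static bool detect_acquire(struct load_detect_ctx *ctx)
{
	ctx->crtc_id = 0;
	ctx->old_dpms = 3;	/* pretend the pipe was off */
	return true;		/* a pipe was found and configured */
}

static void detect_release(const struct load_detect_ctx *ctx)
{
	printf("restore crtc %d to dpms %d\n", ctx->crtc_id, ctx->old_dpms);
}

int main(void)
{
	struct load_detect_ctx tmp;

	if (detect_acquire(&tmp))
		detect_release(&tmp);	/* same ctx closes the borrow */
	return 0;
}
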
diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig
index de70959b9ed5..ca1639918f57 100644
--- a/drivers/gpu/drm/nouveau/Kconfig
+++ b/drivers/gpu/drm/nouveau/Kconfig
@@ -11,6 +11,8 @@ config DRM_NOUVEAU
 	select FRAMEBUFFER_CONSOLE if !EXPERT
 	select FB_BACKLIGHT if DRM_NOUVEAU_BACKLIGHT
 	select ACPI_VIDEO if ACPI && X86 && BACKLIGHT_CLASS_DEVICE && VIDEO_OUTPUT_CONTROL && INPUT
+	select ACPI_WMI if ACPI
+	select MXM_WMI if ACPI
 	help
 	  Choose this option for open-source nVidia support.
 
diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile
index e12c97fd8db8..0583677e4581 100644
--- a/drivers/gpu/drm/nouveau/Makefile
+++ b/drivers/gpu/drm/nouveau/Makefile
@@ -20,6 +20,8 @@ nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \
              nv40_graph.o nv50_graph.o nvc0_graph.o \
              nv40_grctx.o nv50_grctx.o nvc0_grctx.o \
              nv84_crypt.o \
+             nva3_copy.o nvc0_copy.o \
+             nv40_mpeg.o nv50_mpeg.o \
              nv04_instmem.o nv50_instmem.o nvc0_instmem.o \
              nv50_evo.o nv50_crtc.o nv50_dac.o nv50_sor.o \
              nv50_cursor.o nv50_display.o \
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index a54238058dc5..f0d459bb46e4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -4,6 +4,8 @@
 #include <acpi/acpi_drivers.h>
 #include <acpi/acpi_bus.h>
 #include <acpi/video.h>
+#include <acpi/acpi.h>
+#include <linux/mxm-wmi.h>
 
 #include "drmP.h"
 #include "drm.h"
@@ -35,15 +37,71 @@
 
 static struct nouveau_dsm_priv {
 	bool dsm_detected;
+	bool optimus_detected;
 	acpi_handle dhandle;
 	acpi_handle rom_handle;
 } nouveau_dsm_priv;
 
+#define NOUVEAU_DSM_HAS_MUX 0x1
+#define NOUVEAU_DSM_HAS_OPT 0x2
+
 static const char nouveau_dsm_muid[] = {
 	0xA0, 0xA0, 0x95, 0x9D, 0x60, 0x00, 0x48, 0x4D,
 	0xB3, 0x4D, 0x7E, 0x5F, 0xEA, 0x12, 0x9F, 0xD4,
 };
 
+static const char nouveau_op_dsm_muid[] = {
+	0xF8, 0xD8, 0x86, 0xA4, 0xDA, 0x0B, 0x1B, 0x47,
+	0xA7, 0x2B, 0x60, 0x42, 0xA6, 0xB5, 0xBE, 0xE0,
+};
+
+static int nouveau_optimus_dsm(acpi_handle handle, int func, int arg, uint32_t *result)
+{
+	struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
+	struct acpi_object_list input;
+	union acpi_object params[4];
+	union acpi_object *obj;
+	int err;
+
+	input.count = 4;
+	input.pointer = params;
+	params[0].type = ACPI_TYPE_BUFFER;
+	params[0].buffer.length = sizeof(nouveau_op_dsm_muid);
+	params[0].buffer.pointer = (char *)nouveau_op_dsm_muid;
+	params[1].type = ACPI_TYPE_INTEGER;
+	params[1].integer.value = 0x00000100;
+	params[2].type = ACPI_TYPE_INTEGER;
+	params[2].integer.value = func;
+	params[3].type = ACPI_TYPE_BUFFER;
+	params[3].buffer.length = 0;
+
+	err = acpi_evaluate_object(handle, "_DSM", &input, &output);
+	if (err) {
+		printk(KERN_INFO "failed to evaluate _DSM: %d\n", err);
+		return err;
+	}
+
+	obj = (union acpi_object *)output.pointer;
+
+	if (obj->type == ACPI_TYPE_INTEGER)
+		if (obj->integer.value == 0x80000002) {
+			return -ENODEV;
+		}
+
+	if (obj->type == ACPI_TYPE_BUFFER) {
+		if (obj->buffer.length == 4 && result) {
+			*result = 0;
+			*result |= obj->buffer.pointer[0];
+			*result |= (obj->buffer.pointer[1] << 8);
+			*result |= (obj->buffer.pointer[2] << 16);
+			*result |= (obj->buffer.pointer[3] << 24);
+		}
+	}
+
+	kfree(output.pointer);
+	return 0;
+}
+
 static int nouveau_dsm(acpi_handle handle, int func, int arg, uint32_t *result)
 {
 	struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
@@ -92,6 +150,8 @@ static int nouveau_dsm(acpi_handle handle, int func, int arg, uint32_t *result)
 
 static int nouveau_dsm_switch_mux(acpi_handle handle, int mux_id)
 {
+	mxm_wmi_call_mxmx(mux_id == NOUVEAU_DSM_LED_STAMINA ? MXM_MXDS_ADAPTER_IGD : MXM_MXDS_ADAPTER_0);
+	mxm_wmi_call_mxds(mux_id == NOUVEAU_DSM_LED_STAMINA ? MXM_MXDS_ADAPTER_IGD : MXM_MXDS_ADAPTER_0);
 	return nouveau_dsm(handle, NOUVEAU_DSM_LED, mux_id, NULL);
 }
 
97 157
@@ -148,11 +208,11 @@ static struct vga_switcheroo_handler nouveau_dsm_handler = {
 	.get_client_id = nouveau_dsm_get_client_id,
 };
 
-static bool nouveau_dsm_pci_probe(struct pci_dev *pdev)
+static int nouveau_dsm_pci_probe(struct pci_dev *pdev)
 {
 	acpi_handle dhandle, nvidia_handle;
 	acpi_status status;
-	int ret;
+	int ret, retval = 0;
 	uint32_t result;
 
 	dhandle = DEVICE_ACPI_HANDLE(&pdev->dev);
@@ -166,11 +226,17 @@ static bool nouveau_dsm_pci_probe(struct pci_dev *pdev)
 
 	ret = nouveau_dsm(dhandle, NOUVEAU_DSM_SUPPORTED,
 			  NOUVEAU_DSM_SUPPORTED_FUNCTIONS, &result);
-	if (ret < 0)
-		return false;
+	if (ret == 0)
+		retval |= NOUVEAU_DSM_HAS_MUX;
 
-	nouveau_dsm_priv.dhandle = dhandle;
-	return true;
+	ret = nouveau_optimus_dsm(dhandle, 0, 0, &result);
+	if (ret == 0)
+		retval |= NOUVEAU_DSM_HAS_OPT;
+
+	if (retval)
+		nouveau_dsm_priv.dhandle = dhandle;
+
+	return retval;
 }
 
 static bool nouveau_dsm_detect(void)
@@ -179,22 +245,42 @@ static bool nouveau_dsm_detect(void)
 	struct acpi_buffer buffer = {sizeof(acpi_method_name), acpi_method_name};
 	struct pci_dev *pdev = NULL;
 	int has_dsm = 0;
+	int has_optimus;
 	int vga_count = 0;
+	bool guid_valid;
+	int retval;
+	bool ret = false;
+
+	/* lookup the MXM GUID */
+	guid_valid = mxm_wmi_supported();
 
+	if (guid_valid)
+		printk("MXM: GUID detected in BIOS\n");
+
+	/* now do DSM detection */
 	while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
 		vga_count++;
 
-		has_dsm |= (nouveau_dsm_pci_probe(pdev) == true);
+		retval = nouveau_dsm_pci_probe(pdev);
+		printk("ret val is %d\n", retval);
+		if (retval & NOUVEAU_DSM_HAS_MUX)
+			has_dsm |= 1;
+		if (retval & NOUVEAU_DSM_HAS_OPT)
+			has_optimus = 1;
 	}
 
-	if (vga_count == 2 && has_dsm) {
+	if (vga_count == 2 && has_dsm && guid_valid) {
 		acpi_get_name(nouveau_dsm_priv.dhandle, ACPI_FULL_PATHNAME, &buffer);
 		printk(KERN_INFO "VGA switcheroo: detected DSM switching method %s handle\n",
 		       acpi_method_name);
 		nouveau_dsm_priv.dsm_detected = true;
-		return true;
+		ret = true;
 	}
-	return false;
+
+	if (has_optimus == 1)
+		nouveau_dsm_priv.optimus_detected = true;
+
+	return ret;
 }
 
 void nouveau_register_dsm_handler(void)
@@ -247,7 +333,7 @@ bool nouveau_acpi_rom_supported(struct pci_dev *pdev)
 	acpi_status status;
 	acpi_handle dhandle, rom_handle;
 
-	if (!nouveau_dsm_priv.dsm_detected)
+	if (!nouveau_dsm_priv.dsm_detected && !nouveau_dsm_priv.optimus_detected)
 		return false;
 
 	dhandle = DEVICE_ACPI_HANDLE(&pdev->dev);
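
nouveau_optimus_dsm() above folds the 4-byte ACPI buffer result into a little-endian u32 and treats the integer 0x80000002 as "function unsupported". The byte assembly in isolation, as a hedged stand-alone example:

/* Stand-alone version of the buffer decode; the 0x80000002 value is
 * the "function not supported" code checked in the hunk above. */
#include <assert.h>
#include <stdint.h>

static uint32_t dsm_buf_to_u32(const uint8_t *b)
{
	return (uint32_t)b[0] |
	       ((uint32_t)b[1] << 8) |
	       ((uint32_t)b[2] << 16) |
	       ((uint32_t)b[3] << 24);
}

int main(void)
{
	const uint8_t buf[4] = { 0x02, 0x00, 0x00, 0x80 };

	assert(dsm_buf_to_u32(buf) == 0x80000002);
	return 0;
}
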
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 90aef64b76f2..729d5fd7c88d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -5049,11 +5049,7 @@ int get_pll_limits(struct drm_device *dev, uint32_t limit_match, struct pll_lims
 		pll_lim->vco1.max_n = record[11];
 		pll_lim->min_p = record[12];
 		pll_lim->max_p = record[13];
-		/* where did this go to?? */
-		if ((entry[0] & 0xf0) == 0x80)
-			pll_lim->refclk = 27000;
-		else
-			pll_lim->refclk = 100000;
+		pll_lim->refclk = ROM16(entry[9]) * 1000;
 	}
 
 	/*
@@ -6035,6 +6031,7 @@ parse_dcb_connector_table(struct nvbios *bios)
 	case DCB_CONNECTOR_DVI_I:
 	case DCB_CONNECTOR_DVI_D:
 	case DCB_CONNECTOR_LVDS:
+	case DCB_CONNECTOR_LVDS_SPWG:
 	case DCB_CONNECTOR_DP:
 	case DCB_CONNECTOR_eDP:
 	case DCB_CONNECTOR_HDMI_0:
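
The refclk fix reads the reference clock straight from the PLL-limits record with ROM16(), nouveau's unaligned little-endian 16-bit BIOS read, then scales it to kHz. A stand-alone equivalent (the table bytes are invented for the example):

/* rom16() mimics nouveau's ROM16(): an unaligned little-endian u16
 * read from the BIOS image.  Entry bytes are invented. */
#include <assert.h>
#include <stdint.h>

static uint16_t rom16(const uint8_t *p)
{
	return (uint16_t)(p[0] | (p[1] << 8));
}

int main(void)
{
	/* entry[9..10] = 0x1b 0x00 -> 27, scaled to 27000 kHz */
	const uint8_t entry[16] = { [9] = 0x1b, [10] = 0x00 };

	assert(rom16(&entry[9]) * 1000 == 27000);
	return 0;
}
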
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.h b/drivers/gpu/drm/nouveau/nouveau_bios.h
index 8a54fa7edf5c..050c314119df 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.h
@@ -82,6 +82,7 @@ enum dcb_connector_type {
 	DCB_CONNECTOR_DVI_I = 0x30,
 	DCB_CONNECTOR_DVI_D = 0x31,
 	DCB_CONNECTOR_LVDS = 0x40,
+	DCB_CONNECTOR_LVDS_SPWG = 0x41,
 	DCB_CONNECTOR_DP = 0x46,
 	DCB_CONNECTOR_eDP = 0x47,
 	DCB_CONNECTOR_HDMI_0 = 0x60,
diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c
index 4cea35c57d15..a7583a8ddb01 100644
--- a/drivers/gpu/drm/nouveau/nouveau_channel.c
+++ b/drivers/gpu/drm/nouveau/nouveau_channel.c
@@ -268,9 +268,8 @@ nouveau_channel_put_unlocked(struct nouveau_channel **pchan)
 	struct drm_device *dev = chan->dev;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
-	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
-	struct nouveau_crypt_engine *pcrypt = &dev_priv->engine.crypt;
 	unsigned long flags;
+	int i;
 
 	/* decrement the refcount, and we're done if there's still refs */
 	if (likely(!atomic_dec_and_test(&chan->users))) {
@@ -294,19 +293,12 @@ nouveau_channel_put_unlocked(struct nouveau_channel **pchan)
 	/* boot it off the hardware */
 	pfifo->reassign(dev, false);
 
-	/* We want to give pgraph a chance to idle and get rid of all
-	 * potential errors. We need to do this without the context
-	 * switch lock held, otherwise the irq handler is unable to
-	 * process them.
-	 */
-	if (pgraph->channel(dev) == chan)
-		nouveau_wait_for_idle(dev);
-
 	/* destroy the engine specific contexts */
 	pfifo->destroy_context(chan);
-	pgraph->destroy_context(chan);
-	if (pcrypt->destroy_context)
-		pcrypt->destroy_context(chan);
+	for (i = 0; i < NVOBJ_ENGINE_NR; i++) {
+		if (chan->engctx[i])
+			dev_priv->eng[i]->context_del(chan, i);
+	}
 
 	pfifo->reassign(dev, true);
 
@@ -414,7 +406,7 @@ nouveau_ioctl_fifo_alloc(struct drm_device *dev, void *data,
 	struct nouveau_channel *chan;
 	int ret;
 
-	if (dev_priv->engine.graph.accel_blocked)
+	if (!dev_priv->eng[NVOBJ_ENGINE_GR])
 		return -ENODEV;
 
 	if (init->fb_ctxdma_handle == ~0 || init->tt_ctxdma_handle == ~0)
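
The channel teardown now walks a fixed-size table of optional engine implementations instead of naming pgraph and pcrypt explicitly; a context is destroyed only where one was created. Sketch of that shape with invented types:

/* Invented types; mirrors the loop shape only.  A slot may be empty
 * (no engine), so context_del() runs only where a context exists,
 * which implies the matching engine slot is populated. */
#define ENGINE_NR 16

struct engine {
	void (*context_del)(void *chan, int engine);
};

void channel_ctx_fini(struct engine *eng[ENGINE_NR],
		      void *engctx[ENGINE_NR], void *chan)
{
	int i;

	for (i = 0; i < ENGINE_NR; i++) {
		if (engctx[i])			/* context was created */
			eng[i]->context_del(chan, i);
	}
}
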
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 7ae151109a66..1595d0b6e815 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -442,7 +442,7 @@ nouveau_connector_set_property(struct drm_connector *connector,
 	}
 
 	/* LVDS always needs gpu scaling */
-	if (nv_connector->dcb->type == DCB_CONNECTOR_LVDS &&
+	if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS &&
 	    value == DRM_MODE_SCALE_NONE)
 		return -EINVAL;
 
@@ -650,6 +650,7 @@ nouveau_connector_get_modes(struct drm_connector *connector)
 		ret = get_slave_funcs(encoder)->get_modes(encoder, connector);
 
 	if (nv_connector->dcb->type == DCB_CONNECTOR_LVDS ||
+	    nv_connector->dcb->type == DCB_CONNECTOR_LVDS_SPWG ||
 	    nv_connector->dcb->type == DCB_CONNECTOR_eDP)
 		ret += nouveau_connector_scaler_modes_add(connector);
 
@@ -810,6 +811,7 @@ nouveau_connector_create(struct drm_device *dev, int index)
 		type = DRM_MODE_CONNECTOR_HDMIA;
 		break;
 	case DCB_CONNECTOR_LVDS:
+	case DCB_CONNECTOR_LVDS_SPWG:
 		type = DRM_MODE_CONNECTOR_LVDS;
 		funcs = &nouveau_connector_funcs_lvds;
 		break;
@@ -838,7 +840,7 @@ nouveau_connector_create(struct drm_device *dev, int index)
 	drm_connector_helper_add(connector, &nouveau_connector_helper_funcs);
 
 	/* Check if we need dithering enabled */
-	if (dcb->type == DCB_CONNECTOR_LVDS) {
+	if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
 		bool dummy, is_24bit = false;
 
 		ret = nouveau_bios_parse_lvds_table(dev, 0, &dummy, &is_24bit);
@@ -883,7 +885,7 @@ nouveau_connector_create(struct drm_device *dev, int index)
 			nv_connector->use_dithering ?
 			DRM_MODE_DITHERING_ON : DRM_MODE_DITHERING_OFF);
 
-	if (dcb->type != DCB_CONNECTOR_LVDS) {
+	if (connector->connector_type != DRM_MODE_CONNECTOR_LVDS) {
 		if (dev_priv->card_type >= NV_50)
 			connector->polled = DRM_CONNECTOR_POLL_HPD;
 		else
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 764c15d537ba..eb514ea29377 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -276,7 +276,7 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
 	struct nouveau_fence *fence;
 	int ret;
 
-	if (dev_priv->engine.graph.accel_blocked)
+	if (!dev_priv->channel)
 		return -ENODEV;
 
 	s = kzalloc(sizeof(*s), GFP_KERNEL);
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c
index 155ebdcbf06f..02c6f37d8bd7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.c
@@ -162,11 +162,10 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)
 	struct drm_device *dev = pci_get_drvdata(pdev);
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
-	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
 	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
 	struct nouveau_channel *chan;
 	struct drm_crtc *crtc;
-	int ret, i;
+	int ret, i, e;
 
 	if (pm_state.event == PM_EVENT_PRETHAW)
 		return 0;
@@ -206,12 +205,17 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)
 		nouveau_channel_idle(chan);
 	}
 
-	pgraph->fifo_access(dev, false);
-	nouveau_wait_for_idle(dev);
 	pfifo->reassign(dev, false);
 	pfifo->disable(dev);
 	pfifo->unload_context(dev);
-	pgraph->unload_context(dev);
+
+	for (e = NVOBJ_ENGINE_NR - 1; e >= 0; e--) {
+		if (dev_priv->eng[e]) {
+			ret = dev_priv->eng[e]->fini(dev, e);
+			if (ret)
+				goto out_abort;
+		}
+	}
 
 	ret = pinstmem->suspend(dev);
 	if (ret) {
@@ -242,9 +246,12 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)
 
 out_abort:
 	NV_INFO(dev, "Re-enabling acceleration..\n");
+	for (e = e + 1; e < NVOBJ_ENGINE_NR; e++) {
+		if (dev_priv->eng[e])
+			dev_priv->eng[e]->init(dev, e);
+	}
 	pfifo->enable(dev);
 	pfifo->reassign(dev, true);
-	pgraph->fifo_access(dev, true);
 	return ret;
 }
 
@@ -299,8 +306,10 @@ nouveau_pci_resume(struct pci_dev *pdev)
 	engine->mc.init(dev);
 	engine->timer.init(dev);
 	engine->fb.init(dev);
-	engine->graph.init(dev);
-	engine->crypt.init(dev);
+	for (i = 0; i < NVOBJ_ENGINE_NR; i++) {
+		if (dev_priv->eng[i])
+			dev_priv->eng[i]->init(dev, i);
+	}
 	engine->fifo.init(dev);
 
 	nouveau_irq_postinstall(dev);
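
The suspend path above shuts engines down in reverse order and, if one fini() fails, re-initialises only the engines that were already shut down; that is why the abort loop resumes at e + 1. A minimal sketch of the unwind idiom (types invented):

/* Invented types; demonstrates reverse-order teardown with partial
 * rollback.  On failure at slot e, only slots above e were already
 * finished, so the recovery loop restarts from e + 1. */
#define ENGINE_NR 4

struct eng_ops {
	int  (*fini)(int e);
	void (*init)(int e);
};

int suspend_engines(struct eng_ops *eng[ENGINE_NR])
{
	int e, ret;

	for (e = ENGINE_NR - 1; e >= 0; e--) {
		if (!eng[e])
			continue;
		ret = eng[e]->fini(e);
		if (ret)
			goto out_abort;
	}
	return 0;

out_abort:
	for (e = e + 1; e < ENGINE_NR; e++) {
		if (eng[e])
			eng[e]->init(e);
	}
	return ret;
}
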
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index a76514a209b3..9c56331941e2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -150,13 +150,12 @@ enum nouveau_flags {
 
 #define NVOBJ_ENGINE_SW		0
 #define NVOBJ_ENGINE_GR		1
-#define NVOBJ_ENGINE_PPP	2
-#define NVOBJ_ENGINE_COPY	3
-#define NVOBJ_ENGINE_VP		4
-#define NVOBJ_ENGINE_CRYPT	5
-#define NVOBJ_ENGINE_BSP	6
-#define NVOBJ_ENGINE_DISPLAY	0xcafe0001
-#define NVOBJ_ENGINE_INT	0xdeadbeef
+#define NVOBJ_ENGINE_CRYPT	2
+#define NVOBJ_ENGINE_COPY0	3
+#define NVOBJ_ENGINE_COPY1	4
+#define NVOBJ_ENGINE_MPEG	5
+#define NVOBJ_ENGINE_DISPLAY	15
+#define NVOBJ_ENGINE_NR		16
 
 #define NVOBJ_FLAG_DONT_MAP (1 << 0)
 #define NVOBJ_FLAG_ZERO_ALLOC (1 << 1)
@@ -245,11 +244,8 @@ struct nouveau_channel {
 	struct nouveau_gpuobj *cache;
 	void *fifo_priv;
 
-	/* PGRAPH context */
-	/* XXX may be merge 2 pointers as private data ??? */
-	struct nouveau_gpuobj *ramin_grctx;
-	struct nouveau_gpuobj *crypt_ctx;
-	void *pgraph_ctx;
+	/* Execution engine contexts */
+	void *engctx[NVOBJ_ENGINE_NR];
 
 	/* NV50 VM */
 	struct nouveau_vm *vm;
@@ -298,6 +294,18 @@ struct nouveau_channel {
 	} debugfs;
 };
 
+struct nouveau_exec_engine {
+	void (*destroy)(struct drm_device *, int engine);
+	int (*init)(struct drm_device *, int engine);
+	int (*fini)(struct drm_device *, int engine);
+	int (*context_new)(struct nouveau_channel *, int engine);
+	void (*context_del)(struct nouveau_channel *, int engine);
+	int (*object_new)(struct nouveau_channel *, int engine,
+			  u32 handle, u16 class);
+	void (*set_tile_region)(struct drm_device *dev, int i);
+	void (*tlb_flush)(struct drm_device *, int engine);
+};
+
 struct nouveau_instmem_engine {
 	void *priv;
 
@@ -364,30 +372,6 @@ struct nouveau_fifo_engine {
 	void (*tlb_flush)(struct drm_device *dev);
 };
 
-struct nouveau_pgraph_engine {
-	bool accel_blocked;
-	bool registered;
-	int grctx_size;
-	void *priv;
-
-	/* NV2x/NV3x context table (0x400780) */
-	struct nouveau_gpuobj *ctx_table;
-
-	int (*init)(struct drm_device *);
-	void (*takedown)(struct drm_device *);
-
-	void (*fifo_access)(struct drm_device *, bool);
-
-	struct nouveau_channel *(*channel)(struct drm_device *);
-	int (*create_context)(struct nouveau_channel *);
-	void (*destroy_context)(struct nouveau_channel *);
-	int (*load_context)(struct nouveau_channel *);
-	int (*unload_context)(struct drm_device *);
-	void (*tlb_flush)(struct drm_device *dev);
-
-	void (*set_tile_region)(struct drm_device *dev, int i);
-};
-
 struct nouveau_display_engine {
 	void *priv;
 	int (*early_init)(struct drm_device *);
@@ -426,6 +410,19 @@ struct nouveau_pm_voltage {
 	int nr_level;
 };
 
+struct nouveau_pm_memtiming {
+	int id;
+	u32 reg_100220;
+	u32 reg_100224;
+	u32 reg_100228;
+	u32 reg_10022c;
+	u32 reg_100230;
+	u32 reg_100234;
+	u32 reg_100238;
+	u32 reg_10023c;
+	u32 reg_100240;
+};
+
 #define NOUVEAU_PM_MAX_LEVEL 8
 struct nouveau_pm_level {
 	struct device_attribute dev_attr;
@@ -436,11 +433,13 @@ struct nouveau_pm_level {
 	u32 memory;
 	u32 shader;
 	u32 unk05;
+	u32 unk0a;
 
 	u8 voltage;
 	u8 fanspeed;
 
 	u16 memscript;
+	struct nouveau_pm_memtiming *timing;
 };
 
 struct nouveau_pm_temp_sensor_constants {
@@ -457,17 +456,6 @@ struct nouveau_pm_threshold_temp {
 	s16 fan_boost;
 };
 
-struct nouveau_pm_memtiming {
-	u32 reg_100220;
-	u32 reg_100224;
-	u32 reg_100228;
-	u32 reg_10022c;
-	u32 reg_100230;
-	u32 reg_100234;
-	u32 reg_100238;
-	u32 reg_10023c;
-};
-
 struct nouveau_pm_memtimings {
 	bool supported;
 	struct nouveau_pm_memtiming *timing;
@@ -499,16 +487,6 @@ struct nouveau_pm_engine {
 	int (*temp_get)(struct drm_device *);
 };
 
-struct nouveau_crypt_engine {
-	bool registered;
-
-	int (*init)(struct drm_device *);
-	void (*takedown)(struct drm_device *);
-	int (*create_context)(struct nouveau_channel *);
-	void (*destroy_context)(struct nouveau_channel *);
-	void (*tlb_flush)(struct drm_device *dev);
-};
-
 struct nouveau_vram_engine {
 	int (*init)(struct drm_device *);
 	int (*get)(struct drm_device *, u64, u32 align, u32 size_nc,
@@ -523,12 +501,10 @@ struct nouveau_engine {
 	struct nouveau_mc_engine mc;
 	struct nouveau_timer_engine timer;
 	struct nouveau_fb_engine fb;
-	struct nouveau_pgraph_engine graph;
 	struct nouveau_fifo_engine fifo;
 	struct nouveau_display_engine display;
 	struct nouveau_gpio_engine gpio;
 	struct nouveau_pm_engine pm;
-	struct nouveau_crypt_engine crypt;
 	struct nouveau_vram_engine vram;
 };
 
@@ -637,6 +613,7 @@ struct drm_nouveau_private {
 	enum nouveau_card_type card_type;
 	/* exact chipset, derived from NV_PMC_BOOT_0 */
 	int chipset;
+	int stepping;
 	int flags;
 
 	void __iomem *mmio;
@@ -647,6 +624,7 @@ struct drm_nouveau_private {
 	u32 ramin_base;
 	bool ramin_available;
 	struct drm_mm ramin_heap;
+	struct nouveau_exec_engine *eng[NVOBJ_ENGINE_NR];
 	struct list_head gpuobj_list;
 	struct list_head classes;
 
@@ -745,10 +723,6 @@ struct drm_nouveau_private {
 	uint32_t crtc_owner;
 	uint32_t dac_users[4];
 
-	struct nouveau_suspend_resume {
-		uint32_t *ramin_copy;
-	} susres;
-
 	struct backlight_device *backlight;
 
 	struct {
@@ -757,8 +731,6 @@ struct drm_nouveau_private {
 
 	struct nouveau_fbdev *nfbdev;
 	struct apertures_struct *apertures;
-
-	bool powered_down;
 };
 
 static inline struct drm_nouveau_private *
@@ -883,17 +855,27 @@ extern void nouveau_channel_ref(struct nouveau_channel *chan,
 extern void nouveau_channel_idle(struct nouveau_channel *chan);
 
 /* nouveau_object.c */
-#define NVOBJ_CLASS(d,c,e) do { \
+#define NVOBJ_ENGINE_ADD(d, e, p) do { \
+	struct drm_nouveau_private *dev_priv = (d)->dev_private; \
+	dev_priv->eng[NVOBJ_ENGINE_##e] = (p); \
+} while (0)
+
+#define NVOBJ_ENGINE_DEL(d, e) do { \
+	struct drm_nouveau_private *dev_priv = (d)->dev_private; \
+	dev_priv->eng[NVOBJ_ENGINE_##e] = NULL; \
+} while (0)
+
+#define NVOBJ_CLASS(d, c, e) do { \
 	int ret = nouveau_gpuobj_class_new((d), (c), NVOBJ_ENGINE_##e); \
 	if (ret) \
 		return ret; \
-} while(0)
+} while (0)
 
-#define NVOBJ_MTHD(d,c,m,e) do { \
+#define NVOBJ_MTHD(d, c, m, e) do { \
 	int ret = nouveau_gpuobj_mthd_new((d), (c), (m), (e)); \
 	if (ret) \
 		return ret; \
-} while(0)
+} while (0)
 
 extern int nouveau_gpuobj_early_init(struct drm_device *);
 extern int nouveau_gpuobj_init(struct drm_device *);
@@ -903,7 +885,7 @@ extern void nouveau_gpuobj_resume(struct drm_device *dev);
 extern int nouveau_gpuobj_class_new(struct drm_device *, u32 class, u32 eng);
 extern int nouveau_gpuobj_mthd_new(struct drm_device *, u32 class, u32 mthd,
 				   int (*exec)(struct nouveau_channel *,
-					       u32 class, u32 mthd, u32 data));
+					       u32 class, u32 mthd, u32 data));
 extern int nouveau_gpuobj_mthd_call(struct nouveau_channel *, u32, u32, u32);
 extern int nouveau_gpuobj_mthd_call2(struct drm_device *, int, u32, u32, u32);
 extern int nouveau_gpuobj_channel_init(struct nouveau_channel *,
@@ -1137,81 +1119,50 @@ extern int nvc0_fifo_load_context(struct nouveau_channel *);
 extern int nvc0_fifo_unload_context(struct drm_device *);
 
 /* nv04_graph.c */
-extern int nv04_graph_init(struct drm_device *);
-extern void nv04_graph_takedown(struct drm_device *);
+extern int nv04_graph_create(struct drm_device *);
 extern void nv04_graph_fifo_access(struct drm_device *, bool);
-extern struct nouveau_channel *nv04_graph_channel(struct drm_device *);
-extern int nv04_graph_create_context(struct nouveau_channel *);
-extern void nv04_graph_destroy_context(struct nouveau_channel *);
-extern int nv04_graph_load_context(struct nouveau_channel *);
-extern int nv04_graph_unload_context(struct drm_device *);
+extern int nv04_graph_object_new(struct nouveau_channel *, int, u32, u16);
 extern int nv04_graph_mthd_page_flip(struct nouveau_channel *chan,
 				     u32 class, u32 mthd, u32 data);
 extern struct nouveau_bitfield nv04_graph_nsource[];
 
 /* nv10_graph.c */
-extern int nv10_graph_init(struct drm_device *);
-extern void nv10_graph_takedown(struct drm_device *);
+extern int nv10_graph_create(struct drm_device *);
 extern struct nouveau_channel *nv10_graph_channel(struct drm_device *);
-extern int nv10_graph_create_context(struct nouveau_channel *);
-extern void nv10_graph_destroy_context(struct nouveau_channel *);
-extern int nv10_graph_load_context(struct nouveau_channel *);
-extern int nv10_graph_unload_context(struct drm_device *);
-extern void nv10_graph_set_tile_region(struct drm_device *dev, int i);
 extern struct nouveau_bitfield nv10_graph_intr[];
 extern struct nouveau_bitfield nv10_graph_nstatus[];
 
 /* nv20_graph.c */
-extern int nv20_graph_create_context(struct nouveau_channel *);
-extern void nv20_graph_destroy_context(struct nouveau_channel *);
-extern int nv20_graph_load_context(struct nouveau_channel *);
-extern int nv20_graph_unload_context(struct drm_device *);
-extern int nv20_graph_init(struct drm_device *);
-extern void nv20_graph_takedown(struct drm_device *);
-extern int nv30_graph_init(struct drm_device *);
-extern void nv20_graph_set_tile_region(struct drm_device *dev, int i);
+extern int nv20_graph_create(struct drm_device *);
 
 /* nv40_graph.c */
-extern int nv40_graph_init(struct drm_device *);
-extern void nv40_graph_takedown(struct drm_device *);
-extern struct nouveau_channel *nv40_graph_channel(struct drm_device *);
-extern int nv40_graph_create_context(struct nouveau_channel *);
-extern void nv40_graph_destroy_context(struct nouveau_channel *);
-extern int nv40_graph_load_context(struct nouveau_channel *);
-extern int nv40_graph_unload_context(struct drm_device *);
+extern int nv40_graph_create(struct drm_device *);
 extern void nv40_grctx_init(struct nouveau_grctx *);
-extern void nv40_graph_set_tile_region(struct drm_device *dev, int i);
 
 /* nv50_graph.c */
-extern int nv50_graph_init(struct drm_device *);
-extern void nv50_graph_takedown(struct drm_device *);
-extern void nv50_graph_fifo_access(struct drm_device *, bool);
-extern struct nouveau_channel *nv50_graph_channel(struct drm_device *);
-extern int nv50_graph_create_context(struct nouveau_channel *);
-extern void nv50_graph_destroy_context(struct nouveau_channel *);
-extern int nv50_graph_load_context(struct nouveau_channel *);
-extern int nv50_graph_unload_context(struct drm_device *);
+extern int nv50_graph_create(struct drm_device *);
 extern int nv50_grctx_init(struct nouveau_grctx *);
-extern void nv50_graph_tlb_flush(struct drm_device *dev);
-extern void nv84_graph_tlb_flush(struct drm_device *dev);
 extern struct nouveau_enum nv50_data_error_names[];
+extern int nv50_graph_isr_chid(struct drm_device *dev, u64 inst);
 
 /* nvc0_graph.c */
-extern int nvc0_graph_init(struct drm_device *);
-extern void nvc0_graph_takedown(struct drm_device *);
-extern void nvc0_graph_fifo_access(struct drm_device *, bool);
-extern struct nouveau_channel *nvc0_graph_channel(struct drm_device *);
-extern int nvc0_graph_create_context(struct nouveau_channel *);
-extern void nvc0_graph_destroy_context(struct nouveau_channel *);
-extern int nvc0_graph_load_context(struct nouveau_channel *);
-extern int nvc0_graph_unload_context(struct drm_device *);
+extern int nvc0_graph_create(struct drm_device *);
+extern int nvc0_graph_isr_chid(struct drm_device *dev, u64 inst);
 
 /* nv84_crypt.c */
-extern int nv84_crypt_init(struct drm_device *dev);
-extern void nv84_crypt_fini(struct drm_device *dev);
-extern int nv84_crypt_create_context(struct nouveau_channel *);
-extern void nv84_crypt_destroy_context(struct nouveau_channel *);
-extern void nv84_crypt_tlb_flush(struct drm_device *dev);
+extern int nv84_crypt_create(struct drm_device *);
+
+/* nva3_copy.c */
+extern int nva3_copy_create(struct drm_device *dev);
+
+/* nvc0_copy.c */
+extern int nvc0_copy_create(struct drm_device *dev, int engine);
+
+/* nv40_mpeg.c */
+extern int nv40_mpeg_create(struct drm_device *dev);
+
+/* nv50_mpeg.c */
+extern int nv50_mpeg_create(struct drm_device *dev);
 
 /* nv04_instmem.c */
 extern int nv04_instmem_init(struct drm_device *);
@@ -1402,8 +1353,8 @@ bool nv50_gpio_irq_enable(struct drm_device *, enum dcb_gpio_tag, bool on);
 /* nv50_calc. */
 int nv50_calc_pll(struct drm_device *, struct pll_lims *, int clk,
 		  int *N1, int *M1, int *N2, int *M2, int *P);
-int nv50_calc_pll2(struct drm_device *, struct pll_lims *,
-		   int clk, int *N, int *fN, int *M, int *P);
+int nva3_calc_pll(struct drm_device *, struct pll_lims *,
+		  int clk, int *N, int *fN, int *M, int *P);
 
 #ifndef ioread32_native
 #ifdef __BIG_ENDIAN
@@ -1579,6 +1530,13 @@ nv_match_device(struct drm_device *dev, unsigned device,
 	       dev->pdev->subsystem_device == sub_device;
 }
 
+static inline void *
+nv_engine(struct drm_device *dev, int engine)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	return (void *)dev_priv->eng[engine];
+}
+
 /* returns 1 if device is one of the nv4x using the 0x4497 object class,
  * helpful to determine a number of other hardware features
  */
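
Taken together, the nouveau_drv.h changes replace the per-engine structs with one nouveau_exec_engine vtable plus an eng[] slot array, registered and removed via NVOBJ_ENGINE_ADD/DEL. A stand-alone sketch of how a backend would plug into such a table (a hypothetical "demo" engine; not the driver's actual registration code):

/* Hypothetical "demo" engine showing the vtable-in-a-slot-array
 * idea; field names follow nouveau_exec_engine above, everything
 * else is invented. */
#include <stddef.h>

#define ENGINE_GR 1
#define ENGINE_NR 16

struct exec_engine {
	int  (*init)(int engine);
	int  (*fini)(int engine);
	int  (*context_new)(void *chan, int engine);
	void (*context_del)(void *chan, int engine);
};

static struct exec_engine *eng[ENGINE_NR];

static int  demo_init(int e)             { (void)e; return 0; }
static int  demo_fini(int e)             { (void)e; return 0; }
static int  demo_ctx_new(void *c, int e) { (void)c; (void)e; return 0; }
static void demo_ctx_del(void *c, int e) { (void)c; (void)e; }

static struct exec_engine demo_engine = {
	.init        = demo_init,
	.fini        = demo_fini,
	.context_new = demo_ctx_new,
	.context_del = demo_ctx_del,
};

int main(void)
{
	eng[ENGINE_GR] = &demo_engine;		/* NVOBJ_ENGINE_ADD */
	eng[ENGINE_GR]->init(ENGINE_GR);
	eng[ENGINE_GR] = NULL;			/* NVOBJ_ENGINE_DEL */
	return 0;
}
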
diff --git a/drivers/gpu/drm/nouveau/nouveau_grctx.h b/drivers/gpu/drm/nouveau/nouveau_grctx.h
index 4a8ad1307fa4..86c2e374e938 100644
--- a/drivers/gpu/drm/nouveau/nouveau_grctx.h
+++ b/drivers/gpu/drm/nouveau/nouveau_grctx.h
@@ -87,10 +87,10 @@ _cp_bra(struct nouveau_grctx *ctx, u32 mod, int flag, int state, int name)
 	cp_out(ctx, CP_BRA | (mod << 18) | ip | flag |
 		    (state ? 0 : CP_BRA_IF_CLEAR));
 }
-#define cp_bra(c,f,s,n) _cp_bra((c), 0, CP_FLAG_##f, CP_FLAG_##f##_##s, n)
+#define cp_bra(c, f, s, n) _cp_bra((c), 0, CP_FLAG_##f, CP_FLAG_##f##_##s, n)
 #ifdef CP_BRA_MOD
-#define cp_cal(c,f,s,n) _cp_bra((c), 1, CP_FLAG_##f, CP_FLAG_##f##_##s, n)
-#define cp_ret(c,f,s) _cp_bra((c), 2, CP_FLAG_##f, CP_FLAG_##f##_##s, 0)
+#define cp_cal(c, f, s, n) _cp_bra((c), 1, CP_FLAG_##f, CP_FLAG_##f##_##s, n)
+#define cp_ret(c, f, s) _cp_bra((c), 2, CP_FLAG_##f, CP_FLAG_##f##_##s, 0)
 #endif
 
 static inline void
@@ -98,14 +98,14 @@ _cp_wait(struct nouveau_grctx *ctx, int flag, int state)
 {
 	cp_out(ctx, CP_WAIT | flag | (state ? CP_WAIT_SET : 0));
 }
-#define cp_wait(c,f,s) _cp_wait((c), CP_FLAG_##f, CP_FLAG_##f##_##s)
+#define cp_wait(c, f, s) _cp_wait((c), CP_FLAG_##f, CP_FLAG_##f##_##s)
 
 static inline void
 _cp_set(struct nouveau_grctx *ctx, int flag, int state)
 {
 	cp_out(ctx, CP_SET | flag | (state ? CP_SET_1 : 0));
 }
-#define cp_set(c,f,s) _cp_set((c), CP_FLAG_##f, CP_FLAG_##f##_##s)
+#define cp_set(c, f, s) _cp_set((c), CP_FLAG_##f, CP_FLAG_##f##_##s)
 
 static inline void
 cp_pos(struct nouveau_grctx *ctx, int offset)
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index c3e953b08992..2960f583dc38 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -51,8 +51,7 @@ nv10_mem_update_tile_region(struct drm_device *dev,
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
 	struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
-	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
-	int i = tile - dev_priv->tile.reg;
+	int i = tile - dev_priv->tile.reg, j;
 	unsigned long save;
 
 	nouveau_fence_unref(&tile->fence);
@@ -70,7 +69,10 @@ nv10_mem_update_tile_region(struct drm_device *dev,
 	nouveau_wait_for_idle(dev);
 
 	pfb->set_tile_region(dev, i);
-	pgraph->set_tile_region(dev, i);
+	for (j = 0; j < NVOBJ_ENGINE_NR; j++) {
+		if (dev_priv->eng[j] && dev_priv->eng[j]->set_tile_region)
+			dev_priv->eng[j]->set_tile_region(dev, i);
+	}
 
 	pfifo->cache_pull(dev, true);
 	pfifo->reassign(dev, true);
@@ -595,10 +597,10 @@ nouveau_mem_timing_init(struct drm_device *dev)
 	if (!memtimings->timing)
 		return;
 
-	/* Get "some number" from the timing reg for NV_40
+	/* Get "some number" from the timing reg for NV_40 and NV_50
 	 * Used in calculations later */
-	if(dev_priv->card_type == NV_40) {
-		magic_number = (nv_rd32(dev,0x100228) & 0x0f000000) >> 24;
+	if (dev_priv->card_type >= NV_40 && dev_priv->chipset < 0x98) {
+		magic_number = (nv_rd32(dev, 0x100228) & 0x0f000000) >> 24;
 	}
 
 	entry = mem + mem[1];
@@ -641,51 +643,68 @@ nouveau_mem_timing_init(struct drm_device *dev)
 		/* XXX: I don't trust the -1's and +1's... they must come
 		 * from somewhere! */
 		timing->reg_100224 = (tUNK_0 + tUNK_19 + 1 + magic_number) << 24 |
-				      tUNK_18 << 16 |
+				      max(tUNK_18, (u8) 1) << 16 |
 				      (tUNK_1 + tUNK_19 + 1 + magic_number) << 8;
-		if(dev_priv->chipset == 0xa8) {
+		if (dev_priv->chipset == 0xa8) {
 			timing->reg_100224 |= (tUNK_2 - 1);
 		} else {
 			timing->reg_100224 |= (tUNK_2 + 2 - magic_number);
 		}
 
 		timing->reg_100228 = (tUNK_12 << 16 | tUNK_11 << 8 | tUNK_10);
-		if(dev_priv->chipset >= 0xa3 && dev_priv->chipset < 0xaa) {
+		if (dev_priv->chipset >= 0xa3 && dev_priv->chipset < 0xaa)
 			timing->reg_100228 |= (tUNK_19 - 1) << 24;
-		}
+		else
+			timing->reg_100228 |= magic_number << 24;
 
-		if(dev_priv->card_type == NV_40) {
+		if (dev_priv->card_type == NV_40) {
 			/* NV40: don't know what the rest of the regs are..
 			 * And don't need to know either */
-			timing->reg_100228 |= 0x20200000 | magic_number << 24;
-		} else if(dev_priv->card_type >= NV_50) {
-			/* XXX: reg_10022c */
-			timing->reg_10022c = tUNK_2 - 1;
+			timing->reg_100228 |= 0x20200000;
+		} else if (dev_priv->card_type >= NV_50) {
+			if (dev_priv->chipset < 0x98 ||
+			    (dev_priv->chipset == 0x98 &&
+			     dev_priv->stepping <= 0xa1)) {
+				timing->reg_10022c = (0x14 + tUNK_2) << 24 |
+						     0x16 << 16 |
+						     (tUNK_2 - 1) << 8 |
+						     (tUNK_2 - 1);
+			} else {
+				/* XXX: reg_10022c for recentish cards */
+				timing->reg_10022c = tUNK_2 - 1;
+			}
 
 			timing->reg_100230 = (tUNK_20 << 24 | tUNK_21 << 16 |
 					      tUNK_13 << 8 | tUNK_13);
 
 			timing->reg_100234 = (tRAS << 24 | tRC);
-			timing->reg_100234 += max(tUNK_10,tUNK_11) << 16;
+			timing->reg_100234 += max(tUNK_10, tUNK_11) << 16;
 
-			if(dev_priv->chipset < 0xa3) {
+			if (dev_priv->chipset < 0x98 ||
+			    (dev_priv->chipset == 0x98 &&
+			     dev_priv->stepping <= 0xa1)) {
 				timing->reg_100234 |= (tUNK_2 + 2) << 8;
 			} else {
 				/* XXX: +6? */
 				timing->reg_100234 |= (tUNK_19 + 6) << 8;
 			}
 
-			/* XXX; reg_100238, reg_10023c
-			 * reg_100238: 0x00??????
-			 * reg_10023c: 0x!!??0202 for NV50+ cards (empirical evidence) */
+			/* XXX; reg_100238
+			 * reg_100238: 0x00?????? */
 			timing->reg_10023c = 0x202;
-			if(dev_priv->chipset < 0xa3) {
+			if (dev_priv->chipset < 0x98 ||
+			    (dev_priv->chipset == 0x98 &&
+			     dev_priv->stepping <= 0xa1)) {
 				timing->reg_10023c |= 0x4000000 | (tUNK_2 - 1) << 16;
 			} else {
-				/* currently unknown
+				/* XXX: reg_10023c
+				 * currently unknown
 				 * 10023c seen as 06xxxxxx, 0bxxxxxx or 0fxxxxxx */
 			}
+
+			/* XXX: reg_100240? */
 		}
+		timing->id = i;
 
 		NV_DEBUG(dev, "Entry %d: 220: %08x %08x %08x %08x\n", i,
 			 timing->reg_100220, timing->reg_100224,
@@ -693,10 +712,11 @@ nouveau_mem_timing_init(struct drm_device *dev)
 		NV_DEBUG(dev, "         230: %08x %08x %08x %08x\n",
 			 timing->reg_100230, timing->reg_100234,
 			 timing->reg_100238, timing->reg_10023c);
+		NV_DEBUG(dev, "         240: %08x\n", timing->reg_100240);
 	}
 
 	memtimings->nr_timing = entries;
-	memtimings->supported = true;
+	memtimings->supported = (dev_priv->chipset <= 0x98);
 }
 
 void
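
The timing code packs several byte-wide table fields into one 32-bit register image with shifts and ORs. The same packing in isolation (field names follow the tUNK_* placeholders above; values invented):

/* Values invented; shows the shift-and-or packing used for the
 * reg_1002xx images above. */
#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint8_t tUNK_10 = 0x0a, tUNK_11 = 0x0b, tUNK_12 = 0x0c;
	uint32_t reg_100228 = (uint32_t)tUNK_12 << 16 |
			      (uint32_t)tUNK_11 << 8 |
			      tUNK_10;

	assert(reg_100228 == 0x000c0b0a);
	return 0;
}
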
diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c
index 67a16e01ffa6..8f97016f5b26 100644
--- a/drivers/gpu/drm/nouveau/nouveau_object.c
+++ b/drivers/gpu/drm/nouveau/nouveau_object.c
@@ -361,20 +361,6 @@ nouveau_gpuobj_new_fake(struct drm_device *dev, u32 pinst, u64 vinst,
 	return 0;
 }
 
-
-static uint32_t
-nouveau_gpuobj_class_instmem_size(struct drm_device *dev, int class)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-
-	/*XXX: dodgy hack for now */
-	if (dev_priv->card_type >= NV_50)
-		return 24;
-	if (dev_priv->card_type >= NV_40)
-		return 32;
-	return 16;
-}
-
 /*
    DMA objects are used to reference a piece of memory in the
    framebuffer, PCI or AGP address space. Each object is 16 bytes big
@@ -606,11 +592,11 @@ nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class, u64 base,
    set to 0?
 */
 static int
-nouveau_gpuobj_sw_new(struct nouveau_channel *chan, int class,
-		      struct nouveau_gpuobj **gpuobj_ret)
+nouveau_gpuobj_sw_new(struct nouveau_channel *chan, u32 handle, u16 class)
 {
 	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
 	struct nouveau_gpuobj *gpuobj;
+	int ret;
 
 	gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
 	if (!gpuobj)
@@ -624,8 +610,10 @@ nouveau_gpuobj_sw_new(struct nouveau_channel *chan, int class,
 	spin_lock(&dev_priv->ramin_lock);
 	list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
 	spin_unlock(&dev_priv->ramin_lock);
-	*gpuobj_ret = gpuobj;
-	return 0;
+
+	ret = nouveau_ramht_insert(chan, handle, gpuobj);
+	nouveau_gpuobj_ref(NULL, &gpuobj);
+	return ret;
 }
 
 int
@@ -634,101 +622,30 @@ nouveau_gpuobj_gr_new(struct nouveau_channel *chan, u32 handle, int class)
 	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
 	struct drm_device *dev = chan->dev;
 	struct nouveau_gpuobj_class *oc;
-	struct nouveau_gpuobj *gpuobj;
 	int ret;
 
 	NV_DEBUG(dev, "ch%d class=0x%04x\n", chan->id, class);
 
 	list_for_each_entry(oc, &dev_priv->classes, head) {
-		if (oc->id == class)
-			goto found;
-	}
-
-	NV_ERROR(dev, "illegal object class: 0x%x\n", class);
-	return -EINVAL;
+		struct nouveau_exec_engine *eng = dev_priv->eng[oc->engine];
 
-found:
-	switch (oc->engine) {
-	case NVOBJ_ENGINE_SW:
-		if (dev_priv->card_type < NV_C0) {
-			ret = nouveau_gpuobj_sw_new(chan, class, &gpuobj);
-			if (ret)
-				return ret;
-			goto insert;
-		}
-		break;
-	case NVOBJ_ENGINE_GR:
-		if ((dev_priv->card_type >= NV_20 && !chan->ramin_grctx) ||
-		    (dev_priv->card_type < NV_20 && !chan->pgraph_ctx)) {
-			struct nouveau_pgraph_engine *pgraph =
-				&dev_priv->engine.graph;
+		if (oc->id != class)
+			continue;
 
-			ret = pgraph->create_context(chan);
-			if (ret)
-				return ret;
-		}
-		break;
-	case NVOBJ_ENGINE_CRYPT:
-		if (!chan->crypt_ctx) {
-			struct nouveau_crypt_engine *pcrypt =
-				&dev_priv->engine.crypt;
+		if (oc->engine == NVOBJ_ENGINE_SW)
+			return nouveau_gpuobj_sw_new(chan, handle, class);
 
-			ret = pcrypt->create_context(chan);
+		if (!chan->engctx[oc->engine]) {
+			ret = eng->context_new(chan, oc->engine);
 			if (ret)
 				return ret;
 		}
-		break;
-	}
-
-	/* we're done if this is fermi */
-	if (dev_priv->card_type >= NV_C0)
-		return 0;
-
-	ret = nouveau_gpuobj_new(dev, chan,
-				 nouveau_gpuobj_class_instmem_size(dev, class),
-				 16,
-				 NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ZERO_FREE,
-				 &gpuobj);
-	if (ret) {
-		NV_ERROR(dev, "error creating gpuobj: %d\n", ret);
-		return ret;
-	}
 
-	if (dev_priv->card_type >= NV_50) {
-		nv_wo32(gpuobj, 0, class);
-		nv_wo32(gpuobj, 20, 0x00010000);
-	} else {
-		switch (class) {
-		case NV_CLASS_NULL:
-			nv_wo32(gpuobj, 0, 0x00001030);
-			nv_wo32(gpuobj, 4, 0xFFFFFFFF);
-			break;
-		default:
-			if (dev_priv->card_type >= NV_40) {
-				nv_wo32(gpuobj, 0, class);
-#ifdef __BIG_ENDIAN
-				nv_wo32(gpuobj, 8, 0x01000000);
-#endif
-			} else {
-#ifdef __BIG_ENDIAN
-				nv_wo32(gpuobj, 0, class | 0x00080000);
-#else
-				nv_wo32(gpuobj, 0, class);
-#endif
-			}
-		}
-	}
+		return eng->object_new(chan, oc->engine, handle, class);
 	}
-	dev_priv->engine.instmem.flush(dev);
 
-	gpuobj->engine = oc->engine;
-	gpuobj->class = oc->id;
-
-insert:
-	ret = nouveau_ramht_insert(chan, handle, gpuobj);
-	if (ret)
-		NV_ERROR(dev, "error adding gpuobj to RAMHT: %d\n", ret);
-	nouveau_gpuobj_ref(NULL, &gpuobj);
-	return ret;
+	NV_ERROR(dev, "illegal object class: 0x%x\n", class);
+	return -EINVAL;
 }
 
 static int
@@ -746,9 +663,6 @@ nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan)
 	size = 0x2000;
 	base = 0;
 
-	/* PGRAPH context */
-	size += dev_priv->engine.graph.grctx_size;
-
 	if (dev_priv->card_type == NV_50) {
 		/* Various fixed table thingos */
 		size += 0x1400; /* mostly unknown stuff */
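
The nouveau_gpuobj_gr_new() rewrite drops the "goto found" lookup in favour of a single pass that filters inside the loop and returns on the first matching class. A skeleton of that control flow with invented types:

/* Invented types; same control flow as the rewrite: filter with
 * "continue", return on the first match, fall through to the error
 * path only when no class matched. */
struct klass {
	int id;
	int engine;
	struct klass *next;
};

int object_engine_for_class(struct klass *classes, int class)
{
	struct klass *oc;

	for (oc = classes; oc; oc = oc->next) {
		if (oc->id != class)
			continue;	/* keep scanning */
		return oc->engine;	/* first match wins */
	}
	return -1;			/* illegal object class */
}
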
diff --git a/drivers/gpu/drm/nouveau/nouveau_perf.c b/drivers/gpu/drm/nouveau/nouveau_perf.c
index 670e3cb697ec..922fb6b664ed 100644
--- a/drivers/gpu/drm/nouveau/nouveau_perf.c
+++ b/drivers/gpu/drm/nouveau/nouveau_perf.c
@@ -72,6 +72,68 @@ legacy_perf_init(struct drm_device *dev)
 	pm->nr_perflvl = 1;
 }
 
+static struct nouveau_pm_memtiming *
+nouveau_perf_timing(struct drm_device *dev, struct bit_entry *P,
+		    u16 memclk, u8 *entry, u8 recordlen, u8 entries)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+	struct nvbios *bios = &dev_priv->vbios;
+	u8 ramcfg;
+	int i;
+
+	/* perf v2 has a separate "timing map" table, we have to match
+	 * the target memory clock to a specific entry, *then* use
+	 * ramcfg to select the correct subentry
+	 */
+	if (P->version == 2) {
+		u8 *tmap = ROMPTR(bios, P->data[4]);
+		if (!tmap) {
+			NV_DEBUG(dev, "no timing map pointer\n");
+			return NULL;
+		}
+
+		if (tmap[0] != 0x10) {
+			NV_WARN(dev, "timing map 0x%02x unknown\n", tmap[0]);
+			return NULL;
+		}
+
+		entry = tmap + tmap[1];
+		recordlen = tmap[2] + (tmap[4] * tmap[3]);
+		for (i = 0; i < tmap[5]; i++, entry += recordlen) {
+			if (memclk >= ROM16(entry[0]) &&
+			    memclk <= ROM16(entry[2]))
+				break;
+		}
+
+		if (i == tmap[5]) {
+			NV_WARN(dev, "no match in timing map table\n");
+			return NULL;
+		}
+
+		entry += tmap[2];
+		recordlen = tmap[3];
+		entries = tmap[4];
+	}
+
+	ramcfg = (nv_rd32(dev, NV_PEXTDEV_BOOT_0) & 0x0000003c) >> 2;
+	if (bios->ram_restrict_tbl_ptr)
+		ramcfg = bios->data[bios->ram_restrict_tbl_ptr + ramcfg];
+
+	if (ramcfg >= entries) {
+		NV_WARN(dev, "ramcfg strap out of bounds!\n");
+		return NULL;
+	}
+
+	entry += ramcfg * recordlen;
+	if (entry[1] >= pm->memtimings.nr_timing) {
+		NV_WARN(dev, "timingset %d does not exist\n", entry[1]);
+		return NULL;
+	}
+
+	return &pm->memtimings.timing[entry[1]];
+}
+
 void
 nouveau_perf_init(struct drm_device *dev)
 {
@@ -124,6 +186,8 @@ nouveau_perf_init(struct drm_device *dev)
 	for (i = 0; i < entries; i++) {
 		struct nouveau_pm_level *perflvl = &pm->perflvl[pm->nr_perflvl];
 
+		perflvl->timing = NULL;
+
 		if (entry[0] == 0xff) {
 			entry += recordlen;
 			continue;
@@ -174,9 +238,21 @@ nouveau_perf_init(struct drm_device *dev)
 #define subent(n) entry[perf[2] + ((n) * perf[3])]
 			perflvl->fanspeed = 0; /*XXX*/
 			perflvl->voltage = entry[2];
-			perflvl->core = (ROM16(subent(0)) & 0xfff) * 1000;
-			perflvl->shader = (ROM16(subent(1)) & 0xfff) * 1000;
-			perflvl->memory = (ROM16(subent(2)) & 0xfff) * 1000;
+			if (dev_priv->card_type == NV_50) {
+				perflvl->core = ROM16(subent(0)) & 0xfff;
+				perflvl->shader = ROM16(subent(1)) & 0xfff;
+				perflvl->memory = ROM16(subent(2)) & 0xfff;
+			} else {
+				perflvl->shader = ROM16(subent(3)) & 0xfff;
+				perflvl->core = perflvl->shader / 2;
+				perflvl->unk0a = ROM16(subent(4)) & 0xfff;
+				perflvl->memory = ROM16(subent(5)) & 0xfff;
+			}
+
+			perflvl->core *= 1000;
+			perflvl->shader *= 1000;
+			perflvl->memory *= 1000;
+			perflvl->unk0a *= 1000;
 			break;
 		}
 
@@ -190,6 +266,16 @@ nouveau_perf_init(struct drm_device *dev)
 			}
 		}
 
+		/* get the corresponding memory timings */
+		if (version > 0x15) {
+			/* last 3 args are for < 0x40, ignored for >= 0x40 */
+			perflvl->timing =
+				nouveau_perf_timing(dev, &P,
+						    perflvl->memory / 1000,
+						    entry + perf[3],
+						    perf[5], perf[4]);
+		}
+
 		snprintf(perflvl->name, sizeof(perflvl->name),
 			 "performance_level_%d", i);
 		perflvl->id = i;
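
nouveau_perf_timing() matches the target memory clock against the [min, max] ranges in the perf v2 timing map before indexing by ramcfg. The range search in isolation (record layout invented, mirroring the ROM16 min/max fields above):

/* Record layout invented; mirrors the min/max clock match done with
 * ROM16(entry[0]) and ROM16(entry[2]) above. */
#include <assert.h>
#include <stdint.h>

struct trec {
	uint16_t min_mhz, max_mhz;
	uint8_t timing_id;
};

static int find_timing(const struct trec *t, int n, uint16_t memclk)
{
	int i;

	for (i = 0; i < n; i++) {
		if (memclk >= t[i].min_mhz && memclk <= t[i].max_mhz)
			return t[i].timing_id;
	}
	return -1;	/* no match in timing map table */
}

int main(void)
{
	const struct trec map[] = {
		{ 100, 400, 0 },
		{ 401, 800, 1 },
	};

	assert(find_timing(map, 2, 750) == 1);
	return 0;
}
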
diff --git a/drivers/gpu/drm/nouveau/nouveau_pm.c b/drivers/gpu/drm/nouveau/nouveau_pm.c
index 4399e2f34db4..da8d994d5e8a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_pm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_pm.c
@@ -156,7 +156,7 @@ nouveau_pm_perflvl_get(struct drm_device *dev, struct nouveau_pm_level *perflvl)
156static void 156static void
157nouveau_pm_perflvl_info(struct nouveau_pm_level *perflvl, char *ptr, int len) 157nouveau_pm_perflvl_info(struct nouveau_pm_level *perflvl, char *ptr, int len)
158{ 158{
159 char c[16], s[16], v[16], f[16]; 159 char c[16], s[16], v[16], f[16], t[16];
160 160
161 c[0] = '\0'; 161 c[0] = '\0';
162 if (perflvl->core) 162 if (perflvl->core)
@@ -174,8 +174,12 @@ nouveau_pm_perflvl_info(struct nouveau_pm_level *perflvl, char *ptr, int len)
174 if (perflvl->fanspeed) 174 if (perflvl->fanspeed)
175 snprintf(f, sizeof(f), " fanspeed %d%%", perflvl->fanspeed); 175 snprintf(f, sizeof(f), " fanspeed %d%%", perflvl->fanspeed);
176 176
177 snprintf(ptr, len, "memory %dMHz%s%s%s%s\n", perflvl->memory / 1000, 177 t[0] = '\0';
178 c, s, v, f); 178 if (perflvl->timing)
179 snprintf(t, sizeof(t), " timing %d", perflvl->timing->id);
180
181 snprintf(ptr, len, "memory %dMHz%s%s%s%s%s\n", perflvl->memory / 1000,
182 c, s, v, f, t);
179} 183}
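
The formatting pattern above (one optional snprintf'd fragment per field, concatenated into the final line) can be sketched standalone; names here are illustrative only.

	#include <stdio.h>

	static void
	fmt_level(char *buf, size_t len, int memory_khz, int core_khz,
		  int timing_id /* negative when absent */)
	{
		char c[16] = "", t[16] = "";

		if (core_khz)
			snprintf(c, sizeof(c), " core %dMHz", core_khz / 1000);
		if (timing_id >= 0)
			snprintf(t, sizeof(t), " timing %d", timing_id);

		snprintf(buf, len, "memory %dMHz%s%s\n",
			 memory_khz / 1000, c, t);
	}
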
180 184
181static ssize_t 185static ssize_t
@@ -449,7 +453,7 @@ nouveau_hwmon_fini(struct drm_device *dev)
449#endif 453#endif
450} 454}
451 455
452#ifdef CONFIG_ACPI 456#if defined(CONFIG_ACPI) && defined(CONFIG_POWER_SUPPLY)
453static int 457static int
454nouveau_pm_acpi_event(struct notifier_block *nb, unsigned long val, void *data) 458nouveau_pm_acpi_event(struct notifier_block *nb, unsigned long val, void *data)
455{ 459{
@@ -476,10 +480,10 @@ nouveau_pm_init(struct drm_device *dev)
476 char info[256]; 480 char info[256];
477 int ret, i; 481 int ret, i;
478 482
483 nouveau_mem_timing_init(dev);
479 nouveau_volt_init(dev); 484 nouveau_volt_init(dev);
480 nouveau_perf_init(dev); 485 nouveau_perf_init(dev);
481 nouveau_temp_init(dev); 486 nouveau_temp_init(dev);
482 nouveau_mem_timing_init(dev);
483 487
484 NV_INFO(dev, "%d available performance level(s)\n", pm->nr_perflvl); 488 NV_INFO(dev, "%d available performance level(s)\n", pm->nr_perflvl);
485 for (i = 0; i < pm->nr_perflvl; i++) { 489 for (i = 0; i < pm->nr_perflvl; i++) {
@@ -490,6 +494,7 @@ nouveau_pm_init(struct drm_device *dev)
490 /* determine current ("boot") performance level */ 494 /* determine current ("boot") performance level */
491 ret = nouveau_pm_perflvl_get(dev, &pm->boot); 495 ret = nouveau_pm_perflvl_get(dev, &pm->boot);
492 if (ret == 0) { 496 if (ret == 0) {
497 strncpy(pm->boot.name, "boot", 4);
493 pm->cur = &pm->boot; 498 pm->cur = &pm->boot;
494 499
495 nouveau_pm_perflvl_info(&pm->boot, info, sizeof(info)); 500 nouveau_pm_perflvl_info(&pm->boot, info, sizeof(info));
@@ -507,7 +512,7 @@ nouveau_pm_init(struct drm_device *dev)
507 512
508 nouveau_sysfs_init(dev); 513 nouveau_sysfs_init(dev);
509 nouveau_hwmon_init(dev); 514 nouveau_hwmon_init(dev);
510#ifdef CONFIG_ACPI 515#if defined(CONFIG_ACPI) && defined(CONFIG_POWER_SUPPLY)
511 pm->acpi_nb.notifier_call = nouveau_pm_acpi_event; 516 pm->acpi_nb.notifier_call = nouveau_pm_acpi_event;
512 register_acpi_notifier(&pm->acpi_nb); 517 register_acpi_notifier(&pm->acpi_nb);
513#endif 518#endif
@@ -524,12 +529,12 @@ nouveau_pm_fini(struct drm_device *dev)
524 if (pm->cur != &pm->boot) 529 if (pm->cur != &pm->boot)
525 nouveau_pm_perflvl_set(dev, &pm->boot); 530 nouveau_pm_perflvl_set(dev, &pm->boot);
526 531
527 nouveau_mem_timing_fini(dev);
528 nouveau_temp_fini(dev); 532 nouveau_temp_fini(dev);
529 nouveau_perf_fini(dev); 533 nouveau_perf_fini(dev);
530 nouveau_volt_fini(dev); 534 nouveau_volt_fini(dev);
535 nouveau_mem_timing_fini(dev);
531 536
532#ifdef CONFIG_ACPI 537#if defined(CONFIG_ACPI) && defined(CONFIG_POWER_SUPPLY)
533 unregister_acpi_notifier(&pm->acpi_nb); 538 unregister_acpi_notifier(&pm->acpi_nb);
534#endif 539#endif
535 nouveau_hwmon_fini(dev); 540 nouveau_hwmon_fini(dev);
diff --git a/drivers/gpu/drm/nouveau/nouveau_reg.h b/drivers/gpu/drm/nouveau/nouveau_reg.h
index 04e8fb795269..f18cdfc3400f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_reg.h
+++ b/drivers/gpu/drm/nouveau/nouveau_reg.h
@@ -639,9 +639,9 @@
639# define NV50_PCONNECTOR_I2C_PORT_4 0x0000e240 639# define NV50_PCONNECTOR_I2C_PORT_4 0x0000e240
640# define NV50_PCONNECTOR_I2C_PORT_5 0x0000e258 640# define NV50_PCONNECTOR_I2C_PORT_5 0x0000e258
641 641
642#define NV50_AUXCH_DATA_OUT(i,n) ((n) * 4 + (i) * 0x50 + 0x0000e4c0) 642#define NV50_AUXCH_DATA_OUT(i, n) ((n) * 4 + (i) * 0x50 + 0x0000e4c0)
643#define NV50_AUXCH_DATA_OUT__SIZE 4 643#define NV50_AUXCH_DATA_OUT__SIZE 4
644#define NV50_AUXCH_DATA_IN(i,n) ((n) * 4 + (i) * 0x50 + 0x0000e4d0) 644#define NV50_AUXCH_DATA_IN(i, n) ((n) * 4 + (i) * 0x50 + 0x0000e4d0)
645#define NV50_AUXCH_DATA_IN__SIZE 4 645#define NV50_AUXCH_DATA_IN__SIZE 4
646#define NV50_AUXCH_ADDR(i) ((i) * 0x50 + 0x0000e4e0) 646#define NV50_AUXCH_ADDR(i) ((i) * 0x50 + 0x0000e4e0)
647#define NV50_AUXCH_CTRL(i) ((i) * 0x50 + 0x0000e4e4) 647#define NV50_AUXCH_CTRL(i) ((i) * 0x50 + 0x0000e4e4)
@@ -829,7 +829,7 @@
829#define NV50_PDISPLAY_SOR_BACKLIGHT 0x0061c084 829#define NV50_PDISPLAY_SOR_BACKLIGHT 0x0061c084
830#define NV50_PDISPLAY_SOR_BACKLIGHT_ENABLE 0x80000000 830#define NV50_PDISPLAY_SOR_BACKLIGHT_ENABLE 0x80000000
831#define NV50_PDISPLAY_SOR_BACKLIGHT_LEVEL 0x00000fff 831#define NV50_PDISPLAY_SOR_BACKLIGHT_LEVEL 0x00000fff
832#define NV50_SOR_DP_CTRL(i,l) (0x0061c10c + (i) * 0x800 + (l) * 0x80) 832#define NV50_SOR_DP_CTRL(i, l) (0x0061c10c + (i) * 0x800 + (l) * 0x80)
833#define NV50_SOR_DP_CTRL_ENABLED 0x00000001 833#define NV50_SOR_DP_CTRL_ENABLED 0x00000001
834#define NV50_SOR_DP_CTRL_ENHANCED_FRAME_ENABLED 0x00004000 834#define NV50_SOR_DP_CTRL_ENHANCED_FRAME_ENABLED 0x00004000
835#define NV50_SOR_DP_CTRL_LANE_MASK 0x001f0000 835#define NV50_SOR_DP_CTRL_LANE_MASK 0x001f0000
@@ -841,10 +841,10 @@
841#define NV50_SOR_DP_CTRL_TRAINING_PATTERN_DISABLED 0x00000000 841#define NV50_SOR_DP_CTRL_TRAINING_PATTERN_DISABLED 0x00000000
842#define NV50_SOR_DP_CTRL_TRAINING_PATTERN_1 0x01000000 842#define NV50_SOR_DP_CTRL_TRAINING_PATTERN_1 0x01000000
843#define NV50_SOR_DP_CTRL_TRAINING_PATTERN_2 0x02000000 843#define NV50_SOR_DP_CTRL_TRAINING_PATTERN_2 0x02000000
844#define NV50_SOR_DP_UNK118(i,l) (0x0061c118 + (i) * 0x800 + (l) * 0x80) 844#define NV50_SOR_DP_UNK118(i, l) (0x0061c118 + (i) * 0x800 + (l) * 0x80)
845#define NV50_SOR_DP_UNK120(i,l) (0x0061c120 + (i) * 0x800 + (l) * 0x80) 845#define NV50_SOR_DP_UNK120(i, l) (0x0061c120 + (i) * 0x800 + (l) * 0x80)
846#define NV50_SOR_DP_UNK128(i,l) (0x0061c128 + (i) * 0x800 + (l) * 0x80) 846#define NV50_SOR_DP_UNK128(i, l) (0x0061c128 + (i) * 0x800 + (l) * 0x80)
847#define NV50_SOR_DP_UNK130(i,l) (0x0061c130 + (i) * 0x800 + (l) * 0x80) 847#define NV50_SOR_DP_UNK130(i, l) (0x0061c130 + (i) * 0x800 + (l) * 0x80)
848 848
849#define NV50_PDISPLAY_USER(i) ((i) * 0x1000 + 0x00640000) 849#define NV50_PDISPLAY_USER(i) ((i) * 0x1000 + 0x00640000)
850#define NV50_PDISPLAY_USER_PUT(i) ((i) * 0x1000 + 0x00640000) 850#define NV50_PDISPLAY_USER_PUT(i) ((i) * 0x1000 + 0x00640000)
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index 915fbce89595..38ea662568c1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -65,14 +65,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
65 engine->timer.takedown = nv04_timer_takedown; 65 engine->timer.takedown = nv04_timer_takedown;
66 engine->fb.init = nv04_fb_init; 66 engine->fb.init = nv04_fb_init;
67 engine->fb.takedown = nv04_fb_takedown; 67 engine->fb.takedown = nv04_fb_takedown;
68 engine->graph.init = nv04_graph_init;
69 engine->graph.takedown = nv04_graph_takedown;
70 engine->graph.fifo_access = nv04_graph_fifo_access;
71 engine->graph.channel = nv04_graph_channel;
72 engine->graph.create_context = nv04_graph_create_context;
73 engine->graph.destroy_context = nv04_graph_destroy_context;
74 engine->graph.load_context = nv04_graph_load_context;
75 engine->graph.unload_context = nv04_graph_unload_context;
76 engine->fifo.channels = 16; 68 engine->fifo.channels = 16;
77 engine->fifo.init = nv04_fifo_init; 69 engine->fifo.init = nv04_fifo_init;
78 engine->fifo.takedown = nv04_fifo_fini; 70 engine->fifo.takedown = nv04_fifo_fini;
@@ -98,8 +90,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
98 engine->pm.clock_get = nv04_pm_clock_get; 90 engine->pm.clock_get = nv04_pm_clock_get;
99 engine->pm.clock_pre = nv04_pm_clock_pre; 91 engine->pm.clock_pre = nv04_pm_clock_pre;
100 engine->pm.clock_set = nv04_pm_clock_set; 92 engine->pm.clock_set = nv04_pm_clock_set;
101 engine->crypt.init = nouveau_stub_init;
102 engine->crypt.takedown = nouveau_stub_takedown;
103 engine->vram.init = nouveau_mem_detect; 93 engine->vram.init = nouveau_mem_detect;
104 engine->vram.flags_valid = nouveau_mem_flags_valid; 94 engine->vram.flags_valid = nouveau_mem_flags_valid;
105 break; 95 break;
@@ -123,15 +113,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
123 engine->fb.init_tile_region = nv10_fb_init_tile_region; 113 engine->fb.init_tile_region = nv10_fb_init_tile_region;
124 engine->fb.set_tile_region = nv10_fb_set_tile_region; 114 engine->fb.set_tile_region = nv10_fb_set_tile_region;
125 engine->fb.free_tile_region = nv10_fb_free_tile_region; 115 engine->fb.free_tile_region = nv10_fb_free_tile_region;
126 engine->graph.init = nv10_graph_init;
127 engine->graph.takedown = nv10_graph_takedown;
128 engine->graph.channel = nv10_graph_channel;
129 engine->graph.create_context = nv10_graph_create_context;
130 engine->graph.destroy_context = nv10_graph_destroy_context;
131 engine->graph.fifo_access = nv04_graph_fifo_access;
132 engine->graph.load_context = nv10_graph_load_context;
133 engine->graph.unload_context = nv10_graph_unload_context;
134 engine->graph.set_tile_region = nv10_graph_set_tile_region;
135 engine->fifo.channels = 32; 116 engine->fifo.channels = 32;
136 engine->fifo.init = nv10_fifo_init; 117 engine->fifo.init = nv10_fifo_init;
137 engine->fifo.takedown = nv04_fifo_fini; 118 engine->fifo.takedown = nv04_fifo_fini;
@@ -157,8 +138,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
157 engine->pm.clock_get = nv04_pm_clock_get; 138 engine->pm.clock_get = nv04_pm_clock_get;
158 engine->pm.clock_pre = nv04_pm_clock_pre; 139 engine->pm.clock_pre = nv04_pm_clock_pre;
159 engine->pm.clock_set = nv04_pm_clock_set; 140 engine->pm.clock_set = nv04_pm_clock_set;
160 engine->crypt.init = nouveau_stub_init;
161 engine->crypt.takedown = nouveau_stub_takedown;
162 engine->vram.init = nouveau_mem_detect; 141 engine->vram.init = nouveau_mem_detect;
163 engine->vram.flags_valid = nouveau_mem_flags_valid; 142 engine->vram.flags_valid = nouveau_mem_flags_valid;
164 break; 143 break;
@@ -182,15 +161,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
182 engine->fb.init_tile_region = nv10_fb_init_tile_region; 161 engine->fb.init_tile_region = nv10_fb_init_tile_region;
183 engine->fb.set_tile_region = nv10_fb_set_tile_region; 162 engine->fb.set_tile_region = nv10_fb_set_tile_region;
184 engine->fb.free_tile_region = nv10_fb_free_tile_region; 163 engine->fb.free_tile_region = nv10_fb_free_tile_region;
185 engine->graph.init = nv20_graph_init;
186 engine->graph.takedown = nv20_graph_takedown;
187 engine->graph.channel = nv10_graph_channel;
188 engine->graph.create_context = nv20_graph_create_context;
189 engine->graph.destroy_context = nv20_graph_destroy_context;
190 engine->graph.fifo_access = nv04_graph_fifo_access;
191 engine->graph.load_context = nv20_graph_load_context;
192 engine->graph.unload_context = nv20_graph_unload_context;
193 engine->graph.set_tile_region = nv20_graph_set_tile_region;
194 engine->fifo.channels = 32; 164 engine->fifo.channels = 32;
195 engine->fifo.init = nv10_fifo_init; 165 engine->fifo.init = nv10_fifo_init;
196 engine->fifo.takedown = nv04_fifo_fini; 166 engine->fifo.takedown = nv04_fifo_fini;
@@ -216,8 +186,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
216 engine->pm.clock_get = nv04_pm_clock_get; 186 engine->pm.clock_get = nv04_pm_clock_get;
217 engine->pm.clock_pre = nv04_pm_clock_pre; 187 engine->pm.clock_pre = nv04_pm_clock_pre;
218 engine->pm.clock_set = nv04_pm_clock_set; 188 engine->pm.clock_set = nv04_pm_clock_set;
219 engine->crypt.init = nouveau_stub_init;
220 engine->crypt.takedown = nouveau_stub_takedown;
221 engine->vram.init = nouveau_mem_detect; 189 engine->vram.init = nouveau_mem_detect;
222 engine->vram.flags_valid = nouveau_mem_flags_valid; 190 engine->vram.flags_valid = nouveau_mem_flags_valid;
223 break; 191 break;
@@ -241,15 +209,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
241 engine->fb.init_tile_region = nv30_fb_init_tile_region; 209 engine->fb.init_tile_region = nv30_fb_init_tile_region;
242 engine->fb.set_tile_region = nv10_fb_set_tile_region; 210 engine->fb.set_tile_region = nv10_fb_set_tile_region;
243 engine->fb.free_tile_region = nv30_fb_free_tile_region; 211 engine->fb.free_tile_region = nv30_fb_free_tile_region;
244 engine->graph.init = nv30_graph_init;
245 engine->graph.takedown = nv20_graph_takedown;
246 engine->graph.fifo_access = nv04_graph_fifo_access;
247 engine->graph.channel = nv10_graph_channel;
248 engine->graph.create_context = nv20_graph_create_context;
249 engine->graph.destroy_context = nv20_graph_destroy_context;
250 engine->graph.load_context = nv20_graph_load_context;
251 engine->graph.unload_context = nv20_graph_unload_context;
252 engine->graph.set_tile_region = nv20_graph_set_tile_region;
253 engine->fifo.channels = 32; 212 engine->fifo.channels = 32;
254 engine->fifo.init = nv10_fifo_init; 213 engine->fifo.init = nv10_fifo_init;
255 engine->fifo.takedown = nv04_fifo_fini; 214 engine->fifo.takedown = nv04_fifo_fini;
@@ -277,8 +236,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
277 engine->pm.clock_set = nv04_pm_clock_set; 236 engine->pm.clock_set = nv04_pm_clock_set;
278 engine->pm.voltage_get = nouveau_voltage_gpio_get; 237 engine->pm.voltage_get = nouveau_voltage_gpio_get;
279 engine->pm.voltage_set = nouveau_voltage_gpio_set; 238 engine->pm.voltage_set = nouveau_voltage_gpio_set;
280 engine->crypt.init = nouveau_stub_init;
281 engine->crypt.takedown = nouveau_stub_takedown;
282 engine->vram.init = nouveau_mem_detect; 239 engine->vram.init = nouveau_mem_detect;
283 engine->vram.flags_valid = nouveau_mem_flags_valid; 240 engine->vram.flags_valid = nouveau_mem_flags_valid;
284 break; 241 break;
@@ -303,15 +260,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
303 engine->fb.init_tile_region = nv30_fb_init_tile_region; 260 engine->fb.init_tile_region = nv30_fb_init_tile_region;
304 engine->fb.set_tile_region = nv40_fb_set_tile_region; 261 engine->fb.set_tile_region = nv40_fb_set_tile_region;
305 engine->fb.free_tile_region = nv30_fb_free_tile_region; 262 engine->fb.free_tile_region = nv30_fb_free_tile_region;
306 engine->graph.init = nv40_graph_init;
307 engine->graph.takedown = nv40_graph_takedown;
308 engine->graph.fifo_access = nv04_graph_fifo_access;
309 engine->graph.channel = nv40_graph_channel;
310 engine->graph.create_context = nv40_graph_create_context;
311 engine->graph.destroy_context = nv40_graph_destroy_context;
312 engine->graph.load_context = nv40_graph_load_context;
313 engine->graph.unload_context = nv40_graph_unload_context;
314 engine->graph.set_tile_region = nv40_graph_set_tile_region;
315 engine->fifo.channels = 32; 263 engine->fifo.channels = 32;
316 engine->fifo.init = nv40_fifo_init; 264 engine->fifo.init = nv40_fifo_init;
317 engine->fifo.takedown = nv04_fifo_fini; 265 engine->fifo.takedown = nv04_fifo_fini;
@@ -340,8 +288,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
340 engine->pm.voltage_get = nouveau_voltage_gpio_get; 288 engine->pm.voltage_get = nouveau_voltage_gpio_get;
341 engine->pm.voltage_set = nouveau_voltage_gpio_set; 289 engine->pm.voltage_set = nouveau_voltage_gpio_set;
342 engine->pm.temp_get = nv40_temp_get; 290 engine->pm.temp_get = nv40_temp_get;
343 engine->crypt.init = nouveau_stub_init;
344 engine->crypt.takedown = nouveau_stub_takedown;
345 engine->vram.init = nouveau_mem_detect; 291 engine->vram.init = nouveau_mem_detect;
346 engine->vram.flags_valid = nouveau_mem_flags_valid; 292 engine->vram.flags_valid = nouveau_mem_flags_valid;
347 break; 293 break;
@@ -368,19 +314,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
368 engine->timer.takedown = nv04_timer_takedown; 314 engine->timer.takedown = nv04_timer_takedown;
369 engine->fb.init = nv50_fb_init; 315 engine->fb.init = nv50_fb_init;
370 engine->fb.takedown = nv50_fb_takedown; 316 engine->fb.takedown = nv50_fb_takedown;
371 engine->graph.init = nv50_graph_init;
372 engine->graph.takedown = nv50_graph_takedown;
373 engine->graph.fifo_access = nv50_graph_fifo_access;
374 engine->graph.channel = nv50_graph_channel;
375 engine->graph.create_context = nv50_graph_create_context;
376 engine->graph.destroy_context = nv50_graph_destroy_context;
377 engine->graph.load_context = nv50_graph_load_context;
378 engine->graph.unload_context = nv50_graph_unload_context;
379 if (dev_priv->chipset == 0x50 ||
380 dev_priv->chipset == 0xac)
381 engine->graph.tlb_flush = nv50_graph_tlb_flush;
382 else
383 engine->graph.tlb_flush = nv84_graph_tlb_flush;
384 engine->fifo.channels = 128; 317 engine->fifo.channels = 128;
385 engine->fifo.init = nv50_fifo_init; 318 engine->fifo.init = nv50_fifo_init;
386 engine->fifo.takedown = nv50_fifo_takedown; 319 engine->fifo.takedown = nv50_fifo_takedown;
@@ -432,24 +365,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
432 engine->pm.temp_get = nv84_temp_get; 365 engine->pm.temp_get = nv84_temp_get;
433 else 366 else
434 engine->pm.temp_get = nv40_temp_get; 367 engine->pm.temp_get = nv40_temp_get;
435 switch (dev_priv->chipset) {
436 case 0x84:
437 case 0x86:
438 case 0x92:
439 case 0x94:
440 case 0x96:
441 case 0xa0:
442 engine->crypt.init = nv84_crypt_init;
443 engine->crypt.takedown = nv84_crypt_fini;
444 engine->crypt.create_context = nv84_crypt_create_context;
445 engine->crypt.destroy_context = nv84_crypt_destroy_context;
446 engine->crypt.tlb_flush = nv84_crypt_tlb_flush;
447 break;
448 default:
449 engine->crypt.init = nouveau_stub_init;
450 engine->crypt.takedown = nouveau_stub_takedown;
451 break;
452 }
453 engine->vram.init = nv50_vram_init; 368 engine->vram.init = nv50_vram_init;
454 engine->vram.get = nv50_vram_new; 369 engine->vram.get = nv50_vram_new;
455 engine->vram.put = nv50_vram_del; 370 engine->vram.put = nv50_vram_del;
@@ -472,14 +387,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
472 engine->timer.takedown = nv04_timer_takedown; 387 engine->timer.takedown = nv04_timer_takedown;
473 engine->fb.init = nvc0_fb_init; 388 engine->fb.init = nvc0_fb_init;
474 engine->fb.takedown = nvc0_fb_takedown; 389 engine->fb.takedown = nvc0_fb_takedown;
475 engine->graph.init = nvc0_graph_init;
476 engine->graph.takedown = nvc0_graph_takedown;
477 engine->graph.fifo_access = nvc0_graph_fifo_access;
478 engine->graph.channel = nvc0_graph_channel;
479 engine->graph.create_context = nvc0_graph_create_context;
480 engine->graph.destroy_context = nvc0_graph_destroy_context;
481 engine->graph.load_context = nvc0_graph_load_context;
482 engine->graph.unload_context = nvc0_graph_unload_context;
483 engine->fifo.channels = 128; 390 engine->fifo.channels = 128;
484 engine->fifo.init = nvc0_fifo_init; 391 engine->fifo.init = nvc0_fifo_init;
485 engine->fifo.takedown = nvc0_fifo_takedown; 392 engine->fifo.takedown = nvc0_fifo_takedown;
@@ -503,8 +410,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
503 engine->gpio.irq_register = nv50_gpio_irq_register; 410 engine->gpio.irq_register = nv50_gpio_irq_register;
504 engine->gpio.irq_unregister = nv50_gpio_irq_unregister; 411 engine->gpio.irq_unregister = nv50_gpio_irq_unregister;
505 engine->gpio.irq_enable = nv50_gpio_irq_enable; 412 engine->gpio.irq_enable = nv50_gpio_irq_enable;
506 engine->crypt.init = nouveau_stub_init;
507 engine->crypt.takedown = nouveau_stub_takedown;
508 engine->vram.init = nvc0_vram_init; 413 engine->vram.init = nvc0_vram_init;
509 engine->vram.get = nvc0_vram_new; 414 engine->vram.get = nvc0_vram_new;
510 engine->vram.put = nv50_vram_del; 415 engine->vram.put = nv50_vram_del;
@@ -593,7 +498,7 @@ nouveau_card_init(struct drm_device *dev)
593{ 498{
594 struct drm_nouveau_private *dev_priv = dev->dev_private; 499 struct drm_nouveau_private *dev_priv = dev->dev_private;
595 struct nouveau_engine *engine; 500 struct nouveau_engine *engine;
596 int ret; 501 int ret, e = 0;
597 502
598 vga_client_register(dev->pdev, dev, NULL, nouveau_vga_set_decode); 503 vga_client_register(dev->pdev, dev, NULL, nouveau_vga_set_decode);
599 vga_switcheroo_register_client(dev->pdev, nouveau_switcheroo_set_state, 504 vga_switcheroo_register_client(dev->pdev, nouveau_switcheroo_set_state,
@@ -658,23 +563,80 @@ nouveau_card_init(struct drm_device *dev)
658 if (ret) 563 if (ret)
659 goto out_timer; 564 goto out_timer;
660 565
661 if (nouveau_noaccel) 566 switch (dev_priv->card_type) {
662 engine->graph.accel_blocked = true; 567 case NV_04:
663 else { 568 nv04_graph_create(dev);
664 /* PGRAPH */ 569 break;
665 ret = engine->graph.init(dev); 570 case NV_10:
666 if (ret) 571 nv10_graph_create(dev);
667 goto out_fb; 572 break;
573 case NV_20:
574 case NV_30:
575 nv20_graph_create(dev);
576 break;
577 case NV_40:
578 nv40_graph_create(dev);
579 break;
580 case NV_50:
581 nv50_graph_create(dev);
582 break;
583 case NV_C0:
584 nvc0_graph_create(dev);
585 break;
586 default:
587 break;
588 }
668 589
669 /* PCRYPT */ 590 switch (dev_priv->chipset) {
670 ret = engine->crypt.init(dev); 591 case 0x84:
671 if (ret) 592 case 0x86:
672 goto out_graph; 593 case 0x92:
594 case 0x94:
595 case 0x96:
596 case 0xa0:
597 nv84_crypt_create(dev);
598 break;
599 }
600
601 switch (dev_priv->card_type) {
602 case NV_50:
603 switch (dev_priv->chipset) {
604 case 0xa3:
605 case 0xa5:
606 case 0xa8:
607 case 0xaf:
608 nva3_copy_create(dev);
609 break;
610 }
611 break;
612 case NV_C0:
613 nvc0_copy_create(dev, 0);
614 nvc0_copy_create(dev, 1);
615 break;
616 default:
617 break;
618 }
619
620 if (dev_priv->card_type == NV_40)
621 nv40_mpeg_create(dev);
622 else
623 if (dev_priv->card_type == NV_50 &&
624 (dev_priv->chipset < 0x98 || dev_priv->chipset == 0xa0))
625 nv50_mpeg_create(dev);
626
627 if (!nouveau_noaccel) {
628 for (e = 0; e < NVOBJ_ENGINE_NR; e++) {
629 if (dev_priv->eng[e]) {
630 ret = dev_priv->eng[e]->init(dev, e);
631 if (ret)
632 goto out_engine;
633 }
634 }
673 635
674 /* PFIFO */ 636 /* PFIFO */
675 ret = engine->fifo.init(dev); 637 ret = engine->fifo.init(dev);
676 if (ret) 638 if (ret)
677 goto out_crypt; 639 goto out_engine;
678 } 640 }
679 641
680 ret = engine->display.create(dev); 642 ret = engine->display.create(dev);
@@ -691,7 +653,7 @@ nouveau_card_init(struct drm_device *dev)
691 653
692 /* what about PVIDEO/PCRTC/PRAMDAC etc? */ 654 /* what about PVIDEO/PCRTC/PRAMDAC etc? */
693 655
694 if (!engine->graph.accel_blocked) { 656 if (dev_priv->eng[NVOBJ_ENGINE_GR]) {
695 ret = nouveau_fence_init(dev); 657 ret = nouveau_fence_init(dev);
696 if (ret) 658 if (ret)
697 goto out_irq; 659 goto out_irq;
@@ -715,13 +677,16 @@ out_vblank:
715out_fifo: 677out_fifo:
716 if (!nouveau_noaccel) 678 if (!nouveau_noaccel)
717 engine->fifo.takedown(dev); 679 engine->fifo.takedown(dev);
718out_crypt: 680out_engine:
719 if (!nouveau_noaccel) 681 if (!nouveau_noaccel) {
720 engine->crypt.takedown(dev); 682 for (e = e - 1; e >= 0; e--) {
721out_graph: 683 if (!dev_priv->eng[e])
722 if (!nouveau_noaccel) 684 continue;
723 engine->graph.takedown(dev); 685 dev_priv->eng[e]->fini(dev, e);
724out_fb:                                                        686                         dev_priv->eng[e]->destroy(dev, e);
687 }
688 }
689
725 engine->fb.takedown(dev); 690 engine->fb.takedown(dev);
726out_timer: 691out_timer:
727 engine->timer.takedown(dev); 692 engine->timer.takedown(dev);
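
Taken together, the engine-init loop and the out_engine error path above implement init-with-rollback over the new eng[] array; a self-contained sketch of the pattern (types and names illustrative):

	#include <stddef.h>

	#define ENGINE_NR 16

	struct engine_ops {
		int  (*init)(void *dev, int engine);
		int  (*fini)(void *dev, int engine);
		void (*destroy)(void *dev, int engine);
	};

	static int
	init_engines(void *dev, struct engine_ops *eng[ENGINE_NR])
	{
		int e, ret;

		for (e = 0; e < ENGINE_NR; e++) {
			if (!eng[e])
				continue;
			ret = eng[e]->init(dev, e);
			if (ret)
				goto error;
		}
		return 0;

	error:
		/* unwind only the engines that initialised successfully */
		while (--e >= 0) {
			if (!eng[e])
				continue;
			eng[e]->fini(dev, e);
			eng[e]->destroy(dev, e);
		}
		return ret;
	}
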
@@ -751,16 +716,21 @@ static void nouveau_card_takedown(struct drm_device *dev)
751{ 716{
752 struct drm_nouveau_private *dev_priv = dev->dev_private; 717 struct drm_nouveau_private *dev_priv = dev->dev_private;
753 struct nouveau_engine *engine = &dev_priv->engine; 718 struct nouveau_engine *engine = &dev_priv->engine;
719 int e;
754 720
755 if (!engine->graph.accel_blocked) { 721 if (dev_priv->channel) {
756 nouveau_fence_fini(dev); 722 nouveau_fence_fini(dev);
757 nouveau_channel_put_unlocked(&dev_priv->channel); 723 nouveau_channel_put_unlocked(&dev_priv->channel);
758 } 724 }
759 725
760 if (!nouveau_noaccel) { 726 if (!nouveau_noaccel) {
761 engine->fifo.takedown(dev); 727 engine->fifo.takedown(dev);
762 engine->crypt.takedown(dev); 728 for (e = NVOBJ_ENGINE_NR - 1; e >= 0; e--) {
763 engine->graph.takedown(dev); 729 if (dev_priv->eng[e]) {
730 dev_priv->eng[e]->fini(dev, e);
731                                 dev_priv->eng[e]->destroy(dev, e);
732 }
733 }
764 } 734 }
765 engine->fb.takedown(dev); 735 engine->fb.takedown(dev);
766 engine->timer.takedown(dev); 736 engine->timer.takedown(dev);
@@ -866,7 +836,7 @@ static int nouveau_remove_conflicting_drivers(struct drm_device *dev)
866#ifdef CONFIG_X86 836#ifdef CONFIG_X86
867 primary = dev->pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW; 837 primary = dev->pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
868#endif 838#endif
869 839
870 remove_conflicting_framebuffers(dev_priv->apertures, "nouveaufb", primary); 840 remove_conflicting_framebuffers(dev_priv->apertures, "nouveaufb", primary);
871 return 0; 841 return 0;
872} 842}
@@ -918,11 +888,13 @@ int nouveau_load(struct drm_device *dev, unsigned long flags)
918 888
919 /* Time to determine the card architecture */ 889 /* Time to determine the card architecture */
920 reg0 = nv_rd32(dev, NV03_PMC_BOOT_0); 890 reg0 = nv_rd32(dev, NV03_PMC_BOOT_0);
891 dev_priv->stepping = 0; /* XXX: add stepping for pre-NV10? */
921 892
922 /* We're dealing with >=NV10 */ 893 /* We're dealing with >=NV10 */
923 if ((reg0 & 0x0f000000) > 0) { 894 if ((reg0 & 0x0f000000) > 0) {
924 /* Bit 27-20 contain the architecture in hex */ 895 /* Bit 27-20 contain the architecture in hex */
925 dev_priv->chipset = (reg0 & 0xff00000) >> 20; 896 dev_priv->chipset = (reg0 & 0xff00000) >> 20;
897 dev_priv->stepping = (reg0 & 0xff);
926 /* NV04 or NV05 */ 898 /* NV04 or NV05 */
927 } else if ((reg0 & 0xff00fff0) == 0x20004000) { 899 } else if ((reg0 & 0xff00fff0) == 0x20004000) {
928 if (reg0 & 0x00f00000) 900 if (reg0 & 0x00f00000)
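
The architecture probe above packs two fields into PMC_BOOT_0 on NV10 and later parts; a hedged standalone decode (the struct name is assumed):

	#include <stdint.h>

	struct card_id { unsigned chipset, stepping; };

	static struct card_id decode_boot0(uint32_t reg0)
	{
		struct card_id id = { 0, 0 };

		if ((reg0 & 0x0f000000) > 0) {			/* >= NV10 */
			id.chipset  = (reg0 & 0x0ff00000) >> 20; /* bits 27:20 */
			id.stepping = reg0 & 0xff;
		}
		/* NV04/NV05 detection is handled separately, as in the hunk */
		return id;
	}
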
diff --git a/drivers/gpu/drm/nouveau/nouveau_vm.h b/drivers/gpu/drm/nouveau/nouveau_vm.h
index 2e06b55cfdc1..c48a9fc2b47b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_vm.h
@@ -53,8 +53,7 @@ struct nouveau_vm {
53 int refcount; 53 int refcount;
54 54
55 struct list_head pgd_list; 55 struct list_head pgd_list;
56 atomic_t pgraph_refs; 56 atomic_t engref[16];
57 atomic_t pcrypt_refs;
58 57
59 struct nouveau_vm_pgt *pgt; 58 struct nouveau_vm_pgt *pgt;
60 u32 fpde; 59 u32 fpde;
diff --git a/drivers/gpu/drm/nouveau/nouveau_volt.c b/drivers/gpu/drm/nouveau/nouveau_volt.c
index 04fdc00a67d5..75e872741d92 100644
--- a/drivers/gpu/drm/nouveau/nouveau_volt.c
+++ b/drivers/gpu/drm/nouveau/nouveau_volt.c
@@ -159,8 +159,16 @@ nouveau_volt_init(struct drm_device *dev)
159 headerlen = volt[1]; 159 headerlen = volt[1];
160 recordlen = volt[2]; 160 recordlen = volt[2];
161 entries = volt[3]; 161 entries = volt[3];
162 vidshift = hweight8(volt[5]);
163 vidmask = volt[4]; 162 vidmask = volt[4];
163 /* no longer certain what volt[5] is, if it's related to
164 * the vid shift then it's definitely not a function of
165 * how many bits are set.
166 *
167 * after looking at a number of nva3+ vbios images, they
168 * all seem likely to have a static shift of 2.. let's
169 * go with that for now until proven otherwise.
170 */
171 vidshift = 2;
164 break; 172 break;
165 default: 173 default:
166 NV_WARN(dev, "voltage table 0x%02x unknown\n", volt[0]); 174 NV_WARN(dev, "voltage table 0x%02x unknown\n", volt[0]);
diff --git a/drivers/gpu/drm/nouveau/nv04_crtc.c b/drivers/gpu/drm/nouveau/nv04_crtc.c
index 748b9d9c2949..3c78bc81357e 100644
--- a/drivers/gpu/drm/nouveau/nv04_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv04_crtc.c
@@ -790,8 +790,7 @@ nv04_crtc_do_mode_set_base(struct drm_crtc *crtc,
790 if (atomic) { 790 if (atomic) {
791 drm_fb = passed_fb; 791 drm_fb = passed_fb;
792 fb = nouveau_framebuffer(passed_fb); 792 fb = nouveau_framebuffer(passed_fb);
793 } 793 } else {
794 else {
795 /* If not atomic, we can go ahead and pin, and unpin the 794 /* If not atomic, we can go ahead and pin, and unpin the
796 * old fb we were passed. 795 * old fb we were passed.
797 */ 796 */
@@ -944,14 +943,14 @@ nv04_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
944 struct drm_gem_object *gem; 943 struct drm_gem_object *gem;
945 int ret = 0; 944 int ret = 0;
946 945
947 if (width != 64 || height != 64)
948 return -EINVAL;
949
950 if (!buffer_handle) { 946 if (!buffer_handle) {
951 nv_crtc->cursor.hide(nv_crtc, true); 947 nv_crtc->cursor.hide(nv_crtc, true);
952 return 0; 948 return 0;
953 } 949 }
954 950
951 if (width != 64 || height != 64)
952 return -EINVAL;
953
955 gem = drm_gem_object_lookup(dev, file_priv, buffer_handle); 954 gem = drm_gem_object_lookup(dev, file_priv, buffer_handle);
956 if (!gem) 955 if (!gem)
957 return -ENOENT; 956 return -ENOENT;
diff --git a/drivers/gpu/drm/nouveau/nv04_graph.c b/drivers/gpu/drm/nouveau/nv04_graph.c
index af75015068d6..3626ee7db3ba 100644
--- a/drivers/gpu/drm/nouveau/nv04_graph.c
+++ b/drivers/gpu/drm/nouveau/nv04_graph.c
@@ -28,9 +28,11 @@
28#include "nouveau_drv.h" 28#include "nouveau_drv.h"
29#include "nouveau_hw.h" 29#include "nouveau_hw.h"
30#include "nouveau_util.h" 30#include "nouveau_util.h"
31#include "nouveau_ramht.h"
31 32
32static int nv04_graph_register(struct drm_device *dev); 33struct nv04_graph_engine {
33static void nv04_graph_isr(struct drm_device *dev); 34 struct nouveau_exec_engine base;
35};
34 36
35static uint32_t nv04_graph_ctx_regs[] = { 37static uint32_t nv04_graph_ctx_regs[] = {
36 0x0040053c, 38 0x0040053c,
@@ -350,7 +352,7 @@ struct graph_state {
350 uint32_t nv04[ARRAY_SIZE(nv04_graph_ctx_regs)]; 352 uint32_t nv04[ARRAY_SIZE(nv04_graph_ctx_regs)];
351}; 353};
352 354
353struct nouveau_channel * 355static struct nouveau_channel *
354nv04_graph_channel(struct drm_device *dev) 356nv04_graph_channel(struct drm_device *dev)
355{ 357{
356 struct drm_nouveau_private *dev_priv = dev->dev_private; 358 struct drm_nouveau_private *dev_priv = dev->dev_private;
@@ -365,26 +367,6 @@ nv04_graph_channel(struct drm_device *dev)
365 return dev_priv->channels.ptr[chid]; 367 return dev_priv->channels.ptr[chid];
366} 368}
367 369
368static void
369nv04_graph_context_switch(struct drm_device *dev)
370{
371 struct drm_nouveau_private *dev_priv = dev->dev_private;
372 struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
373 struct nouveau_channel *chan = NULL;
374 int chid;
375
376 nouveau_wait_for_idle(dev);
377
378 /* If previous context is valid, we need to save it */
379 pgraph->unload_context(dev);
380
381 /* Load context for next channel */
382 chid = dev_priv->engine.fifo.channel_id(dev);
383 chan = dev_priv->channels.ptr[chid];
384 if (chan)
385 nv04_graph_load_context(chan);
386}
387
388static uint32_t *ctx_reg(struct graph_state *ctx, uint32_t reg) 370static uint32_t *ctx_reg(struct graph_state *ctx, uint32_t reg)
389{ 371{
390 int i; 372 int i;
@@ -397,48 +379,11 @@ static uint32_t *ctx_reg(struct graph_state *ctx, uint32_t reg)
397 return NULL; 379 return NULL;
398} 380}
399 381
400int nv04_graph_create_context(struct nouveau_channel *chan) 382static int
401{ 383nv04_graph_load_context(struct nouveau_channel *chan)
402 struct graph_state *pgraph_ctx;
403 NV_DEBUG(chan->dev, "nv04_graph_context_create %d\n", chan->id);
404
405 chan->pgraph_ctx = pgraph_ctx = kzalloc(sizeof(*pgraph_ctx),
406 GFP_KERNEL);
407 if (pgraph_ctx == NULL)
408 return -ENOMEM;
409
410 *ctx_reg(pgraph_ctx, NV04_PGRAPH_DEBUG_3) = 0xfad4ff31;
411
412 return 0;
413}
414
415void nv04_graph_destroy_context(struct nouveau_channel *chan)
416{
417 struct drm_device *dev = chan->dev;
418 struct drm_nouveau_private *dev_priv = dev->dev_private;
419 struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
420 struct graph_state *pgraph_ctx = chan->pgraph_ctx;
421 unsigned long flags;
422
423 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
424 pgraph->fifo_access(dev, false);
425
426 /* Unload the context if it's the currently active one */
427 if (pgraph->channel(dev) == chan)
428 pgraph->unload_context(dev);
429
430 /* Free the context resources */
431 kfree(pgraph_ctx);
432 chan->pgraph_ctx = NULL;
433
434 pgraph->fifo_access(dev, true);
435 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
436}
437
438int nv04_graph_load_context(struct nouveau_channel *chan)
439{ 384{
385 struct graph_state *pgraph_ctx = chan->engctx[NVOBJ_ENGINE_GR];
440 struct drm_device *dev = chan->dev; 386 struct drm_device *dev = chan->dev;
441 struct graph_state *pgraph_ctx = chan->pgraph_ctx;
442 uint32_t tmp; 387 uint32_t tmp;
443 int i; 388 int i;
444 389
@@ -456,20 +401,19 @@ int nv04_graph_load_context(struct nouveau_channel *chan)
456 return 0; 401 return 0;
457} 402}
458 403
459int 404static int
460nv04_graph_unload_context(struct drm_device *dev) 405nv04_graph_unload_context(struct drm_device *dev)
461{ 406{
462 struct drm_nouveau_private *dev_priv = dev->dev_private; 407 struct drm_nouveau_private *dev_priv = dev->dev_private;
463 struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
464 struct nouveau_channel *chan = NULL; 408 struct nouveau_channel *chan = NULL;
465 struct graph_state *ctx; 409 struct graph_state *ctx;
466 uint32_t tmp; 410 uint32_t tmp;
467 int i; 411 int i;
468 412
469 chan = pgraph->channel(dev); 413 chan = nv04_graph_channel(dev);
470 if (!chan) 414 if (!chan)
471 return 0; 415 return 0;
472 ctx = chan->pgraph_ctx; 416 ctx = chan->engctx[NVOBJ_ENGINE_GR];
473 417
474 for (i = 0; i < ARRAY_SIZE(nv04_graph_ctx_regs); i++) 418 for (i = 0; i < ARRAY_SIZE(nv04_graph_ctx_regs); i++)
475 ctx->nv04[i] = nv_rd32(dev, nv04_graph_ctx_regs[i]); 419 ctx->nv04[i] = nv_rd32(dev, nv04_graph_ctx_regs[i]);
@@ -481,23 +425,85 @@ nv04_graph_unload_context(struct drm_device *dev)
481 return 0; 425 return 0;
482} 426}
483 427
484int nv04_graph_init(struct drm_device *dev) 428static int
429nv04_graph_context_new(struct nouveau_channel *chan, int engine)
485{ 430{
431 struct graph_state *pgraph_ctx;
432 NV_DEBUG(chan->dev, "nv04_graph_context_create %d\n", chan->id);
433
434 pgraph_ctx = kzalloc(sizeof(*pgraph_ctx), GFP_KERNEL);
435 if (pgraph_ctx == NULL)
436 return -ENOMEM;
437
438 *ctx_reg(pgraph_ctx, NV04_PGRAPH_DEBUG_3) = 0xfad4ff31;
439
440 chan->engctx[engine] = pgraph_ctx;
441 return 0;
442}
443
444static void
445nv04_graph_context_del(struct nouveau_channel *chan, int engine)
446{
447 struct drm_device *dev = chan->dev;
486 struct drm_nouveau_private *dev_priv = dev->dev_private; 448 struct drm_nouveau_private *dev_priv = dev->dev_private;
487 uint32_t tmp; 449 struct graph_state *pgraph_ctx = chan->engctx[engine];
450 unsigned long flags;
451
452 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
453 nv04_graph_fifo_access(dev, false);
454
455 /* Unload the context if it's the currently active one */
456 if (nv04_graph_channel(dev) == chan)
457 nv04_graph_unload_context(dev);
458
459 nv04_graph_fifo_access(dev, true);
460 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
461
462 /* Free the context resources */
463 kfree(pgraph_ctx);
464 chan->engctx[engine] = NULL;
465}
466
467int
468nv04_graph_object_new(struct nouveau_channel *chan, int engine,
469 u32 handle, u16 class)
470{
471 struct drm_device *dev = chan->dev;
472 struct nouveau_gpuobj *obj = NULL;
488 int ret; 473 int ret;
489 474
475 ret = nouveau_gpuobj_new(dev, chan, 16, 16, NVOBJ_FLAG_ZERO_FREE, &obj);
476 if (ret)
477 return ret;
478 obj->engine = 1;
479 obj->class = class;
480
481#ifdef __BIG_ENDIAN
482 nv_wo32(obj, 0x00, 0x00080000 | class);
483#else
484 nv_wo32(obj, 0x00, class);
485#endif
486 nv_wo32(obj, 0x04, 0x00000000);
487 nv_wo32(obj, 0x08, 0x00000000);
488 nv_wo32(obj, 0x0c, 0x00000000);
489
490 ret = nouveau_ramht_insert(chan, handle, obj);
491 nouveau_gpuobj_ref(NULL, &obj);
492 return ret;
493}
494
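
The #ifdef __BIG_ENDIAN block above sets bit 19 (0x00080000) in the object's first word on big-endian builds, presumably to enable byte-swapped access; as a hedged helper (the interpretation of the bit is an assumption):

	#include <stdint.h>

	/* Illustrative only; mirrors the endian-dependent write above. */
	static uint32_t object_class_word(uint16_t class, int big_endian)
	{
		return big_endian ? (0x00080000u | class) : (uint32_t)class;
	}
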
495static int
496nv04_graph_init(struct drm_device *dev, int engine)
497{
498 struct drm_nouveau_private *dev_priv = dev->dev_private;
499 uint32_t tmp;
500
490 nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) & 501 nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) &
491 ~NV_PMC_ENABLE_PGRAPH); 502 ~NV_PMC_ENABLE_PGRAPH);
492 nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) | 503 nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) |
493 NV_PMC_ENABLE_PGRAPH); 504 NV_PMC_ENABLE_PGRAPH);
494 505
495 ret = nv04_graph_register(dev);
496 if (ret)
497 return ret;
498
499 /* Enable PGRAPH interrupts */ 506 /* Enable PGRAPH interrupts */
500 nouveau_irq_register(dev, 12, nv04_graph_isr);
501 nv_wr32(dev, NV03_PGRAPH_INTR, 0xFFFFFFFF); 507 nv_wr32(dev, NV03_PGRAPH_INTR, 0xFFFFFFFF);
502 nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF); 508 nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
503 509
@@ -507,7 +513,7 @@ int nv04_graph_init(struct drm_device *dev)
507 nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0x001FFFFF);*/ 513 nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0x001FFFFF);*/
508 nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0x1231c000); 514 nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0x1231c000);
509 /*1231C000 blob, 001 haiku*/ 515 /*1231C000 blob, 001 haiku*/
510 //*V_WRITE(NV04_PGRAPH_DEBUG_1, 0xf2d91100);*/ 516 /*V_WRITE(NV04_PGRAPH_DEBUG_1, 0xf2d91100);*/
511 nv_wr32(dev, NV04_PGRAPH_DEBUG_1, 0x72111100); 517 nv_wr32(dev, NV04_PGRAPH_DEBUG_1, 0x72111100);
512 /*0x72111100 blob , 01 haiku*/ 518 /*0x72111100 blob , 01 haiku*/
513 /*nv_wr32(dev, NV04_PGRAPH_DEBUG_2, 0x11d5f870);*/ 519 /*nv_wr32(dev, NV04_PGRAPH_DEBUG_2, 0x11d5f870);*/
@@ -531,10 +537,12 @@ int nv04_graph_init(struct drm_device *dev)
531 return 0; 537 return 0;
532} 538}
533 539
534void nv04_graph_takedown(struct drm_device *dev) 540static int
541nv04_graph_fini(struct drm_device *dev, int engine)
535{ 542{
543 nv04_graph_unload_context(dev);
536 nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0x00000000); 544 nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0x00000000);
537 nouveau_irq_unregister(dev, 12); 545 return 0;
538} 546}
539 547
540void 548void
@@ -969,13 +977,138 @@ nv04_graph_mthd_bind_chroma(struct nouveau_channel *chan,
969 return 1; 977 return 1;
970} 978}
971 979
972static int 980static struct nouveau_bitfield nv04_graph_intr[] = {
973nv04_graph_register(struct drm_device *dev) 981 { NV_PGRAPH_INTR_NOTIFY, "NOTIFY" },
982 {}
983};
984
985static struct nouveau_bitfield nv04_graph_nstatus[] = {
986 { NV04_PGRAPH_NSTATUS_STATE_IN_USE, "STATE_IN_USE" },
987 { NV04_PGRAPH_NSTATUS_INVALID_STATE, "INVALID_STATE" },
988 { NV04_PGRAPH_NSTATUS_BAD_ARGUMENT, "BAD_ARGUMENT" },
989 { NV04_PGRAPH_NSTATUS_PROTECTION_FAULT, "PROTECTION_FAULT" },
990 {}
991};
992
993struct nouveau_bitfield nv04_graph_nsource[] = {
994 { NV03_PGRAPH_NSOURCE_NOTIFICATION, "NOTIFICATION" },
995 { NV03_PGRAPH_NSOURCE_DATA_ERROR, "DATA_ERROR" },
996 { NV03_PGRAPH_NSOURCE_PROTECTION_ERROR, "PROTECTION_ERROR" },
997 { NV03_PGRAPH_NSOURCE_RANGE_EXCEPTION, "RANGE_EXCEPTION" },
998 { NV03_PGRAPH_NSOURCE_LIMIT_COLOR, "LIMIT_COLOR" },
999 { NV03_PGRAPH_NSOURCE_LIMIT_ZETA, "LIMIT_ZETA" },
1000 { NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD, "ILLEGAL_MTHD" },
1001 { NV03_PGRAPH_NSOURCE_DMA_R_PROTECTION, "DMA_R_PROTECTION" },
1002 { NV03_PGRAPH_NSOURCE_DMA_W_PROTECTION, "DMA_W_PROTECTION" },
1003 { NV03_PGRAPH_NSOURCE_FORMAT_EXCEPTION, "FORMAT_EXCEPTION" },
1004 { NV03_PGRAPH_NSOURCE_PATCH_EXCEPTION, "PATCH_EXCEPTION" },
1005 { NV03_PGRAPH_NSOURCE_STATE_INVALID, "STATE_INVALID" },
1006 { NV03_PGRAPH_NSOURCE_DOUBLE_NOTIFY, "DOUBLE_NOTIFY" },
1007 { NV03_PGRAPH_NSOURCE_NOTIFY_IN_USE, "NOTIFY_IN_USE" },
1008 { NV03_PGRAPH_NSOURCE_METHOD_CNT, "METHOD_CNT" },
1009 { NV03_PGRAPH_NSOURCE_BFR_NOTIFICATION, "BFR_NOTIFICATION" },
1010 { NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION, "DMA_VTX_PROTECTION" },
1011 { NV03_PGRAPH_NSOURCE_DMA_WIDTH_A, "DMA_WIDTH_A" },
1012 { NV03_PGRAPH_NSOURCE_DMA_WIDTH_B, "DMA_WIDTH_B" },
1013 {}
1014};
1015
1016static void
1017nv04_graph_context_switch(struct drm_device *dev)
974{ 1018{
975 struct drm_nouveau_private *dev_priv = dev->dev_private; 1019 struct drm_nouveau_private *dev_priv = dev->dev_private;
1020 struct nouveau_channel *chan = NULL;
1021 int chid;
976 1022
977 if (dev_priv->engine.graph.registered) 1023 nouveau_wait_for_idle(dev);
978 return 0; 1024
1025 /* If previous context is valid, we need to save it */
1026 nv04_graph_unload_context(dev);
1027
1028 /* Load context for next channel */
1029 chid = dev_priv->engine.fifo.channel_id(dev);
1030 chan = dev_priv->channels.ptr[chid];
1031 if (chan)
1032 nv04_graph_load_context(chan);
1033}
1034
1035static void
1036nv04_graph_isr(struct drm_device *dev)
1037{
1038 u32 stat;
1039
1040 while ((stat = nv_rd32(dev, NV03_PGRAPH_INTR))) {
1041 u32 nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
1042 u32 nstatus = nv_rd32(dev, NV03_PGRAPH_NSTATUS);
1043 u32 addr = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR);
1044 u32 chid = (addr & 0x0f000000) >> 24;
1045 u32 subc = (addr & 0x0000e000) >> 13;
1046 u32 mthd = (addr & 0x00001ffc);
1047 u32 data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA);
1048 u32 class = nv_rd32(dev, 0x400180 + subc * 4) & 0xff;
1049 u32 show = stat;
1050
1051 if (stat & NV_PGRAPH_INTR_NOTIFY) {
1052 if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
1053 if (!nouveau_gpuobj_mthd_call2(dev, chid, class, mthd, data))
1054 show &= ~NV_PGRAPH_INTR_NOTIFY;
1055 }
1056 }
1057
1058 if (stat & NV_PGRAPH_INTR_CONTEXT_SWITCH) {
1059 nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_CONTEXT_SWITCH);
1060 stat &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
1061 show &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
1062 nv04_graph_context_switch(dev);
1063 }
1064
1065 nv_wr32(dev, NV03_PGRAPH_INTR, stat);
1066 nv_wr32(dev, NV04_PGRAPH_FIFO, 0x00000001);
1067
1068 if (show && nouveau_ratelimit()) {
1069 NV_INFO(dev, "PGRAPH -");
1070 nouveau_bitfield_print(nv04_graph_intr, show);
1071 printk(" nsource:");
1072 nouveau_bitfield_print(nv04_graph_nsource, nsource);
1073 printk(" nstatus:");
1074 nouveau_bitfield_print(nv04_graph_nstatus, nstatus);
1075 printk("\n");
1076 NV_INFO(dev, "PGRAPH - ch %d/%d class 0x%04x "
1077 "mthd 0x%04x data 0x%08x\n",
1078 chid, subc, class, mthd, data);
1079 }
1080 }
1081}
1082
1083static void
1084nv04_graph_destroy(struct drm_device *dev, int engine)
1085{
1086 struct nv04_graph_engine *pgraph = nv_engine(dev, engine);
1087
1088 nouveau_irq_unregister(dev, 12);
1089
1090 NVOBJ_ENGINE_DEL(dev, GR);
1091 kfree(pgraph);
1092}
1093
1094int
1095nv04_graph_create(struct drm_device *dev)
1096{
1097 struct nv04_graph_engine *pgraph;
1098
1099 pgraph = kzalloc(sizeof(*pgraph), GFP_KERNEL);
1100 if (!pgraph)
1101 return -ENOMEM;
1102
1103 pgraph->base.destroy = nv04_graph_destroy;
1104 pgraph->base.init = nv04_graph_init;
1105 pgraph->base.fini = nv04_graph_fini;
1106 pgraph->base.context_new = nv04_graph_context_new;
1107 pgraph->base.context_del = nv04_graph_context_del;
1108 pgraph->base.object_new = nv04_graph_object_new;
1109
1110 NVOBJ_ENGINE_ADD(dev, GR, &pgraph->base);
1111 nouveau_irq_register(dev, 12, nv04_graph_isr);
979 1112
980 /* dvd subpicture */ 1113 /* dvd subpicture */
981 NVOBJ_CLASS(dev, 0x0038, GR); 1114 NVOBJ_CLASS(dev, 0x0038, GR);
@@ -1222,93 +1355,5 @@ nv04_graph_register(struct drm_device *dev)
1222 NVOBJ_CLASS(dev, 0x506e, SW); 1355 NVOBJ_CLASS(dev, 0x506e, SW);
1223 NVOBJ_MTHD (dev, 0x506e, 0x0150, nv04_graph_mthd_set_ref); 1356 NVOBJ_MTHD (dev, 0x506e, 0x0150, nv04_graph_mthd_set_ref);
1224 NVOBJ_MTHD (dev, 0x506e, 0x0500, nv04_graph_mthd_page_flip); 1357 NVOBJ_MTHD (dev, 0x506e, 0x0500, nv04_graph_mthd_page_flip);
1225
1226 dev_priv->engine.graph.registered = true;
1227 return 0; 1358 return 0;
1228};
1229
1230static struct nouveau_bitfield nv04_graph_intr[] = {
1231 { NV_PGRAPH_INTR_NOTIFY, "NOTIFY" },
1232 {}
1233};
1234
1235static struct nouveau_bitfield nv04_graph_nstatus[] =
1236{
1237 { NV04_PGRAPH_NSTATUS_STATE_IN_USE, "STATE_IN_USE" },
1238 { NV04_PGRAPH_NSTATUS_INVALID_STATE, "INVALID_STATE" },
1239 { NV04_PGRAPH_NSTATUS_BAD_ARGUMENT, "BAD_ARGUMENT" },
1240 { NV04_PGRAPH_NSTATUS_PROTECTION_FAULT, "PROTECTION_FAULT" },
1241 {}
1242};
1243
1244struct nouveau_bitfield nv04_graph_nsource[] =
1245{
1246 { NV03_PGRAPH_NSOURCE_NOTIFICATION, "NOTIFICATION" },
1247 { NV03_PGRAPH_NSOURCE_DATA_ERROR, "DATA_ERROR" },
1248 { NV03_PGRAPH_NSOURCE_PROTECTION_ERROR, "PROTECTION_ERROR" },
1249 { NV03_PGRAPH_NSOURCE_RANGE_EXCEPTION, "RANGE_EXCEPTION" },
1250 { NV03_PGRAPH_NSOURCE_LIMIT_COLOR, "LIMIT_COLOR" },
1251 { NV03_PGRAPH_NSOURCE_LIMIT_ZETA, "LIMIT_ZETA" },
1252 { NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD, "ILLEGAL_MTHD" },
1253 { NV03_PGRAPH_NSOURCE_DMA_R_PROTECTION, "DMA_R_PROTECTION" },
1254 { NV03_PGRAPH_NSOURCE_DMA_W_PROTECTION, "DMA_W_PROTECTION" },
1255 { NV03_PGRAPH_NSOURCE_FORMAT_EXCEPTION, "FORMAT_EXCEPTION" },
1256 { NV03_PGRAPH_NSOURCE_PATCH_EXCEPTION, "PATCH_EXCEPTION" },
1257 { NV03_PGRAPH_NSOURCE_STATE_INVALID, "STATE_INVALID" },
1258 { NV03_PGRAPH_NSOURCE_DOUBLE_NOTIFY, "DOUBLE_NOTIFY" },
1259 { NV03_PGRAPH_NSOURCE_NOTIFY_IN_USE, "NOTIFY_IN_USE" },
1260 { NV03_PGRAPH_NSOURCE_METHOD_CNT, "METHOD_CNT" },
1261 { NV03_PGRAPH_NSOURCE_BFR_NOTIFICATION, "BFR_NOTIFICATION" },
1262 { NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION, "DMA_VTX_PROTECTION" },
1263 { NV03_PGRAPH_NSOURCE_DMA_WIDTH_A, "DMA_WIDTH_A" },
1264 { NV03_PGRAPH_NSOURCE_DMA_WIDTH_B, "DMA_WIDTH_B" },
1265 {}
1266};
1267
1268static void
1269nv04_graph_isr(struct drm_device *dev)
1270{
1271 u32 stat;
1272
1273 while ((stat = nv_rd32(dev, NV03_PGRAPH_INTR))) {
1274 u32 nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
1275 u32 nstatus = nv_rd32(dev, NV03_PGRAPH_NSTATUS);
1276 u32 addr = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR);
1277 u32 chid = (addr & 0x0f000000) >> 24;
1278 u32 subc = (addr & 0x0000e000) >> 13;
1279 u32 mthd = (addr & 0x00001ffc);
1280 u32 data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA);
1281 u32 class = nv_rd32(dev, 0x400180 + subc * 4) & 0xff;
1282 u32 show = stat;
1283
1284 if (stat & NV_PGRAPH_INTR_NOTIFY) {
1285 if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
1286 if (!nouveau_gpuobj_mthd_call2(dev, chid, class, mthd, data))
1287 show &= ~NV_PGRAPH_INTR_NOTIFY;
1288 }
1289 }
1290
1291 if (stat & NV_PGRAPH_INTR_CONTEXT_SWITCH) {
1292 nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_CONTEXT_SWITCH);
1293 stat &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
1294 show &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
1295 nv04_graph_context_switch(dev);
1296 }
1297
1298 nv_wr32(dev, NV03_PGRAPH_INTR, stat);
1299 nv_wr32(dev, NV04_PGRAPH_FIFO, 0x00000001);
1300
1301 if (show && nouveau_ratelimit()) {
1302 NV_INFO(dev, "PGRAPH -");
1303 nouveau_bitfield_print(nv04_graph_intr, show);
1304 printk(" nsource:");
1305 nouveau_bitfield_print(nv04_graph_nsource, nsource);
1306 printk(" nstatus:");
1307 nouveau_bitfield_print(nv04_graph_nstatus, nstatus);
1308 printk("\n");
1309 NV_INFO(dev, "PGRAPH - ch %d/%d class 0x%04x "
1310 "mthd 0x%04x data 0x%08x\n",
1311 chid, subc, class, mthd, data);
1312 }
1313 }
1314} 1359}
diff --git a/drivers/gpu/drm/nouveau/nv04_instmem.c b/drivers/gpu/drm/nouveau/nv04_instmem.c
index b8e3edb5c063..b8611b955313 100644
--- a/drivers/gpu/drm/nouveau/nv04_instmem.c
+++ b/drivers/gpu/drm/nouveau/nv04_instmem.c
@@ -95,6 +95,9 @@ nv04_instmem_takedown(struct drm_device *dev)
95 nouveau_ramht_ref(NULL, &dev_priv->ramht, NULL); 95 nouveau_ramht_ref(NULL, &dev_priv->ramht, NULL);
96 nouveau_gpuobj_ref(NULL, &dev_priv->ramro); 96 nouveau_gpuobj_ref(NULL, &dev_priv->ramro);
97 nouveau_gpuobj_ref(NULL, &dev_priv->ramfc); 97 nouveau_gpuobj_ref(NULL, &dev_priv->ramfc);
98
99 if (drm_mm_initialized(&dev_priv->ramin_heap))
100 drm_mm_takedown(&dev_priv->ramin_heap);
98} 101}
99 102
100int 103int
diff --git a/drivers/gpu/drm/nouveau/nv10_graph.c b/drivers/gpu/drm/nouveau/nv10_graph.c
index 8c92edb7bbcd..0930c6cb88e0 100644
--- a/drivers/gpu/drm/nouveau/nv10_graph.c
+++ b/drivers/gpu/drm/nouveau/nv10_graph.c
@@ -28,10 +28,9 @@
28#include "nouveau_drv.h" 28#include "nouveau_drv.h"
29#include "nouveau_util.h" 29#include "nouveau_util.h"
30 30
31static int nv10_graph_register(struct drm_device *); 31struct nv10_graph_engine {
32static void nv10_graph_isr(struct drm_device *); 32 struct nouveau_exec_engine base;
33 33};
34#define NV10_FIFO_NUMBER 32
35 34
36struct pipe_state { 35struct pipe_state {
37 uint32_t pipe_0x0000[0x040/4]; 36 uint32_t pipe_0x0000[0x040/4];
@@ -414,9 +413,9 @@ struct graph_state {
414 413
415static void nv10_graph_save_pipe(struct nouveau_channel *chan) 414static void nv10_graph_save_pipe(struct nouveau_channel *chan)
416{ 415{
417 struct drm_device *dev = chan->dev; 416 struct graph_state *pgraph_ctx = chan->engctx[NVOBJ_ENGINE_GR];
418 struct graph_state *pgraph_ctx = chan->pgraph_ctx;
419 struct pipe_state *pipe = &pgraph_ctx->pipe_state; 417 struct pipe_state *pipe = &pgraph_ctx->pipe_state;
418 struct drm_device *dev = chan->dev;
420 419
421 PIPE_SAVE(dev, pipe->pipe_0x4400, 0x4400); 420 PIPE_SAVE(dev, pipe->pipe_0x4400, 0x4400);
422 PIPE_SAVE(dev, pipe->pipe_0x0200, 0x0200); 421 PIPE_SAVE(dev, pipe->pipe_0x0200, 0x0200);
@@ -432,9 +431,9 @@ static void nv10_graph_save_pipe(struct nouveau_channel *chan)
432 431
433static void nv10_graph_load_pipe(struct nouveau_channel *chan) 432static void nv10_graph_load_pipe(struct nouveau_channel *chan)
434{ 433{
435 struct drm_device *dev = chan->dev; 434 struct graph_state *pgraph_ctx = chan->engctx[NVOBJ_ENGINE_GR];
436 struct graph_state *pgraph_ctx = chan->pgraph_ctx;
437 struct pipe_state *pipe = &pgraph_ctx->pipe_state; 435 struct pipe_state *pipe = &pgraph_ctx->pipe_state;
436 struct drm_device *dev = chan->dev;
438 uint32_t xfmode0, xfmode1; 437 uint32_t xfmode0, xfmode1;
439 int i; 438 int i;
440 439
@@ -482,9 +481,9 @@ static void nv10_graph_load_pipe(struct nouveau_channel *chan)
482 481
483static void nv10_graph_create_pipe(struct nouveau_channel *chan) 482static void nv10_graph_create_pipe(struct nouveau_channel *chan)
484{ 483{
485 struct drm_device *dev = chan->dev; 484 struct graph_state *pgraph_ctx = chan->engctx[NVOBJ_ENGINE_GR];
486 struct graph_state *pgraph_ctx = chan->pgraph_ctx;
487 struct pipe_state *fifo_pipe_state = &pgraph_ctx->pipe_state; 485 struct pipe_state *fifo_pipe_state = &pgraph_ctx->pipe_state;
486 struct drm_device *dev = chan->dev;
488 uint32_t *fifo_pipe_state_addr; 487 uint32_t *fifo_pipe_state_addr;
489 int i; 488 int i;
490#define PIPE_INIT(addr) \ 489#define PIPE_INIT(addr) \
@@ -661,8 +660,6 @@ static void nv10_graph_load_dma_vtxbuf(struct nouveau_channel *chan,
661 uint32_t inst) 660 uint32_t inst)
662{ 661{
663 struct drm_device *dev = chan->dev; 662 struct drm_device *dev = chan->dev;
664 struct drm_nouveau_private *dev_priv = dev->dev_private;
665 struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
666 uint32_t st2, st2_dl, st2_dh, fifo_ptr, fifo[0x60/4]; 663 uint32_t st2, st2_dl, st2_dh, fifo_ptr, fifo[0x60/4];
667 uint32_t ctx_user, ctx_switch[5]; 664 uint32_t ctx_user, ctx_switch[5];
668 int i, subchan = -1; 665 int i, subchan = -1;
@@ -711,8 +708,8 @@ static void nv10_graph_load_dma_vtxbuf(struct nouveau_channel *chan,
711 0x2c000000 | chan->id << 20 | subchan << 16 | 0x18c); 708 0x2c000000 | chan->id << 20 | subchan << 16 | 0x18c);
712 nv_wr32(dev, NV10_PGRAPH_FFINTFC_ST2_DL, inst); 709 nv_wr32(dev, NV10_PGRAPH_FFINTFC_ST2_DL, inst);
713 nv_mask(dev, NV10_PGRAPH_CTX_CONTROL, 0, 0x10000); 710 nv_mask(dev, NV10_PGRAPH_CTX_CONTROL, 0, 0x10000);
714 pgraph->fifo_access(dev, true); 711 nv04_graph_fifo_access(dev, true);
715 pgraph->fifo_access(dev, false); 712 nv04_graph_fifo_access(dev, false);
716 713
717 /* Restore the FIFO state */ 714 /* Restore the FIFO state */
718 for (i = 0; i < ARRAY_SIZE(fifo); i++) 715 for (i = 0; i < ARRAY_SIZE(fifo); i++)
@@ -729,11 +726,12 @@ static void nv10_graph_load_dma_vtxbuf(struct nouveau_channel *chan,
729 nv_wr32(dev, NV10_PGRAPH_CTX_USER, ctx_user); 726 nv_wr32(dev, NV10_PGRAPH_CTX_USER, ctx_user);
730} 727}
731 728
732int nv10_graph_load_context(struct nouveau_channel *chan) 729static int
730nv10_graph_load_context(struct nouveau_channel *chan)
733{ 731{
734 struct drm_device *dev = chan->dev; 732 struct drm_device *dev = chan->dev;
735 struct drm_nouveau_private *dev_priv = dev->dev_private; 733 struct drm_nouveau_private *dev_priv = dev->dev_private;
736 struct graph_state *pgraph_ctx = chan->pgraph_ctx; 734 struct graph_state *pgraph_ctx = chan->engctx[NVOBJ_ENGINE_GR];
737 uint32_t tmp; 735 uint32_t tmp;
738 int i; 736 int i;
739 737
@@ -757,21 +755,20 @@ int nv10_graph_load_context(struct nouveau_channel *chan)
757 return 0; 755 return 0;
758} 756}
759 757
760int 758static int
 nv10_graph_unload_context(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
 	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
 	struct nouveau_channel *chan;
 	struct graph_state *ctx;
 	uint32_t tmp;
 	int i;

-	chan = pgraph->channel(dev);
+	chan = nv10_graph_channel(dev);
 	if (!chan)
 		return 0;
-	ctx = chan->pgraph_ctx;
+	ctx = chan->engctx[NVOBJ_ENGINE_GR];

 	for (i = 0; i < ARRAY_SIZE(nv10_graph_ctx_regs); i++)
 		ctx->nv10[i] = nv_rd32(dev, nv10_graph_ctx_regs[i]);
@@ -805,7 +802,7 @@ nv10_graph_context_switch(struct drm_device *dev)
 	/* Load context for next channel */
 	chid = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 20) & 0x1f;
 	chan = dev_priv->channels.ptr[chid];
-	if (chan && chan->pgraph_ctx)
+	if (chan && chan->engctx[NVOBJ_ENGINE_GR])
 		nv10_graph_load_context(chan);
 }

@@ -836,7 +833,8 @@ nv10_graph_channel(struct drm_device *dev)
 	return dev_priv->channels.ptr[chid];
 }

-int nv10_graph_create_context(struct nouveau_channel *chan)
+static int
+nv10_graph_context_new(struct nouveau_channel *chan, int engine)
 {
 	struct drm_device *dev = chan->dev;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
@@ -844,11 +842,10 @@ int nv10_graph_create_context(struct nouveau_channel *chan)

 	NV_DEBUG(dev, "nv10_graph_context_create %d\n", chan->id);

-	chan->pgraph_ctx = pgraph_ctx = kzalloc(sizeof(*pgraph_ctx),
-						GFP_KERNEL);
+	pgraph_ctx = kzalloc(sizeof(*pgraph_ctx), GFP_KERNEL);
 	if (pgraph_ctx == NULL)
 		return -ENOMEM;
-
+	chan->engctx[engine] = pgraph_ctx;

 	NV_WRITE_CTX(0x00400e88, 0x08000000);
 	NV_WRITE_CTX(0x00400e9c, 0x4b7fffff);
@@ -873,30 +870,30 @@ int nv10_graph_create_context(struct nouveau_channel *chan)
 	return 0;
 }

-void nv10_graph_destroy_context(struct nouveau_channel *chan)
+static void
+nv10_graph_context_del(struct nouveau_channel *chan, int engine)
 {
 	struct drm_device *dev = chan->dev;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
-	struct graph_state *pgraph_ctx = chan->pgraph_ctx;
+	struct graph_state *pgraph_ctx = chan->engctx[engine];
 	unsigned long flags;

 	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
-	pgraph->fifo_access(dev, false);
+	nv04_graph_fifo_access(dev, false);

 	/* Unload the context if it's the currently active one */
-	if (pgraph->channel(dev) == chan)
-		pgraph->unload_context(dev);
+	if (nv10_graph_channel(dev) == chan)
+		nv10_graph_unload_context(dev);
+
+	nv04_graph_fifo_access(dev, true);
+	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);

 	/* Free the context resources */
+	chan->engctx[engine] = NULL;
 	kfree(pgraph_ctx);
-	chan->pgraph_ctx = NULL;
-
-	pgraph->fifo_access(dev, true);
-	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
 }

-void
+static void
 nv10_graph_set_tile_region(struct drm_device *dev, int i)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
@@ -907,22 +904,18 @@ nv10_graph_set_tile_region(struct drm_device *dev, int i)
 	nv_wr32(dev, NV10_PGRAPH_TILE(i), tile->addr);
 }

-int nv10_graph_init(struct drm_device *dev)
+static int
+nv10_graph_init(struct drm_device *dev, int engine)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	uint32_t tmp;
-	int ret, i;
+	u32 tmp;
+	int i;

 	nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) &
 			~NV_PMC_ENABLE_PGRAPH);
 	nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) |
 			 NV_PMC_ENABLE_PGRAPH);

-	ret = nv10_graph_register(dev);
-	if (ret)
-		return ret;
-
-	nouveau_irq_register(dev, 12, nv10_graph_isr);
 	nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF);
 	nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);

@@ -963,18 +956,20 @@ int nv10_graph_init(struct drm_device *dev)
 	return 0;
 }

-void nv10_graph_takedown(struct drm_device *dev)
+static int
+nv10_graph_fini(struct drm_device *dev, int engine)
 {
+	nv10_graph_unload_context(dev);
 	nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0x00000000);
-	nouveau_irq_unregister(dev, 12);
+	return 0;
 }

 static int
 nv17_graph_mthd_lma_window(struct nouveau_channel *chan,
			   u32 class, u32 mthd, u32 data)
 {
+	struct graph_state *ctx = chan->engctx[NVOBJ_ENGINE_GR];
 	struct drm_device *dev = chan->dev;
-	struct graph_state *ctx = chan->pgraph_ctx;
 	struct pipe_state *pipe = &ctx->pipe_state;
 	uint32_t pipe_0x0040[1], pipe_0x64c0[8], pipe_0x6a80[3], pipe_0x6ab0[3];
 	uint32_t xfmode0, xfmode1;
@@ -1061,64 +1056,13 @@ nv17_graph_mthd_lma_enable(struct nouveau_channel *chan,
 	return 0;
 }

-static int
-nv10_graph_register(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-
-	if (dev_priv->engine.graph.registered)
-		return 0;
-
-	NVOBJ_CLASS(dev, 0x506e, SW); /* nvsw */
-	NVOBJ_CLASS(dev, 0x0030, GR); /* null */
-	NVOBJ_CLASS(dev, 0x0039, GR); /* m2mf */
-	NVOBJ_CLASS(dev, 0x004a, GR); /* gdirect */
-	NVOBJ_CLASS(dev, 0x005f, GR); /* imageblit */
-	NVOBJ_CLASS(dev, 0x009f, GR); /* imageblit (nv12) */
-	NVOBJ_CLASS(dev, 0x008a, GR); /* ifc */
-	NVOBJ_CLASS(dev, 0x0089, GR); /* sifm */
-	NVOBJ_CLASS(dev, 0x0062, GR); /* surf2d */
-	NVOBJ_CLASS(dev, 0x0043, GR); /* rop */
-	NVOBJ_CLASS(dev, 0x0012, GR); /* beta1 */
-	NVOBJ_CLASS(dev, 0x0072, GR); /* beta4 */
-	NVOBJ_CLASS(dev, 0x0019, GR); /* cliprect */
-	NVOBJ_CLASS(dev, 0x0044, GR); /* pattern */
-	NVOBJ_CLASS(dev, 0x0052, GR); /* swzsurf */
-	NVOBJ_CLASS(dev, 0x0093, GR); /* surf3d */
-	NVOBJ_CLASS(dev, 0x0094, GR); /* tex_tri */
-	NVOBJ_CLASS(dev, 0x0095, GR); /* multitex_tri */
-
-	/* celcius */
-	if (dev_priv->chipset <= 0x10) {
-		NVOBJ_CLASS(dev, 0x0056, GR);
-	} else
-	if (dev_priv->chipset < 0x17 || dev_priv->chipset == 0x1a) {
-		NVOBJ_CLASS(dev, 0x0096, GR);
-	} else {
-		NVOBJ_CLASS(dev, 0x0099, GR);
-		NVOBJ_MTHD (dev, 0x0099, 0x1638, nv17_graph_mthd_lma_window);
-		NVOBJ_MTHD (dev, 0x0099, 0x163c, nv17_graph_mthd_lma_window);
-		NVOBJ_MTHD (dev, 0x0099, 0x1640, nv17_graph_mthd_lma_window);
-		NVOBJ_MTHD (dev, 0x0099, 0x1644, nv17_graph_mthd_lma_window);
-		NVOBJ_MTHD (dev, 0x0099, 0x1658, nv17_graph_mthd_lma_enable);
-	}
-
-	/* nvsw */
-	NVOBJ_CLASS(dev, 0x506e, SW);
-	NVOBJ_MTHD (dev, 0x506e, 0x0500, nv04_graph_mthd_page_flip);
-
-	dev_priv->engine.graph.registered = true;
-	return 0;
-}
-
 struct nouveau_bitfield nv10_graph_intr[] = {
 	{ NV_PGRAPH_INTR_NOTIFY, "NOTIFY" },
 	{ NV_PGRAPH_INTR_ERROR,  "ERROR"  },
 	{}
 };

-struct nouveau_bitfield nv10_graph_nstatus[] =
-{
+struct nouveau_bitfield nv10_graph_nstatus[] = {
 	{ NV10_PGRAPH_NSTATUS_STATE_IN_USE,   "STATE_IN_USE" },
 	{ NV10_PGRAPH_NSTATUS_INVALID_STATE,  "INVALID_STATE" },
 	{ NV10_PGRAPH_NSTATUS_BAD_ARGUMENT,   "BAD_ARGUMENT" },
@@ -1173,3 +1117,73 @@ nv10_graph_isr(struct drm_device *dev)
 		}
 	}
 }
+
+static void
+nv10_graph_destroy(struct drm_device *dev, int engine)
+{
+	struct nv10_graph_engine *pgraph = nv_engine(dev, engine);
+
+	nouveau_irq_unregister(dev, 12);
+	kfree(pgraph);
+}
+
+int
+nv10_graph_create(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nv10_graph_engine *pgraph;
+
+	pgraph = kzalloc(sizeof(*pgraph), GFP_KERNEL);
+	if (!pgraph)
+		return -ENOMEM;
+
+	pgraph->base.destroy = nv10_graph_destroy;
+	pgraph->base.init = nv10_graph_init;
+	pgraph->base.fini = nv10_graph_fini;
+	pgraph->base.context_new = nv10_graph_context_new;
+	pgraph->base.context_del = nv10_graph_context_del;
+	pgraph->base.object_new = nv04_graph_object_new;
+	pgraph->base.set_tile_region = nv10_graph_set_tile_region;
+
+	NVOBJ_ENGINE_ADD(dev, GR, &pgraph->base);
+	nouveau_irq_register(dev, 12, nv10_graph_isr);
+
+	/* nvsw */
+	NVOBJ_CLASS(dev, 0x506e, SW);
+	NVOBJ_MTHD (dev, 0x506e, 0x0500, nv04_graph_mthd_page_flip);
+
+	NVOBJ_CLASS(dev, 0x0030, GR); /* null */
+	NVOBJ_CLASS(dev, 0x0039, GR); /* m2mf */
+	NVOBJ_CLASS(dev, 0x004a, GR); /* gdirect */
+	NVOBJ_CLASS(dev, 0x005f, GR); /* imageblit */
+	NVOBJ_CLASS(dev, 0x009f, GR); /* imageblit (nv12) */
+	NVOBJ_CLASS(dev, 0x008a, GR); /* ifc */
+	NVOBJ_CLASS(dev, 0x0089, GR); /* sifm */
+	NVOBJ_CLASS(dev, 0x0062, GR); /* surf2d */
+	NVOBJ_CLASS(dev, 0x0043, GR); /* rop */
+	NVOBJ_CLASS(dev, 0x0012, GR); /* beta1 */
+	NVOBJ_CLASS(dev, 0x0072, GR); /* beta4 */
+	NVOBJ_CLASS(dev, 0x0019, GR); /* cliprect */
+	NVOBJ_CLASS(dev, 0x0044, GR); /* pattern */
+	NVOBJ_CLASS(dev, 0x0052, GR); /* swzsurf */
+	NVOBJ_CLASS(dev, 0x0093, GR); /* surf3d */
+	NVOBJ_CLASS(dev, 0x0094, GR); /* tex_tri */
+	NVOBJ_CLASS(dev, 0x0095, GR); /* multitex_tri */
+
+	/* celcius */
+	if (dev_priv->chipset <= 0x10) {
+		NVOBJ_CLASS(dev, 0x0056, GR);
+	} else
+	if (dev_priv->chipset < 0x17 || dev_priv->chipset == 0x1a) {
+		NVOBJ_CLASS(dev, 0x0096, GR);
+	} else {
+		NVOBJ_CLASS(dev, 0x0099, GR);
+		NVOBJ_MTHD (dev, 0x0099, 0x1638, nv17_graph_mthd_lma_window);
+		NVOBJ_MTHD (dev, 0x0099, 0x163c, nv17_graph_mthd_lma_window);
+		NVOBJ_MTHD (dev, 0x0099, 0x1640, nv17_graph_mthd_lma_window);
+		NVOBJ_MTHD (dev, 0x0099, 0x1644, nv17_graph_mthd_lma_window);
+		NVOBJ_MTHD (dev, 0x0099, 0x1658, nv17_graph_mthd_lma_enable);
+	}
+
+	return 0;
+}
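The nv10 changes above are the template for the rest of this series: each chipset's PGRAPH code stops hanging callbacks off dev_priv->engine.graph and instead allocates its own engine object, fills in a vtable, and registers it with the core. A minimal sketch of the shape involved, in C; the exact nouveau_exec_engine layout is an assumption here, and only the callbacks visible in this diff are implied:

	/* Sketch, assuming nouveau_exec_engine carries exactly the hooks
	 * installed above (destroy/init/fini/context_new/context_del/
	 * object_new/set_tile_region). */
	struct nouveau_exec_engine {
		void (*destroy)(struct drm_device *, int engine);
		int  (*init)(struct drm_device *, int engine);
		int  (*fini)(struct drm_device *, int engine);
		int  (*context_new)(struct nouveau_channel *, int engine);
		void (*context_del)(struct nouveau_channel *, int engine);
		int  (*object_new)(struct nouveau_channel *, int engine,
				   u32 handle, u16 class);
		void (*set_tile_region)(struct drm_device *, int i);
	};

	/* Each chipset wraps the base struct to carry private state and
	 * recovers it from the engine slot via nv_engine(dev, engine). */
	struct nv10_graph_engine {
		struct nouveau_exec_engine base;
	};

Per-channel context likewise moves from the dedicated chan->pgraph_ctx field into the generic chan->engctx[engine] array, which is what lets the core treat GR, MPEG, and future engines uniformly.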
diff --git a/drivers/gpu/drm/nouveau/nv20_graph.c b/drivers/gpu/drm/nouveau/nv20_graph.c
index 8464b76798d5..affc7d7dd029 100644
--- a/drivers/gpu/drm/nouveau/nv20_graph.c
+++ b/drivers/gpu/drm/nouveau/nv20_graph.c
@@ -24,6 +24,14 @@
  *
  */

+struct nv20_graph_engine {
+	struct nouveau_exec_engine base;
+	struct nouveau_gpuobj *ctxtab;
+	void (*grctx_init)(struct nouveau_gpuobj *);
+	u32 grctx_size;
+	u32 grctx_user;
+};
+
 #define NV20_GRCTX_SIZE (3580*4)
 #define NV25_GRCTX_SIZE (3529*4)
 #define NV2A_GRCTX_SIZE (3500*4)
@@ -32,12 +40,54 @@
 #define NV34_GRCTX_SIZE    (18140)
 #define NV35_36_GRCTX_SIZE (22396)

-static int nv20_graph_register(struct drm_device *);
-static int nv30_graph_register(struct drm_device *);
-static void nv20_graph_isr(struct drm_device *);
+int
+nv20_graph_unload_context(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
+	struct nouveau_channel *chan;
+	struct nouveau_gpuobj *grctx;
+	u32 tmp;
+
+	chan = nv10_graph_channel(dev);
+	if (!chan)
+		return 0;
+	grctx = chan->engctx[NVOBJ_ENGINE_GR];
+
+	nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, grctx->pinst >> 4);
+	nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_XFER,
+		     NV20_PGRAPH_CHANNEL_CTX_XFER_SAVE);
+
+	nouveau_wait_for_idle(dev);
+
+	nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000000);
+	tmp  = nv_rd32(dev, NV10_PGRAPH_CTX_USER) & 0x00ffffff;
+	tmp |= (pfifo->channels - 1) << 24;
+	nv_wr32(dev, NV10_PGRAPH_CTX_USER, tmp);
+	return 0;
+}
+
+static void
+nv20_graph_rdi(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	int i, writecount = 32;
+	uint32_t rdi_index = 0x2c80000;
+
+	if (dev_priv->chipset == 0x20) {
+		rdi_index = 0x3d0000;
+		writecount = 15;
+	}
+
+	nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, rdi_index);
+	for (i = 0; i < writecount; i++)
+		nv_wr32(dev, NV10_PGRAPH_RDI_DATA, 0);
+
+	nouveau_wait_for_idle(dev);
+}

 static void
-nv20_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
+nv20_graph_context_init(struct nouveau_gpuobj *ctx)
 {
 	int i;

@@ -87,7 +137,7 @@ nv20_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
 }

 static void
-nv25_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
+nv25_graph_context_init(struct nouveau_gpuobj *ctx)
 {
 	int i;

@@ -146,7 +196,7 @@ nv25_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
 }

 static void
-nv2a_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
+nv2a_graph_context_init(struct nouveau_gpuobj *ctx)
 {
 	int i;

@@ -196,7 +246,7 @@ nv2a_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
 }

 static void
-nv30_31_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
+nv30_31_graph_context_init(struct nouveau_gpuobj *ctx)
 {
 	int i;

@@ -254,7 +304,7 @@ nv30_31_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
 }

 static void
-nv34_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
+nv34_graph_context_init(struct nouveau_gpuobj *ctx)
 {
 	int i;

@@ -312,7 +362,7 @@ nv34_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
 }

 static void
-nv35_36_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
+nv35_36_graph_context_init(struct nouveau_gpuobj *ctx)
 {
 	int i;

@@ -370,148 +420,57 @@ nv35_36_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
 }

 int
-nv20_graph_create_context(struct nouveau_channel *chan)
+nv20_graph_context_new(struct nouveau_channel *chan, int engine)
 {
+	struct nv20_graph_engine *pgraph = nv_engine(chan->dev, engine);
+	struct nouveau_gpuobj *grctx = NULL;
 	struct drm_device *dev = chan->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
-	void (*ctx_init)(struct drm_device *, struct nouveau_gpuobj *);
-	unsigned int idoffs = 0x28;
 	int ret;

-	switch (dev_priv->chipset) {
-	case 0x20:
-		ctx_init = nv20_graph_context_init;
-		idoffs = 0;
-		break;
-	case 0x25:
-	case 0x28:
-		ctx_init = nv25_graph_context_init;
-		break;
-	case 0x2a:
-		ctx_init = nv2a_graph_context_init;
-		idoffs = 0;
-		break;
-	case 0x30:
-	case 0x31:
-		ctx_init = nv30_31_graph_context_init;
-		break;
-	case 0x34:
-		ctx_init = nv34_graph_context_init;
-		break;
-	case 0x35:
-	case 0x36:
-		ctx_init = nv35_36_graph_context_init;
-		break;
-	default:
-		BUG_ON(1);
-	}
-
-	ret = nouveau_gpuobj_new(dev, chan, pgraph->grctx_size, 16,
-				 NVOBJ_FLAG_ZERO_ALLOC, &chan->ramin_grctx);
+	ret = nouveau_gpuobj_new(dev, NULL, pgraph->grctx_size, 16,
+				 NVOBJ_FLAG_ZERO_ALLOC, &grctx);
 	if (ret)
 		return ret;

 	/* Initialise default context values */
-	ctx_init(dev, chan->ramin_grctx);
+	pgraph->grctx_init(grctx);

 	/* nv20: nv_wo32(dev, chan->ramin_grctx->gpuobj, 10, chan->id<<24); */
-	nv_wo32(chan->ramin_grctx, idoffs,
-		(chan->id << 24) | 0x1); /* CTX_USER */
+	/* CTX_USER */
+	nv_wo32(grctx, pgraph->grctx_user, (chan->id << 24) | 0x1);

-	nv_wo32(pgraph->ctx_table, chan->id * 4, chan->ramin_grctx->pinst >> 4);
+	nv_wo32(pgraph->ctxtab, chan->id * 4, grctx->pinst >> 4);
+	chan->engctx[engine] = grctx;
 	return 0;
 }

 void
-nv20_graph_destroy_context(struct nouveau_channel *chan)
+nv20_graph_context_del(struct nouveau_channel *chan, int engine)
 {
+	struct nv20_graph_engine *pgraph = nv_engine(chan->dev, engine);
+	struct nouveau_gpuobj *grctx = chan->engctx[engine];
 	struct drm_device *dev = chan->dev;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
 	unsigned long flags;

 	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
-	pgraph->fifo_access(dev, false);
+	nv04_graph_fifo_access(dev, false);

 	/* Unload the context if it's the currently active one */
-	if (pgraph->channel(dev) == chan)
-		pgraph->unload_context(dev);
+	if (nv10_graph_channel(dev) == chan)
+		nv20_graph_unload_context(dev);

-	pgraph->fifo_access(dev, true);
+	nv04_graph_fifo_access(dev, true);
 	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);

 	/* Free the context resources */
-	nv_wo32(pgraph->ctx_table, chan->id * 4, 0);
-	nouveau_gpuobj_ref(NULL, &chan->ramin_grctx);
-}
-
-int
-nv20_graph_load_context(struct nouveau_channel *chan)
-{
-	struct drm_device *dev = chan->dev;
-	uint32_t inst;
-
-	if (!chan->ramin_grctx)
-		return -EINVAL;
-	inst = chan->ramin_grctx->pinst >> 4;
-
-	nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, inst);
-	nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_XFER,
-		     NV20_PGRAPH_CHANNEL_CTX_XFER_LOAD);
-	nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10010100);
-
-	nouveau_wait_for_idle(dev);
-	return 0;
-}
-
-int
-nv20_graph_unload_context(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
-	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
-	struct nouveau_channel *chan;
-	uint32_t inst, tmp;
-
-	chan = pgraph->channel(dev);
-	if (!chan)
-		return 0;
-	inst = chan->ramin_grctx->pinst >> 4;
-
-	nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, inst);
-	nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_XFER,
-		     NV20_PGRAPH_CHANNEL_CTX_XFER_SAVE);
-
-	nouveau_wait_for_idle(dev);
-
-	nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000000);
-	tmp  = nv_rd32(dev, NV10_PGRAPH_CTX_USER) & 0x00ffffff;
-	tmp |= (pfifo->channels - 1) << 24;
-	nv_wr32(dev, NV10_PGRAPH_CTX_USER, tmp);
-	return 0;
+	nv_wo32(pgraph->ctxtab, chan->id * 4, 0);
+
+	nouveau_gpuobj_ref(NULL, &grctx);
+	chan->engctx[engine] = NULL;
 }

 static void
-nv20_graph_rdi(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	int i, writecount = 32;
-	uint32_t rdi_index = 0x2c80000;
-
-	if (dev_priv->chipset == 0x20) {
-		rdi_index = 0x3d0000;
-		writecount = 15;
-	}
-
-	nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, rdi_index);
-	for (i = 0; i < writecount; i++)
-		nv_wr32(dev, NV10_PGRAPH_RDI_DATA, 0);
-
-	nouveau_wait_for_idle(dev);
-}
-
-void
 nv20_graph_set_tile_region(struct drm_device *dev, int i)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
@@ -536,56 +495,22 @@ nv20_graph_set_tile_region(struct drm_device *dev, int i)
 }

 int
-nv20_graph_init(struct drm_device *dev)
+nv20_graph_init(struct drm_device *dev, int engine)
 {
+	struct nv20_graph_engine *pgraph = nv_engine(dev, engine);
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
 	uint32_t tmp, vramsz;
-	int ret, i;
-
-	switch (dev_priv->chipset) {
-	case 0x20:
-		pgraph->grctx_size = NV20_GRCTX_SIZE;
-		break;
-	case 0x25:
-	case 0x28:
-		pgraph->grctx_size = NV25_GRCTX_SIZE;
-		break;
-	case 0x2a:
-		pgraph->grctx_size = NV2A_GRCTX_SIZE;
-		break;
-	default:
-		NV_ERROR(dev, "unknown chipset, disabling acceleration\n");
-		pgraph->accel_blocked = true;
-		return 0;
-	}
+	int i;

 	nv_wr32(dev, NV03_PMC_ENABLE,
 		nv_rd32(dev, NV03_PMC_ENABLE) & ~NV_PMC_ENABLE_PGRAPH);
 	nv_wr32(dev, NV03_PMC_ENABLE,
 		nv_rd32(dev, NV03_PMC_ENABLE) |  NV_PMC_ENABLE_PGRAPH);

-	if (!pgraph->ctx_table) {
-		/* Create Context Pointer Table */
-		ret = nouveau_gpuobj_new(dev, NULL, 32 * 4, 16,
-					 NVOBJ_FLAG_ZERO_ALLOC,
-					 &pgraph->ctx_table);
-		if (ret)
-			return ret;
-	}
-
-	nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_TABLE,
-		pgraph->ctx_table->pinst >> 4);
+	nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_TABLE, pgraph->ctxtab->pinst >> 4);

 	nv20_graph_rdi(dev);

-	ret = nv20_graph_register(dev);
-	if (ret) {
-		nouveau_gpuobj_ref(NULL, &pgraph->ctx_table);
-		return ret;
-	}
-
-	nouveau_irq_register(dev, 12, nv20_graph_isr);
 	nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF);
 	nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);

@@ -657,67 +582,20 @@ nv20_graph_init(struct drm_device *dev)
 	return 0;
 }

-void
-nv20_graph_takedown(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
-
-	nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0x00000000);
-	nouveau_irq_unregister(dev, 12);
-
-	nouveau_gpuobj_ref(NULL, &pgraph->ctx_table);
-}
-
 int
-nv30_graph_init(struct drm_device *dev)
+nv30_graph_init(struct drm_device *dev, int engine)
 {
+	struct nv20_graph_engine *pgraph = nv_engine(dev, engine);
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
-	int ret, i;
-
-	switch (dev_priv->chipset) {
-	case 0x30:
-	case 0x31:
-		pgraph->grctx_size = NV30_31_GRCTX_SIZE;
-		break;
-	case 0x34:
-		pgraph->grctx_size = NV34_GRCTX_SIZE;
-		break;
-	case 0x35:
-	case 0x36:
-		pgraph->grctx_size = NV35_36_GRCTX_SIZE;
-		break;
-	default:
-		NV_ERROR(dev, "unknown chipset, disabling acceleration\n");
-		pgraph->accel_blocked = true;
-		return 0;
-	}
+	int i;

 	nv_wr32(dev, NV03_PMC_ENABLE,
 		nv_rd32(dev, NV03_PMC_ENABLE) & ~NV_PMC_ENABLE_PGRAPH);
 	nv_wr32(dev, NV03_PMC_ENABLE,
 		nv_rd32(dev, NV03_PMC_ENABLE) |  NV_PMC_ENABLE_PGRAPH);

-	if (!pgraph->ctx_table) {
-		/* Create Context Pointer Table */
-		ret = nouveau_gpuobj_new(dev, NULL, 32 * 4, 16,
-					 NVOBJ_FLAG_ZERO_ALLOC,
-					 &pgraph->ctx_table);
-		if (ret)
-			return ret;
-	}
-
-	ret = nv30_graph_register(dev);
-	if (ret) {
-		nouveau_gpuobj_ref(NULL, &pgraph->ctx_table);
-		return ret;
-	}
+	nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_TABLE, pgraph->ctxtab->pinst >> 4);

-	nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_TABLE,
-		pgraph->ctx_table->pinst >> 4);
-
-	nouveau_irq_register(dev, 12, nv20_graph_isr);
 	nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF);
 	nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);

@@ -775,85 +653,11 @@ nv30_graph_init(struct drm_device *dev)
 	return 0;
 }

-static int
-nv20_graph_register(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-
-	if (dev_priv->engine.graph.registered)
-		return 0;
-
-	NVOBJ_CLASS(dev, 0x506e, SW); /* nvsw */
-	NVOBJ_CLASS(dev, 0x0030, GR); /* null */
-	NVOBJ_CLASS(dev, 0x0039, GR); /* m2mf */
-	NVOBJ_CLASS(dev, 0x004a, GR); /* gdirect */
-	NVOBJ_CLASS(dev, 0x009f, GR); /* imageblit (nv12) */
-	NVOBJ_CLASS(dev, 0x008a, GR); /* ifc */
-	NVOBJ_CLASS(dev, 0x0089, GR); /* sifm */
-	NVOBJ_CLASS(dev, 0x0062, GR); /* surf2d */
-	NVOBJ_CLASS(dev, 0x0043, GR); /* rop */
-	NVOBJ_CLASS(dev, 0x0012, GR); /* beta1 */
-	NVOBJ_CLASS(dev, 0x0072, GR); /* beta4 */
-	NVOBJ_CLASS(dev, 0x0019, GR); /* cliprect */
-	NVOBJ_CLASS(dev, 0x0044, GR); /* pattern */
-	NVOBJ_CLASS(dev, 0x009e, GR); /* swzsurf */
-	NVOBJ_CLASS(dev, 0x0096, GR); /* celcius */
-
-	/* kelvin */
-	if (dev_priv->chipset < 0x25)
-		NVOBJ_CLASS(dev, 0x0097, GR);
-	else
-		NVOBJ_CLASS(dev, 0x0597, GR);
-
-	/* nvsw */
-	NVOBJ_CLASS(dev, 0x506e, SW);
-	NVOBJ_MTHD (dev, 0x506e, 0x0500, nv04_graph_mthd_page_flip);
-
-	dev_priv->engine.graph.registered = true;
-	return 0;
-}
-
-static int
-nv30_graph_register(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-
-	if (dev_priv->engine.graph.registered)
-		return 0;
-
-	NVOBJ_CLASS(dev, 0x506e, SW); /* nvsw */
-	NVOBJ_CLASS(dev, 0x0030, GR); /* null */
-	NVOBJ_CLASS(dev, 0x0039, GR); /* m2mf */
-	NVOBJ_CLASS(dev, 0x004a, GR); /* gdirect */
-	NVOBJ_CLASS(dev, 0x009f, GR); /* imageblit (nv12) */
-	NVOBJ_CLASS(dev, 0x008a, GR); /* ifc */
-	NVOBJ_CLASS(dev, 0x038a, GR); /* ifc (nv30) */
-	NVOBJ_CLASS(dev, 0x0089, GR); /* sifm */
-	NVOBJ_CLASS(dev, 0x0389, GR); /* sifm (nv30) */
-	NVOBJ_CLASS(dev, 0x0062, GR); /* surf2d */
-	NVOBJ_CLASS(dev, 0x0362, GR); /* surf2d (nv30) */
-	NVOBJ_CLASS(dev, 0x0043, GR); /* rop */
-	NVOBJ_CLASS(dev, 0x0012, GR); /* beta1 */
-	NVOBJ_CLASS(dev, 0x0072, GR); /* beta4 */
-	NVOBJ_CLASS(dev, 0x0019, GR); /* cliprect */
-	NVOBJ_CLASS(dev, 0x0044, GR); /* pattern */
-	NVOBJ_CLASS(dev, 0x039e, GR); /* swzsurf */
-
-	/* rankine */
-	if (0x00000003 & (1 << (dev_priv->chipset & 0x0f)))
-		NVOBJ_CLASS(dev, 0x0397, GR);
-	else
-	if (0x00000010 & (1 << (dev_priv->chipset & 0x0f)))
-		NVOBJ_CLASS(dev, 0x0697, GR);
-	else
-	if (0x000001e0 & (1 << (dev_priv->chipset & 0x0f)))
-		NVOBJ_CLASS(dev, 0x0497, GR);
-
-	/* nvsw */
-	NVOBJ_CLASS(dev, 0x506e, SW);
-	NVOBJ_MTHD (dev, 0x506e, 0x0500, nv04_graph_mthd_page_flip);
-
-	dev_priv->engine.graph.registered = true;
+int
+nv20_graph_fini(struct drm_device *dev, int engine)
+{
+	nv20_graph_unload_context(dev);
+	nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0x00000000);
 	return 0;
 }

@@ -897,3 +701,135 @@ nv20_graph_isr(struct drm_device *dev)
 		}
 	}
 }
+
+static void
+nv20_graph_destroy(struct drm_device *dev, int engine)
+{
+	struct nv20_graph_engine *pgraph = nv_engine(dev, engine);
+
+	nouveau_irq_unregister(dev, 12);
+	nouveau_gpuobj_ref(NULL, &pgraph->ctxtab);
+
+	NVOBJ_ENGINE_DEL(dev, GR);
+	kfree(pgraph);
+}
+
+int
+nv20_graph_create(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nv20_graph_engine *pgraph;
+	int ret;
+
+	pgraph = kzalloc(sizeof(*pgraph), GFP_KERNEL);
+	if (!pgraph)
+		return -ENOMEM;
+
+	pgraph->base.destroy = nv20_graph_destroy;
+	pgraph->base.fini = nv20_graph_fini;
+	pgraph->base.context_new = nv20_graph_context_new;
+	pgraph->base.context_del = nv20_graph_context_del;
+	pgraph->base.object_new = nv04_graph_object_new;
+	pgraph->base.set_tile_region = nv20_graph_set_tile_region;
+
+	pgraph->grctx_user = 0x0028;
+	if (dev_priv->card_type == NV_20) {
+		pgraph->base.init = nv20_graph_init;
+		switch (dev_priv->chipset) {
+		case 0x20:
+			pgraph->grctx_init = nv20_graph_context_init;
+			pgraph->grctx_size = NV20_GRCTX_SIZE;
+			pgraph->grctx_user = 0x0000;
+			break;
+		case 0x25:
+		case 0x28:
+			pgraph->grctx_init = nv25_graph_context_init;
+			pgraph->grctx_size = NV25_GRCTX_SIZE;
+			break;
+		case 0x2a:
+			pgraph->grctx_init = nv2a_graph_context_init;
+			pgraph->grctx_size = NV2A_GRCTX_SIZE;
+			pgraph->grctx_user = 0x0000;
+			break;
+		default:
+			NV_ERROR(dev, "PGRAPH: unknown chipset\n");
+			return 0;
+		}
+	} else {
+		pgraph->base.init = nv30_graph_init;
+		switch (dev_priv->chipset) {
+		case 0x30:
+		case 0x31:
+			pgraph->grctx_init = nv30_31_graph_context_init;
+			pgraph->grctx_size = NV30_31_GRCTX_SIZE;
+			break;
+		case 0x34:
+			pgraph->grctx_init = nv34_graph_context_init;
+			pgraph->grctx_size = NV34_GRCTX_SIZE;
+			break;
+		case 0x35:
+		case 0x36:
+			pgraph->grctx_init = nv35_36_graph_context_init;
+			pgraph->grctx_size = NV35_36_GRCTX_SIZE;
+			break;
+		default:
+			NV_ERROR(dev, "PGRAPH: unknown chipset\n");
+			return 0;
+		}
+	}
+
+	/* Create Context Pointer Table */
+	ret = nouveau_gpuobj_new(dev, NULL, 32 * 4, 16, NVOBJ_FLAG_ZERO_ALLOC,
+				 &pgraph->ctxtab);
+	if (ret) {
+		kfree(pgraph);
+		return ret;
+	}
+
+	NVOBJ_ENGINE_ADD(dev, GR, &pgraph->base);
+	nouveau_irq_register(dev, 12, nv20_graph_isr);
+
+	/* nvsw */
+	NVOBJ_CLASS(dev, 0x506e, SW);
+	NVOBJ_MTHD (dev, 0x506e, 0x0500, nv04_graph_mthd_page_flip);
+
+	NVOBJ_CLASS(dev, 0x0030, GR); /* null */
+	NVOBJ_CLASS(dev, 0x0039, GR); /* m2mf */
+	NVOBJ_CLASS(dev, 0x004a, GR); /* gdirect */
+	NVOBJ_CLASS(dev, 0x009f, GR); /* imageblit (nv12) */
+	NVOBJ_CLASS(dev, 0x008a, GR); /* ifc */
+	NVOBJ_CLASS(dev, 0x0089, GR); /* sifm */
+	NVOBJ_CLASS(dev, 0x0062, GR); /* surf2d */
+	NVOBJ_CLASS(dev, 0x0043, GR); /* rop */
+	NVOBJ_CLASS(dev, 0x0012, GR); /* beta1 */
+	NVOBJ_CLASS(dev, 0x0072, GR); /* beta4 */
+	NVOBJ_CLASS(dev, 0x0019, GR); /* cliprect */
+	NVOBJ_CLASS(dev, 0x0044, GR); /* pattern */
+	if (dev_priv->card_type == NV_20) {
+		NVOBJ_CLASS(dev, 0x009e, GR); /* swzsurf */
+		NVOBJ_CLASS(dev, 0x0096, GR); /* celcius */
+
+		/* kelvin */
+		if (dev_priv->chipset < 0x25)
+			NVOBJ_CLASS(dev, 0x0097, GR);
+		else
+			NVOBJ_CLASS(dev, 0x0597, GR);
+	} else {
+		NVOBJ_CLASS(dev, 0x038a, GR); /* ifc (nv30) */
+		NVOBJ_CLASS(dev, 0x0389, GR); /* sifm (nv30) */
+		NVOBJ_CLASS(dev, 0x0362, GR); /* surf2d (nv30) */
+		NVOBJ_CLASS(dev, 0x039e, GR); /* swzsurf */
+
+		/* rankine */
+		if (0x00000003 & (1 << (dev_priv->chipset & 0x0f)))
+			NVOBJ_CLASS(dev, 0x0397, GR);
+		else
+		if (0x00000010 & (1 << (dev_priv->chipset & 0x0f)))
+			NVOBJ_CLASS(dev, 0x0697, GR);
+		else
+		if (0x000001e0 & (1 << (dev_priv->chipset & 0x0f)))
+			NVOBJ_CLASS(dev, 0x0497, GR);
+	}
+
+	return 0;
+}
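On nv20/nv30 the hardware locates a channel's PGRAPH context through the 32-entry pointer table allocated in nv20_graph_create and programmed into NV20_PGRAPH_CHANNEL_CTX_TABLE: slot chan->id holds the context's instance address shifted right by 4, and context_del clears the slot again before dropping the object. A sketch of the invariant the code above maintains (offsets and encoding taken from the diff, nothing else implied):

	/* Illustration only: the per-channel binding kept in ctxtab. */
	nv_wo32(pgraph->ctxtab, chan->id * 4, grctx->pinst >> 4); /* bind   */
	nv_wo32(pgraph->ctxtab, chan->id * 4, 0);                 /* unbind */

Note also that nv20_graph_create frees pgraph and returns the error itself if the table allocation fails, since base.destroy is never invoked for a partially constructed engine.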
diff --git a/drivers/gpu/drm/nouveau/nv40_fifo.c b/drivers/gpu/drm/nouveau/nv40_fifo.c
index 49b9a35a9cd6..68cb2d991c88 100644
--- a/drivers/gpu/drm/nouveau/nv40_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv40_fifo.c
@@ -115,6 +115,7 @@ nv40_fifo_do_load_context(struct drm_device *dev, int chid)
 	nv_wr32(dev, 0x32e8, nv_ri32(dev, fc + 68));
 	nv_wr32(dev, 0x2088, nv_ri32(dev, fc + 76));
 	nv_wr32(dev, 0x3300, nv_ri32(dev, fc + 80));
+	nv_wr32(dev, 0x330c, nv_ri32(dev, fc + 84));

 	nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0);
 	nv_wr32(dev, NV03_PFIFO_CACHE1_PUT, 0);
@@ -186,6 +187,7 @@ nv40_fifo_unload_context(struct drm_device *dev)
 	tmp |= (nv_rd32(dev, NV04_PFIFO_CACHE1_PUT) << 16);
 	nv_wi32(dev, fc + 72, tmp);
 #endif
+	nv_wi32(dev, fc + 84, nv_rd32(dev, 0x330c));

 	nv40_fifo_do_load_context(dev, pfifo->channels - 1);
 	nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1,
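The two one-line PFIFO additions are a matched pair: register 0x330c, which the new nv40_mpeg.c below programs with the PMPEG context pointer, now round-trips through RAMFC offset +84 (0x54) across channel switches, so it is preserved like the rest of the per-channel FIFO state. Pulled out of context as a sketch:

	nv_wi32(dev, fc + 84, nv_rd32(dev, 0x330c));   /* unload: save    */
	nv_wr32(dev, 0x330c, nv_ri32(dev, fc + 84));   /* load:   restore */

The same 0x54 offset is what nv40_mpeg_context_new writes through chan->ramfc.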
diff --git a/drivers/gpu/drm/nouveau/nv40_graph.c b/drivers/gpu/drm/nouveau/nv40_graph.c
index fceb44c0ec74..5beb01b8ace1 100644
--- a/drivers/gpu/drm/nouveau/nv40_graph.c
+++ b/drivers/gpu/drm/nouveau/nv40_graph.c
@@ -28,14 +28,18 @@
 #include "drm.h"
 #include "nouveau_drv.h"
 #include "nouveau_grctx.h"
+#include "nouveau_ramht.h"

-static int nv40_graph_register(struct drm_device *);
-static void nv40_graph_isr(struct drm_device *);
+struct nv40_graph_engine {
+	struct nouveau_exec_engine base;
+	u32 grctx_size;
+};

-struct nouveau_channel *
+static struct nouveau_channel *
 nv40_graph_channel(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_gpuobj *grctx;
 	uint32_t inst;
 	int i;

@@ -45,74 +49,17 @@ nv40_graph_channel(struct drm_device *dev)
 	inst = (inst & NV40_PGRAPH_CTXCTL_CUR_INSTANCE) << 4;

 	for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
-		struct nouveau_channel *chan = dev_priv->channels.ptr[i];
+		if (!dev_priv->channels.ptr[i])
+			continue;

-		if (chan && chan->ramin_grctx &&
-		    chan->ramin_grctx->pinst == inst)
-			return chan;
+		grctx = dev_priv->channels.ptr[i]->engctx[NVOBJ_ENGINE_GR];
+		if (grctx && grctx->pinst == inst)
+			return dev_priv->channels.ptr[i];
 	}

 	return NULL;
 }

-int
-nv40_graph_create_context(struct nouveau_channel *chan)
-{
-	struct drm_device *dev = chan->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
-	struct nouveau_grctx ctx = {};
-	unsigned long flags;
-	int ret;
-
-	ret = nouveau_gpuobj_new(dev, chan, pgraph->grctx_size, 16,
-				 NVOBJ_FLAG_ZERO_ALLOC, &chan->ramin_grctx);
-	if (ret)
-		return ret;
-
-	/* Initialise default context values */
-	ctx.dev = chan->dev;
-	ctx.mode = NOUVEAU_GRCTX_VALS;
-	ctx.data = chan->ramin_grctx;
-	nv40_grctx_init(&ctx);
-
-	nv_wo32(chan->ramin_grctx, 0, chan->ramin_grctx->pinst);
-
-	/* init grctx pointer in ramfc, and on PFIFO if channel is
-	 * already active there
-	 */
-	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
-	nv_wo32(chan->ramfc, 0x38, chan->ramin_grctx->pinst >> 4);
-	nv_mask(dev, 0x002500, 0x00000001, 0x00000000);
-	if ((nv_rd32(dev, 0x003204) & 0x0000001f) == chan->id)
-		nv_wr32(dev, 0x0032e0, chan->ramin_grctx->pinst >> 4);
-	nv_mask(dev, 0x002500, 0x00000001, 0x00000001);
-	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
-	return 0;
-}
-
-void
-nv40_graph_destroy_context(struct nouveau_channel *chan)
-{
-	struct drm_device *dev = chan->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
-	unsigned long flags;
-
-	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
-	pgraph->fifo_access(dev, false);
-
-	/* Unload the context if it's the currently active one */
-	if (pgraph->channel(dev) == chan)
-		pgraph->unload_context(dev);
-
-	pgraph->fifo_access(dev, true);
-	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
-
-	/* Free the context resources */
-	nouveau_gpuobj_ref(NULL, &chan->ramin_grctx);
-}
-
 static int
 nv40_graph_transfer_context(struct drm_device *dev, uint32_t inst, int save)
 {
@@ -154,57 +101,115 @@ nv40_graph_transfer_context(struct drm_device *dev, uint32_t inst, int save)
 	return 0;
 }

-/* Restore the context for a specific channel into PGRAPH */
-int
-nv40_graph_load_context(struct nouveau_channel *chan)
+static int
+nv40_graph_unload_context(struct drm_device *dev)
 {
-	struct drm_device *dev = chan->dev;
 	uint32_t inst;
 	int ret;

-	if (!chan->ramin_grctx)
-		return -EINVAL;
-	inst = chan->ramin_grctx->pinst >> 4;
+	inst = nv_rd32(dev, NV40_PGRAPH_CTXCTL_CUR);
+	if (!(inst & NV40_PGRAPH_CTXCTL_CUR_LOADED))
+		return 0;
+	inst &= NV40_PGRAPH_CTXCTL_CUR_INSTANCE;
+
+	ret = nv40_graph_transfer_context(dev, inst, 1);
+
+	nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, inst);
+	return ret;
+}
+
+static int
+nv40_graph_context_new(struct nouveau_channel *chan, int engine)
+{
+	struct nv40_graph_engine *pgraph = nv_engine(chan->dev, engine);
+	struct drm_device *dev = chan->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_gpuobj *grctx = NULL;
+	struct nouveau_grctx ctx = {};
+	unsigned long flags;
+	int ret;

-	ret = nv40_graph_transfer_context(dev, inst, 0);
+	ret = nouveau_gpuobj_new(dev, NULL, pgraph->grctx_size, 16,
+				 NVOBJ_FLAG_ZERO_ALLOC, &grctx);
 	if (ret)
 		return ret;

-	/* 0x40032C, no idea of it's exact function. Could simply be a
-	 * record of the currently active PGRAPH context. It's currently
-	 * unknown as to what bit 24 does. The nv ddx has it set, so we will
-	 * set it here too.
-	 */
-	nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, inst);
-	nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR,
-		 (inst & NV40_PGRAPH_CTXCTL_CUR_INSTANCE) |
-		 NV40_PGRAPH_CTXCTL_CUR_LOADED);
-	/* 0x32E0 records the instance address of the active FIFO's PGRAPH
-	 * context. If at any time this doesn't match 0x40032C, you will
-	 * receive PGRAPH_INTR_CONTEXT_SWITCH
+	/* Initialise default context values */
+	ctx.dev = chan->dev;
+	ctx.mode = NOUVEAU_GRCTX_VALS;
+	ctx.data = grctx;
+	nv40_grctx_init(&ctx);
+
+	nv_wo32(grctx, 0, grctx->vinst);
+
+	/* init grctx pointer in ramfc, and on PFIFO if channel is
+	 * already active there
 	 */
-	nv_wr32(dev, NV40_PFIFO_GRCTX_INSTANCE, inst);
+	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+	nv_wo32(chan->ramfc, 0x38, grctx->vinst >> 4);
+	nv_mask(dev, 0x002500, 0x00000001, 0x00000000);
+	if ((nv_rd32(dev, 0x003204) & 0x0000001f) == chan->id)
+		nv_wr32(dev, 0x0032e0, grctx->vinst >> 4);
+	nv_mask(dev, 0x002500, 0x00000001, 0x00000001);
+	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
+
+	chan->engctx[engine] = grctx;
 	return 0;
 }

-int
-nv40_graph_unload_context(struct drm_device *dev)
+static void
+nv40_graph_context_del(struct nouveau_channel *chan, int engine)
 {
-	uint32_t inst;
-	int ret;
+	struct nouveau_gpuobj *grctx = chan->engctx[engine];
+	struct drm_device *dev = chan->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	unsigned long flags;

-	inst = nv_rd32(dev, NV40_PGRAPH_CTXCTL_CUR);
-	if (!(inst & NV40_PGRAPH_CTXCTL_CUR_LOADED))
-		return 0;
-	inst &= NV40_PGRAPH_CTXCTL_CUR_INSTANCE;
+	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+	nv04_graph_fifo_access(dev, false);

-	ret = nv40_graph_transfer_context(dev, inst, 1);
+	/* Unload the context if it's the currently active one */
+	if (nv40_graph_channel(dev) == chan)
+		nv40_graph_unload_context(dev);

-	nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, inst);
+	nv04_graph_fifo_access(dev, true);
+	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
+
+	/* Free the context resources */
+	nouveau_gpuobj_ref(NULL, &grctx);
+	chan->engctx[engine] = NULL;
+}
+
+int
+nv40_graph_object_new(struct nouveau_channel *chan, int engine,
+		      u32 handle, u16 class)
+{
+	struct drm_device *dev = chan->dev;
+	struct nouveau_gpuobj *obj = NULL;
+	int ret;
+
+	ret = nouveau_gpuobj_new(dev, chan, 20, 16, NVOBJ_FLAG_ZERO_FREE, &obj);
+	if (ret)
+		return ret;
+	obj->engine = 1;
+	obj->class  = class;
+
+	nv_wo32(obj, 0x00, class);
+	nv_wo32(obj, 0x04, 0x00000000);
+#ifndef __BIG_ENDIAN
+	nv_wo32(obj, 0x08, 0x00000000);
+#else
+	nv_wo32(obj, 0x08, 0x01000000);
+#endif
+	nv_wo32(obj, 0x0c, 0x00000000);
+	nv_wo32(obj, 0x10, 0x00000000);
+
+	ret = nouveau_ramht_insert(chan, handle, obj);
+	nouveau_gpuobj_ref(NULL, &obj);
 	return ret;
 }

-void
+static void
 nv40_graph_set_tile_region(struct drm_device *dev, int i)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
@@ -257,14 +262,14 @@ nv40_graph_set_tile_region(struct drm_device *dev, int i)
 * C51 0x4e
 */
 int
-nv40_graph_init(struct drm_device *dev)
+nv40_graph_init(struct drm_device *dev, int engine)
 {
-	struct drm_nouveau_private *dev_priv =
-		(struct drm_nouveau_private *)dev->dev_private;
+	struct nv40_graph_engine *pgraph = nv_engine(dev, engine);
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
 	struct nouveau_grctx ctx = {};
 	uint32_t vramsz, *cp;
-	int ret, i, j;
+	int i, j;

 	nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) &
 			~NV_PMC_ENABLE_PGRAPH);
@@ -280,7 +285,7 @@ nv40_graph_init(struct drm_device *dev)
 	ctx.data = cp;
 	ctx.ctxprog_max = 256;
 	nv40_grctx_init(&ctx);
-	dev_priv->engine.graph.grctx_size = ctx.ctxvals_pos * 4;
+	pgraph->grctx_size = ctx.ctxvals_pos * 4;

 	nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0);
 	for (i = 0; i < ctx.ctxprog_len; i++)
@@ -288,14 +293,9 @@ nv40_graph_init(struct drm_device *dev)

 	kfree(cp);

-	ret = nv40_graph_register(dev);
-	if (ret)
-		return ret;
-
 	/* No context present currently */
 	nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, 0x00000000);

-	nouveau_irq_register(dev, 12, nv40_graph_isr);
 	nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF);
 	nv_wr32(dev, NV40_PGRAPH_INTR_EN, 0xFFFFFFFF);

@@ -428,47 +428,10 @@ nv40_graph_init(struct drm_device *dev)
 	return 0;
 }

-void nv40_graph_takedown(struct drm_device *dev)
-{
-	nouveau_irq_unregister(dev, 12);
-}
-
 static int
-nv40_graph_register(struct drm_device *dev)
+nv40_graph_fini(struct drm_device *dev, int engine)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-
-	if (dev_priv->engine.graph.registered)
-		return 0;
-
-	NVOBJ_CLASS(dev, 0x506e, SW); /* nvsw */
-	NVOBJ_CLASS(dev, 0x0030, GR); /* null */
-	NVOBJ_CLASS(dev, 0x0039, GR); /* m2mf */
-	NVOBJ_CLASS(dev, 0x004a, GR); /* gdirect */
-	NVOBJ_CLASS(dev, 0x009f, GR); /* imageblit (nv12) */
-	NVOBJ_CLASS(dev, 0x008a, GR); /* ifc */
-	NVOBJ_CLASS(dev, 0x0089, GR); /* sifm */
-	NVOBJ_CLASS(dev, 0x3089, GR); /* sifm (nv40) */
-	NVOBJ_CLASS(dev, 0x0062, GR); /* surf2d */
-	NVOBJ_CLASS(dev, 0x3062, GR); /* surf2d (nv40) */
-	NVOBJ_CLASS(dev, 0x0043, GR); /* rop */
-	NVOBJ_CLASS(dev, 0x0012, GR); /* beta1 */
-	NVOBJ_CLASS(dev, 0x0072, GR); /* beta4 */
-	NVOBJ_CLASS(dev, 0x0019, GR); /* cliprect */
-	NVOBJ_CLASS(dev, 0x0044, GR); /* pattern */
-	NVOBJ_CLASS(dev, 0x309e, GR); /* swzsurf */
-
-	/* curie */
-	if (nv44_graph_class(dev))
-		NVOBJ_CLASS(dev, 0x4497, GR);
-	else
-		NVOBJ_CLASS(dev, 0x4097, GR);
-
-	/* nvsw */
-	NVOBJ_CLASS(dev, 0x506e, SW);
-	NVOBJ_MTHD (dev, 0x506e, 0x0500, nv04_graph_mthd_page_flip);
-
-	dev_priv->engine.graph.registered = true;
+	nv40_graph_unload_context(dev);
 	return 0;
 }

@@ -476,17 +439,17 @@ static int
 nv40_graph_isr_chid(struct drm_device *dev, u32 inst)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_channel *chan;
+	struct nouveau_gpuobj *grctx;
 	unsigned long flags;
 	int i;

 	spin_lock_irqsave(&dev_priv->channels.lock, flags);
 	for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
-		chan = dev_priv->channels.ptr[i];
-		if (!chan || !chan->ramin_grctx)
+		if (!dev_priv->channels.ptr[i])
 			continue;
+		grctx = dev_priv->channels.ptr[i]->engctx[NVOBJ_ENGINE_GR];

-		if (inst == chan->ramin_grctx->pinst)
+		if (grctx && grctx->pinst == inst)
 			break;
 	}
 	spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
@@ -537,3 +500,63 @@ nv40_graph_isr(struct drm_device *dev)
 		}
 	}
 }
+
+static void
+nv40_graph_destroy(struct drm_device *dev, int engine)
+{
+	struct nv40_graph_engine *pgraph = nv_engine(dev, engine);
+
+	nouveau_irq_unregister(dev, 12);
+
+	NVOBJ_ENGINE_DEL(dev, GR);
+	kfree(pgraph);
+}
+
+int
+nv40_graph_create(struct drm_device *dev)
+{
+	struct nv40_graph_engine *pgraph;
+
+	pgraph = kzalloc(sizeof(*pgraph), GFP_KERNEL);
+	if (!pgraph)
+		return -ENOMEM;
+
+	pgraph->base.destroy = nv40_graph_destroy;
+	pgraph->base.init = nv40_graph_init;
+	pgraph->base.fini = nv40_graph_fini;
+	pgraph->base.context_new = nv40_graph_context_new;
+	pgraph->base.context_del = nv40_graph_context_del;
+	pgraph->base.object_new = nv40_graph_object_new;
+	pgraph->base.set_tile_region = nv40_graph_set_tile_region;
+
+	NVOBJ_ENGINE_ADD(dev, GR, &pgraph->base);
+	nouveau_irq_register(dev, 12, nv40_graph_isr);
+
+	NVOBJ_CLASS(dev, 0x506e, SW); /* nvsw */
+	NVOBJ_CLASS(dev, 0x0030, GR); /* null */
+	NVOBJ_CLASS(dev, 0x0039, GR); /* m2mf */
+	NVOBJ_CLASS(dev, 0x004a, GR); /* gdirect */
+	NVOBJ_CLASS(dev, 0x009f, GR); /* imageblit (nv12) */
+	NVOBJ_CLASS(dev, 0x008a, GR); /* ifc */
+	NVOBJ_CLASS(dev, 0x0089, GR); /* sifm */
+	NVOBJ_CLASS(dev, 0x3089, GR); /* sifm (nv40) */
+	NVOBJ_CLASS(dev, 0x0062, GR); /* surf2d */
+	NVOBJ_CLASS(dev, 0x3062, GR); /* surf2d (nv40) */
+	NVOBJ_CLASS(dev, 0x0043, GR); /* rop */
+	NVOBJ_CLASS(dev, 0x0012, GR); /* beta1 */
+	NVOBJ_CLASS(dev, 0x0072, GR); /* beta4 */
+	NVOBJ_CLASS(dev, 0x0019, GR); /* cliprect */
+	NVOBJ_CLASS(dev, 0x0044, GR); /* pattern */
+	NVOBJ_CLASS(dev, 0x309e, GR); /* swzsurf */
+
+	/* curie */
+	if (nv44_graph_class(dev))
+		NVOBJ_CLASS(dev, 0x4497, GR);
+	else
+		NVOBJ_CLASS(dev, 0x4097, GR);
+
+	/* nvsw */
+	NVOBJ_CLASS(dev, 0x506e, SW);
+	NVOBJ_MTHD (dev, 0x506e, 0x0500, nv04_graph_mthd_page_flip);
+	return 0;
+}
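nv40_graph_object_new builds the 20-byte instance-memory view of a graphics object and hashes it into the channel's RAMHT under the caller's handle; the local reference is dropped once RAMHT holds its own. The layout, read off the nv_wo32() calls above (field meanings beyond the class word and the endian flag are descriptive guesses, not nouveau definitions):

	/* word 0x00: object class
	 * word 0x04: 0
	 * word 0x08: 0x01000000 on big-endian builds, else 0
	 * words 0x0c, 0x10: 0
	 */

obj->engine = 1 evidently tags the object as belonging to PGRAPH (nv40_mpeg.c below uses 2 for PMPEG), so the FIFO can route methods to the right engine.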
diff --git a/drivers/gpu/drm/nouveau/nv40_mpeg.c b/drivers/gpu/drm/nouveau/nv40_mpeg.c
new file mode 100644
index 000000000000..6d2af292a2e3
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv40_mpeg.c
@@ -0,0 +1,311 @@
1/*
2 * Copyright 2011 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "drmP.h"
26#include "nouveau_drv.h"
27#include "nouveau_ramht.h"
28
29struct nv40_mpeg_engine {
30 struct nouveau_exec_engine base;
31};
32
33static int
34nv40_mpeg_context_new(struct nouveau_channel *chan, int engine)
35{
36 struct drm_device *dev = chan->dev;
37 struct drm_nouveau_private *dev_priv = dev->dev_private;
38 struct nouveau_gpuobj *ctx = NULL;
39 unsigned long flags;
40 int ret;
41
42 NV_DEBUG(dev, "ch%d\n", chan->id);
43
44 ret = nouveau_gpuobj_new(dev, NULL, 264 * 4, 16, NVOBJ_FLAG_ZERO_ALLOC |
45 NVOBJ_FLAG_ZERO_FREE, &ctx);
46 if (ret)
47 return ret;
48
49 nv_wo32(ctx, 0x78, 0x02001ec1);
50
51 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
52 nv_mask(dev, 0x002500, 0x00000001, 0x00000000);
53 if ((nv_rd32(dev, 0x003204) & 0x1f) == chan->id)
54 nv_wr32(dev, 0x00330c, ctx->pinst >> 4);
55 nv_wo32(chan->ramfc, 0x54, ctx->pinst >> 4);
56 nv_mask(dev, 0x002500, 0x00000001, 0x00000001);
57 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
58
59 chan->engctx[engine] = ctx;
60 return 0;
61}
62
63static void
64nv40_mpeg_context_del(struct nouveau_channel *chan, int engine)
65{
66 struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
67 struct nouveau_gpuobj *ctx = chan->engctx[engine];
68 struct drm_device *dev = chan->dev;
69 unsigned long flags;
70 u32 inst = 0x80000000 | (ctx->pinst >> 4);
71
72 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
73 nv_mask(dev, 0x00b32c, 0x00000001, 0x00000000);
74 if (nv_rd32(dev, 0x00b318) == inst)
75 nv_mask(dev, 0x00b318, 0x80000000, 0x00000000);
76 nv_mask(dev, 0x00b32c, 0x00000001, 0x00000001);
77 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
78
79 nouveau_gpuobj_ref(NULL, &ctx);
80 chan->engctx[engine] = NULL;
81}
82
83static int
84nv40_mpeg_object_new(struct nouveau_channel *chan, int engine,
85 u32 handle, u16 class)
86{
87 struct drm_device *dev = chan->dev;
88 struct nouveau_gpuobj *obj = NULL;
89 int ret;
90
91 ret = nouveau_gpuobj_new(dev, chan, 20, 16, NVOBJ_FLAG_ZERO_ALLOC |
92 NVOBJ_FLAG_ZERO_FREE, &obj);
93 if (ret)
94 return ret;
95 obj->engine = 2;
96 obj->class = class;
97
98 nv_wo32(obj, 0x00, class);
99
100 ret = nouveau_ramht_insert(chan, handle, obj);
101 nouveau_gpuobj_ref(NULL, &obj);
102 return ret;
103}
104
105static int
106nv40_mpeg_init(struct drm_device *dev, int engine)
107{
108 struct drm_nouveau_private *dev_priv = dev->dev_private;
109 struct nv40_mpeg_engine *pmpeg = nv_engine(dev, engine);
110 int i;
111
112 /* VPE init */
113 nv_mask(dev, 0x000200, 0x00000002, 0x00000000);
114 nv_mask(dev, 0x000200, 0x00000002, 0x00000002);
115 nv_wr32(dev, 0x00b0e0, 0x00000020); /* nvidia: rd 0x01, wr 0x20 */
116 nv_wr32(dev, 0x00b0e8, 0x00000020); /* nvidia: rd 0x01, wr 0x20 */
117
118 for (i = 0; i < dev_priv->engine.fb.num_tiles; i++)
119 pmpeg->base.set_tile_region(dev, i);
120
121 /* PMPEG init */
122 nv_wr32(dev, 0x00b32c, 0x00000000);
123 nv_wr32(dev, 0x00b314, 0x00000100);
124 nv_wr32(dev, 0x00b220, 0x00000044);
125 nv_wr32(dev, 0x00b300, 0x02001ec1);
126 nv_mask(dev, 0x00b32c, 0x00000001, 0x00000001);
127
128 nv_wr32(dev, 0x00b100, 0xffffffff);
129 nv_wr32(dev, 0x00b140, 0xffffffff);
130
131 if (!nv_wait(dev, 0x00b200, 0x00000001, 0x00000000)) {
132 NV_ERROR(dev, "PMPEG init: 0x%08x\n", nv_rd32(dev, 0x00b200));
133 return -EBUSY;
134 }
135
136 return 0;
137}
138
139static int
140nv40_mpeg_fini(struct drm_device *dev, int engine)
141{
142 /*XXX: context save? */
143 nv_mask(dev, 0x00b32c, 0x00000001, 0x00000000);
144 nv_wr32(dev, 0x00b140, 0x00000000);
145 return 0;
146}
147
148static int
149nv40_mpeg_mthd_dma(struct nouveau_channel *chan, u32 class, u32 mthd, u32 data)
150{
151 struct drm_device *dev = chan->dev;
152 u32 inst = data << 4;
153 u32 dma0 = nv_ri32(dev, inst + 0);
154 u32 dma1 = nv_ri32(dev, inst + 4);
155 u32 dma2 = nv_ri32(dev, inst + 8);
156 u32 base = (dma2 & 0xfffff000) | (dma0 >> 20);
157 u32 size = dma1 + 1;
158
159 /* only allow linear DMA objects */
160 if (!(dma0 & 0x00002000))
161 return -EINVAL;
162
163 if (mthd == 0x0190) {
164 /* DMA_CMD */
165 nv_mask(dev, 0x00b300, 0x00030000, (dma0 & 0x00030000));
166 nv_wr32(dev, 0x00b334, base);
167 nv_wr32(dev, 0x00b324, size);
168 } else
169 if (mthd == 0x01a0) {
170 /* DMA_DATA */
171 nv_mask(dev, 0x00b300, 0x000c0000, (dma0 & 0x00030000) << 2);
172 nv_wr32(dev, 0x00b360, base);
173 nv_wr32(dev, 0x00b364, size);
174 } else {
175 /* DMA_IMAGE, VRAM only */
176 if (dma0 & 0x000c0000)
177 return -EINVAL;
178
179 nv_wr32(dev, 0x00b370, base);
180 nv_wr32(dev, 0x00b374, size);
181 }
182
183 return 0;
184}
185
186static int
187nv40_mpeg_isr_chid(struct drm_device *dev, u32 inst)
188{
189 struct drm_nouveau_private *dev_priv = dev->dev_private;
190 struct nouveau_gpuobj *ctx;
191 unsigned long flags;
192 int i;
193
194 spin_lock_irqsave(&dev_priv->channels.lock, flags);
195 for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
196 if (!dev_priv->channels.ptr[i])
197 continue;
198
199 ctx = dev_priv->channels.ptr[i]->engctx[NVOBJ_ENGINE_MPEG];
200 if (ctx && ctx->pinst == inst)
201 break;
202 }
203 spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
204 return i;
205}
206
207static void
208nv40_vpe_set_tile_region(struct drm_device *dev, int i)
209{
210 struct drm_nouveau_private *dev_priv = dev->dev_private;
211 struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
212
213 nv_wr32(dev, 0x00b008 + (i * 0x10), tile->pitch);
214 nv_wr32(dev, 0x00b004 + (i * 0x10), tile->limit);
215 nv_wr32(dev, 0x00b000 + (i * 0x10), tile->addr);
216}
217
218static void
219nv40_mpeg_isr(struct drm_device *dev)
220{
221 u32 inst = (nv_rd32(dev, 0x00b318) & 0x000fffff) << 4;
222 u32 chid = nv40_mpeg_isr_chid(dev, inst);
223 u32 stat = nv_rd32(dev, 0x00b100);
224 u32 type = nv_rd32(dev, 0x00b230);
225 u32 mthd = nv_rd32(dev, 0x00b234);
226 u32 data = nv_rd32(dev, 0x00b238);
227 u32 show = stat;
228
229 if (stat & 0x01000000) {
230 /* happens on initial binding of the object */
231 if (type == 0x00000020 && mthd == 0x0000) {
232 nv_mask(dev, 0x00b308, 0x00000000, 0x00000000);
233 show &= ~0x01000000;
234 }
235
236 if (type == 0x00000010) {
237 if (!nouveau_gpuobj_mthd_call2(dev, chid, 0x3174, mthd, data))
238 show &= ~0x01000000;
239 }
240 }
241
242 nv_wr32(dev, 0x00b100, stat);
243 nv_wr32(dev, 0x00b230, 0x00000001);
244
245 if (show && nouveau_ratelimit()) {
246 NV_INFO(dev, "PMPEG: Ch %d [0x%08x] 0x%08x 0x%08x 0x%08x 0x%08x\n",
247 chid, inst, stat, type, mthd, data);
248 }
249}
250
251static void
252nv40_vpe_isr(struct drm_device *dev)
253{
254 if (nv_rd32(dev, 0x00b100))
255 nv40_mpeg_isr(dev);
256
257 if (nv_rd32(dev, 0x00b800)) {
258 u32 stat = nv_rd32(dev, 0x00b800);
259 NV_INFO(dev, "PMSRCH: 0x%08x\n", stat);
260 nv_wr32(dev, 0xb800, stat);
261 }
262}
263
264static void
265nv40_mpeg_destroy(struct drm_device *dev, int engine)
266{
267 struct nv40_mpeg_engine *pmpeg = nv_engine(dev, engine);
268
269 nouveau_irq_unregister(dev, 0);
270
271 NVOBJ_ENGINE_DEL(dev, MPEG);
272 kfree(pmpeg);
273}
274
275int
276nv40_mpeg_create(struct drm_device *dev)
277{
278 struct nv40_mpeg_engine *pmpeg;
279
280 pmpeg = kzalloc(sizeof(*pmpeg), GFP_KERNEL);
281 if (!pmpeg)
282 return -ENOMEM;
283
284 pmpeg->base.destroy = nv40_mpeg_destroy;
285 pmpeg->base.init = nv40_mpeg_init;
286 pmpeg->base.fini = nv40_mpeg_fini;
287 pmpeg->base.context_new = nv40_mpeg_context_new;
288 pmpeg->base.context_del = nv40_mpeg_context_del;
289 pmpeg->base.object_new = nv40_mpeg_object_new;
290
291 /* ISR vector, PMC_ENABLE bit, and TILE regs are shared between
292 * all VPE engines, for this driver's purposes the PMPEG engine
293 * will be treated as the "master" and handle the global VPE
294 * bits too
295 */
296 pmpeg->base.set_tile_region = nv40_vpe_set_tile_region;
297 nouveau_irq_register(dev, 0, nv40_vpe_isr);
298
299 NVOBJ_ENGINE_ADD(dev, MPEG, &pmpeg->base);
300 NVOBJ_CLASS(dev, 0x3174, MPEG);
301 NVOBJ_MTHD (dev, 0x3174, 0x0190, nv40_mpeg_mthd_dma);
302 NVOBJ_MTHD (dev, 0x3174, 0x01a0, nv40_mpeg_mthd_dma);
303 NVOBJ_MTHD (dev, 0x3174, 0x01b0, nv40_mpeg_mthd_dma);
304
305#if 0
306 NVOBJ_ENGINE_ADD(dev, ME, &pme->base);
307 NVOBJ_CLASS(dev, 0x4075, ME);
308#endif
309 return 0;
310
311}
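
The interrupt handler above can only read the faulting context's instance address from PMPEG register 0x00b318, so nv40_mpeg_isr_chid recovers the channel by scanning every channel's MPEG engine context for a matching pinst. A minimal standalone sketch of that reverse lookup, with a hypothetical chan_ctx[] array standing in for dev_priv->channels:

typedef unsigned int u32;

/* Return the index of the channel whose MPEG context lives at 'inst',
 * or nr_chan when nothing matches (mirroring the loop falling off the
 * end in nv40_mpeg_isr_chid). */
static int chid_from_inst(const u32 *chan_ctx, int nr_chan, u32 inst)
{
	int i;

	for (i = 0; i < nr_chan; i++) {
		if (chan_ctx[i] && chan_ctx[i] == inst)
			break;
	}
	return i;
}
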
diff --git a/drivers/gpu/drm/nouveau/nv50_calc.c b/drivers/gpu/drm/nouveau/nv50_calc.c
index de81151648f8..8cf63a8b30cd 100644
--- a/drivers/gpu/drm/nouveau/nv50_calc.c
+++ b/drivers/gpu/drm/nouveau/nv50_calc.c
@@ -23,7 +23,6 @@
23 */ 23 */
24 24
25#include "drmP.h" 25#include "drmP.h"
26#include "drm_fixed.h"
27#include "nouveau_drv.h" 26#include "nouveau_drv.h"
28#include "nouveau_hw.h" 27#include "nouveau_hw.h"
29 28
@@ -47,45 +46,52 @@ nv50_calc_pll(struct drm_device *dev, struct pll_lims *pll, int clk,
47} 46}
48 47
49int 48int
50nv50_calc_pll2(struct drm_device *dev, struct pll_lims *pll, int clk, 49nva3_calc_pll(struct drm_device *dev, struct pll_lims *pll, int clk,
51 int *N, int *fN, int *M, int *P) 50 int *pN, int *pfN, int *pM, int *P)
52{ 51{
53 fixed20_12 fb_div, a, b; 52 u32 best_err = ~0, err;
54 u32 refclk = pll->refclk / 10; 53 int M, lM, hM, N, fN;
55 u32 max_vco_freq = pll->vco1.maxfreq / 10;
56 u32 max_vco_inputfreq = pll->vco1.max_inputfreq / 10;
57 clk /= 10;
58 54
59 *P = max_vco_freq / clk; 55 *P = pll->vco1.maxfreq / clk;
60 if (*P > pll->max_p) 56 if (*P > pll->max_p)
61 *P = pll->max_p; 57 *P = pll->max_p;
62 if (*P < pll->min_p) 58 if (*P < pll->min_p)
63 *P = pll->min_p; 59 *P = pll->min_p;
64 60
65 /* *M = floor((refclk + max_vco_inputfreq) / max_vco_inputfreq); */ 61 lM = (pll->refclk + pll->vco1.max_inputfreq) / pll->vco1.max_inputfreq;
66 a.full = dfixed_const(refclk + max_vco_inputfreq); 62 lM = max(lM, (int)pll->vco1.min_m);
67 b.full = dfixed_const(max_vco_inputfreq); 63 hM = (pll->refclk + pll->vco1.min_inputfreq) / pll->vco1.min_inputfreq;
68 a.full = dfixed_div(a, b); 64 hM = min(hM, (int)pll->vco1.max_m);
69 a.full = dfixed_floor(a);
70 *M = dfixed_trunc(a);
71 65
72 /* fb_div = (vco * *M) / refclk; */ 66 for (M = lM; M <= hM; M++) {
73 fb_div.full = dfixed_const(clk * *P); 67 u32 tmp = clk * *P * M;
74 fb_div.full = dfixed_mul(fb_div, a); 68 N = tmp / pll->refclk;
75 a.full = dfixed_const(refclk); 69 fN = tmp % pll->refclk;
76 fb_div.full = dfixed_div(fb_div, a); 70 if (!pfN && fN >= pll->refclk / 2)
71 N++;
77 72
78 /* *N = floor(fb_div); */ 73 if (N < pll->vco1.min_n)
79 a.full = dfixed_floor(fb_div); 74 continue;
80 *N = dfixed_trunc(fb_div); 75 if (N > pll->vco1.max_n)
76 break;
81 77
82 /* *fN = (fmod(fb_div, 1.0) * 8192) - 4096; */ 78 err = abs(clk - (pll->refclk * N / M / *P));
83 b.full = dfixed_const(8192); 79 if (err < best_err) {
84 a.full = dfixed_mul(a, b); 80 best_err = err;
85 fb_div.full = dfixed_mul(fb_div, b); 81 *pN = N;
86 fb_div.full = fb_div.full - a.full; 82 *pM = M;
87 *fN = dfixed_trunc(fb_div) - 4096; 83 }
88 *fN &= 0xffff;
89 84
90 return clk; 85 if (pfN) {
86 *pfN = (((fN << 13) / pll->refclk) - 4096) & 0xffff;
87 return clk;
88 }
89 }
90
91 if (unlikely(best_err == ~0)) {
92 NV_ERROR(dev, "unable to find matching pll values\n");
93 return -EINVAL;
94 }
95
96 return pll->refclk * *pN / *pM / *P;
91} 97}
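
nva3_calc_pll replaces the old fixed-point arithmetic with a brute-force integer search: for every legal reference divider M it derives the feedback divider N closest to clk * P * M / refclk and keeps the pair with the least output error, where the produced clock is refclk * N / (M * P). A self-contained sketch of the non-fractional path, under the assumption that P and the M range have already been clamped:

#include <stdlib.h>

typedef unsigned int u32;

static int calc_pll(u32 refclk, int clk, int P,
		    int lM, int hM, int min_n, int max_n,
		    int *pN, int *pM)
{
	u32 best_err = ~0u;
	int M;

	for (M = lM; M <= hM; M++) {
		u32 tmp = (u32)clk * P * M;
		int N = tmp / refclk;
		u32 err;

		if (tmp % refclk >= refclk / 2)
			N++;			/* round to the nearest N */
		if (N < min_n)
			continue;
		if (N > max_n)
			break;			/* N only grows with M */

		err = abs(clk - (int)(refclk * N / M / P));
		if (err < best_err) {
			best_err = err;
			*pN = N;
			*pM = M;
		}
	}

	return best_err == ~0u ? -1 : 0;	/* -1: no usable divider pair */
}
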
diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c
index a19ccaa025b3..ebabacf38da9 100644
--- a/drivers/gpu/drm/nouveau/nv50_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv50_crtc.c
@@ -286,7 +286,7 @@ nv50_crtc_set_clock(struct drm_device *dev, int head, int pclk)
286 nv_wr32(dev, pll.reg + 8, reg2 | (P << 28) | (M2 << 16) | N2); 286 nv_wr32(dev, pll.reg + 8, reg2 | (P << 28) | (M2 << 16) | N2);
287 } else 287 } else
288 if (dev_priv->chipset < NV_C0) { 288 if (dev_priv->chipset < NV_C0) {
289 ret = nv50_calc_pll2(dev, &pll, pclk, &N1, &N2, &M1, &P); 289 ret = nva3_calc_pll(dev, &pll, pclk, &N1, &N2, &M1, &P);
290 if (ret <= 0) 290 if (ret <= 0)
291 return 0; 291 return 0;
292 292
@@ -298,7 +298,7 @@ nv50_crtc_set_clock(struct drm_device *dev, int head, int pclk)
298 nv_wr32(dev, pll.reg + 4, reg1 | (P << 16) | (M1 << 8) | N1); 298 nv_wr32(dev, pll.reg + 4, reg1 | (P << 16) | (M1 << 8) | N1);
299 nv_wr32(dev, pll.reg + 8, N2); 299 nv_wr32(dev, pll.reg + 8, N2);
300 } else { 300 } else {
301 ret = nv50_calc_pll2(dev, &pll, pclk, &N1, &N2, &M1, &P); 301 ret = nva3_calc_pll(dev, &pll, pclk, &N1, &N2, &M1, &P);
302 if (ret <= 0) 302 if (ret <= 0)
303 return 0; 303 return 0;
304 304
@@ -349,14 +349,14 @@ nv50_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
349 struct drm_gem_object *gem; 349 struct drm_gem_object *gem;
350 int ret = 0, i; 350 int ret = 0, i;
351 351
352 if (width != 64 || height != 64)
353 return -EINVAL;
354
355 if (!buffer_handle) { 352 if (!buffer_handle) {
356 nv_crtc->cursor.hide(nv_crtc, true); 353 nv_crtc->cursor.hide(nv_crtc, true);
357 return 0; 354 return 0;
358 } 355 }
359 356
357 if (width != 64 || height != 64)
358 return -EINVAL;
359
360 gem = drm_gem_object_lookup(dev, file_priv, buffer_handle); 360 gem = drm_gem_object_lookup(dev, file_priv, buffer_handle);
361 if (!gem) 361 if (!gem)
362 return -ENOENT; 362 return -ENOENT;
@@ -532,8 +532,7 @@ nv50_crtc_do_mode_set_base(struct drm_crtc *crtc,
532 if (atomic) { 532 if (atomic) {
533 drm_fb = passed_fb; 533 drm_fb = passed_fb;
534 fb = nouveau_framebuffer(passed_fb); 534 fb = nouveau_framebuffer(passed_fb);
535 } 535 } else {
536 else {
537 /* If not atomic, we can go ahead and pin, and unpin the 536 /* If not atomic, we can go ahead and pin, and unpin the
538 * old fb we were passed. 537 * old fb we were passed.
539 */ 538 */
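
The cursor_set reorder above matters because userspace hides the cursor by passing a NULL buffer handle, and that request carries no meaningful dimensions; the 64x64 validation therefore has to run only on the show path. A hedged sketch of the resulting control flow (names hypothetical):

#include <errno.h>

typedef unsigned int u32;

static int cursor_set(u32 handle, u32 width, u32 height)
{
	if (!handle)
		return 0;	/* hide request: dimensions are irrelevant */

	if (width != 64 || height != 64)
		return -EINVAL;	/* hardware cursor is fixed at 64x64 */

	/* ... look up the buffer and point the cursor at it ... */
	return 0;
}
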
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 75a376cc342a..74a3f6872701 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -517,13 +517,25 @@ nv50_display_script_select(struct drm_device *dev, struct dcb_entry *dcb,
517 if (bios->fp.if_is_24bit) 517 if (bios->fp.if_is_24bit)
518 script |= 0x0200; 518 script |= 0x0200;
519 } else { 519 } else {
520 /* determine number of lvds links */
521 if (nv_connector && nv_connector->edid &&
522 nv_connector->dcb->type == DCB_CONNECTOR_LVDS_SPWG) {
523 /* http://www.spwg.org */
524 if (((u8 *)nv_connector->edid)[121] == 2)
525 script |= 0x0100;
526 } else
520 if (pxclk >= bios->fp.duallink_transition_clk) { 527 if (pxclk >= bios->fp.duallink_transition_clk) {
521 script |= 0x0100; 528 script |= 0x0100;
529 }
530
531 /* determine panel depth */
532 if (script & 0x0100) {
522 if (bios->fp.strapless_is_24bit & 2) 533 if (bios->fp.strapless_is_24bit & 2)
523 script |= 0x0200; 534 script |= 0x0200;
524 } else 535 } else {
525 if (bios->fp.strapless_is_24bit & 1) 536 if (bios->fp.strapless_is_24bit & 1)
526 script |= 0x0200; 537 script |= 0x0200;
538 }
527 539
528 if (nv_connector && nv_connector->edid && 540 if (nv_connector && nv_connector->edid &&
529 (nv_connector->edid->revision >= 4) && 541 (nv_connector->edid->revision >= 4) &&
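
The new nv50_display.c logic consults the panel EDID before falling back to the pixel-clock heuristic: SPWG-style LVDS panels publish their link count in EDID byte 121, with 2 meaning dual link. A simplified sketch of that decision, with the connector-type test reduced to a boolean:

typedef unsigned char u8;

static int lvds_is_dual_link(const u8 *edid, int is_spwg,
			     int pxclk, int duallink_transition_clk)
{
	if (edid && is_spwg)
		return edid[121] == 2;		/* explicit SPWG link count */

	return pxclk >= duallink_transition_clk; /* legacy clock heuristic */
}
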
diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c
index b02a5b1e7d37..e25cbb46789a 100644
--- a/drivers/gpu/drm/nouveau/nv50_graph.c
+++ b/drivers/gpu/drm/nouveau/nv50_graph.c
@@ -31,10 +31,95 @@
31#include "nouveau_grctx.h" 31#include "nouveau_grctx.h"
32#include "nouveau_dma.h" 32#include "nouveau_dma.h"
33#include "nouveau_vm.h" 33#include "nouveau_vm.h"
34#include "nouveau_ramht.h"
34#include "nv50_evo.h" 35#include "nv50_evo.h"
35 36
36static int nv50_graph_register(struct drm_device *); 37struct nv50_graph_engine {
37static void nv50_graph_isr(struct drm_device *); 38 struct nouveau_exec_engine base;
39 u32 ctxprog[512];
40 u32 ctxprog_size;
41 u32 grctx_size;
42};
43
44static void
45nv50_graph_fifo_access(struct drm_device *dev, bool enabled)
46{
47 const uint32_t mask = 0x00010001;
48
49 if (enabled)
50 nv_wr32(dev, 0x400500, nv_rd32(dev, 0x400500) | mask);
51 else
52 nv_wr32(dev, 0x400500, nv_rd32(dev, 0x400500) & ~mask);
53}
54
55static struct nouveau_channel *
56nv50_graph_channel(struct drm_device *dev)
57{
58 struct drm_nouveau_private *dev_priv = dev->dev_private;
59 uint32_t inst;
60 int i;
61
62 /* Be sure we're not in the middle of a context switch or bad things
63 * will happen, such as unloading the wrong pgraph context.
64 */
65 if (!nv_wait(dev, 0x400300, 0x00000001, 0x00000000))
66 NV_ERROR(dev, "Ctxprog is still running\n");
67
68 inst = nv_rd32(dev, NV50_PGRAPH_CTXCTL_CUR);
69 if (!(inst & NV50_PGRAPH_CTXCTL_CUR_LOADED))
70 return NULL;
71 inst = (inst & NV50_PGRAPH_CTXCTL_CUR_INSTANCE) << 12;
72
73 for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
74 struct nouveau_channel *chan = dev_priv->channels.ptr[i];
75
76 if (chan && chan->ramin && chan->ramin->vinst == inst)
77 return chan;
78 }
79
80 return NULL;
81}
82
83static int
84nv50_graph_do_load_context(struct drm_device *dev, uint32_t inst)
85{
86 uint32_t fifo = nv_rd32(dev, 0x400500);
87
88 nv_wr32(dev, 0x400500, fifo & ~1);
89 nv_wr32(dev, 0x400784, inst);
90 nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) | 0x40);
91 nv_wr32(dev, 0x400320, nv_rd32(dev, 0x400320) | 0x11);
92 nv_wr32(dev, 0x400040, 0xffffffff);
93 (void)nv_rd32(dev, 0x400040);
94 nv_wr32(dev, 0x400040, 0x00000000);
95 nv_wr32(dev, 0x400304, nv_rd32(dev, 0x400304) | 1);
96
97 if (nouveau_wait_for_idle(dev))
98 nv_wr32(dev, 0x40032c, inst | (1<<31));
99 nv_wr32(dev, 0x400500, fifo);
100
101 return 0;
102}
103
104static int
105nv50_graph_unload_context(struct drm_device *dev)
106{
107 uint32_t inst;
108
109 inst = nv_rd32(dev, NV50_PGRAPH_CTXCTL_CUR);
110 if (!(inst & NV50_PGRAPH_CTXCTL_CUR_LOADED))
111 return 0;
112 inst &= NV50_PGRAPH_CTXCTL_CUR_INSTANCE;
113
114 nouveau_wait_for_idle(dev);
115 nv_wr32(dev, 0x400784, inst);
116 nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) | 0x20);
117 nv_wr32(dev, 0x400304, nv_rd32(dev, 0x400304) | 0x01);
118 nouveau_wait_for_idle(dev);
119
120 nv_wr32(dev, NV50_PGRAPH_CTXCTL_CUR, inst);
121 return 0;
122}
38 123
39static void 124static void
40nv50_graph_init_reset(struct drm_device *dev) 125nv50_graph_init_reset(struct drm_device *dev)
@@ -52,7 +137,6 @@ nv50_graph_init_intr(struct drm_device *dev)
52{ 137{
53 NV_DEBUG(dev, "\n"); 138 NV_DEBUG(dev, "\n");
54 139
55 nouveau_irq_register(dev, 12, nv50_graph_isr);
56 nv_wr32(dev, NV03_PGRAPH_INTR, 0xffffffff); 140 nv_wr32(dev, NV03_PGRAPH_INTR, 0xffffffff);
57 nv_wr32(dev, 0x400138, 0xffffffff); 141 nv_wr32(dev, 0x400138, 0xffffffff);
58 nv_wr32(dev, NV40_PGRAPH_INTR_EN, 0xffffffff); 142 nv_wr32(dev, NV40_PGRAPH_INTR_EN, 0xffffffff);
@@ -135,34 +219,14 @@ nv50_graph_init_zcull(struct drm_device *dev)
135static int 219static int
136nv50_graph_init_ctxctl(struct drm_device *dev) 220nv50_graph_init_ctxctl(struct drm_device *dev)
137{ 221{
138 struct drm_nouveau_private *dev_priv = dev->dev_private; 222 struct nv50_graph_engine *pgraph = nv_engine(dev, NVOBJ_ENGINE_GR);
139 struct nouveau_grctx ctx = {};
140 uint32_t *cp;
141 int i; 223 int i;
142 224
143 NV_DEBUG(dev, "\n"); 225 NV_DEBUG(dev, "\n");
144 226
145 cp = kmalloc(512 * 4, GFP_KERNEL); 227 nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0);
146 if (!cp) { 228 for (i = 0; i < pgraph->ctxprog_size; i++)
147 NV_ERROR(dev, "failed to allocate ctxprog\n"); 229 nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_DATA, pgraph->ctxprog[i]);
148 dev_priv->engine.graph.accel_blocked = true;
149 return 0;
150 }
151
152 ctx.dev = dev;
153 ctx.mode = NOUVEAU_GRCTX_PROG;
154 ctx.data = cp;
155 ctx.ctxprog_max = 512;
156 if (!nv50_grctx_init(&ctx)) {
157 dev_priv->engine.graph.grctx_size = ctx.ctxvals_pos * 4;
158
159 nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0);
160 for (i = 0; i < ctx.ctxprog_len; i++)
161 nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_DATA, cp[i]);
162 } else {
163 dev_priv->engine.graph.accel_blocked = true;
164 }
165 kfree(cp);
166 230
167 nv_wr32(dev, 0x40008c, 0x00000004); /* HW_CTX_SWITCH_ENABLED */ 231 nv_wr32(dev, 0x40008c, 0x00000004); /* HW_CTX_SWITCH_ENABLED */
168 nv_wr32(dev, 0x400320, 4); 232 nv_wr32(dev, 0x400320, 4);
@@ -171,8 +235,8 @@ nv50_graph_init_ctxctl(struct drm_device *dev)
171 return 0; 235 return 0;
172} 236}
173 237
174int 238static int
175nv50_graph_init(struct drm_device *dev) 239nv50_graph_init(struct drm_device *dev, int engine)
176{ 240{
177 int ret; 241 int ret;
178 242
@@ -186,105 +250,66 @@ nv50_graph_init(struct drm_device *dev)
186 if (ret) 250 if (ret)
187 return ret; 251 return ret;
188 252
189 ret = nv50_graph_register(dev);
190 if (ret)
191 return ret;
192 nv50_graph_init_intr(dev); 253 nv50_graph_init_intr(dev);
193 return 0; 254 return 0;
194} 255}
195 256
196void 257static int
197nv50_graph_takedown(struct drm_device *dev) 258nv50_graph_fini(struct drm_device *dev, int engine)
198{ 259{
199 NV_DEBUG(dev, "\n"); 260 NV_DEBUG(dev, "\n");
261 nv50_graph_unload_context(dev);
200 nv_wr32(dev, 0x40013c, 0x00000000); 262 nv_wr32(dev, 0x40013c, 0x00000000);
201 nouveau_irq_unregister(dev, 12); 263 return 0;
202}
203
204void
205nv50_graph_fifo_access(struct drm_device *dev, bool enabled)
206{
207 const uint32_t mask = 0x00010001;
208
209 if (enabled)
210 nv_wr32(dev, 0x400500, nv_rd32(dev, 0x400500) | mask);
211 else
212 nv_wr32(dev, 0x400500, nv_rd32(dev, 0x400500) & ~mask);
213}
214
215struct nouveau_channel *
216nv50_graph_channel(struct drm_device *dev)
217{
218 struct drm_nouveau_private *dev_priv = dev->dev_private;
219 uint32_t inst;
220 int i;
221
222 /* Be sure we're not in the middle of a context switch or bad things
223 * will happen, such as unloading the wrong pgraph context.
224 */
225 if (!nv_wait(dev, 0x400300, 0x00000001, 0x00000000))
226 NV_ERROR(dev, "Ctxprog is still running\n");
227
228 inst = nv_rd32(dev, NV50_PGRAPH_CTXCTL_CUR);
229 if (!(inst & NV50_PGRAPH_CTXCTL_CUR_LOADED))
230 return NULL;
231 inst = (inst & NV50_PGRAPH_CTXCTL_CUR_INSTANCE) << 12;
232
233 for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
234 struct nouveau_channel *chan = dev_priv->channels.ptr[i];
235
236 if (chan && chan->ramin && chan->ramin->vinst == inst)
237 return chan;
238 }
239
240 return NULL;
241} 264}
242 265
243int 266static int
244nv50_graph_create_context(struct nouveau_channel *chan) 267nv50_graph_context_new(struct nouveau_channel *chan, int engine)
245{ 268{
246 struct drm_device *dev = chan->dev; 269 struct drm_device *dev = chan->dev;
247 struct drm_nouveau_private *dev_priv = dev->dev_private; 270 struct drm_nouveau_private *dev_priv = dev->dev_private;
248 struct nouveau_gpuobj *ramin = chan->ramin; 271 struct nouveau_gpuobj *ramin = chan->ramin;
249 struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; 272 struct nouveau_gpuobj *grctx = NULL;
273 struct nv50_graph_engine *pgraph = nv_engine(dev, engine);
250 struct nouveau_grctx ctx = {}; 274 struct nouveau_grctx ctx = {};
251 int hdr, ret; 275 int hdr, ret;
252 276
253 NV_DEBUG(dev, "ch%d\n", chan->id); 277 NV_DEBUG(dev, "ch%d\n", chan->id);
254 278
255 ret = nouveau_gpuobj_new(dev, chan, pgraph->grctx_size, 0, 279 ret = nouveau_gpuobj_new(dev, NULL, pgraph->grctx_size, 0,
256 NVOBJ_FLAG_ZERO_ALLOC | 280 NVOBJ_FLAG_ZERO_ALLOC |
257 NVOBJ_FLAG_ZERO_FREE, &chan->ramin_grctx); 281 NVOBJ_FLAG_ZERO_FREE, &grctx);
258 if (ret) 282 if (ret)
259 return ret; 283 return ret;
260 284
261 hdr = (dev_priv->chipset == 0x50) ? 0x200 : 0x20; 285 hdr = (dev_priv->chipset == 0x50) ? 0x200 : 0x20;
262 nv_wo32(ramin, hdr + 0x00, 0x00190002); 286 nv_wo32(ramin, hdr + 0x00, 0x00190002);
263 nv_wo32(ramin, hdr + 0x04, chan->ramin_grctx->vinst + 287 nv_wo32(ramin, hdr + 0x04, grctx->vinst + grctx->size - 1);
264 pgraph->grctx_size - 1); 288 nv_wo32(ramin, hdr + 0x08, grctx->vinst);
265 nv_wo32(ramin, hdr + 0x08, chan->ramin_grctx->vinst);
266 nv_wo32(ramin, hdr + 0x0c, 0); 289 nv_wo32(ramin, hdr + 0x0c, 0);
267 nv_wo32(ramin, hdr + 0x10, 0); 290 nv_wo32(ramin, hdr + 0x10, 0);
268 nv_wo32(ramin, hdr + 0x14, 0x00010000); 291 nv_wo32(ramin, hdr + 0x14, 0x00010000);
269 292
270 ctx.dev = chan->dev; 293 ctx.dev = chan->dev;
271 ctx.mode = NOUVEAU_GRCTX_VALS; 294 ctx.mode = NOUVEAU_GRCTX_VALS;
272 ctx.data = chan->ramin_grctx; 295 ctx.data = grctx;
273 nv50_grctx_init(&ctx); 296 nv50_grctx_init(&ctx);
274 297
275 nv_wo32(chan->ramin_grctx, 0x00000, chan->ramin->vinst >> 12); 298 nv_wo32(grctx, 0x00000, chan->ramin->vinst >> 12);
276 299
277 dev_priv->engine.instmem.flush(dev); 300 dev_priv->engine.instmem.flush(dev);
278 atomic_inc(&chan->vm->pgraph_refs); 301
302 atomic_inc(&chan->vm->engref[NVOBJ_ENGINE_GR]);
303 chan->engctx[NVOBJ_ENGINE_GR] = grctx;
279 return 0; 304 return 0;
280} 305}
281 306
282void 307static void
283nv50_graph_destroy_context(struct nouveau_channel *chan) 308nv50_graph_context_del(struct nouveau_channel *chan, int engine)
284{ 309{
310 struct nouveau_gpuobj *grctx = chan->engctx[engine];
285 struct drm_device *dev = chan->dev; 311 struct drm_device *dev = chan->dev;
286 struct drm_nouveau_private *dev_priv = dev->dev_private; 312 struct drm_nouveau_private *dev_priv = dev->dev_private;
287 struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
288 struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; 313 struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
289 int i, hdr = (dev_priv->chipset == 0x50) ? 0x200 : 0x20; 314 int i, hdr = (dev_priv->chipset == 0x50) ? 0x200 : 0x20;
290 unsigned long flags; 315 unsigned long flags;
@@ -296,72 +321,49 @@ nv50_graph_destroy_context(struct nouveau_channel *chan)
296 321
297 spin_lock_irqsave(&dev_priv->context_switch_lock, flags); 322 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
298 pfifo->reassign(dev, false); 323 pfifo->reassign(dev, false);
299 pgraph->fifo_access(dev, false); 324 nv50_graph_fifo_access(dev, false);
300 325
301 if (pgraph->channel(dev) == chan) 326 if (nv50_graph_channel(dev) == chan)
302 pgraph->unload_context(dev); 327 nv50_graph_unload_context(dev);
303 328
304 for (i = hdr; i < hdr + 24; i += 4) 329 for (i = hdr; i < hdr + 24; i += 4)
305 nv_wo32(chan->ramin, i, 0); 330 nv_wo32(chan->ramin, i, 0);
306 dev_priv->engine.instmem.flush(dev); 331 dev_priv->engine.instmem.flush(dev);
307 332
308 pgraph->fifo_access(dev, true); 333 nv50_graph_fifo_access(dev, true);
309 pfifo->reassign(dev, true); 334 pfifo->reassign(dev, true);
310 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); 335 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
311 336
312 nouveau_gpuobj_ref(NULL, &chan->ramin_grctx); 337 nouveau_gpuobj_ref(NULL, &grctx);
313 338
314 atomic_dec(&chan->vm->pgraph_refs); 339 atomic_dec(&chan->vm->engref[engine]);
340 chan->engctx[engine] = NULL;
315} 341}
316 342
317static int 343static int
318nv50_graph_do_load_context(struct drm_device *dev, uint32_t inst) 344nv50_graph_object_new(struct nouveau_channel *chan, int engine,
319{ 345 u32 handle, u16 class)
320 uint32_t fifo = nv_rd32(dev, 0x400500);
321
322 nv_wr32(dev, 0x400500, fifo & ~1);
323 nv_wr32(dev, 0x400784, inst);
324 nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) | 0x40);
325 nv_wr32(dev, 0x400320, nv_rd32(dev, 0x400320) | 0x11);
326 nv_wr32(dev, 0x400040, 0xffffffff);
327 (void)nv_rd32(dev, 0x400040);
328 nv_wr32(dev, 0x400040, 0x00000000);
329 nv_wr32(dev, 0x400304, nv_rd32(dev, 0x400304) | 1);
330
331 if (nouveau_wait_for_idle(dev))
332 nv_wr32(dev, 0x40032c, inst | (1<<31));
333 nv_wr32(dev, 0x400500, fifo);
334
335 return 0;
336}
337
338int
339nv50_graph_load_context(struct nouveau_channel *chan)
340{
341 uint32_t inst = chan->ramin->vinst >> 12;
342
343 NV_DEBUG(chan->dev, "ch%d\n", chan->id);
344 return nv50_graph_do_load_context(chan->dev, inst);
345}
346
347int
348nv50_graph_unload_context(struct drm_device *dev)
349{ 346{
350 uint32_t inst; 347 struct drm_device *dev = chan->dev;
348 struct drm_nouveau_private *dev_priv = dev->dev_private;
349 struct nouveau_gpuobj *obj = NULL;
350 int ret;
351 351
352 inst = nv_rd32(dev, NV50_PGRAPH_CTXCTL_CUR); 352 ret = nouveau_gpuobj_new(dev, chan, 16, 16, NVOBJ_FLAG_ZERO_FREE, &obj);
353 if (!(inst & NV50_PGRAPH_CTXCTL_CUR_LOADED)) 353 if (ret)
354 return 0; 354 return ret;
355 inst &= NV50_PGRAPH_CTXCTL_CUR_INSTANCE; 355 obj->engine = 1;
356 obj->class = class;
356 357
357 nouveau_wait_for_idle(dev); 358 nv_wo32(obj, 0x00, class);
358 nv_wr32(dev, 0x400784, inst); 359 nv_wo32(obj, 0x04, 0x00000000);
359 nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) | 0x20); 360 nv_wo32(obj, 0x08, 0x00000000);
360 nv_wr32(dev, 0x400304, nv_rd32(dev, 0x400304) | 0x01); 361 nv_wo32(obj, 0x0c, 0x00000000);
361 nouveau_wait_for_idle(dev); 362 dev_priv->engine.instmem.flush(dev);
362 363
363 nv_wr32(dev, NV50_PGRAPH_CTXCTL_CUR, inst); 364 ret = nouveau_ramht_insert(chan, handle, obj);
364 return 0; 365 nouveau_gpuobj_ref(NULL, &obj);
366 return ret;
365} 367}
366 368
367static void 369static void
@@ -442,68 +444,15 @@ nv50_graph_nvsw_mthd_page_flip(struct nouveau_channel *chan,
442 return 0; 444 return 0;
443} 445}
444 446
445static int
446nv50_graph_register(struct drm_device *dev)
447{
448 struct drm_nouveau_private *dev_priv = dev->dev_private;
449
450 if (dev_priv->engine.graph.registered)
451 return 0;
452
453 NVOBJ_CLASS(dev, 0x506e, SW); /* nvsw */
454 NVOBJ_MTHD (dev, 0x506e, 0x018c, nv50_graph_nvsw_dma_vblsem);
455 NVOBJ_MTHD (dev, 0x506e, 0x0400, nv50_graph_nvsw_vblsem_offset);
456 NVOBJ_MTHD (dev, 0x506e, 0x0404, nv50_graph_nvsw_vblsem_release_val);
457 NVOBJ_MTHD (dev, 0x506e, 0x0408, nv50_graph_nvsw_vblsem_release);
458 NVOBJ_MTHD (dev, 0x506e, 0x0500, nv50_graph_nvsw_mthd_page_flip);
459
460 NVOBJ_CLASS(dev, 0x0030, GR); /* null */
461 NVOBJ_CLASS(dev, 0x5039, GR); /* m2mf */
462 NVOBJ_CLASS(dev, 0x502d, GR); /* 2d */
463
464 /* tesla */
465 if (dev_priv->chipset == 0x50)
466 NVOBJ_CLASS(dev, 0x5097, GR); /* tesla (nv50) */
467 else
468 if (dev_priv->chipset < 0xa0)
469 NVOBJ_CLASS(dev, 0x8297, GR); /* tesla (nv8x/nv9x) */
470 else {
471 switch (dev_priv->chipset) {
472 case 0xa0:
473 case 0xaa:
474 case 0xac:
475 NVOBJ_CLASS(dev, 0x8397, GR);
476 break;
477 case 0xa3:
478 case 0xa5:
479 case 0xa8:
480 NVOBJ_CLASS(dev, 0x8597, GR);
481 break;
482 case 0xaf:
483 NVOBJ_CLASS(dev, 0x8697, GR);
484 break;
485 }
486 }
487
488 /* compute */
489 NVOBJ_CLASS(dev, 0x50c0, GR);
490 if (dev_priv->chipset > 0xa0 &&
491 dev_priv->chipset != 0xaa &&
492 dev_priv->chipset != 0xac)
493 NVOBJ_CLASS(dev, 0x85c0, GR);
494
495 dev_priv->engine.graph.registered = true;
496 return 0;
497}
498 447
499void 448static void
500nv50_graph_tlb_flush(struct drm_device *dev) 449nv50_graph_tlb_flush(struct drm_device *dev, int engine)
501{ 450{
502 nv50_vm_flush_engine(dev, 0); 451 nv50_vm_flush_engine(dev, 0);
503} 452}
504 453
505void 454static void
506nv84_graph_tlb_flush(struct drm_device *dev) 455nv84_graph_tlb_flush(struct drm_device *dev, int engine)
507{ 456{
508 struct drm_nouveau_private *dev_priv = dev->dev_private; 457 struct drm_nouveau_private *dev_priv = dev->dev_private;
509 struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer; 458 struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
@@ -548,8 +497,7 @@ nv84_graph_tlb_flush(struct drm_device *dev)
548 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); 497 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
549} 498}
550 499
551static struct nouveau_enum nv50_mp_exec_error_names[] = 500static struct nouveau_enum nv50_mp_exec_error_names[] = {
552{
553 { 3, "STACK_UNDERFLOW", NULL }, 501 { 3, "STACK_UNDERFLOW", NULL },
554 { 4, "QUADON_ACTIVE", NULL }, 502 { 4, "QUADON_ACTIVE", NULL },
555 { 8, "TIMEOUT", NULL }, 503 { 8, "TIMEOUT", NULL },
@@ -663,7 +611,7 @@ nv50_pgraph_mp_trap(struct drm_device *dev, int tpid, int display)
663 nv_rd32(dev, addr + 0x20); 611 nv_rd32(dev, addr + 0x20);
664 pc = nv_rd32(dev, addr + 0x24); 612 pc = nv_rd32(dev, addr + 0x24);
665 oplow = nv_rd32(dev, addr + 0x70); 613 oplow = nv_rd32(dev, addr + 0x70);
666 ophigh= nv_rd32(dev, addr + 0x74); 614 ophigh = nv_rd32(dev, addr + 0x74);
667 NV_INFO(dev, "PGRAPH_TRAP_MP_EXEC - " 615 NV_INFO(dev, "PGRAPH_TRAP_MP_EXEC - "
668 "TP %d MP %d: ", tpid, i); 616 "TP %d MP %d: ", tpid, i);
669 nouveau_enum_print(nv50_mp_exec_error_names, status); 617 nouveau_enum_print(nv50_mp_exec_error_names, status);
@@ -991,7 +939,7 @@ nv50_pgraph_trap_handler(struct drm_device *dev, u32 display, u64 inst, u32 chid
991 return 1; 939 return 1;
992} 940}
993 941
994static int 942int
995nv50_graph_isr_chid(struct drm_device *dev, u64 inst) 943nv50_graph_isr_chid(struct drm_device *dev, u64 inst)
996{ 944{
997 struct drm_nouveau_private *dev_priv = dev->dev_private; 945 struct drm_nouveau_private *dev_priv = dev->dev_private;
@@ -1073,3 +1021,101 @@ nv50_graph_isr(struct drm_device *dev)
1073 if (nv_rd32(dev, 0x400824) & (1 << 31)) 1021 if (nv_rd32(dev, 0x400824) & (1 << 31))
1074 nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) & ~(1 << 31)); 1022 nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) & ~(1 << 31));
1075} 1023}
1024
1025static void
1026nv50_graph_destroy(struct drm_device *dev, int engine)
1027{
1028 struct nv50_graph_engine *pgraph = nv_engine(dev, engine);
1029
1030 NVOBJ_ENGINE_DEL(dev, GR);
1031
1032 nouveau_irq_unregister(dev, 12);
1033 kfree(pgraph);
1034}
1035
1036int
1037nv50_graph_create(struct drm_device *dev)
1038{
1039 struct drm_nouveau_private *dev_priv = dev->dev_private;
1040 struct nv50_graph_engine *pgraph;
1041 struct nouveau_grctx ctx = {};
1042 int ret;
1043
 1044 pgraph = kzalloc(sizeof(*pgraph), GFP_KERNEL);
1045 if (!pgraph)
1046 return -ENOMEM;
1047
1048 ctx.dev = dev;
1049 ctx.mode = NOUVEAU_GRCTX_PROG;
1050 ctx.data = pgraph->ctxprog;
1051 ctx.ctxprog_max = ARRAY_SIZE(pgraph->ctxprog);
1052
1053 ret = nv50_grctx_init(&ctx);
1054 if (ret) {
1055 NV_ERROR(dev, "PGRAPH: ctxprog build failed\n");
1056 kfree(pgraph);
1057 return 0;
1058 }
1059
1060 pgraph->grctx_size = ctx.ctxvals_pos * 4;
1061 pgraph->ctxprog_size = ctx.ctxprog_len;
1062
1063 pgraph->base.destroy = nv50_graph_destroy;
1064 pgraph->base.init = nv50_graph_init;
1065 pgraph->base.fini = nv50_graph_fini;
1066 pgraph->base.context_new = nv50_graph_context_new;
1067 pgraph->base.context_del = nv50_graph_context_del;
1068 pgraph->base.object_new = nv50_graph_object_new;
1069 if (dev_priv->chipset == 0x50 || dev_priv->chipset == 0xac)
1070 pgraph->base.tlb_flush = nv50_graph_tlb_flush;
1071 else
1072 pgraph->base.tlb_flush = nv84_graph_tlb_flush;
1073
1074 nouveau_irq_register(dev, 12, nv50_graph_isr);
1075
1076 /* NVSW really doesn't live here... */
1077 NVOBJ_CLASS(dev, 0x506e, SW); /* nvsw */
1078 NVOBJ_MTHD (dev, 0x506e, 0x018c, nv50_graph_nvsw_dma_vblsem);
1079 NVOBJ_MTHD (dev, 0x506e, 0x0400, nv50_graph_nvsw_vblsem_offset);
1080 NVOBJ_MTHD (dev, 0x506e, 0x0404, nv50_graph_nvsw_vblsem_release_val);
1081 NVOBJ_MTHD (dev, 0x506e, 0x0408, nv50_graph_nvsw_vblsem_release);
1082 NVOBJ_MTHD (dev, 0x506e, 0x0500, nv50_graph_nvsw_mthd_page_flip);
1083
1084 NVOBJ_ENGINE_ADD(dev, GR, &pgraph->base);
1085 NVOBJ_CLASS(dev, 0x0030, GR); /* null */
1086 NVOBJ_CLASS(dev, 0x5039, GR); /* m2mf */
1087 NVOBJ_CLASS(dev, 0x502d, GR); /* 2d */
1088
1089 /* tesla */
1090 if (dev_priv->chipset == 0x50)
1091 NVOBJ_CLASS(dev, 0x5097, GR); /* tesla (nv50) */
1092 else
1093 if (dev_priv->chipset < 0xa0)
1094 NVOBJ_CLASS(dev, 0x8297, GR); /* tesla (nv8x/nv9x) */
1095 else {
1096 switch (dev_priv->chipset) {
1097 case 0xa0:
1098 case 0xaa:
1099 case 0xac:
1100 NVOBJ_CLASS(dev, 0x8397, GR);
1101 break;
1102 case 0xa3:
1103 case 0xa5:
1104 case 0xa8:
1105 NVOBJ_CLASS(dev, 0x8597, GR);
1106 break;
1107 case 0xaf:
1108 NVOBJ_CLASS(dev, 0x8697, GR);
1109 break;
1110 }
1111 }
1112
1113 /* compute */
1114 NVOBJ_CLASS(dev, 0x50c0, GR);
1115 if (dev_priv->chipset > 0xa0 &&
1116 dev_priv->chipset != 0xaa &&
1117 dev_priv->chipset != 0xac)
1118 NVOBJ_CLASS(dev, 0x85c0, GR);
1119
1120 return 0;
1121}
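
The PGRAPH rework above is one instance of the series' wider pattern: each engine embeds a nouveau_exec_engine base, fills in its hooks, and is registered with NVOBJ_ENGINE_ADD so common code can drive it without engine-specific branches. A stripped-down sketch of the shape of that pattern (types and names here are illustrative, not the driver's headers):

struct device;				/* opaque stand-in for drm_device */

struct exec_engine {
	void (*destroy)(struct device *, int engine);
	int  (*init)(struct device *, int engine);
	int  (*fini)(struct device *, int engine);
	int  (*context_new)(void *chan, int engine);
	void (*context_del)(void *chan, int engine);
	void (*tlb_flush)(struct device *, int engine);
};

struct graph_engine {
	struct exec_engine base;	/* common vtable comes first */
	unsigned int ctxprog[512];	/* engine-private state follows */
	unsigned int ctxprog_size;
};
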
diff --git a/drivers/gpu/drm/nouveau/nv50_grctx.c b/drivers/gpu/drm/nouveau/nv50_grctx.c
index 336aab2a24a6..de9abff12b90 100644
--- a/drivers/gpu/drm/nouveau/nv50_grctx.c
+++ b/drivers/gpu/drm/nouveau/nv50_grctx.c
@@ -747,7 +747,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
747 gr_def(ctx, offset + 0x64, 0x0000001f); 747 gr_def(ctx, offset + 0x64, 0x0000001f);
748 gr_def(ctx, offset + 0x68, 0x0000000f); 748 gr_def(ctx, offset + 0x68, 0x0000000f);
749 gr_def(ctx, offset + 0x6c, 0x0000000f); 749 gr_def(ctx, offset + 0x6c, 0x0000000f);
750 } else if(dev_priv->chipset < 0xa0) { 750 } else if (dev_priv->chipset < 0xa0) {
751 cp_ctx(ctx, offset + 0x50, 1); 751 cp_ctx(ctx, offset + 0x50, 1);
752 cp_ctx(ctx, offset + 0x70, 1); 752 cp_ctx(ctx, offset + 0x70, 1);
753 } else { 753 } else {
@@ -924,7 +924,7 @@ nv50_graph_construct_mmio_ddata(struct nouveau_grctx *ctx)
924 dd_emit(ctx, 1, 0); /* 0000007f MULTISAMPLE_SAMPLES_LOG2 */ 924 dd_emit(ctx, 1, 0); /* 0000007f MULTISAMPLE_SAMPLES_LOG2 */
925 } else { 925 } else {
 926 dd_emit(ctx, 1, 0); /* 0000000f MULTISAMPLE_SAMPLES_LOG2 */ 926 dd_emit(ctx, 1, 0); /* 0000000f MULTISAMPLE_SAMPLES_LOG2 */
927 } 927 }
928 dd_emit(ctx, 1, 0xc); /* 000000ff SEMANTIC_COLOR.BFC0_ID */ 928 dd_emit(ctx, 1, 0xc); /* 000000ff SEMANTIC_COLOR.BFC0_ID */
929 if (dev_priv->chipset != 0x50) 929 if (dev_priv->chipset != 0x50)
930 dd_emit(ctx, 1, 0); /* 00000001 SEMANTIC_COLOR.CLMP_EN */ 930 dd_emit(ctx, 1, 0); /* 00000001 SEMANTIC_COLOR.CLMP_EN */
@@ -1803,9 +1803,7 @@ nv50_graph_construct_gene_unk24xx(struct nouveau_grctx *ctx)
1803 xf_emit(ctx, 1, 0); /* 1ff */ 1803 xf_emit(ctx, 1, 0); /* 1ff */
1804 xf_emit(ctx, 8, 0); /* 0? */ 1804 xf_emit(ctx, 8, 0); /* 0? */
1805 xf_emit(ctx, 9, 0); /* ffffffff, 7ff */ 1805 xf_emit(ctx, 9, 0); /* ffffffff, 7ff */
1806 } 1806 } else {
1807 else
1808 {
1809 xf_emit(ctx, 0xc, 0); /* RO */ 1807 xf_emit(ctx, 0xc, 0); /* RO */
1810 /* SEEK */ 1808 /* SEEK */
1811 xf_emit(ctx, 0xe10, 0); /* 190 * 9: 8*ffffffff, 7ff */ 1809 xf_emit(ctx, 0xe10, 0); /* 190 * 9: 8*ffffffff, 7ff */
@@ -2836,7 +2834,7 @@ nv50_graph_construct_xfer_tprop(struct nouveau_grctx *ctx)
2836 xf_emit(ctx, 1, 1); /* 00000001 DST_LINEAR */ 2834 xf_emit(ctx, 1, 1); /* 00000001 DST_LINEAR */
2837 if (IS_NVA3F(dev_priv->chipset)) 2835 if (IS_NVA3F(dev_priv->chipset))
2838 xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */ 2836 xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */
2839 if(dev_priv->chipset == 0x50) 2837 if (dev_priv->chipset == 0x50)
2840 xf_emit(ctx, 1, 0); /* ff */ 2838 xf_emit(ctx, 1, 0); /* ff */
2841 else 2839 else
2842 xf_emit(ctx, 3, 0); /* 1, 7, 3ff */ 2840 xf_emit(ctx, 3, 0); /* 1, 7, 3ff */
diff --git a/drivers/gpu/drm/nouveau/nv50_mpeg.c b/drivers/gpu/drm/nouveau/nv50_mpeg.c
new file mode 100644
index 000000000000..1dc5913f78c5
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_mpeg.c
@@ -0,0 +1,256 @@
1/*
2 * Copyright 2011 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "drmP.h"
26#include "nouveau_drv.h"
27#include "nouveau_ramht.h"
28
29struct nv50_mpeg_engine {
30 struct nouveau_exec_engine base;
31};
32
33static inline u32
34CTX_PTR(struct drm_device *dev, u32 offset)
35{
36 struct drm_nouveau_private *dev_priv = dev->dev_private;
37
38 if (dev_priv->chipset == 0x50)
39 offset += 0x0260;
40 else
41 offset += 0x0060;
42
43 return offset;
44}
45
46static int
47nv50_mpeg_context_new(struct nouveau_channel *chan, int engine)
48{
49 struct drm_device *dev = chan->dev;
50 struct drm_nouveau_private *dev_priv = dev->dev_private;
51 struct nouveau_gpuobj *ramin = chan->ramin;
52 struct nouveau_gpuobj *ctx = NULL;
53 int ret;
54
55 NV_DEBUG(dev, "ch%d\n", chan->id);
56
57 ret = nouveau_gpuobj_new(dev, chan, 128 * 4, 0, NVOBJ_FLAG_ZERO_ALLOC |
58 NVOBJ_FLAG_ZERO_FREE, &ctx);
59 if (ret)
60 return ret;
61
62 nv_wo32(ramin, CTX_PTR(dev, 0x00), 0x80190002);
63 nv_wo32(ramin, CTX_PTR(dev, 0x04), ctx->vinst + ctx->size - 1);
64 nv_wo32(ramin, CTX_PTR(dev, 0x08), ctx->vinst);
65 nv_wo32(ramin, CTX_PTR(dev, 0x0c), 0);
66 nv_wo32(ramin, CTX_PTR(dev, 0x10), 0);
67 nv_wo32(ramin, CTX_PTR(dev, 0x14), 0x00010000);
68
69 nv_wo32(ctx, 0x70, 0x00801ec1);
70 nv_wo32(ctx, 0x7c, 0x0000037c);
71 dev_priv->engine.instmem.flush(dev);
72
73 chan->engctx[engine] = ctx;
74 return 0;
75}
76
77static void
78nv50_mpeg_context_del(struct nouveau_channel *chan, int engine)
79{
80 struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
81 struct nouveau_gpuobj *ctx = chan->engctx[engine];
82 struct drm_device *dev = chan->dev;
83 unsigned long flags;
84 u32 inst, i;
85
86 if (!chan->ramin)
87 return;
88
89 inst = chan->ramin->vinst >> 12;
90 inst |= 0x80000000;
91
92 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
93 nv_mask(dev, 0x00b32c, 0x00000001, 0x00000000);
94 if (nv_rd32(dev, 0x00b318) == inst)
95 nv_mask(dev, 0x00b318, 0x80000000, 0x00000000);
96 nv_mask(dev, 0x00b32c, 0x00000001, 0x00000001);
97 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
98
99 for (i = 0x00; i <= 0x14; i += 4)
100 nv_wo32(chan->ramin, CTX_PTR(dev, i), 0x00000000);
101 nouveau_gpuobj_ref(NULL, &ctx);
102 chan->engctx[engine] = NULL;
103}
104
105static int
106nv50_mpeg_object_new(struct nouveau_channel *chan, int engine,
107 u32 handle, u16 class)
108{
109 struct drm_device *dev = chan->dev;
110 struct drm_nouveau_private *dev_priv = dev->dev_private;
111 struct nouveau_gpuobj *obj = NULL;
112 int ret;
113
114 ret = nouveau_gpuobj_new(dev, chan, 16, 16, NVOBJ_FLAG_ZERO_FREE, &obj);
115 if (ret)
116 return ret;
117 obj->engine = 2;
118 obj->class = class;
119
120 nv_wo32(obj, 0x00, class);
121 nv_wo32(obj, 0x04, 0x00000000);
122 nv_wo32(obj, 0x08, 0x00000000);
123 nv_wo32(obj, 0x0c, 0x00000000);
124 dev_priv->engine.instmem.flush(dev);
125
126 ret = nouveau_ramht_insert(chan, handle, obj);
127 nouveau_gpuobj_ref(NULL, &obj);
128 return ret;
129}
130
131static void
132nv50_mpeg_tlb_flush(struct drm_device *dev, int engine)
133{
134 nv50_vm_flush_engine(dev, 0x08);
135}
136
137static int
138nv50_mpeg_init(struct drm_device *dev, int engine)
139{
140 nv_wr32(dev, 0x00b32c, 0x00000000);
141 nv_wr32(dev, 0x00b314, 0x00000100);
142 nv_wr32(dev, 0x00b0e0, 0x0000001a);
143
144 nv_wr32(dev, 0x00b220, 0x00000044);
145 nv_wr32(dev, 0x00b300, 0x00801ec1);
146 nv_wr32(dev, 0x00b390, 0x00000000);
147 nv_wr32(dev, 0x00b394, 0x00000000);
148 nv_wr32(dev, 0x00b398, 0x00000000);
149 nv_mask(dev, 0x00b32c, 0x00000001, 0x00000001);
150
151 nv_wr32(dev, 0x00b100, 0xffffffff);
152 nv_wr32(dev, 0x00b140, 0xffffffff);
153
154 if (!nv_wait(dev, 0x00b200, 0x00000001, 0x00000000)) {
155 NV_ERROR(dev, "PMPEG init: 0x%08x\n", nv_rd32(dev, 0x00b200));
156 return -EBUSY;
157 }
158
159 return 0;
160}
161
162static int
163nv50_mpeg_fini(struct drm_device *dev, int engine)
164{
 165 /* XXX: context save for suspend/resume not implemented yet */
166 nv_mask(dev, 0x00b32c, 0x00000001, 0x00000000);
167 nv_wr32(dev, 0x00b140, 0x00000000);
168 return 0;
169}
170
171static void
172nv50_mpeg_isr(struct drm_device *dev)
173{
174 u32 stat = nv_rd32(dev, 0x00b100);
175 u32 type = nv_rd32(dev, 0x00b230);
176 u32 mthd = nv_rd32(dev, 0x00b234);
177 u32 data = nv_rd32(dev, 0x00b238);
178 u32 show = stat;
179
180 if (stat & 0x01000000) {
181 /* happens on initial binding of the object */
182 if (type == 0x00000020 && mthd == 0x0000) {
183 nv_wr32(dev, 0x00b308, 0x00000100);
184 show &= ~0x01000000;
185 }
186 }
187
188 if (show && nouveau_ratelimit()) {
189 NV_INFO(dev, "PMPEG - 0x%08x 0x%08x 0x%08x 0x%08x\n",
190 stat, type, mthd, data);
191 }
192
193 nv_wr32(dev, 0x00b100, stat);
194 nv_wr32(dev, 0x00b230, 0x00000001);
195 nv50_fb_vm_trap(dev, 1);
196}
197
198static void
199nv50_vpe_isr(struct drm_device *dev)
200{
201 if (nv_rd32(dev, 0x00b100))
202 nv50_mpeg_isr(dev);
203
204 if (nv_rd32(dev, 0x00b800)) {
205 u32 stat = nv_rd32(dev, 0x00b800);
206 NV_INFO(dev, "PMSRCH: 0x%08x\n", stat);
 207 nv_wr32(dev, 0x00b800, stat);
208 }
209}
210
211static void
212nv50_mpeg_destroy(struct drm_device *dev, int engine)
213{
214 struct nv50_mpeg_engine *pmpeg = nv_engine(dev, engine);
215
216 nouveau_irq_unregister(dev, 0);
217
218 NVOBJ_ENGINE_DEL(dev, MPEG);
219 kfree(pmpeg);
220}
221
222int
223nv50_mpeg_create(struct drm_device *dev)
224{
225 struct drm_nouveau_private *dev_priv = dev->dev_private;
226 struct nv50_mpeg_engine *pmpeg;
227
228 pmpeg = kzalloc(sizeof(*pmpeg), GFP_KERNEL);
229 if (!pmpeg)
230 return -ENOMEM;
231
232 pmpeg->base.destroy = nv50_mpeg_destroy;
233 pmpeg->base.init = nv50_mpeg_init;
234 pmpeg->base.fini = nv50_mpeg_fini;
235 pmpeg->base.context_new = nv50_mpeg_context_new;
236 pmpeg->base.context_del = nv50_mpeg_context_del;
237 pmpeg->base.object_new = nv50_mpeg_object_new;
238 pmpeg->base.tlb_flush = nv50_mpeg_tlb_flush;
239
240 if (dev_priv->chipset == 0x50) {
241 nouveau_irq_register(dev, 0, nv50_vpe_isr);
242 NVOBJ_ENGINE_ADD(dev, MPEG, &pmpeg->base);
243 NVOBJ_CLASS(dev, 0x3174, MPEG);
 244#if 0 /* ME engine hookup, left disabled: pme is never allocated here */
245 NVOBJ_ENGINE_ADD(dev, ME, &pme->base);
246 NVOBJ_CLASS(dev, 0x4075, ME);
247#endif
248 } else {
249 nouveau_irq_register(dev, 0, nv50_mpeg_isr);
250 NVOBJ_ENGINE_ADD(dev, MPEG, &pmpeg->base);
251 NVOBJ_CLASS(dev, 0x8274, MPEG);
252 }
253
254 return 0;
255
256}
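
CTX_PTR above encodes the one layout difference nv50_mpeg.c has to care about: on the original nv50 the PMPEG context pointers sit at +0x260 inside the channel's instance block, while every later chipset places them at +0x60. Reduced to its essentials:

typedef unsigned int u32;

static u32 ctx_ptr(int chipset, u32 offset)
{
	/* nv50 keeps the PMPEG context header at +0x260, later chips
	 * (nv84 and up) keep it at +0x60 */
	return offset + (chipset == 0x50 ? 0x0260 : 0x0060);
}
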
diff --git a/drivers/gpu/drm/nouveau/nv50_pm.c b/drivers/gpu/drm/nouveau/nv50_pm.c
index 7dbb305d7e63..8a2810011bda 100644
--- a/drivers/gpu/drm/nouveau/nv50_pm.c
+++ b/drivers/gpu/drm/nouveau/nv50_pm.c
@@ -47,6 +47,21 @@ nv50_pm_clock_get(struct drm_device *dev, u32 id)
47 47
48 reg0 = nv_rd32(dev, pll.reg + 0); 48 reg0 = nv_rd32(dev, pll.reg + 0);
49 reg1 = nv_rd32(dev, pll.reg + 4); 49 reg1 = nv_rd32(dev, pll.reg + 4);
50
51 if ((reg0 & 0x80000000) == 0) {
52 if (id == PLL_SHADER) {
53 NV_DEBUG(dev, "Shader PLL is disabled. "
54 "Shader clock is twice the core\n");
55 ret = nv50_pm_clock_get(dev, PLL_CORE);
56 if (ret > 0)
57 return ret << 1;
58 } else if (id == PLL_MEMORY) {
59 NV_DEBUG(dev, "Memory PLL is disabled. "
60 "Memory clock is equal to the ref_clk\n");
61 return pll.refclk;
62 }
63 }
64
50 P = (reg0 & 0x00070000) >> 16; 65 P = (reg0 & 0x00070000) >> 16;
51 N = (reg1 & 0x0000ff00) >> 8; 66 N = (reg1 & 0x0000ff00) >> 8;
52 M = (reg1 & 0x000000ff); 67 M = (reg1 & 0x000000ff);
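
The nv50_pm.c hunk handles PLLs that are bypassed rather than running: bit 31 of reg0 is the enable bit, and with it clear the shader domain runs at twice the core clock while the memory domain follows the reference clock directly. A sketch of just that branch (the enum and helper are illustrative):

typedef unsigned int u32;

enum pll_id { PLL_CORE, PLL_SHADER, PLL_MEMORY };

/* Returns the effective clock when the PLL is bypassed, or -1 when the
 * caller should decode N/M/P from the PLL registers as usual. */
static int bypassed_clock(enum pll_id id, u32 reg0, int core_clk, int refclk)
{
	if (reg0 & 0x80000000)
		return -1;			/* PLL enabled: not bypassed */

	if (id == PLL_SHADER)
		return core_clk << 1;		/* shader = 2x core */
	if (id == PLL_MEMORY)
		return refclk;			/* memory = reference clock */

	return -1;
}
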
diff --git a/drivers/gpu/drm/nouveau/nv50_vm.c b/drivers/gpu/drm/nouveau/nv50_vm.c
index 6c2694490741..1a0dd491a0e4 100644
--- a/drivers/gpu/drm/nouveau/nv50_vm.c
+++ b/drivers/gpu/drm/nouveau/nv50_vm.c
@@ -151,8 +151,7 @@ nv50_vm_flush(struct nouveau_vm *vm)
151 struct drm_nouveau_private *dev_priv = vm->dev->dev_private; 151 struct drm_nouveau_private *dev_priv = vm->dev->dev_private;
152 struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem; 152 struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
153 struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; 153 struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
154 struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; 154 int i;
155 struct nouveau_crypt_engine *pcrypt = &dev_priv->engine.crypt;
156 155
157 pinstmem->flush(vm->dev); 156 pinstmem->flush(vm->dev);
158 157
@@ -163,11 +162,10 @@ nv50_vm_flush(struct nouveau_vm *vm)
163 } 162 }
164 163
165 pfifo->tlb_flush(vm->dev); 164 pfifo->tlb_flush(vm->dev);
166 165 for (i = 0; i < NVOBJ_ENGINE_NR; i++) {
167 if (atomic_read(&vm->pgraph_refs)) 166 if (atomic_read(&vm->engref[i]))
168 pgraph->tlb_flush(vm->dev); 167 dev_priv->eng[i]->tlb_flush(vm->dev, i);
169 if (atomic_read(&vm->pcrypt_refs)) 168 }
170 pcrypt->tlb_flush(vm->dev);
171} 169}
172 170
173void 171void
diff --git a/drivers/gpu/drm/nouveau/nv84_crypt.c b/drivers/gpu/drm/nouveau/nv84_crypt.c
index fabc7fd30b1d..75b809a51748 100644
--- a/drivers/gpu/drm/nouveau/nv84_crypt.c
+++ b/drivers/gpu/drm/nouveau/nv84_crypt.c
@@ -26,46 +26,48 @@
26#include "nouveau_drv.h" 26#include "nouveau_drv.h"
27#include "nouveau_util.h" 27#include "nouveau_util.h"
28#include "nouveau_vm.h" 28#include "nouveau_vm.h"
29#include "nouveau_ramht.h"
29 30
30static void nv84_crypt_isr(struct drm_device *); 31struct nv84_crypt_engine {
32 struct nouveau_exec_engine base;
33};
31 34
32int 35static int
33nv84_crypt_create_context(struct nouveau_channel *chan) 36nv84_crypt_context_new(struct nouveau_channel *chan, int engine)
34{ 37{
35 struct drm_device *dev = chan->dev; 38 struct drm_device *dev = chan->dev;
36 struct drm_nouveau_private *dev_priv = dev->dev_private; 39 struct drm_nouveau_private *dev_priv = dev->dev_private;
37 struct nouveau_gpuobj *ramin = chan->ramin; 40 struct nouveau_gpuobj *ramin = chan->ramin;
41 struct nouveau_gpuobj *ctx;
38 int ret; 42 int ret;
39 43
40 NV_DEBUG(dev, "ch%d\n", chan->id); 44 NV_DEBUG(dev, "ch%d\n", chan->id);
41 45
42 ret = nouveau_gpuobj_new(dev, chan, 256, 0, 46 ret = nouveau_gpuobj_new(dev, chan, 256, 0, NVOBJ_FLAG_ZERO_ALLOC |
43 NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ZERO_FREE, 47 NVOBJ_FLAG_ZERO_FREE, &ctx);
44 &chan->crypt_ctx);
45 if (ret) 48 if (ret)
46 return ret; 49 return ret;
47 50
48 nv_wo32(ramin, 0xa0, 0x00190000); 51 nv_wo32(ramin, 0xa0, 0x00190000);
49 nv_wo32(ramin, 0xa4, chan->crypt_ctx->vinst + 0xff); 52 nv_wo32(ramin, 0xa4, ctx->vinst + ctx->size - 1);
50 nv_wo32(ramin, 0xa8, chan->crypt_ctx->vinst); 53 nv_wo32(ramin, 0xa8, ctx->vinst);
51 nv_wo32(ramin, 0xac, 0); 54 nv_wo32(ramin, 0xac, 0);
52 nv_wo32(ramin, 0xb0, 0); 55 nv_wo32(ramin, 0xb0, 0);
53 nv_wo32(ramin, 0xb4, 0); 56 nv_wo32(ramin, 0xb4, 0);
54
55 dev_priv->engine.instmem.flush(dev); 57 dev_priv->engine.instmem.flush(dev);
56 atomic_inc(&chan->vm->pcrypt_refs); 58
59 atomic_inc(&chan->vm->engref[engine]);
60 chan->engctx[engine] = ctx;
57 return 0; 61 return 0;
58} 62}
59 63
60void 64static void
61nv84_crypt_destroy_context(struct nouveau_channel *chan) 65nv84_crypt_context_del(struct nouveau_channel *chan, int engine)
62{ 66{
67 struct nouveau_gpuobj *ctx = chan->engctx[engine];
63 struct drm_device *dev = chan->dev; 68 struct drm_device *dev = chan->dev;
64 u32 inst; 69 u32 inst;
65 70
66 if (!chan->crypt_ctx)
67 return;
68
69 inst = (chan->ramin->vinst >> 12); 71 inst = (chan->ramin->vinst >> 12);
70 inst |= 0x80000000; 72 inst |= 0x80000000;
71 73
@@ -80,43 +82,39 @@ nv84_crypt_destroy_context(struct nouveau_channel *chan)
80 nv_mask(dev, 0x10218c, 0x80000000, 0x00000000); 82 nv_mask(dev, 0x10218c, 0x80000000, 0x00000000);
81 nv_wr32(dev, 0x10200c, 0x00000010); 83 nv_wr32(dev, 0x10200c, 0x00000010);
82 84
83 nouveau_gpuobj_ref(NULL, &chan->crypt_ctx); 85 nouveau_gpuobj_ref(NULL, &ctx);
84 atomic_dec(&chan->vm->pcrypt_refs);
85}
86 86
87void 87 atomic_dec(&chan->vm->engref[engine]);
88nv84_crypt_tlb_flush(struct drm_device *dev) 88 chan->engctx[engine] = NULL;
89{
90 nv50_vm_flush_engine(dev, 0x0a);
91} 89}
92 90
93int 91static int
94nv84_crypt_init(struct drm_device *dev) 92nv84_crypt_object_new(struct nouveau_channel *chan, int engine,
93 u32 handle, u16 class)
95{ 94{
95 struct drm_device *dev = chan->dev;
96 struct drm_nouveau_private *dev_priv = dev->dev_private; 96 struct drm_nouveau_private *dev_priv = dev->dev_private;
97 struct nouveau_crypt_engine *pcrypt = &dev_priv->engine.crypt; 97 struct nouveau_gpuobj *obj = NULL;
98 98 int ret;
99 if (!pcrypt->registered) {
100 NVOBJ_CLASS(dev, 0x74c1, CRYPT);
101 pcrypt->registered = true;
102 }
103 99
104 nv_mask(dev, 0x000200, 0x00004000, 0x00000000); 100 ret = nouveau_gpuobj_new(dev, chan, 16, 16, NVOBJ_FLAG_ZERO_FREE, &obj);
105 nv_mask(dev, 0x000200, 0x00004000, 0x00004000); 101 if (ret)
102 return ret;
103 obj->engine = 5;
104 obj->class = class;
106 105
107 nouveau_irq_register(dev, 14, nv84_crypt_isr); 106 nv_wo32(obj, 0x00, class);
108 nv_wr32(dev, 0x102130, 0xffffffff); 107 dev_priv->engine.instmem.flush(dev);
109 nv_wr32(dev, 0x102140, 0xffffffbf);
110 108
111 nv_wr32(dev, 0x10200c, 0x00000010); 109 ret = nouveau_ramht_insert(chan, handle, obj);
112 return 0; 110 nouveau_gpuobj_ref(NULL, &obj);
111 return ret;
113} 112}
114 113
115void 114static void
116nv84_crypt_fini(struct drm_device *dev) 115nv84_crypt_tlb_flush(struct drm_device *dev, int engine)
117{ 116{
118 nv_wr32(dev, 0x102140, 0x00000000); 117 nv50_vm_flush_engine(dev, 0x0a);
119 nouveau_irq_unregister(dev, 14);
120} 118}
121 119
122static void 120static void
@@ -138,3 +136,58 @@ nv84_crypt_isr(struct drm_device *dev)
138 136
139 nv50_fb_vm_trap(dev, show); 137 nv50_fb_vm_trap(dev, show);
140} 138}
139
140static int
141nv84_crypt_fini(struct drm_device *dev, int engine)
142{
143 nv_wr32(dev, 0x102140, 0x00000000);
144 return 0;
145}
146
147static int
148nv84_crypt_init(struct drm_device *dev, int engine)
149{
150 nv_mask(dev, 0x000200, 0x00004000, 0x00000000);
151 nv_mask(dev, 0x000200, 0x00004000, 0x00004000);
152
153 nv_wr32(dev, 0x102130, 0xffffffff);
154 nv_wr32(dev, 0x102140, 0xffffffbf);
155
156 nv_wr32(dev, 0x10200c, 0x00000010);
157 return 0;
158}
159
160static void
161nv84_crypt_destroy(struct drm_device *dev, int engine)
162{
163 struct nv84_crypt_engine *pcrypt = nv_engine(dev, engine);
164
165 NVOBJ_ENGINE_DEL(dev, CRYPT);
166
167 nouveau_irq_unregister(dev, 14);
168 kfree(pcrypt);
169}
170
171int
172nv84_crypt_create(struct drm_device *dev)
173{
174 struct nv84_crypt_engine *pcrypt;
175
176 pcrypt = kzalloc(sizeof(*pcrypt), GFP_KERNEL);
177 if (!pcrypt)
178 return -ENOMEM;
179
180 pcrypt->base.destroy = nv84_crypt_destroy;
181 pcrypt->base.init = nv84_crypt_init;
182 pcrypt->base.fini = nv84_crypt_fini;
183 pcrypt->base.context_new = nv84_crypt_context_new;
184 pcrypt->base.context_del = nv84_crypt_context_del;
185 pcrypt->base.object_new = nv84_crypt_object_new;
186 pcrypt->base.tlb_flush = nv84_crypt_tlb_flush;
187
188 nouveau_irq_register(dev, 14, nv84_crypt_isr);
189
190 NVOBJ_ENGINE_ADD(dev, CRYPT, &pcrypt->base);
191 NVOBJ_CLASS (dev, 0x74c1, CRYPT);
192 return 0;
193}
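
nv84_crypt_object_new follows the same recipe the graph and mpeg engines adopt in this series: allocate a small instance object, write the class id into its first word, flush instmem, then hash the object into the channel's RAMHT under the user's handle and drop the local reference. A schematic version (the struct and insert callback are stand-ins, not driver API):

typedef unsigned int u32;
typedef unsigned short u16;

struct gpuobj {
	u32 word[4];	/* 16-byte instance: hardware reads class from word 0 */
	int engine;
	u16 class_id;
};

static int object_new(struct gpuobj *obj, int engine, u16 class_id,
		      u32 handle,
		      int (*ramht_insert)(u32 handle, struct gpuobj *obj))
{
	obj->engine   = engine;		/* 5 for crypt in the code above */
	obj->class_id = class_id;
	obj->word[0]  = class_id;
	obj->word[1]  = obj->word[2] = obj->word[3] = 0;

	return ramht_insert(handle, obj);
}
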
diff --git a/drivers/gpu/drm/nouveau/nva3_copy.c b/drivers/gpu/drm/nouveau/nva3_copy.c
new file mode 100644
index 000000000000..b86820a61220
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nva3_copy.c
@@ -0,0 +1,226 @@
1/*
2 * Copyright 2011 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <linux/firmware.h>
26#include "drmP.h"
27#include "nouveau_drv.h"
28#include "nouveau_util.h"
29#include "nouveau_vm.h"
30#include "nouveau_ramht.h"
31#include "nva3_copy.fuc.h"
32
33struct nva3_copy_engine {
34 struct nouveau_exec_engine base;
35};
36
37static int
38nva3_copy_context_new(struct nouveau_channel *chan, int engine)
39{
40 struct drm_device *dev = chan->dev;
41 struct drm_nouveau_private *dev_priv = dev->dev_private;
42 struct nouveau_gpuobj *ramin = chan->ramin;
43 struct nouveau_gpuobj *ctx = NULL;
44 int ret;
45
46 NV_DEBUG(dev, "ch%d\n", chan->id);
47
48 ret = nouveau_gpuobj_new(dev, chan, 256, 0, NVOBJ_FLAG_ZERO_ALLOC |
49 NVOBJ_FLAG_ZERO_FREE, &ctx);
50 if (ret)
51 return ret;
52
53 nv_wo32(ramin, 0xc0, 0x00190000);
54 nv_wo32(ramin, 0xc4, ctx->vinst + ctx->size - 1);
55 nv_wo32(ramin, 0xc8, ctx->vinst);
56 nv_wo32(ramin, 0xcc, 0x00000000);
57 nv_wo32(ramin, 0xd0, 0x00000000);
58 nv_wo32(ramin, 0xd4, 0x00000000);
59 dev_priv->engine.instmem.flush(dev);
60
61 atomic_inc(&chan->vm->engref[engine]);
62 chan->engctx[engine] = ctx;
63 return 0;
64}
65
66static int
67nva3_copy_object_new(struct nouveau_channel *chan, int engine,
68 u32 handle, u16 class)
69{
70 struct nouveau_gpuobj *ctx = chan->engctx[engine];
71
 72 /* the fuc engine doesn't need an object, but our RAMHT code does */
73 ctx->engine = 3;
74 ctx->class = class;
75 return nouveau_ramht_insert(chan, handle, ctx);
76}
77
78static void
79nva3_copy_context_del(struct nouveau_channel *chan, int engine)
80{
81 struct nouveau_gpuobj *ctx = chan->engctx[engine];
82 struct drm_device *dev = chan->dev;
83 u32 inst;
84
85 inst = (chan->ramin->vinst >> 12);
86 inst |= 0x40000000;
87
88 /* disable fifo access */
89 nv_wr32(dev, 0x104048, 0x00000000);
90 /* mark channel as unloaded if it's currently active */
91 if (nv_rd32(dev, 0x104050) == inst)
92 nv_mask(dev, 0x104050, 0x40000000, 0x00000000);
93 /* mark next channel as invalid if it's about to be loaded */
94 if (nv_rd32(dev, 0x104054) == inst)
95 nv_mask(dev, 0x104054, 0x40000000, 0x00000000);
96 /* restore fifo access */
97 nv_wr32(dev, 0x104048, 0x00000003);
98
99 for (inst = 0xc0; inst <= 0xd4; inst += 4)
100 nv_wo32(chan->ramin, inst, 0x00000000);
101
102 nouveau_gpuobj_ref(NULL, &ctx);
103
104 atomic_dec(&chan->vm->engref[engine]);
 105 chan->engctx[engine] = ctx; /* ctx is NULL here after the final unref */
106}
107
108static void
109nva3_copy_tlb_flush(struct drm_device *dev, int engine)
110{
111 nv50_vm_flush_engine(dev, 0x0d);
112}
113
114static int
115nva3_copy_init(struct drm_device *dev, int engine)
116{
117 int i;
118
119 nv_mask(dev, 0x000200, 0x00002000, 0x00000000);
120 nv_mask(dev, 0x000200, 0x00002000, 0x00002000);
121 nv_wr32(dev, 0x104014, 0xffffffff); /* disable all interrupts */
122
123 /* upload ucode */
124 nv_wr32(dev, 0x1041c0, 0x01000000);
125 for (i = 0; i < sizeof(nva3_pcopy_data) / 4; i++)
126 nv_wr32(dev, 0x1041c4, nva3_pcopy_data[i]);
127
128 nv_wr32(dev, 0x104180, 0x01000000);
129 for (i = 0; i < sizeof(nva3_pcopy_code) / 4; i++) {
130 if ((i & 0x3f) == 0)
131 nv_wr32(dev, 0x104188, i >> 6);
132 nv_wr32(dev, 0x104184, nva3_pcopy_code[i]);
133 }
134
135 /* start it running */
136 nv_wr32(dev, 0x10410c, 0x00000000);
137 nv_wr32(dev, 0x104104, 0x00000000); /* ENTRY */
138 nv_wr32(dev, 0x104100, 0x00000002); /* TRIGGER */
139 return 0;
140}
141
142static int
143nva3_copy_fini(struct drm_device *dev, int engine)
144{
145 nv_mask(dev, 0x104048, 0x00000003, 0x00000000);
146
147 /* trigger fuc context unload */
148 nv_wait(dev, 0x104008, 0x0000000c, 0x00000000);
149 nv_mask(dev, 0x104054, 0x40000000, 0x00000000);
150 nv_wr32(dev, 0x104000, 0x00000008);
151 nv_wait(dev, 0x104008, 0x00000008, 0x00000000);
152
153 nv_wr32(dev, 0x104014, 0xffffffff);
154 return 0;
155}
156
157static struct nouveau_enum nva3_copy_isr_error_name[] = {
158 { 0x0001, "ILLEGAL_MTHD" },
159 { 0x0002, "INVALID_ENUM" },
160 { 0x0003, "INVALID_BITFIELD" },
161 {}
162};
163
164static void
165nva3_copy_isr(struct drm_device *dev)
166{
167 u32 dispatch = nv_rd32(dev, 0x10401c);
168 u32 stat = nv_rd32(dev, 0x104008) & dispatch & ~(dispatch >> 16);
169 u32 inst = nv_rd32(dev, 0x104050) & 0x3fffffff;
170 u32 ssta = nv_rd32(dev, 0x104040) & 0x0000ffff;
171 u32 addr = nv_rd32(dev, 0x104040) >> 16;
172 u32 mthd = (addr & 0x07ff) << 2;
173 u32 subc = (addr & 0x3800) >> 11;
174 u32 data = nv_rd32(dev, 0x104044);
175 int chid = nv50_graph_isr_chid(dev, inst);
176
177 if (stat & 0x00000040) {
178 NV_INFO(dev, "PCOPY: DISPATCH_ERROR [");
179 nouveau_enum_print(nva3_copy_isr_error_name, ssta);
180 printk("] ch %d [0x%08x] subc %d mthd 0x%04x data 0x%08x\n",
181 chid, inst, subc, mthd, data);
182 nv_wr32(dev, 0x104004, 0x00000040);
183 stat &= ~0x00000040;
184 }
185
186 if (stat) {
187 NV_INFO(dev, "PCOPY: unhandled intr 0x%08x\n", stat);
188 nv_wr32(dev, 0x104004, stat);
189 }
190 nv50_fb_vm_trap(dev, 1);
191}
192
193static void
194nva3_copy_destroy(struct drm_device *dev, int engine)
195{
196 struct nva3_copy_engine *pcopy = nv_engine(dev, engine);
197
198 nouveau_irq_unregister(dev, 22);
199
200 NVOBJ_ENGINE_DEL(dev, COPY0);
201 kfree(pcopy);
202}
203
204int
205nva3_copy_create(struct drm_device *dev)
206{
207 struct nva3_copy_engine *pcopy;
208
209 pcopy = kzalloc(sizeof(*pcopy), GFP_KERNEL);
210 if (!pcopy)
211 return -ENOMEM;
212
213 pcopy->base.destroy = nva3_copy_destroy;
214 pcopy->base.init = nva3_copy_init;
215 pcopy->base.fini = nva3_copy_fini;
216 pcopy->base.context_new = nva3_copy_context_new;
217 pcopy->base.context_del = nva3_copy_context_del;
218 pcopy->base.object_new = nva3_copy_object_new;
219 pcopy->base.tlb_flush = nva3_copy_tlb_flush;
220
221 nouveau_irq_register(dev, 22, nva3_copy_isr);
222
223 NVOBJ_ENGINE_ADD(dev, COPY0, &pcopy->base);
224 NVOBJ_CLASS(dev, 0x85b5, COPY0);
225 return 0;
226}
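
nva3_copy_init streams the fuc microcode into the engine through index/data register pairs: the data-segment pointer auto-increments, while the code port additionally wants a fresh 64-word (256-byte) page index written to 0x104188 every 0x40 words. A sketch with the MMIO accessor stubbed out:

typedef unsigned int u32;

static void reg_wr(u32 reg, u32 val)
{
	(void)reg; (void)val;	/* stub standing in for an MMIO write */
}

static void upload_ucode(const u32 *data, int data_words,
			 const u32 *code, int code_words)
{
	int i;

	reg_wr(0x1041c0, 0x01000000);		/* reset data upload pointer */
	for (i = 0; i < data_words; i++)
		reg_wr(0x1041c4, data[i]);	/* auto-incrementing port */

	reg_wr(0x104180, 0x01000000);		/* reset code upload pointer */
	for (i = 0; i < code_words; i++) {
		if ((i & 0x3f) == 0)
			reg_wr(0x104188, i >> 6);	/* next 256-byte page */
		reg_wr(0x104184, code[i]);
	}
}
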
diff --git a/drivers/gpu/drm/nouveau/nva3_copy.fuc b/drivers/gpu/drm/nouveau/nva3_copy.fuc
new file mode 100644
index 000000000000..eaf35f8321ee
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nva3_copy.fuc
@@ -0,0 +1,870 @@
1/* fuc microcode for copy engine on nva3- chipsets
2 *
3 * Copyright 2011 Red Hat Inc.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be included in
13 * all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21 * OTHER DEALINGS IN THE SOFTWARE.
22 *
23 * Authors: Ben Skeggs
24 */
25
26/* To build for nva3:nvc0
27 * m4 -DNVA3 nva3_copy.fuc | envyas -a -w -m fuc -V nva3 -o nva3_copy.fuc.h
28 *
29 * To build for nvc0-
30 * m4 -DNVC0 nva3_copy.fuc | envyas -a -w -m fuc -V nva3 -o nvc0_copy.fuc.h
31 */
32
33ifdef(`NVA3',
34.section nva3_pcopy_data,
35.section nvc0_pcopy_data
36)
37
38ctx_object: .b32 0
39ifdef(`NVA3',
40ctx_dma:
41ctx_dma_query: .b32 0
42ctx_dma_src: .b32 0
43ctx_dma_dst: .b32 0
44,)
45.equ ctx_dma_count 3
46ctx_query_address_high: .b32 0
47ctx_query_address_low: .b32 0
48ctx_query_counter: .b32 0
49ctx_src_address_high: .b32 0
50ctx_src_address_low: .b32 0
51ctx_src_pitch: .b32 0
52ctx_src_tile_mode: .b32 0
53ctx_src_xsize: .b32 0
54ctx_src_ysize: .b32 0
55ctx_src_zsize: .b32 0
56ctx_src_zoff: .b32 0
57ctx_src_xoff: .b32 0
58ctx_src_yoff: .b32 0
59ctx_src_cpp: .b32 0
60ctx_dst_address_high: .b32 0
61ctx_dst_address_low: .b32 0
62ctx_dst_pitch: .b32 0
63ctx_dst_tile_mode: .b32 0
64ctx_dst_xsize: .b32 0
65ctx_dst_ysize: .b32 0
66ctx_dst_zsize: .b32 0
67ctx_dst_zoff: .b32 0
68ctx_dst_xoff: .b32 0
69ctx_dst_yoff: .b32 0
70ctx_dst_cpp: .b32 0
71ctx_format: .b32 0
72ctx_swz_const0: .b32 0
73ctx_swz_const1: .b32 0
74ctx_xcnt: .b32 0
75ctx_ycnt: .b32 0
76.align 256
77
78dispatch_table:
79// mthd 0x0000, NAME
80.b16 0x000 1
81.b32 ctx_object ~0xffffffff
82// mthd 0x0100, NOP
83.b16 0x040 1
84.b32 0x00010000 + cmd_nop ~0xffffffff
85// mthd 0x0140, PM_TRIGGER
86.b16 0x050 1
87.b32 0x00010000 + cmd_pm_trigger ~0xffffffff
88ifdef(`NVA3', `
89// mthd 0x0180-0x018c, DMA_
90.b16 0x060 ctx_dma_count
91dispatch_dma:
92.b32 0x00010000 + cmd_dma ~0xffffffff
93.b32 0x00010000 + cmd_dma ~0xffffffff
94.b32 0x00010000 + cmd_dma ~0xffffffff
95',)
96// mthd 0x0200-0x0218, SRC_TILE
97.b16 0x80 7
98.b32 ctx_src_tile_mode ~0x00000fff
99.b32 ctx_src_xsize ~0x0007ffff
100.b32 ctx_src_ysize ~0x00001fff
101.b32 ctx_src_zsize ~0x000007ff
102.b32 ctx_src_zoff ~0x00000fff
103.b32 ctx_src_xoff ~0x0007ffff
104.b32 ctx_src_yoff ~0x00001fff
105// mthd 0x0220-0x0238, DST_TILE
106.b16 0x88 7
107.b32 ctx_dst_tile_mode ~0x00000fff
108.b32 ctx_dst_xsize ~0x0007ffff
109.b32 ctx_dst_ysize ~0x00001fff
110.b32 ctx_dst_zsize ~0x000007ff
111.b32 ctx_dst_zoff ~0x00000fff
112.b32 ctx_dst_xoff ~0x0007ffff
113.b32 ctx_dst_yoff ~0x00001fff
114// mthd 0x0300-0x0304, EXEC, WRCACHE_FLUSH
115.b16 0xc0 2
116.b32 0x00010000 + cmd_exec ~0xffffffff
117.b32 0x00010000 + cmd_wrcache_flush ~0xffffffff
118// mthd 0x030c-0x0340, addresses, pitches, xcnt/ycnt, format, swizzle constants, query
119.b16 0xc3 14
120.b32 ctx_src_address_high ~0x000000ff
121.b32 ctx_src_address_low ~0xfffffff0
122.b32 ctx_dst_address_high ~0x000000ff
123.b32 ctx_dst_address_low ~0xfffffff0
124.b32 ctx_src_pitch ~0x0007ffff
125.b32 ctx_dst_pitch ~0x0007ffff
126.b32 ctx_xcnt ~0x0000ffff
127.b32 ctx_ycnt ~0x00001fff
128.b32 ctx_format ~0x0333ffff
129.b32 ctx_swz_const0 ~0xffffffff
130.b32 ctx_swz_const1 ~0xffffffff
131.b32 ctx_query_address_high ~0x000000ff
132.b32 ctx_query_address_low ~0xffffffff
133.b32 ctx_query_counter ~0xffffffff
134.b16 0x800 0
135
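The layout above is easier to see in C: each range opens with a 16-bit first-method index and a 16-bit method count, then carries one 8-byte entry per method. A hedged sketch, using kernel u16/u32 typedefs and field names of my own choosing:

	struct dispatch_entry {
		u16 target;	/* data offset of a state word, or code
				 * offset of a handler routine */
		u16 exec;	/* 0x0001 = call target as a handler,
				 * 0x0000 = store method data at target */
		u32 reserved;	/* bits that must be clear in the data,
				 * e.g. ~0x0007ffff for a 19-bit field */
	};

The closing .b16 0x800 0 is an empty range just past the largest possible method index (method addresses are masked with 0x7ff), so the table walk in dispatch_loop below always terminates.
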
136ifdef(`NVA3',
137.section nva3_pcopy_code,
138.section nvc0_pcopy_code
139)
140
141main:
142 clear b32 $r0
143 mov $sp $r0
144
145 // setup i0 handler and route fifo and ctxswitch to it
146 mov $r1 ih
147 mov $iv0 $r1
148 mov $r1 0x400
149 movw $r2 0xfff3
150 sethi $r2 0
151 iowr I[$r2 + 0x300] $r2
152
153 // enable interrupts
154 or $r2 0xc
155 iowr I[$r1] $r2
156 bset $flags ie0
157
158 // enable fifo access and context switching
159 mov $r1 0x1200
160 mov $r2 3
161 iowr I[$r1] $r2
162
163 // sleep forever, waking for interrupts
164 bset $flags $p0
165 spin:
166 sleep $p0
167 bra spin
168
169// i0 handler
170ih:
171 iord $r1 I[$r0 + 0x200]
172
173 and $r2 $r1 0x00000008
174 bra e ih_no_chsw
175 call chsw
176 ih_no_chsw:
177 and $r2 $r1 0x00000004
178 bra e ih_no_cmd
179 call dispatch
180
181 ih_no_cmd:
182 and $r1 $r1 0x0000000c
183 iowr I[$r0 + 0x100] $r1
184 iret
185
186// $p1 direction (0 = unload, 1 = load)
187// $r3 channel
188swctx:
189 mov $r4 0x7700
190 mov $xtargets $r4
191ifdef(`NVA3', `
192 // target 7 hardcoded to ctx dma object
193 mov $xdbase $r0
194', ` // NVC0
195 // read SCRATCH3 to decide if we are PCOPY0 or PCOPY1
196 mov $r4 0x2100
197 iord $r4 I[$r4 + 0]
198 and $r4 1
199 shl b32 $r4 4
200 add b32 $r4 0x30
201
202 // channel is in vram
203 mov $r15 0x61c
204 shl b32 $r15 6
205 mov $r5 0x114
206 iowrs I[$r15] $r5
207
208 // read 16-byte PCOPYn info, containing context pointer, from channel
209 shl b32 $r5 $r3 4
210 add b32 $r5 2
211 mov $xdbase $r5
212 mov $r5 $sp
213 // get a chunk of stack space, aligned to 256 byte boundary
214 sub b32 $r5 0x100
215 mov $r6 0xff
216 not b32 $r6
217 and $r5 $r6
218 sethi $r5 0x00020000
219 xdld $r4 $r5
220 xdwait
221 sethi $r5 0
222
223 // set context pointer, from within channel VM
224 mov $r14 0
225 iowrs I[$r15] $r14
226 ld b32 $r4 D[$r5 + 0]
227 shr b32 $r4 8
228 ld b32 $r6 D[$r5 + 4]
229 shl b32 $r6 24
230 or $r4 $r6
231 mov $xdbase $r4
232')
233 // 256-byte context, at start of data segment
234 mov b32 $r4 $r0
235 sethi $r4 0x60000
236
237 // swap!
238 bra $p1 swctx_load
239 xdst $r0 $r4
240 bra swctx_done
241 swctx_load:
242 xdld $r0 $r4
243 swctx_done:
244 xdwait
245 ret
246
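On the NVC0 branch above, the two words fetched from the channel's PCOPYn slot are folded into the context pointer; in C terms (lo/hi being the two 32-bit words, names assumed):

	/* $xdbase = 40-bit instance address >> 8; the address is
	 * 256-byte aligned, so no low bits are lost */
	u32 xdbase = (lo >> 8) | (hi << 24);
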
247chsw:
248 // read current channel
249 mov $r2 0x1400
250 iord $r3 I[$r2]
251
252 // if it's active, unload it and return
253 xbit $r15 $r3 0x1e
254 bra e chsw_no_unload
255 bclr $flags $p1
256 call swctx
257 bclr $r3 0x1e
258 iowr I[$r2] $r3
259 mov $r4 1
260 iowr I[$r2 + 0x200] $r4
261 ret
262
263 // read next channel
264 chsw_no_unload:
265 iord $r3 I[$r2 + 0x100]
266
267 // is there a channel waiting to be loaded?
268 xbit $r13 $r3 0x1e
269 bra e chsw_finish_load
270 bset $flags $p1
271 call swctx
272ifdef(`NVA3',
273 // load dma objects back into TARGET regs
274 mov $r5 ctx_dma
275 mov $r6 ctx_dma_count
276 chsw_load_ctx_dma:
277 ld b32 $r7 D[$r5 + $r6 * 4]
278 add b32 $r8 $r6 0x180
279 shl b32 $r8 8
280 iowr I[$r8] $r7
281 sub b32 $r6 1
282 bra nc chsw_load_ctx_dma
283,)
284
285 chsw_finish_load:
286 mov $r3 2
287 iowr I[$r2 + 0x200] $r3
288 ret
289
290dispatch:
291 // read incoming fifo command
292 mov $r3 0x1900
293 iord $r2 I[$r3 + 0x100]
294 iord $r3 I[$r3 + 0x000]
295 and $r4 $r2 0x7ff
296 // $r2 will be used to store exception data
297 shl b32 $r2 0x10
298
299 // lookup method in the dispatch table, ILLEGAL_MTHD if not found
300 mov $r5 dispatch_table
301 clear b32 $r6
302 clear b32 $r7
303 dispatch_loop:
304 ld b16 $r6 D[$r5 + 0]
305 ld b16 $r7 D[$r5 + 2]
306 add b32 $r5 4
307 cmpu b32 $r4 $r6
308 bra c dispatch_illegal_mthd
309 add b32 $r7 $r6
310 cmpu b32 $r4 $r7
311 bra c dispatch_valid_mthd
312 sub b32 $r7 $r6
313 shl b32 $r7 3
314 add b32 $r5 $r7
315 bra dispatch_loop
316
317 // ensure no bits set in reserved fields, INVALID_BITFIELD
318 dispatch_valid_mthd:
319 sub b32 $r4 $r6
320 shl b32 $r4 3
321 add b32 $r4 $r5
322 ld b32 $r5 D[$r4 + 4]
323 and $r5 $r3
324 cmpu b32 $r5 0
325 bra ne dispatch_invalid_bitfield
326
327 // depending on dispatch flags: execute method, or save data as state
328 ld b16 $r5 D[$r4 + 0]
329 ld b16 $r6 D[$r4 + 2]
330 cmpu b32 $r6 0
331 bra ne dispatch_cmd
332 st b32 D[$r5] $r3
333 bra dispatch_done
334 dispatch_cmd:
335 bclr $flags $p1
336 call $r5
337 bra $p1 dispatch_error
338 bra dispatch_done
339
340 dispatch_invalid_bitfield:
341 or $r2 2
342 dispatch_illegal_mthd:
343 or $r2 1
344
345 // store exception data in SCRATCH0/SCRATCH1, signal hostirq
346 dispatch_error:
347 mov $r4 0x1000
348 iowr I[$r4 + 0x000] $r2
349 iowr I[$r4 + 0x100] $r3
350 mov $r2 0x40
351 iowr I[$r0] $r2
352 hostirq_wait:
353 iord $r2 I[$r0 + 0x200]
354 and $r2 0x40
355 cmpu b32 $r2 0
356 bra ne hostirq_wait
357
358 dispatch_done:
359 mov $r2 0x1d00
360 mov $r3 1
361 iowr I[$r2] $r3
362 ret
363
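dispatch_loop above is a linear scan of that table; an equivalent C sketch, using the entry layout assumed earlier, with mthd being the method address divided by 4 after the 0x7ff mask:

	static const struct dispatch_entry *
	dispatch_lookup(const u16 *tbl, u32 mthd)
	{
		for (;;) {
			u16 base = tbl[0], count = tbl[1];
			const struct dispatch_entry *ent =
				(const struct dispatch_entry *)(tbl + 2);

			if (mthd < base)
				return NULL;		/* ILLEGAL_MTHD */
			if (mthd - base < count)
				return &ent[mthd - base];
			tbl = (const u16 *)(ent + count);
		}
	}

On a hit, data with any bit of ent->reserved set raises INVALID_BITFIELD; otherwise ent->exec selects between storing the data word at ent->target and calling the handler there.
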
364// No-operation
365//
366// Inputs:
367// $r1: irqh state
368// $r2: hostirq state
369// $r3: data
370// $r4: dispatch table entry
371// Outputs:
372// $r1: irqh state
373// $p1: set on error
374// $r2: hostirq state
375// $r3: data
376cmd_nop:
377 ret
378
379// PM_TRIGGER
380//
381// Inputs:
382// $r1: irqh state
383// $r2: hostirq state
384// $r3: data
385// $r4: dispatch table entry
386// Outputs:
387// $r1: irqh state
388// $p1: set on error
389// $r2: hostirq state
390// $r3: data
391cmd_pm_trigger:
392 mov $r2 0x2200
393 clear b32 $r3
394 sethi $r3 0x20000
395 iowr I[$r2] $r3
396 ret
397
398ifdef(`NVA3',
399// SET_DMA_* method handler
400//
401// Inputs:
402// $r1: irqh state
403// $r2: hostirq state
404// $r3: data
405// $r4: dispatch table entry
406// Outputs:
407// $r1: irqh state
408// $p1: set on error
409// $r2: hostirq state
410// $r3: data
411cmd_dma:
412 sub b32 $r4 dispatch_dma
413 shr b32 $r4 1
414 bset $r3 0x1e
415 st b32 D[$r4 + ctx_dma] $r3
416 add b32 $r4 0x600
417 shl b32 $r4 6
418 iowr I[$r4] $r3
419 ret
420,)
421
422// Calculates the hw swizzle mask and adjusts the surface's xcnt to match
423//
424cmd_exec_set_format:
425 // zero out a chunk of the stack to store the swizzle into
426 add $sp -0x10
427 st b32 D[$sp + 0x00] $r0
428 st b32 D[$sp + 0x04] $r0
429 st b32 D[$sp + 0x08] $r0
430 st b32 D[$sp + 0x0c] $r0
431
432 // extract cpp, src_ncomp and dst_ncomp from FORMAT
433 ld b32 $r4 D[$r0 + ctx_format]
434 extr $r5 $r4 16:17
435 add b32 $r5 1
436 extr $r6 $r4 20:21
437 add b32 $r6 1
438 extr $r7 $r4 24:25
439 add b32 $r7 1
440
441 // convert FORMAT swizzle mask to hw swizzle mask
442 bclr $flags $p2
443 clear b32 $r8
444 clear b32 $r9
445 ncomp_loop:
446 and $r10 $r4 0xf
447 shr b32 $r4 4
448 clear b32 $r11
449 bpc_loop:
450 cmpu b8 $r10 4
451 bra nc cmp_c0
452 mulu $r12 $r10 $r5
453 add b32 $r12 $r11
454 bset $flags $p2
455 bra bpc_next
456 cmp_c0:
457 bra ne cmp_c1
458 mov $r12 0x10
459 add b32 $r12 $r11
460 bra bpc_next
461 cmp_c1:
462 cmpu b8 $r10 6
463 bra nc cmp_zero
464 mov $r12 0x14
465 add b32 $r12 $r11
466 bra bpc_next
467 cmp_zero:
468 mov $r12 0x80
469 bpc_next:
470 st b8 D[$sp + $r8] $r12
471 add b32 $r8 1
472 add b32 $r11 1
473 cmpu b32 $r11 $r5
474 bra c bpc_loop
475 add b32 $r9 1
476 cmpu b32 $r9 $r7
477 bra c ncomp_loop
478
479 // SRC_XCNT = (xcnt * src_cpp), or 0 if no src ref in swz (hw will hang)
480 mulu $r6 $r5
481 st b32 D[$r0 + ctx_src_cpp] $r6
482 ld b32 $r8 D[$r0 + ctx_xcnt]
483 mulu $r6 $r8
484 bra $p2 dst_xcnt
485 clear b32 $r6
486
487 dst_xcnt:
488 mulu $r7 $r5
489 st b32 D[$r0 + ctx_dst_cpp] $r7
490 mulu $r7 $r8
491
492 mov $r5 0x810
493 shl b32 $r5 6
494 iowr I[$r5 + 0x000] $r6
495 iowr I[$r5 + 0x100] $r7
496 add b32 $r5 0x800
497 ld b32 $r6 D[$r0 + ctx_dst_cpp]
498 sub b32 $r6 1
499 shl b32 $r6 8
500 ld b32 $r7 D[$r0 + ctx_src_cpp]
501 sub b32 $r7 1
502 or $r6 $r7
503 iowr I[$r5 + 0x000] $r6
504 add b32 $r5 0x100
505 ld b32 $r6 D[$sp + 0x00]
506 iowr I[$r5 + 0x000] $r6
507 ld b32 $r6 D[$sp + 0x04]
508 iowr I[$r5 + 0x100] $r6
509 ld b32 $r6 D[$sp + 0x08]
510 iowr I[$r5 + 0x200] $r6
511 ld b32 $r6 D[$sp + 0x0c]
512 iowr I[$r5 + 0x300] $r6
513 add b32 $r5 0x400
514 ld b32 $r6 D[$r0 + ctx_swz_const0]
515 iowr I[$r5 + 0x000] $r6
516 ld b32 $r6 D[$r0 + ctx_swz_const1]
517 iowr I[$r5 + 0x100] $r6
518 add $sp 0x10
519 ret
520
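A C rendering of the nibble-to-byte swizzle expansion above may help; the field positions come straight from the extr instructions, everything else (names, types) is assumed:

	u32 fmt  = ctx_format;
	int cpp  = ((fmt >> 16) & 3) + 1;	/* bytes per component */
	int dstn = ((fmt >> 24) & 3) + 1;	/* dst component count */
	bool src_used = false;			/* mirrors $p2 */
	u8 swz[16];
	int i = 0;

	for (int comp = 0; comp < dstn; comp++, fmt >>= 4) {
		u32 c = fmt & 0xf;
		for (int b = 0; b < cpp; b++) {
			if (c < 4) {			/* source component */
				swz[i++] = c * cpp + b;
				src_used = true;
			} else if (c == 4) {		/* swz_const0 byte */
				swz[i++] = 0x10 + b;
			} else if (c < 6) {		/* swz_const1 byte */
				swz[i++] = 0x14 + b;
			} else {			/* write zero */
				swz[i++] = 0x80;
			}
		}
	}

The four swizzle words then land in the 0x8xx register block, and SRC_XCNT is forced to zero when src_used stays false, matching the hw-will-hang note above.
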
521// Setup to handle a tiled surface
522//
523// Calculates a number of parameters the hardware requires in order
524// to correctly handle tiling.
525//
526// Offset calculation is performed as follows (Tp/Th/Td from TILE_MODE):
527// nTx = round_up(w * cpp, 1 << Tp) >> Tp
528// nTy = round_up(h, 1 << Th) >> Th
529// Txo = (x * cpp) & ((1 << Tp) - 1)
530// Tx = (x * cpp) >> Tp
531// Tyo = y & ((1 << Th) - 1)
532// Ty = y >> Th
533// Tzo = z & ((1 << Td) - 1)
534// Tz = z >> Td
535//
536// off = (Tzo << Tp << Th) + (Tyo << Tp) + Txo
537// off += ((Tz * nTy * nTx) + (Ty * nTx) + Tx) << Td << Th << Tp;
538//
539// Inputs:
540// $r4: hw command (0x104800)
541// $r5: ctx offset adjustment for src/dst selection
542// $p2: set if dst surface
543//
544cmd_exec_set_surface_tiled:
545 // translate TILE_MODE into Tp, Th, Td shift values
546 ld b32 $r7 D[$r5 + ctx_src_tile_mode]
547 extr $r9 $r7 8:11
548 extr $r8 $r7 4:7
549ifdef(`NVA3',
550 add b32 $r8 2
551,
552 add b32 $r8 3
553)
554 extr $r7 $r7 0:3
555 cmp b32 $r7 0xe
556 bra ne xtile64
557 mov $r7 4
558 bra xtileok
559 xtile64:
560 xbit $r7 $flags $p2
561 add b32 $r7 17
562 bset $r4 $r7
563 mov $r7 6
564 xtileok:
565
566 // Op = (x * cpp) & ((1 << Tp) - 1)
567 // Tx = (x * cpp) >> Tp
568 ld b32 $r10 D[$r5 + ctx_src_xoff]
569 ld b32 $r11 D[$r5 + ctx_src_cpp]
570 mulu $r10 $r11
571 mov $r11 1
572 shl b32 $r11 $r7
573 sub b32 $r11 1
574 and $r12 $r10 $r11
575 shr b32 $r10 $r7
576
577 // Tyo = y & ((1 << Th) - 1)
578 // Ty = y >> Th
579 ld b32 $r13 D[$r5 + ctx_src_yoff]
580 mov $r14 1
581 shl b32 $r14 $r8
582 sub b32 $r14 1
583 and $r11 $r13 $r14
584 shr b32 $r13 $r8
585
586 // YTILE = ((1 << Th) << 12) | ((1 << Th) - Tyo)
587 add b32 $r14 1
588 shl b32 $r15 $r14 12
589 sub b32 $r14 $r11
590 or $r15 $r14
591 xbit $r6 $flags $p2
592 add b32 $r6 0x208
593 shl b32 $r6 8
594 iowr I[$r6 + 0x000] $r15
595
596 // Op += Tyo << Tp
597 shl b32 $r11 $r7
598 add b32 $r12 $r11
599
 600	// nTx = ((w * cpp) + ((1 << Tp) - 1)) >> Tp
601 ld b32 $r15 D[$r5 + ctx_src_xsize]
602 ld b32 $r11 D[$r5 + ctx_src_cpp]
603 mulu $r15 $r11
604 mov $r11 1
605 shl b32 $r11 $r7
606 sub b32 $r11 1
607 add b32 $r15 $r11
608 shr b32 $r15 $r7
609 push $r15
610
611 // nTy = (h + ((1 << Th) - 1)) >> Th
612 ld b32 $r15 D[$r5 + ctx_src_ysize]
613 mov $r11 1
614 shl b32 $r11 $r8
615 sub b32 $r11 1
616 add b32 $r15 $r11
617 shr b32 $r15 $r8
618 push $r15
619
620 // Tys = Tp + Th
621 // CFG_YZ_TILE_SIZE = ((1 << Th) >> 2) << Td
622 add b32 $r7 $r8
623 sub b32 $r8 2
624 mov $r11 1
625 shl b32 $r11 $r8
626 shl b32 $r11 $r9
627
628 // Tzo = z & ((1 << Td) - 1)
629 // Tz = z >> Td
630 // Op += Tzo << Tys
631 // Ts = Tys + Td
632 ld b32 $r8 D[$r5 + ctx_src_zoff]
633 mov $r14 1
634 shl b32 $r14 $r9
635 sub b32 $r14 1
636 and $r15 $r8 $r14
637 shl b32 $r15 $r7
638 add b32 $r12 $r15
639 add b32 $r7 $r9
640 shr b32 $r8 $r9
641
642 // Ot = ((Tz * nTy * nTx) + (Ty * nTx) + Tx) << Ts
643 pop $r15
644 pop $r9
645 mulu $r13 $r9
646 add b32 $r10 $r13
647 mulu $r8 $r9
648 mulu $r8 $r15
649 add b32 $r10 $r8
650 shl b32 $r10 $r7
651
652 // PITCH = (nTx - 1) << Ts
653 sub b32 $r9 1
654 shl b32 $r9 $r7
655 iowr I[$r6 + 0x200] $r9
656
657 // SRC_ADDRESS_LOW = (Ot + Op) & 0xffffffff
658 // CFG_ADDRESS_HIGH |= ((Ot + Op) >> 32) << 16
659 ld b32 $r7 D[$r5 + ctx_src_address_low]
660 ld b32 $r8 D[$r5 + ctx_src_address_high]
661 add b32 $r10 $r12
662 add b32 $r7 $r10
663 adc b32 $r8 0
664 shl b32 $r8 16
665 or $r8 $r11
666 sub b32 $r6 0x600
667 iowr I[$r6 + 0x000] $r7
668 add b32 $r6 0x400
669 iowr I[$r6 + 0x000] $r8
670 ret
671
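To make the offset math above concrete, a worked example with made-up parameters (z = 0, so the Tzo term drops out):

	Tp = 6 (64-byte tiles), Th = 4, Td = 0, cpp = 4, x = 100, y = 37, w = 640
	  x * cpp = 400  ->  Txo = 400 & 63 = 16,  Tx = 400 >> 6 = 6
	  y = 37         ->  Tyo = 37 & 15  = 5,   Ty = 37 >> 4  = 2
	  nTx = round_up(640 * 4, 64) >> 6 = 40
	  Op  = (Tyo << Tp) + Txo = 336
	  Ot  = (Ty * nTx + Tx) << (Tp + Th + Td) = 86 << 10 = 88064
	  offset = Ot + Op = 88400, added to ctx_src_address and split
	  across ADDRESS_LOW / CFG_ADDRESS_HIGH
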
672// Setup to handle a linear surface
673//
674// Nothing to see here: just sets ADDRESS and PITCH, no tiling maths needed
675//
676cmd_exec_set_surface_linear:
677 xbit $r6 $flags $p2
678 add b32 $r6 0x202
679 shl b32 $r6 8
680 ld b32 $r7 D[$r5 + ctx_src_address_low]
681 iowr I[$r6 + 0x000] $r7
682 add b32 $r6 0x400
683 ld b32 $r7 D[$r5 + ctx_src_address_high]
684 shl b32 $r7 16
685 iowr I[$r6 + 0x000] $r7
686 add b32 $r6 0x400
687 ld b32 $r7 D[$r5 + ctx_src_pitch]
688 iowr I[$r6 + 0x000] $r7
689 ret
690
691// wait for regs to be available for use
692cmd_exec_wait:
693 push $r0
694 push $r1
695 mov $r0 0x800
696 shl b32 $r0 6
697 loop:
698 iord $r1 I[$r0]
699 and $r1 1
700 bra ne loop
701 pop $r1
702 pop $r0
703 ret
704
705cmd_exec_query:
706 // if QUERY_SHORT not set, write out { -, 0, TIME_LO, TIME_HI }
707 xbit $r4 $r3 13
708 bra ne query_counter
709 call cmd_exec_wait
710 mov $r4 0x80c
711 shl b32 $r4 6
712 ld b32 $r5 D[$r0 + ctx_query_address_low]
713 add b32 $r5 4
714 iowr I[$r4 + 0x000] $r5
715 iowr I[$r4 + 0x100] $r0
716 mov $r5 0xc
717 iowr I[$r4 + 0x200] $r5
718 add b32 $r4 0x400
719 ld b32 $r5 D[$r0 + ctx_query_address_high]
720 shl b32 $r5 16
721 iowr I[$r4 + 0x000] $r5
722 add b32 $r4 0x500
723 mov $r5 0x00000b00
724 sethi $r5 0x00010000
725 iowr I[$r4 + 0x000] $r5
726 mov $r5 0x00004040
727 shl b32 $r5 1
728 sethi $r5 0x80800000
729 iowr I[$r4 + 0x100] $r5
730 mov $r5 0x00001110
731 sethi $r5 0x13120000
732 iowr I[$r4 + 0x200] $r5
733 mov $r5 0x00001514
734 sethi $r5 0x17160000
735 iowr I[$r4 + 0x300] $r5
736 mov $r5 0x00002601
737 sethi $r5 0x00010000
738 mov $r4 0x800
739 shl b32 $r4 6
740 iowr I[$r4 + 0x000] $r5
741
742 // write COUNTER
743 query_counter:
744 call cmd_exec_wait
745 mov $r4 0x80c
746 shl b32 $r4 6
747 ld b32 $r5 D[$r0 + ctx_query_address_low]
748 iowr I[$r4 + 0x000] $r5
749 iowr I[$r4 + 0x100] $r0
750 mov $r5 0x4
751 iowr I[$r4 + 0x200] $r5
752 add b32 $r4 0x400
753 ld b32 $r5 D[$r0 + ctx_query_address_high]
754 shl b32 $r5 16
755 iowr I[$r4 + 0x000] $r5
756 add b32 $r4 0x500
757 mov $r5 0x00000300
758 iowr I[$r4 + 0x000] $r5
759 mov $r5 0x00001110
760 sethi $r5 0x13120000
761 iowr I[$r4 + 0x100] $r5
762 ld b32 $r5 D[$r0 + ctx_query_counter]
763 add b32 $r4 0x500
764 iowr I[$r4 + 0x000] $r5
765 mov $r5 0x00002601
766 sethi $r5 0x00010000
767 mov $r4 0x800
768 shl b32 $r4 6
769 iowr I[$r4 + 0x000] $r5
770 ret
771
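The memory record the two paths above build, as a C struct (names mine; the '-' slot from the comment is the counter written by query_counter):

	struct pcopy_query {
		u32 counter;	/* ctx_query_counter, always written */
		u32 zero;	/* only when QUERY_SHORT is clear */
		u32 time_lo;	/* engine timestamp, only when
				 * QUERY_SHORT is clear */
		u32 time_hi;
	};
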
772// Execute a copy operation
773//
774// Inputs:
775// $r1: irqh state
776// $r2: hostirq state
777// $r3: data
778// 000002000 QUERY_SHORT
779// 000001000 QUERY
780// 000000100 DST_LINEAR
781// 000000010 SRC_LINEAR
782// 000000001 FORMAT
783// $r4: dispatch table entry
784// Outputs:
785// $r1: irqh state
786// $p1: set on error
787// $r2: hostirq state
788// $r3: data
789cmd_exec:
790 call cmd_exec_wait
791
792 // if format requested, call function to calculate it, otherwise
793 // fill in cpp/xcnt for both surfaces as if (cpp == 1)
794 xbit $r15 $r3 0
795 bra e cmd_exec_no_format
796 call cmd_exec_set_format
797 mov $r4 0x200
798 bra cmd_exec_init_src_surface
799 cmd_exec_no_format:
800 mov $r6 0x810
801 shl b32 $r6 6
802 mov $r7 1
803 st b32 D[$r0 + ctx_src_cpp] $r7
804 st b32 D[$r0 + ctx_dst_cpp] $r7
805 ld b32 $r7 D[$r0 + ctx_xcnt]
806 iowr I[$r6 + 0x000] $r7
807 iowr I[$r6 + 0x100] $r7
808 clear b32 $r4
809
810 cmd_exec_init_src_surface:
811 bclr $flags $p2
812 clear b32 $r5
813 xbit $r15 $r3 4
814 bra e src_tiled
815 call cmd_exec_set_surface_linear
816 bra cmd_exec_init_dst_surface
817 src_tiled:
818 call cmd_exec_set_surface_tiled
819 bset $r4 7
820
821 cmd_exec_init_dst_surface:
822 bset $flags $p2
823 mov $r5 ctx_dst_address_high - ctx_src_address_high
824 xbit $r15 $r3 8
825 bra e dst_tiled
826 call cmd_exec_set_surface_linear
827 bra cmd_exec_kick
828 dst_tiled:
829 call cmd_exec_set_surface_tiled
830 bset $r4 8
831
832 cmd_exec_kick:
833 mov $r5 0x800
834 shl b32 $r5 6
835 ld b32 $r6 D[$r0 + ctx_ycnt]
836 iowr I[$r5 + 0x100] $r6
837 mov $r6 0x0041
838 // SRC_TARGET = 1, DST_TARGET = 2
839 sethi $r6 0x44000000
840 or $r4 $r6
841 iowr I[$r5] $r4
842
843 // if requested, queue up a QUERY write after the copy has completed
844 xbit $r15 $r3 12
845 bra e cmd_exec_done
846 call cmd_exec_query
847
848 cmd_exec_done:
849 ret
850
851// Flush write cache
852//
853// Inputs:
854// $r1: irqh state
855// $r2: hostirq state
856// $r3: data
857// $r4: dispatch table entry
858// Outputs:
859// $r1: irqh state
860// $p1: set on error
861// $r2: hostirq state
862// $r3: data
863cmd_wrcache_flush:
864 mov $r2 0x2200
865 clear b32 $r3
866 sethi $r3 0x10000
867 iowr I[$r2] $r3
868 ret
869
870.align 0x100
diff --git a/drivers/gpu/drm/nouveau/nva3_copy.fuc.h b/drivers/gpu/drm/nouveau/nva3_copy.fuc.h
new file mode 100644
index 000000000000..2731de22ebe9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nva3_copy.fuc.h
@@ -0,0 +1,534 @@
1uint32_t nva3_pcopy_data[] = {
2 0x00000000,
3 0x00000000,
4 0x00000000,
5 0x00000000,
6 0x00000000,
7 0x00000000,
8 0x00000000,
9 0x00000000,
10 0x00000000,
11 0x00000000,
12 0x00000000,
13 0x00000000,
14 0x00000000,
15 0x00000000,
16 0x00000000,
17 0x00000000,
18 0x00000000,
19 0x00000000,
20 0x00000000,
21 0x00000000,
22 0x00000000,
23 0x00000000,
24 0x00000000,
25 0x00000000,
26 0x00000000,
27 0x00000000,
28 0x00000000,
29 0x00000000,
30 0x00000000,
31 0x00000000,
32 0x00000000,
33 0x00000000,
34 0x00000000,
35 0x00000000,
36 0x00000000,
37 0x00000000,
38 0x00000000,
39 0x00000000,
40 0x00000000,
41 0x00000000,
42 0x00000000,
43 0x00000000,
44 0x00000000,
45 0x00000000,
46 0x00000000,
47 0x00000000,
48 0x00000000,
49 0x00000000,
50 0x00000000,
51 0x00000000,
52 0x00000000,
53 0x00000000,
54 0x00000000,
55 0x00000000,
56 0x00000000,
57 0x00000000,
58 0x00000000,
59 0x00000000,
60 0x00000000,
61 0x00000000,
62 0x00000000,
63 0x00000000,
64 0x00000000,
65 0x00000000,
66 0x00010000,
67 0x00000000,
68 0x00000000,
69 0x00010040,
70 0x00010160,
71 0x00000000,
72 0x00010050,
73 0x00010162,
74 0x00000000,
75 0x00030060,
76 0x00010170,
77 0x00000000,
78 0x00010170,
79 0x00000000,
80 0x00010170,
81 0x00000000,
82 0x00070080,
83 0x00000028,
84 0xfffff000,
85 0x0000002c,
86 0xfff80000,
87 0x00000030,
88 0xffffe000,
89 0x00000034,
90 0xfffff800,
91 0x00000038,
92 0xfffff000,
93 0x0000003c,
94 0xfff80000,
95 0x00000040,
96 0xffffe000,
97 0x00070088,
98 0x00000054,
99 0xfffff000,
100 0x00000058,
101 0xfff80000,
102 0x0000005c,
103 0xffffe000,
104 0x00000060,
105 0xfffff800,
106 0x00000064,
107 0xfffff000,
108 0x00000068,
109 0xfff80000,
110 0x0000006c,
111 0xffffe000,
112 0x000200c0,
113 0x00010492,
114 0x00000000,
115 0x0001051b,
116 0x00000000,
117 0x000e00c3,
118 0x0000001c,
119 0xffffff00,
120 0x00000020,
121 0x0000000f,
122 0x00000048,
123 0xffffff00,
124 0x0000004c,
125 0x0000000f,
126 0x00000024,
127 0xfff80000,
128 0x00000050,
129 0xfff80000,
130 0x00000080,
131 0xffff0000,
132 0x00000084,
133 0xffffe000,
134 0x00000074,
135 0xfccc0000,
136 0x00000078,
137 0x00000000,
138 0x0000007c,
139 0x00000000,
140 0x00000010,
141 0xffffff00,
142 0x00000014,
143 0x00000000,
144 0x00000018,
145 0x00000000,
146 0x00000800,
147};
148
149uint32_t nva3_pcopy_code[] = {
150 0x04fe04bd,
151 0x3517f000,
152 0xf10010fe,
153 0xf1040017,
154 0xf0fff327,
155 0x22d00023,
156 0x0c25f0c0,
157 0xf40012d0,
158 0x17f11031,
159 0x27f01200,
160 0x0012d003,
161 0xf40031f4,
162 0x0ef40028,
163 0x8001cffd,
164 0xf40812c4,
165 0x21f4060b,
166 0x0412c472,
167 0xf4060bf4,
168 0x11c4c321,
169 0x4001d00c,
170 0x47f101f8,
171 0x4bfe7700,
172 0x0007fe00,
173 0xf00204b9,
174 0x01f40643,
175 0x0604fa09,
176 0xfa060ef4,
177 0x03f80504,
178 0x27f100f8,
179 0x23cf1400,
180 0x1e3fc800,
181 0xf4170bf4,
182 0x21f40132,
183 0x1e3af052,
184 0xf00023d0,
185 0x24d00147,
186 0xcf00f880,
187 0x3dc84023,
188 0x220bf41e,
189 0xf40131f4,
190 0x57f05221,
191 0x0367f004,
192 0xa07856bc,
193 0xb6018068,
194 0x87d00884,
195 0x0162b600,
196 0xf0f018f4,
197 0x23d00237,
198 0xf100f880,
199 0xcf190037,
200 0x33cf4032,
201 0xff24e400,
202 0x1024b607,
203 0x010057f1,
204 0x74bd64bd,
205 0x58005658,
206 0x50b60157,
207 0x0446b804,
208 0xbb4d08f4,
209 0x47b80076,
210 0x0f08f404,
211 0xb60276bb,
212 0x57bb0374,
213 0xdf0ef400,
214 0xb60246bb,
215 0x45bb0344,
216 0x01459800,
217 0xb00453fd,
218 0x1bf40054,
219 0x00455820,
220 0xb0014658,
221 0x1bf40064,
222 0x00538009,
223 0xf4300ef4,
224 0x55f90132,
225 0xf40c01f4,
226 0x25f0250e,
227 0x0125f002,
228 0x100047f1,
229 0xd00042d0,
230 0x27f04043,
231 0x0002d040,
232 0xf08002cf,
233 0x24b04024,
234 0xf71bf400,
235 0x1d0027f1,
236 0xd00137f0,
237 0x00f80023,
238 0x27f100f8,
239 0x34bd2200,
240 0xd00233f0,
241 0x00f80023,
242 0x012842b7,
243 0xf00145b6,
244 0x43801e39,
245 0x0040b701,
246 0x0644b606,
247 0xf80043d0,
248 0xf030f400,
249 0xb00001b0,
250 0x01b00101,
251 0x0301b002,
252 0xc71d0498,
253 0x50b63045,
254 0x3446c701,
255 0xc70160b6,
256 0x70b63847,
257 0x0232f401,
258 0x94bd84bd,
259 0xb60f4ac4,
260 0xb4bd0445,
261 0xf404a430,
262 0xa5ff0f18,
263 0x00cbbbc0,
264 0xf40231f4,
265 0x1bf4220e,
266 0x10c7f00c,
267 0xf400cbbb,
268 0xa430160e,
269 0x0c18f406,
270 0xbb14c7f0,
271 0x0ef400cb,
272 0x80c7f107,
273 0x01c83800,
274 0xb60180b6,
275 0xb5b801b0,
276 0xc308f404,
277 0xb80190b6,
278 0x08f40497,
279 0x0065fdb2,
280 0x98110680,
281 0x68fd2008,
282 0x0502f400,
283 0x75fd64bd,
284 0x1c078000,
285 0xf10078fd,
286 0xb6081057,
287 0x56d00654,
288 0x4057d000,
289 0x080050b7,
290 0xb61c0698,
291 0x64b60162,
292 0x11079808,
293 0xfd0172b6,
294 0x56d00567,
295 0x0050b700,
296 0x0060b401,
297 0xb40056d0,
298 0x56d00160,
299 0x0260b440,
300 0xb48056d0,
301 0x56d00360,
302 0x0050b7c0,
303 0x1e069804,
304 0x980056d0,
305 0x56d01f06,
306 0x1030f440,
307 0x579800f8,
308 0x6879c70a,
309 0xb66478c7,
310 0x77c70280,
311 0x0e76b060,
312 0xf0091bf4,
313 0x0ef40477,
314 0x027cf00f,
315 0xfd1170b6,
316 0x77f00947,
317 0x0f5a9806,
318 0xfd115b98,
319 0xb7f000ab,
320 0x04b7bb01,
321 0xff01b2b6,
322 0xa7bbc4ab,
323 0x105d9805,
324 0xbb01e7f0,
325 0xe2b604e8,
326 0xb4deff01,
327 0xb605d8bb,
328 0xef9401e0,
329 0x02ebbb0c,
330 0xf005fefd,
331 0x60b7026c,
332 0x64b60208,
333 0x006fd008,
334 0xbb04b7bb,
335 0x5f9800cb,
336 0x115b980b,
337 0xf000fbfd,
338 0xb7bb01b7,
339 0x01b2b604,
340 0xbb00fbbb,
341 0xf0f905f7,
342 0xf00c5f98,
343 0xb8bb01b7,
344 0x01b2b604,
345 0xbb00fbbb,
346 0xf0f905f8,
347 0xb60078bb,
348 0xb7f00282,
349 0x04b8bb01,
350 0x9804b9bb,
351 0xe7f00e58,
352 0x04e9bb01,
353 0xff01e2b6,
354 0xf7bbf48e,
355 0x00cfbb04,
356 0xbb0079bb,
357 0xf0fc0589,
358 0xd9fd90fc,
359 0x00adbb00,
360 0xfd0089fd,
361 0xa8bb008f,
362 0x04a7bb00,
363 0xbb0192b6,
364 0x69d00497,
365 0x08579880,
366 0xbb075898,
367 0x7abb00ac,
368 0x0081b600,
369 0xfd1084b6,
370 0x62b7058b,
371 0x67d00600,
372 0x0060b700,
373 0x0068d004,
374 0x6cf000f8,
375 0x0260b702,
376 0x0864b602,
377 0xd0085798,
378 0x60b70067,
379 0x57980400,
380 0x1074b607,
381 0xb70067d0,
382 0x98040060,
383 0x67d00957,
384 0xf900f800,
385 0xf110f900,
386 0xb6080007,
387 0x01cf0604,
388 0x0114f000,
389 0xfcfa1bf4,
390 0xf800fc10,
391 0x0d34c800,
392 0xf5701bf4,
393 0xf103ab21,
394 0xb6080c47,
395 0x05980644,
396 0x0450b605,
397 0xd00045d0,
398 0x57f04040,
399 0x8045d00c,
400 0x040040b7,
401 0xb6040598,
402 0x45d01054,
403 0x0040b700,
404 0x0057f105,
405 0x0153f00b,
406 0xf10045d0,
407 0xb6404057,
408 0x53f10154,
409 0x45d08080,
410 0x1057f140,
411 0x1253f111,
412 0x8045d013,
413 0x151457f1,
414 0x171653f1,
415 0xf1c045d0,
416 0xf0260157,
417 0x47f10153,
418 0x44b60800,
419 0x0045d006,
420 0x03ab21f5,
421 0x080c47f1,
422 0x980644b6,
423 0x45d00505,
424 0x4040d000,
425 0xd00457f0,
426 0x40b78045,
427 0x05980400,
428 0x1054b604,
429 0xb70045d0,
430 0xf1050040,
431 0xd0030057,
432 0x57f10045,
433 0x53f11110,
434 0x45d01312,
435 0x06059840,
436 0x050040b7,
437 0xf10045d0,
438 0xf0260157,
439 0x47f10153,
440 0x44b60800,
441 0x0045d006,
442 0x21f500f8,
443 0x3fc803ab,
444 0x0e0bf400,
445 0x018921f5,
446 0x020047f1,
447 0xf11e0ef4,
448 0xb6081067,
449 0x77f00664,
450 0x11078001,
451 0x981c0780,
452 0x67d02007,
453 0x4067d000,
454 0x32f444bd,
455 0xc854bd02,
456 0x0bf4043f,
457 0x8221f50a,
458 0x0a0ef403,
459 0x027621f5,
460 0xf40749f0,
461 0x57f00231,
462 0x083fc82c,
463 0xf50a0bf4,
464 0xf4038221,
465 0x21f50a0e,
466 0x49f00276,
467 0x0057f108,
468 0x0654b608,
469 0xd0210698,
470 0x67f04056,
471 0x0063f141,
472 0x0546fd44,
473 0xc80054d0,
474 0x0bf40c3f,
475 0xc521f507,
476 0xf100f803,
477 0xbd220027,
478 0x0133f034,
479 0xf80023d0,
480 0x00000000,
481 0x00000000,
482 0x00000000,
483 0x00000000,
484 0x00000000,
485 0x00000000,
486 0x00000000,
487 0x00000000,
488 0x00000000,
489 0x00000000,
490 0x00000000,
491 0x00000000,
492 0x00000000,
493 0x00000000,
494 0x00000000,
495 0x00000000,
496 0x00000000,
497 0x00000000,
498 0x00000000,
499 0x00000000,
500 0x00000000,
501 0x00000000,
502 0x00000000,
503 0x00000000,
504 0x00000000,
505 0x00000000,
506 0x00000000,
507 0x00000000,
508 0x00000000,
509 0x00000000,
510 0x00000000,
511 0x00000000,
512 0x00000000,
513 0x00000000,
514 0x00000000,
515 0x00000000,
516 0x00000000,
517 0x00000000,
518 0x00000000,
519 0x00000000,
520 0x00000000,
521 0x00000000,
522 0x00000000,
523 0x00000000,
524 0x00000000,
525 0x00000000,
526 0x00000000,
527 0x00000000,
528 0x00000000,
529 0x00000000,
530 0x00000000,
531 0x00000000,
532 0x00000000,
533 0x00000000,
534};
diff --git a/drivers/gpu/drm/nouveau/nva3_pm.c b/drivers/gpu/drm/nouveau/nva3_pm.c
index dbbafed36406..e4b2b9e934b2 100644
--- a/drivers/gpu/drm/nouveau/nva3_pm.c
+++ b/drivers/gpu/drm/nouveau/nva3_pm.c
@@ -27,32 +27,74 @@
27#include "nouveau_bios.h" 27#include "nouveau_bios.h"
28#include "nouveau_pm.h" 28#include "nouveau_pm.h"
29 29
30/*XXX: boards using limits 0x40 need fixing, the register layout 30/* This is actually a lot more complex than it appears here, but hopefully
31 * is correct here, but, there's some other funny magic 31 * this should be able to deal with what the VBIOS leaves for us..
32 * that modifies things, so it's not likely we'll set/read 32 *
33 * the correct timings yet.. working on it... 33 * If not, well, I'll jump off that bridge when I come to it.
34 */ 34 */
35 35
36struct nva3_pm_state { 36struct nva3_pm_state {
37 struct pll_lims pll; 37 enum pll_types type;
38 int N, M, P; 38 u32 src0;
39 u32 src1;
40 u32 ctrl;
41 u32 coef;
42 u32 old_pnm;
43 u32 new_pnm;
44 u32 new_div;
39}; 45};
40 46
47static int
48nva3_pm_pll_offset(u32 id)
49{
50 static const u32 pll_map[] = {
51 0x00, PLL_CORE,
52 0x01, PLL_SHADER,
53 0x02, PLL_MEMORY,
54 0x00, 0x00
55 };
56 const u32 *map = pll_map;
57
58 while (map[1]) {
59 if (id == map[1])
60 return map[0];
61 map += 2;
62 }
63
64 return -ENOENT;
65}
66
41int 67int
42nva3_pm_clock_get(struct drm_device *dev, u32 id) 68nva3_pm_clock_get(struct drm_device *dev, u32 id)
43{ 69{
70 u32 src0, src1, ctrl, coef;
44 struct pll_lims pll; 71 struct pll_lims pll;
45 int P, N, M, ret; 72 int ret, off;
46 u32 reg; 73 int P, N, M;
47 74
48 ret = get_pll_limits(dev, id, &pll); 75 ret = get_pll_limits(dev, id, &pll);
49 if (ret) 76 if (ret)
50 return ret; 77 return ret;
51 78
52 reg = nv_rd32(dev, pll.reg + 4); 79 off = nva3_pm_pll_offset(id);
53 P = (reg & 0x003f0000) >> 16; 80 if (off < 0)
54 N = (reg & 0x0000ff00) >> 8; 81 return off;
55 M = (reg & 0x000000ff); 82
83 src0 = nv_rd32(dev, 0x4120 + (off * 4));
84 src1 = nv_rd32(dev, 0x4160 + (off * 4));
85 ctrl = nv_rd32(dev, pll.reg + 0);
86 coef = nv_rd32(dev, pll.reg + 4);
87 NV_DEBUG(dev, "PLL %02x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
88 id, src0, src1, ctrl, coef);
89
90 if (ctrl & 0x00000008) {
91 u32 div = ((src1 & 0x003c0000) >> 18) + 1;
92 return (pll.refclk * 2) / div;
93 }
94
95 P = (coef & 0x003f0000) >> 16;
96 N = (coef & 0x0000ff00) >> 8;
97 M = (coef & 0x000000ff);
56 return pll.refclk * N / M / P; 98 return pll.refclk * N / M / P;
57} 99}
58 100
@@ -60,36 +102,103 @@ void *
60nva3_pm_clock_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl, 102nva3_pm_clock_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl,
61 u32 id, int khz) 103 u32 id, int khz)
62{ 104{
63 struct nva3_pm_state *state; 105 struct nva3_pm_state *pll;
64 int dummy, ret; 106 struct pll_lims limits;
107 int N, M, P, diff;
108 int ret, off;
109
110 ret = get_pll_limits(dev, id, &limits);
111 if (ret < 0)
112 return (ret == -ENOENT) ? NULL : ERR_PTR(ret);
113
114 off = nva3_pm_pll_offset(id);
 115	if (off < 0)
116 return ERR_PTR(-EINVAL);
65 117
66 state = kzalloc(sizeof(*state), GFP_KERNEL); 118
67 if (!state) 119 pll = kzalloc(sizeof(*pll), GFP_KERNEL);
120 if (!pll)
68 return ERR_PTR(-ENOMEM); 121 return ERR_PTR(-ENOMEM);
122 pll->type = id;
123 pll->src0 = 0x004120 + (off * 4);
124 pll->src1 = 0x004160 + (off * 4);
125 pll->ctrl = limits.reg + 0;
126 pll->coef = limits.reg + 4;
69 127
70 ret = get_pll_limits(dev, id, &state->pll); 128 /* If target clock is within [-2, 3) MHz of a divisor, we'll
71 if (ret < 0) { 129 * use that instead of calculating MNP values
72 kfree(state); 130 */
73 return (ret == -ENOENT) ? NULL : ERR_PTR(ret); 131 pll->new_div = min((limits.refclk * 2) / (khz - 2999), 16);
132 if (pll->new_div) {
133 diff = khz - ((limits.refclk * 2) / pll->new_div);
134 if (diff < -2000 || diff >= 3000)
135 pll->new_div = 0;
74 } 136 }
75 137
76 ret = nv50_calc_pll2(dev, &state->pll, khz, &state->N, &dummy, 138 if (!pll->new_div) {
77 &state->M, &state->P); 139 ret = nva3_calc_pll(dev, &limits, khz, &N, NULL, &M, &P);
78 if (ret < 0) { 140 if (ret < 0)
79 kfree(state); 141 return ERR_PTR(ret);
80 return ERR_PTR(ret); 142
143 pll->new_pnm = (P << 16) | (N << 8) | M;
144 pll->new_div = 2 - 1;
145 } else {
146 pll->new_pnm = 0;
147 pll->new_div--;
81 } 148 }
82 149
83 return state; 150 if ((nv_rd32(dev, pll->src1) & 0x00000101) != 0x00000101)
151 pll->old_pnm = nv_rd32(dev, pll->coef);
152 return pll;
84} 153}
85 154
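The divider shortcut above, traced with assumed numbers (refclk = 27000 kHz):

	target 27000 kHz:  new_div = min(54000 / 24001, 16) = 2,
	                   diff = 27000 - 54000/2 = 0       -> divider path
	target 405000 kHz: new_div = min(54000 / 402001, 16) = 0
	                                            -> fall back to nva3_calc_pll()
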
86void 155void
87nva3_pm_clock_set(struct drm_device *dev, void *pre_state) 156nva3_pm_clock_set(struct drm_device *dev, void *pre_state)
88{ 157{
89 struct nva3_pm_state *state = pre_state; 158 struct nva3_pm_state *pll = pre_state;
90 u32 reg = state->pll.reg; 159 u32 ctrl = 0;
160
161 /* For the memory clock, NVIDIA will build a "script" describing
162 * the reclocking process and ask PDAEMON to execute it.
163 */
164 if (pll->type == PLL_MEMORY) {
165 nv_wr32(dev, 0x100210, 0);
166 nv_wr32(dev, 0x1002dc, 1);
167 nv_wr32(dev, 0x004018, 0x00001000);
168 ctrl = 0x18000100;
169 }
170
171 if (pll->old_pnm || !pll->new_pnm) {
172 nv_mask(dev, pll->src1, 0x003c0101, 0x00000101 |
173 (pll->new_div << 18));
174 nv_wr32(dev, pll->ctrl, 0x0001001d | ctrl);
175 nv_mask(dev, pll->ctrl, 0x00000001, 0x00000000);
176 }
177
178 if (pll->new_pnm) {
179 nv_mask(dev, pll->src0, 0x00000101, 0x00000101);
180 nv_wr32(dev, pll->coef, pll->new_pnm);
181 nv_wr32(dev, pll->ctrl, 0x0001001d | ctrl);
182 nv_mask(dev, pll->ctrl, 0x00000010, 0x00000000);
183 nv_mask(dev, pll->ctrl, 0x00020010, 0x00020010);
184 nv_wr32(dev, pll->ctrl, 0x00010015 | ctrl);
185 nv_mask(dev, pll->src1, 0x00000100, 0x00000000);
186 nv_mask(dev, pll->src1, 0x00000001, 0x00000000);
187 if (pll->type == PLL_MEMORY)
188 nv_wr32(dev, 0x4018, 0x10005000);
189 } else {
190 nv_mask(dev, pll->ctrl, 0x00000001, 0x00000000);
191 nv_mask(dev, pll->src0, 0x00000100, 0x00000000);
192 nv_mask(dev, pll->src0, 0x00000001, 0x00000000);
193 if (pll->type == PLL_MEMORY)
194 nv_wr32(dev, 0x4018, 0x1000d000);
195 }
196
197 if (pll->type == PLL_MEMORY) {
198 nv_wr32(dev, 0x1002dc, 0);
199 nv_wr32(dev, 0x100210, 0x80000000);
200 }
91 201
92 nv_wr32(dev, reg + 4, (state->P << 16) | (state->N << 8) | state->M); 202 kfree(pll);
93 kfree(state);
94} 203}
95 204
diff --git a/drivers/gpu/drm/nouveau/nvc0_copy.c b/drivers/gpu/drm/nouveau/nvc0_copy.c
new file mode 100644
index 000000000000..208fa7ab3f42
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvc0_copy.c
@@ -0,0 +1,243 @@
1/*
2 * Copyright 2011 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <linux/firmware.h>
26#include "drmP.h"
27#include "nouveau_drv.h"
28#include "nouveau_util.h"
29#include "nouveau_vm.h"
30#include "nouveau_ramht.h"
31#include "nvc0_copy.fuc.h"
32
33struct nvc0_copy_engine {
34 struct nouveau_exec_engine base;
35 u32 irq;
36 u32 pmc;
37 u32 fuc;
38 u32 ctx;
39};
40
41static int
42nvc0_copy_context_new(struct nouveau_channel *chan, int engine)
43{
44 struct nvc0_copy_engine *pcopy = nv_engine(chan->dev, engine);
45 struct drm_device *dev = chan->dev;
46 struct drm_nouveau_private *dev_priv = dev->dev_private;
47 struct nouveau_gpuobj *ramin = chan->ramin;
48 struct nouveau_gpuobj *ctx = NULL;
49 int ret;
50
51 ret = nouveau_gpuobj_new(dev, NULL, 256, 256,
52 NVOBJ_FLAG_VM | NVOBJ_FLAG_VM_USER |
53 NVOBJ_FLAG_ZERO_ALLOC, &ctx);
54 if (ret)
55 return ret;
56
57 nv_wo32(ramin, pcopy->ctx + 0, lower_32_bits(ctx->vinst));
58 nv_wo32(ramin, pcopy->ctx + 4, upper_32_bits(ctx->vinst));
59 dev_priv->engine.instmem.flush(dev);
60
61 chan->engctx[engine] = ctx;
62 return 0;
63}
64
65static int
66nvc0_copy_object_new(struct nouveau_channel *chan, int engine,
67 u32 handle, u16 class)
68{
69 return 0;
70}
71
72static void
73nvc0_copy_context_del(struct nouveau_channel *chan, int engine)
74{
75 struct nvc0_copy_engine *pcopy = nv_engine(chan->dev, engine);
76 struct nouveau_gpuobj *ctx = chan->engctx[engine];
77 struct drm_device *dev = chan->dev;
78 u32 inst;
79
80 inst = (chan->ramin->vinst >> 12);
81 inst |= 0x40000000;
82
83 /* disable fifo access */
84 nv_wr32(dev, pcopy->fuc + 0x048, 0x00000000);
85 /* mark channel as unloaded if it's currently active */
86 if (nv_rd32(dev, pcopy->fuc + 0x050) == inst)
87 nv_mask(dev, pcopy->fuc + 0x050, 0x40000000, 0x00000000);
88 /* mark next channel as invalid if it's about to be loaded */
89 if (nv_rd32(dev, pcopy->fuc + 0x054) == inst)
90 nv_mask(dev, pcopy->fuc + 0x054, 0x40000000, 0x00000000);
91 /* restore fifo access */
92 nv_wr32(dev, pcopy->fuc + 0x048, 0x00000003);
93
94 nv_wo32(chan->ramin, pcopy->ctx + 0, 0x00000000);
95 nv_wo32(chan->ramin, pcopy->ctx + 4, 0x00000000);
96 nouveau_gpuobj_ref(NULL, &ctx);
97
98 chan->engctx[engine] = ctx;
99}
100
101static int
102nvc0_copy_init(struct drm_device *dev, int engine)
103{
104 struct nvc0_copy_engine *pcopy = nv_engine(dev, engine);
105 int i;
106
107 nv_mask(dev, 0x000200, pcopy->pmc, 0x00000000);
108 nv_mask(dev, 0x000200, pcopy->pmc, pcopy->pmc);
109 nv_wr32(dev, pcopy->fuc + 0x014, 0xffffffff);
110
111 nv_wr32(dev, pcopy->fuc + 0x1c0, 0x01000000);
112 for (i = 0; i < sizeof(nvc0_pcopy_data) / 4; i++)
113 nv_wr32(dev, pcopy->fuc + 0x1c4, nvc0_pcopy_data[i]);
114
115 nv_wr32(dev, pcopy->fuc + 0x180, 0x01000000);
116 for (i = 0; i < sizeof(nvc0_pcopy_code) / 4; i++) {
117 if ((i & 0x3f) == 0)
118 nv_wr32(dev, pcopy->fuc + 0x188, i >> 6);
119 nv_wr32(dev, pcopy->fuc + 0x184, nvc0_pcopy_code[i]);
120 }
121
122 nv_wr32(dev, pcopy->fuc + 0x084, engine - NVOBJ_ENGINE_COPY0);
123 nv_wr32(dev, pcopy->fuc + 0x10c, 0x00000000);
124 nv_wr32(dev, pcopy->fuc + 0x104, 0x00000000); /* ENTRY */
125 nv_wr32(dev, pcopy->fuc + 0x100, 0x00000002); /* TRIGGER */
126 return 0;
127}
128
129static int
130nvc0_copy_fini(struct drm_device *dev, int engine)
131{
132 struct nvc0_copy_engine *pcopy = nv_engine(dev, engine);
133
134 nv_mask(dev, pcopy->fuc + 0x048, 0x00000003, 0x00000000);
135
136 /* trigger fuc context unload */
137 nv_wait(dev, pcopy->fuc + 0x008, 0x0000000c, 0x00000000);
138 nv_mask(dev, pcopy->fuc + 0x054, 0x40000000, 0x00000000);
139 nv_wr32(dev, pcopy->fuc + 0x000, 0x00000008);
140 nv_wait(dev, pcopy->fuc + 0x008, 0x00000008, 0x00000000);
141
142 nv_wr32(dev, pcopy->fuc + 0x014, 0xffffffff);
143 return 0;
144}
145
146static struct nouveau_enum nvc0_copy_isr_error_name[] = {
147 { 0x0001, "ILLEGAL_MTHD" },
148 { 0x0002, "INVALID_ENUM" },
149 { 0x0003, "INVALID_BITFIELD" },
150 {}
151};
152
153static void
154nvc0_copy_isr(struct drm_device *dev, int engine)
155{
156 struct nvc0_copy_engine *pcopy = nv_engine(dev, engine);
157 u32 disp = nv_rd32(dev, pcopy->fuc + 0x01c);
158 u32 stat = nv_rd32(dev, pcopy->fuc + 0x008) & disp & ~(disp >> 16);
159 u64 inst = (u64)(nv_rd32(dev, pcopy->fuc + 0x050) & 0x0fffffff) << 12;
160 u32 chid = nvc0_graph_isr_chid(dev, inst);
161 u32 ssta = nv_rd32(dev, pcopy->fuc + 0x040) & 0x0000ffff;
162 u32 addr = nv_rd32(dev, pcopy->fuc + 0x040) >> 16;
163 u32 mthd = (addr & 0x07ff) << 2;
164 u32 subc = (addr & 0x3800) >> 11;
165 u32 data = nv_rd32(dev, pcopy->fuc + 0x044);
166
167 if (stat & 0x00000040) {
168 NV_INFO(dev, "PCOPY: DISPATCH_ERROR [");
169 nouveau_enum_print(nvc0_copy_isr_error_name, ssta);
170 printk("] ch %d [0x%010llx] subc %d mthd 0x%04x data 0x%08x\n",
171 chid, inst, subc, mthd, data);
172 nv_wr32(dev, pcopy->fuc + 0x004, 0x00000040);
173 stat &= ~0x00000040;
174 }
175
176 if (stat) {
177 NV_INFO(dev, "PCOPY: unhandled intr 0x%08x\n", stat);
178 nv_wr32(dev, pcopy->fuc + 0x004, stat);
179 }
180}
181
182static void
183nvc0_copy_isr_0(struct drm_device *dev)
184{
185 nvc0_copy_isr(dev, NVOBJ_ENGINE_COPY0);
186}
187
188static void
189nvc0_copy_isr_1(struct drm_device *dev)
190{
191 nvc0_copy_isr(dev, NVOBJ_ENGINE_COPY1);
192}
193
194static void
195nvc0_copy_destroy(struct drm_device *dev, int engine)
196{
197 struct nvc0_copy_engine *pcopy = nv_engine(dev, engine);
198
199 nouveau_irq_unregister(dev, pcopy->irq);
200
201 if (engine == NVOBJ_ENGINE_COPY0)
202 NVOBJ_ENGINE_DEL(dev, COPY0);
203 else
204 NVOBJ_ENGINE_DEL(dev, COPY1);
205 kfree(pcopy);
206}
207
208int
209nvc0_copy_create(struct drm_device *dev, int engine)
210{
211 struct nvc0_copy_engine *pcopy;
212
213 pcopy = kzalloc(sizeof(*pcopy), GFP_KERNEL);
214 if (!pcopy)
215 return -ENOMEM;
216
217 pcopy->base.destroy = nvc0_copy_destroy;
218 pcopy->base.init = nvc0_copy_init;
219 pcopy->base.fini = nvc0_copy_fini;
220 pcopy->base.context_new = nvc0_copy_context_new;
221 pcopy->base.context_del = nvc0_copy_context_del;
222 pcopy->base.object_new = nvc0_copy_object_new;
223
224 if (engine == 0) {
225 pcopy->irq = 5;
226 pcopy->pmc = 0x00000040;
227 pcopy->fuc = 0x104000;
228 pcopy->ctx = 0x0230;
229 nouveau_irq_register(dev, pcopy->irq, nvc0_copy_isr_0);
230 NVOBJ_ENGINE_ADD(dev, COPY0, &pcopy->base);
231 NVOBJ_CLASS(dev, 0x90b5, COPY0);
232 } else {
233 pcopy->irq = 6;
234 pcopy->pmc = 0x00000080;
235 pcopy->fuc = 0x105000;
236 pcopy->ctx = 0x0240;
237 nouveau_irq_register(dev, pcopy->irq, nvc0_copy_isr_1);
238 NVOBJ_ENGINE_ADD(dev, COPY1, &pcopy->base);
239 NVOBJ_CLASS(dev, 0x90b8, COPY1);
240 }
241
242 return 0;
243}
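A sketch of the expected call sites during card init (assumed, not part of this hunk; the if (engine == 0) test implies a bare 0/1 index rather than an NVOBJ_ENGINE_* value):

	nvc0_copy_create(dev, 0);	/* PCOPY0: irq 5, class 0x90b5 */
	nvc0_copy_create(dev, 1);	/* PCOPY1: irq 6, class 0x90b8 */
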
diff --git a/drivers/gpu/drm/nouveau/nvc0_copy.fuc.h b/drivers/gpu/drm/nouveau/nvc0_copy.fuc.h
new file mode 100644
index 000000000000..419903880e9d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvc0_copy.fuc.h
@@ -0,0 +1,527 @@
1uint32_t nvc0_pcopy_data[] = {
2 0x00000000,
3 0x00000000,
4 0x00000000,
5 0x00000000,
6 0x00000000,
7 0x00000000,
8 0x00000000,
9 0x00000000,
10 0x00000000,
11 0x00000000,
12 0x00000000,
13 0x00000000,
14 0x00000000,
15 0x00000000,
16 0x00000000,
17 0x00000000,
18 0x00000000,
19 0x00000000,
20 0x00000000,
21 0x00000000,
22 0x00000000,
23 0x00000000,
24 0x00000000,
25 0x00000000,
26 0x00000000,
27 0x00000000,
28 0x00000000,
29 0x00000000,
30 0x00000000,
31 0x00000000,
32 0x00000000,
33 0x00000000,
34 0x00000000,
35 0x00000000,
36 0x00000000,
37 0x00000000,
38 0x00000000,
39 0x00000000,
40 0x00000000,
41 0x00000000,
42 0x00000000,
43 0x00000000,
44 0x00000000,
45 0x00000000,
46 0x00000000,
47 0x00000000,
48 0x00000000,
49 0x00000000,
50 0x00000000,
51 0x00000000,
52 0x00000000,
53 0x00000000,
54 0x00000000,
55 0x00000000,
56 0x00000000,
57 0x00000000,
58 0x00000000,
59 0x00000000,
60 0x00000000,
61 0x00000000,
62 0x00000000,
63 0x00000000,
64 0x00000000,
65 0x00000000,
66 0x00010000,
67 0x00000000,
68 0x00000000,
69 0x00010040,
70 0x0001019f,
71 0x00000000,
72 0x00010050,
73 0x000101a1,
74 0x00000000,
75 0x00070080,
76 0x0000001c,
77 0xfffff000,
78 0x00000020,
79 0xfff80000,
80 0x00000024,
81 0xffffe000,
82 0x00000028,
83 0xfffff800,
84 0x0000002c,
85 0xfffff000,
86 0x00000030,
87 0xfff80000,
88 0x00000034,
89 0xffffe000,
90 0x00070088,
91 0x00000048,
92 0xfffff000,
93 0x0000004c,
94 0xfff80000,
95 0x00000050,
96 0xffffe000,
97 0x00000054,
98 0xfffff800,
99 0x00000058,
100 0xfffff000,
101 0x0000005c,
102 0xfff80000,
103 0x00000060,
104 0xffffe000,
105 0x000200c0,
106 0x000104b8,
107 0x00000000,
108 0x00010541,
109 0x00000000,
110 0x000e00c3,
111 0x00000010,
112 0xffffff00,
113 0x00000014,
114 0x0000000f,
115 0x0000003c,
116 0xffffff00,
117 0x00000040,
118 0x0000000f,
119 0x00000018,
120 0xfff80000,
121 0x00000044,
122 0xfff80000,
123 0x00000074,
124 0xffff0000,
125 0x00000078,
126 0xffffe000,
127 0x00000068,
128 0xfccc0000,
129 0x0000006c,
130 0x00000000,
131 0x00000070,
132 0x00000000,
133 0x00000004,
134 0xffffff00,
135 0x00000008,
136 0x00000000,
137 0x0000000c,
138 0x00000000,
139 0x00000800,
140};
141
142uint32_t nvc0_pcopy_code[] = {
143 0x04fe04bd,
144 0x3517f000,
145 0xf10010fe,
146 0xf1040017,
147 0xf0fff327,
148 0x22d00023,
149 0x0c25f0c0,
150 0xf40012d0,
151 0x17f11031,
152 0x27f01200,
153 0x0012d003,
154 0xf40031f4,
155 0x0ef40028,
156 0x8001cffd,
157 0xf40812c4,
158 0x21f4060b,
159 0x0412c4ca,
160 0xf5070bf4,
161 0xc4010221,
162 0x01d00c11,
163 0xf101f840,
164 0xfe770047,
165 0x47f1004b,
166 0x44cf2100,
167 0x0144f000,
168 0xb60444b6,
169 0xf7f13040,
170 0xf4b6061c,
171 0x1457f106,
172 0x00f5d101,
173 0xb6043594,
174 0x57fe0250,
175 0x0145fe00,
176 0x010052b7,
177 0x00ff67f1,
178 0x56fd60bd,
179 0x0253f004,
180 0xf80545fa,
181 0x0053f003,
182 0xd100e7f0,
183 0x549800fe,
184 0x0845b600,
185 0xb6015698,
186 0x46fd1864,
187 0x0047fe05,
188 0xf00204b9,
189 0x01f40643,
190 0x0604fa09,
191 0xfa060ef4,
192 0x03f80504,
193 0x27f100f8,
194 0x23cf1400,
195 0x1e3fc800,
196 0xf4170bf4,
197 0x21f40132,
198 0x1e3af053,
199 0xf00023d0,
200 0x24d00147,
201 0xcf00f880,
202 0x3dc84023,
203 0x090bf41e,
204 0xf40131f4,
205 0x37f05321,
206 0x8023d002,
207 0x37f100f8,
208 0x32cf1900,
209 0x0033cf40,
210 0x07ff24e4,
211 0xf11024b6,
212 0xbd010057,
213 0x5874bd64,
214 0x57580056,
215 0x0450b601,
216 0xf40446b8,
217 0x76bb4d08,
218 0x0447b800,
219 0xbb0f08f4,
220 0x74b60276,
221 0x0057bb03,
222 0xbbdf0ef4,
223 0x44b60246,
224 0x0045bb03,
225 0xfd014598,
226 0x54b00453,
227 0x201bf400,
228 0x58004558,
229 0x64b00146,
230 0x091bf400,
231 0xf4005380,
232 0x32f4300e,
233 0xf455f901,
234 0x0ef40c01,
235 0x0225f025,
236 0xf10125f0,
237 0xd0100047,
238 0x43d00042,
239 0x4027f040,
240 0xcf0002d0,
241 0x24f08002,
242 0x0024b040,
243 0xf1f71bf4,
244 0xf01d0027,
245 0x23d00137,
246 0xf800f800,
247 0x0027f100,
248 0xf034bd22,
249 0x23d00233,
250 0xf400f800,
251 0x01b0f030,
252 0x0101b000,
253 0xb00201b0,
254 0x04980301,
255 0x3045c71a,
256 0xc70150b6,
257 0x60b63446,
258 0x3847c701,
259 0xf40170b6,
260 0x84bd0232,
261 0x4ac494bd,
262 0x0445b60f,
263 0xa430b4bd,
264 0x0f18f404,
265 0xbbc0a5ff,
266 0x31f400cb,
267 0x220ef402,
268 0xf00c1bf4,
269 0xcbbb10c7,
270 0x160ef400,
271 0xf406a430,
272 0xc7f00c18,
273 0x00cbbb14,
274 0xf1070ef4,
275 0x380080c7,
276 0x80b601c8,
277 0x01b0b601,
278 0xf404b5b8,
279 0x90b6c308,
280 0x0497b801,
281 0xfdb208f4,
282 0x06800065,
283 0x1d08980e,
284 0xf40068fd,
285 0x64bd0502,
286 0x800075fd,
287 0x78fd1907,
288 0x1057f100,
289 0x0654b608,
290 0xd00056d0,
291 0x50b74057,
292 0x06980800,
293 0x0162b619,
294 0x980864b6,
295 0x72b60e07,
296 0x0567fd01,
297 0xb70056d0,
298 0xb4010050,
299 0x56d00060,
300 0x0160b400,
301 0xb44056d0,
302 0x56d00260,
303 0x0360b480,
304 0xb7c056d0,
305 0x98040050,
306 0x56d01b06,
307 0x1c069800,
308 0xf44056d0,
309 0x00f81030,
310 0xc7075798,
311 0x78c76879,
312 0x0380b664,
313 0xb06077c7,
314 0x1bf40e76,
315 0x0477f009,
316 0xf00f0ef4,
317 0x70b6027c,
318 0x0947fd11,
319 0x980677f0,
320 0x5b980c5a,
321 0x00abfd0e,
322 0xbb01b7f0,
323 0xb2b604b7,
324 0xc4abff01,
325 0x9805a7bb,
326 0xe7f00d5d,
327 0x04e8bb01,
328 0xff01e2b6,
329 0xd8bbb4de,
330 0x01e0b605,
331 0xbb0cef94,
332 0xfefd02eb,
333 0x026cf005,
334 0x020860b7,
335 0xd00864b6,
336 0xb7bb006f,
337 0x00cbbb04,
338 0x98085f98,
339 0xfbfd0e5b,
340 0x01b7f000,
341 0xb604b7bb,
342 0xfbbb01b2,
343 0x05f7bb00,
344 0x5f98f0f9,
345 0x01b7f009,
346 0xb604b8bb,
347 0xfbbb01b2,
348 0x05f8bb00,
349 0x78bbf0f9,
350 0x0282b600,
351 0xbb01b7f0,
352 0xb9bb04b8,
353 0x0b589804,
354 0xbb01e7f0,
355 0xe2b604e9,
356 0xf48eff01,
357 0xbb04f7bb,
358 0x79bb00cf,
359 0x0589bb00,
360 0x90fcf0fc,
361 0xbb00d9fd,
362 0x89fd00ad,
363 0x008ffd00,
364 0xbb00a8bb,
365 0x92b604a7,
366 0x0497bb01,
367 0x988069d0,
368 0x58980557,
369 0x00acbb04,
370 0xb6007abb,
371 0x84b60081,
372 0x058bfd10,
373 0x060062b7,
374 0xb70067d0,
375 0xd0040060,
376 0x00f80068,
377 0xb7026cf0,
378 0xb6020260,
379 0x57980864,
380 0x0067d005,
381 0x040060b7,
382 0xb6045798,
383 0x67d01074,
384 0x0060b700,
385 0x06579804,
386 0xf80067d0,
387 0xf900f900,
388 0x0007f110,
389 0x0604b608,
390 0xf00001cf,
391 0x1bf40114,
392 0xfc10fcfa,
393 0xc800f800,
394 0x1bf40d34,
395 0xd121f570,
396 0x0c47f103,
397 0x0644b608,
398 0xb6020598,
399 0x45d00450,
400 0x4040d000,
401 0xd00c57f0,
402 0x40b78045,
403 0x05980400,
404 0x1054b601,
405 0xb70045d0,
406 0xf1050040,
407 0xf00b0057,
408 0x45d00153,
409 0x4057f100,
410 0x0154b640,
411 0x808053f1,
412 0xf14045d0,
413 0xf1111057,
414 0xd0131253,
415 0x57f18045,
416 0x53f11514,
417 0x45d01716,
418 0x0157f1c0,
419 0x0153f026,
420 0x080047f1,
421 0xd00644b6,
422 0x21f50045,
423 0x47f103d1,
424 0x44b6080c,
425 0x02059806,
426 0xd00045d0,
427 0x57f04040,
428 0x8045d004,
429 0x040040b7,
430 0xb6010598,
431 0x45d01054,
432 0x0040b700,
433 0x0057f105,
434 0x0045d003,
435 0x111057f1,
436 0x131253f1,
437 0x984045d0,
438 0x40b70305,
439 0x45d00500,
440 0x0157f100,
441 0x0153f026,
442 0x080047f1,
443 0xd00644b6,
444 0x00f80045,
445 0x03d121f5,
446 0xf4003fc8,
447 0x21f50e0b,
448 0x47f101af,
449 0x0ef40200,
450 0x1067f11e,
451 0x0664b608,
452 0x800177f0,
453 0x07800e07,
454 0x1d079819,
455 0xd00067d0,
456 0x44bd4067,
457 0xbd0232f4,
458 0x043fc854,
459 0xf50a0bf4,
460 0xf403a821,
461 0x21f50a0e,
462 0x49f0029c,
463 0x0231f407,
464 0xc82c57f0,
465 0x0bf4083f,
466 0xa821f50a,
467 0x0a0ef403,
468 0x029c21f5,
469 0xf10849f0,
470 0xb6080057,
471 0x06980654,
472 0x4056d01e,
473 0xf14167f0,
474 0xfd440063,
475 0x54d00546,
476 0x0c3fc800,
477 0xf5070bf4,
478 0xf803eb21,
479 0x0027f100,
480 0xf034bd22,
481 0x23d00133,
482 0x0000f800,
483 0x00000000,
484 0x00000000,
485 0x00000000,
486 0x00000000,
487 0x00000000,
488 0x00000000,
489 0x00000000,
490 0x00000000,
491 0x00000000,
492 0x00000000,
493 0x00000000,
494 0x00000000,
495 0x00000000,
496 0x00000000,
497 0x00000000,
498 0x00000000,
499 0x00000000,
500 0x00000000,
501 0x00000000,
502 0x00000000,
503 0x00000000,
504 0x00000000,
505 0x00000000,
506 0x00000000,
507 0x00000000,
508 0x00000000,
509 0x00000000,
510 0x00000000,
511 0x00000000,
512 0x00000000,
513 0x00000000,
514 0x00000000,
515 0x00000000,
516 0x00000000,
517 0x00000000,
518 0x00000000,
519 0x00000000,
520 0x00000000,
521 0x00000000,
522 0x00000000,
523 0x00000000,
524 0x00000000,
525 0x00000000,
526 0x00000000,
527};
diff --git a/drivers/gpu/drm/nouveau/nvc0_fifo.c b/drivers/gpu/drm/nouveau/nvc0_fifo.c
index 2886f2726a9e..fb4f5943e01b 100644
--- a/drivers/gpu/drm/nouveau/nvc0_fifo.c
+++ b/drivers/gpu/drm/nouveau/nvc0_fifo.c
@@ -37,7 +37,7 @@ struct nvc0_fifo_priv {
37}; 37};
38 38
39struct nvc0_fifo_chan { 39struct nvc0_fifo_chan {
40 struct nouveau_bo *user; 40 struct nouveau_gpuobj *user;
41 struct nouveau_gpuobj *ramfc; 41 struct nouveau_gpuobj *ramfc;
42}; 42};
43 43
@@ -106,7 +106,7 @@ nvc0_fifo_create_context(struct nouveau_channel *chan)
106 struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; 106 struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
107 struct nvc0_fifo_priv *priv = pfifo->priv; 107 struct nvc0_fifo_priv *priv = pfifo->priv;
108 struct nvc0_fifo_chan *fifoch; 108 struct nvc0_fifo_chan *fifoch;
109 u64 ib_virt, user_vinst; 109 u64 ib_virt = chan->pushbuf_base + chan->dma.ib_base * 4;
110 int ret; 110 int ret;
111 111
112 chan->fifo_priv = kzalloc(sizeof(*fifoch), GFP_KERNEL); 112 chan->fifo_priv = kzalloc(sizeof(*fifoch), GFP_KERNEL);
@@ -115,28 +115,13 @@ nvc0_fifo_create_context(struct nouveau_channel *chan)
115 fifoch = chan->fifo_priv; 115 fifoch = chan->fifo_priv;
116 116
117 /* allocate vram for control regs, map into polling area */ 117 /* allocate vram for control regs, map into polling area */
118 ret = nouveau_bo_new(dev, NULL, 0x1000, 0, TTM_PL_FLAG_VRAM, 118 ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 0x1000,
119 0, 0, &fifoch->user); 119 NVOBJ_FLAG_ZERO_ALLOC, &fifoch->user);
120 if (ret) 120 if (ret)
121 goto error; 121 goto error;
122 122
123 ret = nouveau_bo_pin(fifoch->user, TTM_PL_FLAG_VRAM);
124 if (ret) {
125 nouveau_bo_ref(NULL, &fifoch->user);
126 goto error;
127 }
128
129 user_vinst = fifoch->user->bo.mem.start << PAGE_SHIFT;
130
131 ret = nouveau_bo_map(fifoch->user);
132 if (ret) {
133 nouveau_bo_unpin(fifoch->user);
134 nouveau_bo_ref(NULL, &fifoch->user);
135 goto error;
136 }
137
138 nouveau_vm_map_at(&priv->user_vma, chan->id * 0x1000, 123 nouveau_vm_map_at(&priv->user_vma, chan->id * 0x1000,
139 fifoch->user->bo.mem.mm_node); 124 *(struct nouveau_mem **)fifoch->user->node);
140 125
141 chan->user = ioremap_wc(pci_resource_start(dev->pdev, 1) + 126 chan->user = ioremap_wc(pci_resource_start(dev->pdev, 1) +
142 priv->user_vma.offset + (chan->id * 0x1000), 127 priv->user_vma.offset + (chan->id * 0x1000),
@@ -146,20 +131,6 @@ nvc0_fifo_create_context(struct nouveau_channel *chan)
146 goto error; 131 goto error;
147 } 132 }
148 133
149 ib_virt = chan->pushbuf_base + chan->dma.ib_base * 4;
150
151 /* zero channel regs */
152 nouveau_bo_wr32(fifoch->user, 0x0040/4, 0);
153 nouveau_bo_wr32(fifoch->user, 0x0044/4, 0);
154 nouveau_bo_wr32(fifoch->user, 0x0048/4, 0);
155 nouveau_bo_wr32(fifoch->user, 0x004c/4, 0);
156 nouveau_bo_wr32(fifoch->user, 0x0050/4, 0);
157 nouveau_bo_wr32(fifoch->user, 0x0058/4, 0);
158 nouveau_bo_wr32(fifoch->user, 0x005c/4, 0);
159 nouveau_bo_wr32(fifoch->user, 0x0060/4, 0);
160 nouveau_bo_wr32(fifoch->user, 0x0088/4, 0);
161 nouveau_bo_wr32(fifoch->user, 0x008c/4, 0);
162
163 /* ramfc */ 134 /* ramfc */
164 ret = nouveau_gpuobj_new_fake(dev, chan->ramin->pinst, 135 ret = nouveau_gpuobj_new_fake(dev, chan->ramin->pinst,
165 chan->ramin->vinst, 0x100, 136 chan->ramin->vinst, 0x100,
@@ -167,8 +138,8 @@ nvc0_fifo_create_context(struct nouveau_channel *chan)
167 if (ret) 138 if (ret)
168 goto error; 139 goto error;
169 140
170 nv_wo32(fifoch->ramfc, 0x08, lower_32_bits(user_vinst)); 141 nv_wo32(fifoch->ramfc, 0x08, lower_32_bits(fifoch->user->vinst));
171 nv_wo32(fifoch->ramfc, 0x0c, upper_32_bits(user_vinst)); 142 nv_wo32(fifoch->ramfc, 0x0c, upper_32_bits(fifoch->user->vinst));
172 nv_wo32(fifoch->ramfc, 0x10, 0x0000face); 143 nv_wo32(fifoch->ramfc, 0x10, 0x0000face);
173 nv_wo32(fifoch->ramfc, 0x30, 0xfffff902); 144 nv_wo32(fifoch->ramfc, 0x30, 0xfffff902);
174 nv_wo32(fifoch->ramfc, 0x48, lower_32_bits(ib_virt)); 145 nv_wo32(fifoch->ramfc, 0x48, lower_32_bits(ib_virt));
@@ -223,11 +194,7 @@ nvc0_fifo_destroy_context(struct nouveau_channel *chan)
223 return; 194 return;
224 195
225 nouveau_gpuobj_ref(NULL, &fifoch->ramfc); 196 nouveau_gpuobj_ref(NULL, &fifoch->ramfc);
226 if (fifoch->user) { 197 nouveau_gpuobj_ref(NULL, &fifoch->user);
227 nouveau_bo_unmap(fifoch->user);
228 nouveau_bo_unpin(fifoch->user);
229 nouveau_bo_ref(NULL, &fifoch->user);
230 }
231 kfree(fifoch); 198 kfree(fifoch);
232} 199}
233 200
@@ -240,6 +207,21 @@ nvc0_fifo_load_context(struct nouveau_channel *chan)
240int 207int
241nvc0_fifo_unload_context(struct drm_device *dev) 208nvc0_fifo_unload_context(struct drm_device *dev)
242{ 209{
210 int i;
211
212 for (i = 0; i < 128; i++) {
213 if (!(nv_rd32(dev, 0x003004 + (i * 4)) & 1))
214 continue;
215
216 nv_mask(dev, 0x003004 + (i * 4), 0x00000001, 0x00000000);
217 nv_wr32(dev, 0x002634, i);
218 if (!nv_wait(dev, 0x002634, 0xffffffff, i)) {
219 NV_INFO(dev, "PFIFO: kick ch %d failed: 0x%08x\n",
220 i, nv_rd32(dev, 0x002634));
221 return -EBUSY;
222 }
223 }
224
243 return 0; 225 return 0;
244} 226}
245 227
@@ -309,6 +291,7 @@ nvc0_fifo_init(struct drm_device *dev)
309{ 291{
310 struct drm_nouveau_private *dev_priv = dev->dev_private; 292 struct drm_nouveau_private *dev_priv = dev->dev_private;
311 struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; 293 struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
294 struct nouveau_channel *chan;
312 struct nvc0_fifo_priv *priv; 295 struct nvc0_fifo_priv *priv;
313 int ret, i; 296 int ret, i;
314 297
@@ -351,23 +334,74 @@ nvc0_fifo_init(struct drm_device *dev)
351 nv_wr32(dev, 0x002a00, 0xffffffff); /* clears PFIFO.INTR bit 30 */ 334 nv_wr32(dev, 0x002a00, 0xffffffff); /* clears PFIFO.INTR bit 30 */
352 nv_wr32(dev, 0x002100, 0xffffffff); 335 nv_wr32(dev, 0x002100, 0xffffffff);
353 nv_wr32(dev, 0x002140, 0xbfffffff); 336 nv_wr32(dev, 0x002140, 0xbfffffff);
337
338 /* restore PFIFO context table */
339 for (i = 0; i < 128; i++) {
340 chan = dev_priv->channels.ptr[i];
341 if (!chan || !chan->fifo_priv)
342 continue;
343
344 nv_wr32(dev, 0x003000 + (i * 8), 0xc0000000 |
345 (chan->ramin->vinst >> 12));
346 nv_wr32(dev, 0x003004 + (i * 8), 0x001f0001);
347 }
348 nvc0_fifo_playlist_update(dev);
349
354 return 0; 350 return 0;
355} 351}
356 352
357struct nouveau_enum nvc0_fifo_fault_unit[] = { 353struct nouveau_enum nvc0_fifo_fault_unit[] = {
358 { 0, "PGRAPH" }, 354 { 0x00, "PGRAPH" },
359 { 3, "PEEPHOLE" }, 355 { 0x03, "PEEPHOLE" },
360 { 4, "BAR1" }, 356 { 0x04, "BAR1" },
361 { 5, "BAR3" }, 357 { 0x05, "BAR3" },
362 { 7, "PFIFO" }, 358 { 0x07, "PFIFO" },
359 { 0x10, "PBSP" },
360 { 0x11, "PPPP" },
361 { 0x13, "PCOUNTER" },
362 { 0x14, "PVP" },
363 { 0x15, "PCOPY0" },
364 { 0x16, "PCOPY1" },
365 { 0x17, "PDAEMON" },
363 {} 366 {}
364}; 367};
365 368
366struct nouveau_enum nvc0_fifo_fault_reason[] = { 369struct nouveau_enum nvc0_fifo_fault_reason[] = {
367 { 0, "PT_NOT_PRESENT" }, 370 { 0x00, "PT_NOT_PRESENT" },
368 { 1, "PT_TOO_SHORT" }, 371 { 0x01, "PT_TOO_SHORT" },
369 { 2, "PAGE_NOT_PRESENT" }, 372 { 0x02, "PAGE_NOT_PRESENT" },
370 { 3, "VM_LIMIT_EXCEEDED" }, 373 { 0x03, "VM_LIMIT_EXCEEDED" },
374 { 0x04, "NO_CHANNEL" },
375 { 0x05, "PAGE_SYSTEM_ONLY" },
376 { 0x06, "PAGE_READ_ONLY" },
377 { 0x0a, "COMPRESSED_SYSRAM" },
378 { 0x0c, "INVALID_STORAGE_TYPE" },
379 {}
380};
381
382struct nouveau_enum nvc0_fifo_fault_hubclient[] = {
383 { 0x01, "PCOPY0" },
384 { 0x02, "PCOPY1" },
385 { 0x04, "DISPATCH" },
386 { 0x05, "CTXCTL" },
387 { 0x06, "PFIFO" },
388 { 0x07, "BAR_READ" },
389 { 0x08, "BAR_WRITE" },
390 { 0x0b, "PVP" },
391 { 0x0c, "PPPP" },
392 { 0x0d, "PBSP" },
393 { 0x11, "PCOUNTER" },
394 { 0x12, "PDAEMON" },
395 { 0x14, "CCACHE" },
396 { 0x15, "CCACHE_POST" },
397 {}
398};
399
400struct nouveau_enum nvc0_fifo_fault_gpcclient[] = {
401 { 0x01, "TEX" },
402 { 0x0c, "ESETUP" },
403 { 0x0e, "CTXCTL" },
404 { 0x0f, "PROP" },
371 {} 405 {}
372}; 406};
373 407
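
The fault tables above follow nouveau's usual nouveau_enum convention: an array of { value, name } pairs terminated by a zeroed sentinel, which nouveau_enum_print scans linearly, falling back to the raw value when no entry matches. A plausible shape for the lookup, assuming the two-field layout implied by the initializers (the real struct may carry extra fields):

	/* Assumed shape of the lookup behind nouveau_enum_print(). */
	struct nouveau_enum {
		u32 value;
		const char *name;
	};

	static const struct nouveau_enum *
	nouveau_enum_find(const struct nouveau_enum *en, u32 value)
	{
		for (; en->name; en++)		/* zeroed sentinel ends the table */
			if (en->value == value)
				return en;
		return NULL;			/* caller prints the raw number */
	}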
@@ -385,12 +419,20 @@ nvc0_fifo_isr_vm_fault(struct drm_device *dev, int unit)
385 u32 valo = nv_rd32(dev, 0x2804 + (unit * 0x10)); 419 u32 valo = nv_rd32(dev, 0x2804 + (unit * 0x10));
386 u32 vahi = nv_rd32(dev, 0x2808 + (unit * 0x10)); 420 u32 vahi = nv_rd32(dev, 0x2808 + (unit * 0x10));
387 u32 stat = nv_rd32(dev, 0x280c + (unit * 0x10)); 421 u32 stat = nv_rd32(dev, 0x280c + (unit * 0x10));
422 u32 client = (stat & 0x00001f00) >> 8;
388 423
389 NV_INFO(dev, "PFIFO: %s fault at 0x%010llx [", 424 NV_INFO(dev, "PFIFO: %s fault at 0x%010llx [",
390 (stat & 0x00000080) ? "write" : "read", (u64)vahi << 32 | valo); 425 (stat & 0x00000080) ? "write" : "read", (u64)vahi << 32 | valo);
391 nouveau_enum_print(nvc0_fifo_fault_reason, stat & 0x0000000f); 426 nouveau_enum_print(nvc0_fifo_fault_reason, stat & 0x0000000f);
392 printk("] from "); 427 printk("] from ");
393 nouveau_enum_print(nvc0_fifo_fault_unit, unit); 428 nouveau_enum_print(nvc0_fifo_fault_unit, unit);
429 if (stat & 0x00000040) {
430 printk("/");
431 nouveau_enum_print(nvc0_fifo_fault_hubclient, client);
432 } else {
433 printk("/GPC%d/", (stat & 0x1f000000) >> 24);
434 nouveau_enum_print(nvc0_fifo_fault_gpcclient, client);
435 }
394 printk(" on channel 0x%010llx\n", (u64)inst << 12); 436 printk(" on channel 0x%010llx\n", (u64)inst << 12);
395} 437}
396 438
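
With the extra tables, the fault status word at 0x280c + unit * 0x10 gets a fairly complete interpretation: bits 3:0 select the reason, bit 6 says whether the client lives in the hub or in a GPC, bit 7 distinguishes writes from reads, bits 12:8 carry the client id, and bits 28:24 the GPC index. A small decoder over those fields, with bit positions taken directly from the masks in the diff:

	/* Decode of the PFIFO VM-fault status word, per the masks used above. */
	struct nvc0_vm_fault {
		unsigned reason;	/* stat & 0x0000000f */
		bool hub;		/* stat & 0x00000040: hub client, else GPC */
		bool write;		/* stat & 0x00000080 */
		unsigned client;	/* (stat & 0x00001f00) >> 8 */
		unsigned gpc;		/* (stat & 0x1f000000) >> 24, GPC faults only */
	};

	static struct nvc0_vm_fault nvc0_decode_fault(u32 stat)
	{
		struct nvc0_vm_fault f = {
			.reason = stat & 0x0000000f,
			.hub    = stat & 0x00000040,
			.write  = stat & 0x00000080,
			.client = (stat & 0x00001f00) >> 8,
			.gpc    = (stat & 0x1f000000) >> 24,
		};
		return f;
	}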
diff --git a/drivers/gpu/drm/nouveau/nvc0_graph.c b/drivers/gpu/drm/nouveau/nvc0_graph.c
index 3de9b721d8db..ca6db204d644 100644
--- a/drivers/gpu/drm/nouveau/nvc0_graph.c
+++ b/drivers/gpu/drm/nouveau/nvc0_graph.c
@@ -30,27 +30,40 @@
30#include "nouveau_mm.h" 30#include "nouveau_mm.h"
31#include "nvc0_graph.h" 31#include "nvc0_graph.h"
32 32
33static void nvc0_graph_isr(struct drm_device *); 33static int
34static void nvc0_runk140_isr(struct drm_device *); 34nvc0_graph_load_context(struct nouveau_channel *chan)
35static int nvc0_graph_unload_context_to(struct drm_device *dev, u64 chan);
36
37void
38nvc0_graph_fifo_access(struct drm_device *dev, bool enabled)
39{ 35{
36 struct drm_device *dev = chan->dev;
37
38 nv_wr32(dev, 0x409840, 0x00000030);
39 nv_wr32(dev, 0x409500, 0x80000000 | chan->ramin->vinst >> 12);
40 nv_wr32(dev, 0x409504, 0x00000003);
41 if (!nv_wait(dev, 0x409800, 0x00000010, 0x00000010))
42 NV_ERROR(dev, "PGRAPH: load_ctx timeout\n");
43
44 return 0;
40} 45}
41 46
42struct nouveau_channel * 47static int
43nvc0_graph_channel(struct drm_device *dev) 48nvc0_graph_unload_context_to(struct drm_device *dev, u64 chan)
44{ 49{
45 return NULL; 50 nv_wr32(dev, 0x409840, 0x00000003);
51 nv_wr32(dev, 0x409500, 0x80000000 | chan >> 12);
52 nv_wr32(dev, 0x409504, 0x00000009);
53 if (!nv_wait(dev, 0x409800, 0x00000001, 0x00000000)) {
54 NV_ERROR(dev, "PGRAPH: unload_ctx timeout\n");
55 return -EBUSY;
56 }
57
58 return 0;
46} 59}
47 60
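
Both context operations now sit at the top of the file and go through the same CTXCTL mailbox: 0x409840 arms the completion bits to watch, 0x409500 takes the channel's instance address (bit 31 marking it valid), 0x409504 strobes the command (0x03 for load/bind, 0x09 for save/unload here), and 0x409800 reports completion. A generic sketch of that handshake, treating the command codes as opaque values lifted from the diff rather than documented hardware behaviour:

	/* Sketch of the PGRAPH CTXCTL mailbox handshake used by load/unload. */
	static int nvc0_ctxctl_cmd(struct drm_device *dev, u64 inst,
				   u32 arm, u32 cmd, u32 ack_mask, u32 ack_val)
	{
		nv_wr32(dev, 0x409840, arm);			/* bits to report */
		nv_wr32(dev, 0x409500, 0x80000000 | inst >> 12); /* channel inst */
		nv_wr32(dev, 0x409504, cmd);			/* fire the command */
		if (!nv_wait(dev, 0x409800, ack_mask, ack_val))
			return -EBUSY;
		return 0;
	}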
48static int 61static int
49nvc0_graph_construct_context(struct nouveau_channel *chan) 62nvc0_graph_construct_context(struct nouveau_channel *chan)
50{ 63{
51 struct drm_nouveau_private *dev_priv = chan->dev->dev_private; 64 struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
52 struct nvc0_graph_priv *priv = dev_priv->engine.graph.priv; 65 struct nvc0_graph_priv *priv = nv_engine(chan->dev, NVOBJ_ENGINE_GR);
53 struct nvc0_graph_chan *grch = chan->pgraph_ctx; 66 struct nvc0_graph_chan *grch = chan->engctx[NVOBJ_ENGINE_GR];
54 struct drm_device *dev = chan->dev; 67 struct drm_device *dev = chan->dev;
55 int ret, i; 68 int ret, i;
56 u32 *ctx; 69 u32 *ctx;
@@ -89,9 +102,8 @@ nvc0_graph_construct_context(struct nouveau_channel *chan)
89static int 102static int
90nvc0_graph_create_context_mmio_list(struct nouveau_channel *chan) 103nvc0_graph_create_context_mmio_list(struct nouveau_channel *chan)
91{ 104{
92 struct drm_nouveau_private *dev_priv = chan->dev->dev_private; 105 struct nvc0_graph_priv *priv = nv_engine(chan->dev, NVOBJ_ENGINE_GR);
93 struct nvc0_graph_priv *priv = dev_priv->engine.graph.priv; 106 struct nvc0_graph_chan *grch = chan->engctx[NVOBJ_ENGINE_GR];
94 struct nvc0_graph_chan *grch = chan->pgraph_ctx;
95 struct drm_device *dev = chan->dev; 107 struct drm_device *dev = chan->dev;
96 int i = 0, gpc, tp, ret; 108 int i = 0, gpc, tp, ret;
97 u32 magic; 109 u32 magic;
@@ -158,29 +170,27 @@ nvc0_graph_create_context_mmio_list(struct nouveau_channel *chan)
158 return 0; 170 return 0;
159} 171}
160 172
161int 173static int
162nvc0_graph_create_context(struct nouveau_channel *chan) 174nvc0_graph_context_new(struct nouveau_channel *chan, int engine)
163{ 175{
164 struct drm_nouveau_private *dev_priv = chan->dev->dev_private; 176 struct drm_device *dev = chan->dev;
177 struct drm_nouveau_private *dev_priv = dev->dev_private;
165 struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem; 178 struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
166 struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; 179 struct nvc0_graph_priv *priv = nv_engine(dev, engine);
167 struct nvc0_graph_priv *priv = pgraph->priv;
168 struct nvc0_graph_chan *grch; 180 struct nvc0_graph_chan *grch;
169 struct drm_device *dev = chan->dev;
170 struct nouveau_gpuobj *grctx; 181 struct nouveau_gpuobj *grctx;
171 int ret, i; 182 int ret, i;
172 183
173 chan->pgraph_ctx = kzalloc(sizeof(*grch), GFP_KERNEL); 184 grch = kzalloc(sizeof(*grch), GFP_KERNEL);
174 if (!chan->pgraph_ctx) 185 if (!grch)
175 return -ENOMEM; 186 return -ENOMEM;
176 grch = chan->pgraph_ctx; 187 chan->engctx[NVOBJ_ENGINE_GR] = grch;
177 188
178 ret = nouveau_gpuobj_new(dev, NULL, priv->grctx_size, 256, 189 ret = nouveau_gpuobj_new(dev, NULL, priv->grctx_size, 256,
179 NVOBJ_FLAG_VM | NVOBJ_FLAG_ZERO_ALLOC, 190 NVOBJ_FLAG_VM | NVOBJ_FLAG_ZERO_ALLOC,
180 &grch->grctx); 191 &grch->grctx);
181 if (ret) 192 if (ret)
182 goto error; 193 goto error;
183 chan->ramin_grctx = grch->grctx;
184 grctx = grch->grctx; 194 grctx = grch->grctx;
185 195
186 ret = nvc0_graph_create_context_mmio_list(chan); 196 ret = nvc0_graph_create_context_mmio_list(chan);
@@ -200,104 +210,49 @@ nvc0_graph_create_context(struct nouveau_channel *chan)
200 for (i = 0; i < priv->grctx_size; i += 4) 210 for (i = 0; i < priv->grctx_size; i += 4)
201 nv_wo32(grctx, i, priv->grctx_vals[i / 4]); 211 nv_wo32(grctx, i, priv->grctx_vals[i / 4]);
202 212
203 nv_wo32(grctx, 0xf4, 0); 213 nv_wo32(grctx, 0xf4, 0);
204 nv_wo32(grctx, 0xf8, 0); 214 nv_wo32(grctx, 0xf8, 0);
205 nv_wo32(grctx, 0x10, grch->mmio_nr); 215 nv_wo32(grctx, 0x10, grch->mmio_nr);
206 nv_wo32(grctx, 0x14, lower_32_bits(grch->mmio->vinst)); 216 nv_wo32(grctx, 0x14, lower_32_bits(grch->mmio->vinst));
207 nv_wo32(grctx, 0x18, upper_32_bits(grch->mmio->vinst)); 217 nv_wo32(grctx, 0x18, upper_32_bits(grch->mmio->vinst));
208 nv_wo32(grctx, 0x1c, 1); 218 nv_wo32(grctx, 0x1c, 1);
209 nv_wo32(grctx, 0x20, 0); 219 nv_wo32(grctx, 0x20, 0);
210 nv_wo32(grctx, 0x28, 0); 220 nv_wo32(grctx, 0x28, 0);
211 nv_wo32(grctx, 0x2c, 0); 221 nv_wo32(grctx, 0x2c, 0);
212 pinstmem->flush(dev); 222 pinstmem->flush(dev);
213 return 0; 223 return 0;
214 224
215error: 225error:
216 pgraph->destroy_context(chan); 226 priv->base.context_del(chan, engine);
217 return ret; 227 return ret;
218} 228}
219 229
220void 230static void
221nvc0_graph_destroy_context(struct nouveau_channel *chan) 231nvc0_graph_context_del(struct nouveau_channel *chan, int engine)
222{ 232{
223 struct nvc0_graph_chan *grch; 233 struct nvc0_graph_chan *grch = chan->engctx[engine];
224
225 grch = chan->pgraph_ctx;
226 chan->pgraph_ctx = NULL;
227 if (!grch)
228 return;
229 234
230 nouveau_gpuobj_ref(NULL, &grch->mmio); 235 nouveau_gpuobj_ref(NULL, &grch->mmio);
231 nouveau_gpuobj_ref(NULL, &grch->unk418810); 236 nouveau_gpuobj_ref(NULL, &grch->unk418810);
232 nouveau_gpuobj_ref(NULL, &grch->unk40800c); 237 nouveau_gpuobj_ref(NULL, &grch->unk40800c);
233 nouveau_gpuobj_ref(NULL, &grch->unk408004); 238 nouveau_gpuobj_ref(NULL, &grch->unk408004);
234 nouveau_gpuobj_ref(NULL, &grch->grctx); 239 nouveau_gpuobj_ref(NULL, &grch->grctx);
235 chan->ramin_grctx = NULL; 240 chan->engctx[engine] = NULL;
236} 241}
237 242
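
These two hunks carry the mechanical part of the conversion: per-channel graph state leaves the dedicated chan->pgraph_ctx / chan->ramin_grctx fields and lives in the generic chan->engctx[] array, indexed by engine id, while the engine registers itself as a set of hooks. Reduced to its essentials, the interface style looks roughly like this (an illustration of the pattern, not nouveau's exact definitions):

	/* Illustrative reduction of the exec-engine interface being adopted. */
	struct exec_engine {
		void (*destroy)(struct drm_device *, int engine);
		int  (*init)(struct drm_device *, int engine);
		int  (*fini)(struct drm_device *, int engine);
		int  (*context_new)(struct nouveau_channel *, int engine);
		void (*context_del)(struct nouveau_channel *, int engine);
	};

	/* Each engine keeps per-channel state in one generic slot, so the
	 * core can tear any engine down the same way: */
	static void channel_takedown(struct nouveau_channel *chan,
				     struct exec_engine *eng)
	{
		if (chan->engctx[NVOBJ_ENGINE_GR])
			eng->context_del(chan, NVOBJ_ENGINE_GR);
	}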
238int 243static int
239nvc0_graph_load_context(struct nouveau_channel *chan) 244nvc0_graph_object_new(struct nouveau_channel *chan, int engine,
245 u32 handle, u16 class)
240{ 246{
241 struct drm_device *dev = chan->dev;
242
243 nv_wr32(dev, 0x409840, 0x00000030);
244 nv_wr32(dev, 0x409500, 0x80000000 | chan->ramin->vinst >> 12);
245 nv_wr32(dev, 0x409504, 0x00000003);
246 if (!nv_wait(dev, 0x409800, 0x00000010, 0x00000010))
247 NV_ERROR(dev, "PGRAPH: load_ctx timeout\n");
248
249 return 0; 247 return 0;
250} 248}
251 249
252static int 250static int
253nvc0_graph_unload_context_to(struct drm_device *dev, u64 chan) 251nvc0_graph_fini(struct drm_device *dev, int engine)
254{ 252{
255 nv_wr32(dev, 0x409840, 0x00000003);
256 nv_wr32(dev, 0x409500, 0x80000000 | chan >> 12);
257 nv_wr32(dev, 0x409504, 0x00000009);
258 if (!nv_wait(dev, 0x409800, 0x00000001, 0x00000000)) {
259 NV_ERROR(dev, "PGRAPH: unload_ctx timeout\n");
260 return -EBUSY;
261 }
262
263 return 0; 253 return 0;
264} 254}
265 255
266int
267nvc0_graph_unload_context(struct drm_device *dev)
268{
269 u64 inst = (u64)(nv_rd32(dev, 0x409b00) & 0x0fffffff) << 12;
270 return nvc0_graph_unload_context_to(dev, inst);
271}
272
273static void
274nvc0_graph_destroy(struct drm_device *dev)
275{
276 struct drm_nouveau_private *dev_priv = dev->dev_private;
277 struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
278 struct nvc0_graph_priv *priv;
279
280 priv = pgraph->priv;
281 if (!priv)
282 return;
283
284 nouveau_irq_unregister(dev, 12);
285 nouveau_irq_unregister(dev, 25);
286
287 nouveau_gpuobj_ref(NULL, &priv->unk4188b8);
288 nouveau_gpuobj_ref(NULL, &priv->unk4188b4);
289
290 if (priv->grctx_vals)
291 kfree(priv->grctx_vals);
292 kfree(priv);
293}
294
295void
296nvc0_graph_takedown(struct drm_device *dev)
297{
298 nvc0_graph_destroy(dev);
299}
300
301static int 256static int
302nvc0_graph_mthd_page_flip(struct nouveau_channel *chan, 257nvc0_graph_mthd_page_flip(struct nouveau_channel *chan,
303 u32 class, u32 mthd, u32 data) 258 u32 class, u32 mthd, u32 data)
@@ -306,119 +261,10 @@ nvc0_graph_mthd_page_flip(struct nouveau_channel *chan,
306 return 0; 261 return 0;
307} 262}
308 263
309static int
310nvc0_graph_create(struct drm_device *dev)
311{
312 struct drm_nouveau_private *dev_priv = dev->dev_private;
313 struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
314 struct nvc0_graph_priv *priv;
315 int ret, gpc, i;
316
317 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
318 if (!priv)
319 return -ENOMEM;
320 pgraph->priv = priv;
321
322 ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 256, 0, &priv->unk4188b4);
323 if (ret)
324 goto error;
325
326 ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 256, 0, &priv->unk4188b8);
327 if (ret)
328 goto error;
329
330 for (i = 0; i < 0x1000; i += 4) {
331 nv_wo32(priv->unk4188b4, i, 0x00000010);
332 nv_wo32(priv->unk4188b8, i, 0x00000010);
333 }
334
335 priv->gpc_nr = nv_rd32(dev, 0x409604) & 0x0000001f;
336 priv->rop_nr = (nv_rd32(dev, 0x409604) & 0x001f0000) >> 16;
337 for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
338 priv->tp_nr[gpc] = nv_rd32(dev, GPC_UNIT(gpc, 0x2608));
339 priv->tp_total += priv->tp_nr[gpc];
340 }
341
342 /*XXX: these need figuring out... */
343 switch (dev_priv->chipset) {
344 case 0xc0:
345 if (priv->tp_total == 11) { /* 465, 3/4/4/0, 4 */
346 priv->magic_not_rop_nr = 0x07;
347 /* filled values up to tp_total, the rest 0 */
348 priv->magicgpc980[0] = 0x22111000;
349 priv->magicgpc980[1] = 0x00000233;
350 priv->magicgpc980[2] = 0x00000000;
351 priv->magicgpc980[3] = 0x00000000;
352 priv->magicgpc918 = 0x000ba2e9;
353 } else
354 if (priv->tp_total == 14) { /* 470, 3/3/4/4, 5 */
355 priv->magic_not_rop_nr = 0x05;
356 priv->magicgpc980[0] = 0x11110000;
357 priv->magicgpc980[1] = 0x00233222;
358 priv->magicgpc980[2] = 0x00000000;
359 priv->magicgpc980[3] = 0x00000000;
360 priv->magicgpc918 = 0x00092493;
361 } else
362 if (priv->tp_total == 15) { /* 480, 3/4/4/4, 6 */
363 priv->magic_not_rop_nr = 0x06;
364 priv->magicgpc980[0] = 0x11110000;
365 priv->magicgpc980[1] = 0x03332222;
366 priv->magicgpc980[2] = 0x00000000;
367 priv->magicgpc980[3] = 0x00000000;
368 priv->magicgpc918 = 0x00088889;
369 }
370 break;
371 case 0xc3: /* 450, 4/0/0/0, 2 */
372 priv->magic_not_rop_nr = 0x03;
373 priv->magicgpc980[0] = 0x00003210;
374 priv->magicgpc980[1] = 0x00000000;
375 priv->magicgpc980[2] = 0x00000000;
376 priv->magicgpc980[3] = 0x00000000;
377 priv->magicgpc918 = 0x00200000;
378 break;
379 case 0xc4: /* 460, 3/4/0/0, 4 */
380 priv->magic_not_rop_nr = 0x01;
381 priv->magicgpc980[0] = 0x02321100;
382 priv->magicgpc980[1] = 0x00000000;
383 priv->magicgpc980[2] = 0x00000000;
384 priv->magicgpc980[3] = 0x00000000;
385 priv->magicgpc918 = 0x00124925;
386 break;
387 }
388
389 if (!priv->magic_not_rop_nr) {
390 NV_ERROR(dev, "PGRAPH: unknown config: %d/%d/%d/%d, %d\n",
391 priv->tp_nr[0], priv->tp_nr[1], priv->tp_nr[2],
392 priv->tp_nr[3], priv->rop_nr);
393 /* use 0xc3's values... */
394 priv->magic_not_rop_nr = 0x03;
395 priv->magicgpc980[0] = 0x00003210;
396 priv->magicgpc980[1] = 0x00000000;
397 priv->magicgpc980[2] = 0x00000000;
398 priv->magicgpc980[3] = 0x00000000;
399 priv->magicgpc918 = 0x00200000;
400 }
401
402 nouveau_irq_register(dev, 12, nvc0_graph_isr);
403 nouveau_irq_register(dev, 25, nvc0_runk140_isr);
404 NVOBJ_CLASS(dev, 0x902d, GR); /* 2D */
405 NVOBJ_CLASS(dev, 0x9039, GR); /* M2MF */
406 NVOBJ_MTHD (dev, 0x9039, 0x0500, nvc0_graph_mthd_page_flip);
407 NVOBJ_CLASS(dev, 0x9097, GR); /* 3D */
408 NVOBJ_CLASS(dev, 0x90c0, GR); /* COMPUTE */
409 return 0;
410
411error:
412 nvc0_graph_destroy(dev);
413 return ret;
414}
415
416static void 264static void
417nvc0_graph_init_obj418880(struct drm_device *dev) 265nvc0_graph_init_obj418880(struct drm_device *dev)
418{ 266{
419 struct drm_nouveau_private *dev_priv = dev->dev_private; 267 struct nvc0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
420 struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
421 struct nvc0_graph_priv *priv = pgraph->priv;
422 int i; 268 int i;
423 269
424 nv_wr32(dev, GPC_BCAST(0x0880), 0x00000000); 270 nv_wr32(dev, GPC_BCAST(0x0880), 0x00000000);
@@ -449,35 +295,42 @@ nvc0_graph_init_regs(struct drm_device *dev)
449static void 295static void
450nvc0_graph_init_gpc_0(struct drm_device *dev) 296nvc0_graph_init_gpc_0(struct drm_device *dev)
451{ 297{
452 struct drm_nouveau_private *dev_priv = dev->dev_private; 298 struct nvc0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
453 struct nvc0_graph_priv *priv = dev_priv->engine.graph.priv; 299 u32 data[TP_MAX / 8];
454 int gpc; 300 u8 tpnr[GPC_MAX];
455 301 int i, gpc, tpc;
456 // TP ROP UNKVAL(magic_not_rop_nr) 302
457 // 450: 4/0/0/0 2 3 303 /*
458 // 460: 3/4/0/0 4 1 304 * TP ROP UNKVAL(magic_not_rop_nr)
459 // 465: 3/4/4/0 4 7 305 * 450: 4/0/0/0 2 3
460 // 470: 3/3/4/4 5 5 306 * 460: 3/4/0/0 4 1
461 // 480: 3/4/4/4 6 6 307 * 465: 3/4/4/0 4 7
462 308 * 470: 3/3/4/4 5 5
463 // magicgpc918 309 * 480: 3/4/4/4 6 6
464 // 450: 00200000 00000000001000000000000000000000 310 *
465 // 460: 00124925 00000000000100100100100100100101 311 * magicgpc918
466 // 465: 000ba2e9 00000000000010111010001011101001 312 * 450: 00200000 00000000001000000000000000000000
467 // 470: 00092493 00000000000010010010010010010011 313 * 460: 00124925 00000000000100100100100100100101
468 // 480: 00088889 00000000000010001000100010001001 314 * 465: 000ba2e9 00000000000010111010001011101001
469 315 * 470: 00092493 00000000000010010010010010010011
470 /* filled values up to tp_total, remainder 0 */ 316 * 480: 00088889 00000000000010001000100010001001
471 // 450: 00003210 00000000 00000000 00000000 317 */
472 // 460: 02321100 00000000 00000000 00000000 318
473 // 465: 22111000 00000233 00000000 00000000 319 memset(data, 0x00, sizeof(data));
474 // 470: 11110000 00233222 00000000 00000000 320 memcpy(tpnr, priv->tp_nr, sizeof(priv->tp_nr));
475 // 480: 11110000 03332222 00000000 00000000 321 for (i = 0, gpc = -1; i < priv->tp_total; i++) {
476 322 do {
477 nv_wr32(dev, GPC_BCAST(0x0980), priv->magicgpc980[0]); 323 gpc = (gpc + 1) % priv->gpc_nr;
478 nv_wr32(dev, GPC_BCAST(0x0984), priv->magicgpc980[1]); 324 } while (!tpnr[gpc]);
479 nv_wr32(dev, GPC_BCAST(0x0988), priv->magicgpc980[2]); 325 tpc = priv->tp_nr[gpc] - tpnr[gpc]--;
480 nv_wr32(dev, GPC_BCAST(0x098c), priv->magicgpc980[3]); 326
327 data[i / 8] |= tpc << ((i % 8) * 4);
328 }
329
330 nv_wr32(dev, GPC_BCAST(0x0980), data[0]);
331 nv_wr32(dev, GPC_BCAST(0x0984), data[1]);
332 nv_wr32(dev, GPC_BCAST(0x0988), data[2]);
333 nv_wr32(dev, GPC_BCAST(0x098c), data[3]);
481 334
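
The hardcoded magicgpc980 tables disappear here because their content is computable: the four words hold one 4-bit entry per TP (eight entries per 32-bit word, filled up to tp_total, remainder zero), where entry i records the per-GPC TP index assigned by dealing TPs out round-robin across GPCs that still have capacity. A self-contained version of the packing loop, which reproduces e.g. the 0x00003210 word the old code hardcoded for the 4/0/0/0 (0xc3) layout:

	#define GPC_MAX	4
	#define TP_MAX	32

	/* Deal TPs out round-robin across GPCs, recording each TP's
	 * per-GPC index in a 4-bit field, eight fields per 32-bit word. */
	static void pack_tp_map(const u8 *tp_nr, int gpc_nr, int tp_total,
				u32 data[TP_MAX / 8])
	{
		u8 tpnr[GPC_MAX];
		int i, gpc, tpc;

		memset(data, 0, (TP_MAX / 8) * sizeof(u32));
		memcpy(tpnr, tp_nr, GPC_MAX);
		for (i = 0, gpc = -1; i < tp_total; i++) {
			do {
				gpc = (gpc + 1) % gpc_nr; /* next GPC with TPs left */
			} while (!tpnr[gpc]);
			tpc = tp_nr[gpc] - tpnr[gpc]--;	/* this GPC's next TP index */

			data[i / 8] |= tpc << ((i % 8) * 4);
		}
	}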
482 for (gpc = 0; gpc < priv->gpc_nr; gpc++) { 335 for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
483 nv_wr32(dev, GPC_UNIT(gpc, 0x0914), priv->magic_not_rop_nr << 8 | 336 nv_wr32(dev, GPC_UNIT(gpc, 0x0914), priv->magic_not_rop_nr << 8 |
@@ -509,8 +362,7 @@ nvc0_graph_init_units(struct drm_device *dev)
509static void 362static void
510nvc0_graph_init_gpc_1(struct drm_device *dev) 363nvc0_graph_init_gpc_1(struct drm_device *dev)
511{ 364{
512 struct drm_nouveau_private *dev_priv = dev->dev_private; 365 struct nvc0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
513 struct nvc0_graph_priv *priv = dev_priv->engine.graph.priv;
514 int gpc, tp; 366 int gpc, tp;
515 367
516 for (gpc = 0; gpc < priv->gpc_nr; gpc++) { 368 for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
@@ -535,8 +387,7 @@ nvc0_graph_init_gpc_1(struct drm_device *dev)
535static void 387static void
536nvc0_graph_init_rop(struct drm_device *dev) 388nvc0_graph_init_rop(struct drm_device *dev)
537{ 389{
538 struct drm_nouveau_private *dev_priv = dev->dev_private; 390 struct nvc0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
539 struct nvc0_graph_priv *priv = dev_priv->engine.graph.priv;
540 int rop; 391 int rop;
541 392
542 for (rop = 0; rop < priv->rop_nr; rop++) { 393 for (rop = 0; rop < priv->rop_nr; rop++) {
@@ -547,62 +398,36 @@ nvc0_graph_init_rop(struct drm_device *dev)
547 } 398 }
548} 399}
549 400
550static int 401static void
551nvc0_fuc_load_fw(struct drm_device *dev, u32 fuc_base, 402nvc0_graph_init_fuc(struct drm_device *dev, u32 fuc_base,
552 const char *code_fw, const char *data_fw) 403 struct nvc0_graph_fuc *code, struct nvc0_graph_fuc *data)
553{ 404{
554 const struct firmware *fw; 405 int i;
555 char name[32];
556 int ret, i;
557
558 snprintf(name, sizeof(name), "nouveau/%s", data_fw);
559 ret = request_firmware(&fw, name, &dev->pdev->dev);
560 if (ret) {
561 NV_ERROR(dev, "failed to load %s\n", data_fw);
562 return ret;
563 }
564 406
565 nv_wr32(dev, fuc_base + 0x01c0, 0x01000000); 407 nv_wr32(dev, fuc_base + 0x01c0, 0x01000000);
566 for (i = 0; i < fw->size / 4; i++) 408 for (i = 0; i < data->size / 4; i++)
567 nv_wr32(dev, fuc_base + 0x01c4, ((u32 *)fw->data)[i]); 409 nv_wr32(dev, fuc_base + 0x01c4, data->data[i]);
568 release_firmware(fw);
569
570 snprintf(name, sizeof(name), "nouveau/%s", code_fw);
571 ret = request_firmware(&fw, name, &dev->pdev->dev);
572 if (ret) {
573 NV_ERROR(dev, "failed to load %s\n", code_fw);
574 return ret;
575 }
576 410
577 nv_wr32(dev, fuc_base + 0x0180, 0x01000000); 411 nv_wr32(dev, fuc_base + 0x0180, 0x01000000);
578 for (i = 0; i < fw->size / 4; i++) { 412 for (i = 0; i < code->size / 4; i++) {
579 if ((i & 0x3f) == 0) 413 if ((i & 0x3f) == 0)
580 nv_wr32(dev, fuc_base + 0x0188, i >> 6); 414 nv_wr32(dev, fuc_base + 0x0188, i >> 6);
581 nv_wr32(dev, fuc_base + 0x0184, ((u32 *)fw->data)[i]); 415 nv_wr32(dev, fuc_base + 0x0184, code->data[i]);
582 } 416 }
583 release_firmware(fw);
584
585 return 0;
586} 417}
587 418
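
The rewritten helper makes the falcon (fuc) upload protocol visible: writing 0x01000000 to base + 0x1c0 (data segment) or base + 0x180 (code segment) appears to arm auto-incrementing transfers, after which each write to base + 0x1c4 / base + 0x184 streams one 32-bit word; code uploads additionally select a 256-byte page through base + 0x188 every 64 words. A sketch of the code-segment half, with the auto-increment interpretation being an assumption:

	/* Sketch of the falcon code upload above; 64 words = one 256-byte
	 * code page, selected through base + 0x188. */
	static void falcon_load_code(struct drm_device *dev, u32 base,
				     const u32 *code, u32 words)
	{
		u32 i;

		nv_wr32(dev, base + 0x0180, 0x01000000);
		for (i = 0; i < words; i++) {
			if ((i & 0x3f) == 0)		/* new 256-byte page */
				nv_wr32(dev, base + 0x0188, i >> 6);
			nv_wr32(dev, base + 0x0184, code[i]);
		}
	}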
588static int 419static int
589nvc0_graph_init_ctxctl(struct drm_device *dev) 420nvc0_graph_init_ctxctl(struct drm_device *dev)
590{ 421{
591 struct drm_nouveau_private *dev_priv = dev->dev_private; 422 struct nvc0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
592 struct nvc0_graph_priv *priv = dev_priv->engine.graph.priv;
593 u32 r000260; 423 u32 r000260;
594 int ret;
595 424
596 /* load fuc microcode */ 425 /* load fuc microcode */
597 r000260 = nv_mask(dev, 0x000260, 0x00000001, 0x00000000); 426 r000260 = nv_mask(dev, 0x000260, 0x00000001, 0x00000000);
598 ret = nvc0_fuc_load_fw(dev, 0x409000, "fuc409c", "fuc409d"); 427 nvc0_graph_init_fuc(dev, 0x409000, &priv->fuc409c, &priv->fuc409d);
599 if (ret == 0) 428 nvc0_graph_init_fuc(dev, 0x41a000, &priv->fuc41ac, &priv->fuc41ad);
600 ret = nvc0_fuc_load_fw(dev, 0x41a000, "fuc41ac", "fuc41ad");
601 nv_wr32(dev, 0x000260, r000260); 429 nv_wr32(dev, 0x000260, r000260);
602 430
603 if (ret)
604 return ret;
605
606 /* start both of them running */ 431 /* start both of them running */
607 nv_wr32(dev, 0x409840, 0xffffffff); 432 nv_wr32(dev, 0x409840, 0xffffffff);
608 nv_wr32(dev, 0x41a10c, 0x00000000); 433 nv_wr32(dev, 0x41a10c, 0x00000000);
@@ -644,41 +469,19 @@ nvc0_graph_init_ctxctl(struct drm_device *dev)
644 return 0; 469 return 0;
645} 470}
646 471
647int 472static int
648nvc0_graph_init(struct drm_device *dev) 473nvc0_graph_init(struct drm_device *dev, int engine)
649{ 474{
650 struct drm_nouveau_private *dev_priv = dev->dev_private;
651 struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
652 int ret; 475 int ret;
653 476
654 dev_priv->engine.graph.accel_blocked = true;
655
656 switch (dev_priv->chipset) {
657 case 0xc0:
658 case 0xc3:
659 case 0xc4:
660 break;
661 default:
662 NV_ERROR(dev, "PGRAPH: unsupported chipset, please report!\n");
663 if (nouveau_noaccel != 0)
664 return 0;
665 break;
666 }
667
668 nv_mask(dev, 0x000200, 0x18001000, 0x00000000); 477 nv_mask(dev, 0x000200, 0x18001000, 0x00000000);
669 nv_mask(dev, 0x000200, 0x18001000, 0x18001000); 478 nv_mask(dev, 0x000200, 0x18001000, 0x18001000);
670 479
671 if (!pgraph->priv) {
672 ret = nvc0_graph_create(dev);
673 if (ret)
674 return ret;
675 }
676
677 nvc0_graph_init_obj418880(dev); 480 nvc0_graph_init_obj418880(dev);
678 nvc0_graph_init_regs(dev); 481 nvc0_graph_init_regs(dev);
679 //nvc0_graph_init_unitplemented_magics(dev); 482 /*nvc0_graph_init_unitplemented_magics(dev);*/
680 nvc0_graph_init_gpc_0(dev); 483 nvc0_graph_init_gpc_0(dev);
681 //nvc0_graph_init_unitplemented_c242(dev); 484 /*nvc0_graph_init_unitplemented_c242(dev);*/
682 485
683 nv_wr32(dev, 0x400500, 0x00010001); 486 nv_wr32(dev, 0x400500, 0x00010001);
684 nv_wr32(dev, 0x400100, 0xffffffff); 487 nv_wr32(dev, 0x400100, 0xffffffff);
@@ -697,12 +500,13 @@ nvc0_graph_init(struct drm_device *dev)
697 nv_wr32(dev, 0x400054, 0x34ce3464); 500 nv_wr32(dev, 0x400054, 0x34ce3464);
698 501
699 ret = nvc0_graph_init_ctxctl(dev); 502 ret = nvc0_graph_init_ctxctl(dev);
700 if (ret == 0) 503 if (ret)
701 dev_priv->engine.graph.accel_blocked = false; 504 return ret;
505
702 return 0; 506 return 0;
703} 507}
704 508
705static int 509int
706nvc0_graph_isr_chid(struct drm_device *dev, u64 inst) 510nvc0_graph_isr_chid(struct drm_device *dev, u64 inst)
707{ 511{
708 struct drm_nouveau_private *dev_priv = dev->dev_private; 512 struct drm_nouveau_private *dev_priv = dev->dev_private;
@@ -806,3 +610,187 @@ nvc0_runk140_isr(struct drm_device *dev)
806 units &= ~(1 << unit); 610 units &= ~(1 << unit);
807 } 611 }
808} 612}
613
614static int
615nvc0_graph_create_fw(struct drm_device *dev, const char *fwname,
616 struct nvc0_graph_fuc *fuc)
617{
618 struct drm_nouveau_private *dev_priv = dev->dev_private;
619 const struct firmware *fw;
620 char f[32];
621 int ret;
622
623 snprintf(f, sizeof(f), "nouveau/nv%02x_%s", dev_priv->chipset, fwname);
624 ret = request_firmware(&fw, f, &dev->pdev->dev);
625 if (ret) {
626 snprintf(f, sizeof(f), "nouveau/%s", fwname);
627 ret = request_firmware(&fw, f, &dev->pdev->dev);
628 if (ret) {
629 NV_ERROR(dev, "failed to load %s\n", fwname);
630 return ret;
631 }
632 }
633
634 fuc->size = fw->size;
635 fuc->data = kmemdup(fw->data, fuc->size, GFP_KERNEL);
636 release_firmware(fw);
637 return (fuc->data != NULL) ? 0 : -ENOMEM;
638}
639
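
nvc0_graph_create_fw also changes when firmware is fetched: blobs are requested once at engine-creation time and cached in nvc0_graph_priv (as nvc0_graph_fuc { data, size } via kmemdup), so later init calls never need request_firmware at a point where the filesystem may be unavailable, such as resume. The lookup tries a chipset-qualified name first and falls back to a generic one; the same fallback idea in isolation, using only the stock firmware API:

	#include <linux/firmware.h>

	/* Generic version of the two-step lookup used above: try each name
	 * in order, return the first blob that loads. */
	static int request_firmware_first(const struct firmware **fw,
					  struct device *dev,
					  const char *const names[], int count)
	{
		int i, ret = -ENOENT;

		for (i = 0; i < count; i++) {
			ret = request_firmware(fw, names[i], dev);
			if (ret == 0)
				return 0;	/* caller must release_firmware() */
		}
		return ret;
	}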
640static void
641nvc0_graph_destroy_fw(struct nvc0_graph_fuc *fuc)
642{
643 if (fuc->data) {
644 kfree(fuc->data);
645 fuc->data = NULL;
646 }
647}
648
649static void
650nvc0_graph_destroy(struct drm_device *dev, int engine)
651{
652 struct nvc0_graph_priv *priv = nv_engine(dev, engine);
653
654 nvc0_graph_destroy_fw(&priv->fuc409c);
655 nvc0_graph_destroy_fw(&priv->fuc409d);
656 nvc0_graph_destroy_fw(&priv->fuc41ac);
657 nvc0_graph_destroy_fw(&priv->fuc41ad);
658
659 nouveau_irq_unregister(dev, 12);
660 nouveau_irq_unregister(dev, 25);
661
662 nouveau_gpuobj_ref(NULL, &priv->unk4188b8);
663 nouveau_gpuobj_ref(NULL, &priv->unk4188b4);
664
665 if (priv->grctx_vals)
666 kfree(priv->grctx_vals);
667
668 NVOBJ_ENGINE_DEL(dev, GR);
669 kfree(priv);
670}
671
672int
673nvc0_graph_create(struct drm_device *dev)
674{
675 struct drm_nouveau_private *dev_priv = dev->dev_private;
676 struct nvc0_graph_priv *priv;
677 int ret, gpc, i;
678
679 switch (dev_priv->chipset) {
680 case 0xc0:
681 case 0xc3:
682 case 0xc4:
683 break;
684 default:
685 NV_ERROR(dev, "PGRAPH: unsupported chipset, please report!\n");
686 return 0;
687 }
688
689 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
690 if (!priv)
691 return -ENOMEM;
692
693 priv->base.destroy = nvc0_graph_destroy;
694 priv->base.init = nvc0_graph_init;
695 priv->base.fini = nvc0_graph_fini;
696 priv->base.context_new = nvc0_graph_context_new;
697 priv->base.context_del = nvc0_graph_context_del;
698 priv->base.object_new = nvc0_graph_object_new;
699
700 NVOBJ_ENGINE_ADD(dev, GR, &priv->base);
701 nouveau_irq_register(dev, 12, nvc0_graph_isr);
702 nouveau_irq_register(dev, 25, nvc0_runk140_isr);
703
704 if (nvc0_graph_create_fw(dev, "fuc409c", &priv->fuc409c) ||
705 nvc0_graph_create_fw(dev, "fuc409d", &priv->fuc409d) ||
706 nvc0_graph_create_fw(dev, "fuc41ac", &priv->fuc41ac) ||
707 nvc0_graph_create_fw(dev, "fuc41ad", &priv->fuc41ad)) {
708 ret = 0;
709 goto error;
710 }
711
712
713 ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 256, 0, &priv->unk4188b4);
714 if (ret)
715 goto error;
716
717 ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 256, 0, &priv->unk4188b8);
718 if (ret)
719 goto error;
720
721 for (i = 0; i < 0x1000; i += 4) {
722 nv_wo32(priv->unk4188b4, i, 0x00000010);
723 nv_wo32(priv->unk4188b8, i, 0x00000010);
724 }
725
726 priv->gpc_nr = nv_rd32(dev, 0x409604) & 0x0000001f;
727 priv->rop_nr = (nv_rd32(dev, 0x409604) & 0x001f0000) >> 16;
728 for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
729 priv->tp_nr[gpc] = nv_rd32(dev, GPC_UNIT(gpc, 0x2608));
730 priv->tp_total += priv->tp_nr[gpc];
731 }
732
733 /*XXX: these need figuring out... */
734 switch (dev_priv->chipset) {
735 case 0xc0:
736 if (priv->tp_total == 11) { /* 465, 3/4/4/0, 4 */
737 priv->magic_not_rop_nr = 0x07;
738 /* filled values up to tp_total, the rest 0 */
739 priv->magicgpc918 = 0x000ba2e9;
740 } else
741 if (priv->tp_total == 14) { /* 470, 3/3/4/4, 5 */
742 priv->magic_not_rop_nr = 0x05;
743 priv->magicgpc918 = 0x00092493;
744 } else
745 if (priv->tp_total == 15) { /* 480, 3/4/4/4, 6 */
746 priv->magic_not_rop_nr = 0x06;
747 priv->magicgpc918 = 0x00088889;
748 }
749 break;
750 case 0xc3: /* 450, 4/0/0/0, 2 */
751 priv->magic_not_rop_nr = 0x03;
752 priv->magicgpc918 = 0x00200000;
753 break;
754 case 0xc4: /* 460, 3/4/0/0, 4 */
755 priv->magic_not_rop_nr = 0x01;
756 priv->magicgpc918 = 0x00124925;
757 break;
758 }
759
760 if (!priv->magic_not_rop_nr) {
761 NV_ERROR(dev, "PGRAPH: unknown config: %d/%d/%d/%d, %d\n",
762 priv->tp_nr[0], priv->tp_nr[1], priv->tp_nr[2],
763 priv->tp_nr[3], priv->rop_nr);
764 /* use 0xc3's values... */
765 priv->magic_not_rop_nr = 0x03;
766 priv->magicgpc918 = 0x00200000;
767 }
768
769 NVOBJ_CLASS(dev, 0x902d, GR); /* 2D */
770 NVOBJ_CLASS(dev, 0x9039, GR); /* M2MF */
771 NVOBJ_MTHD (dev, 0x9039, 0x0500, nvc0_graph_mthd_page_flip);
772 NVOBJ_CLASS(dev, 0x9097, GR); /* 3D */
773 NVOBJ_CLASS(dev, 0x90c0, GR); /* COMPUTE */
774 return 0;
775
776error:
777 nvc0_graph_destroy(dev, NVOBJ_ENGINE_GR);
778 return ret;
779}
780
781MODULE_FIRMWARE("nouveau/nvc0_fuc409c");
782MODULE_FIRMWARE("nouveau/nvc0_fuc409d");
783MODULE_FIRMWARE("nouveau/nvc0_fuc41ac");
784MODULE_FIRMWARE("nouveau/nvc0_fuc41ad");
785MODULE_FIRMWARE("nouveau/nvc3_fuc409c");
786MODULE_FIRMWARE("nouveau/nvc3_fuc409d");
787MODULE_FIRMWARE("nouveau/nvc3_fuc41ac");
788MODULE_FIRMWARE("nouveau/nvc3_fuc41ad");
789MODULE_FIRMWARE("nouveau/nvc4_fuc409c");
790MODULE_FIRMWARE("nouveau/nvc4_fuc409d");
791MODULE_FIRMWARE("nouveau/nvc4_fuc41ac");
792MODULE_FIRMWARE("nouveau/nvc4_fuc41ad");
793MODULE_FIRMWARE("nouveau/fuc409c");
794MODULE_FIRMWARE("nouveau/fuc409d");
795MODULE_FIRMWARE("nouveau/fuc41ac");
796MODULE_FIRMWARE("nouveau/fuc41ad");
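
The MODULE_FIRMWARE lines load nothing themselves; they record each firmware path in the module's metadata so packaging tools (initramfs generators, modinfo) know which files the module may request at runtime. One declaration per file the driver might ask for:

	/* Visible via `modinfo nouveau`; purely declarative. */
	MODULE_FIRMWARE("nouveau/nvc0_fuc409c");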
diff --git a/drivers/gpu/drm/nouveau/nvc0_graph.h b/drivers/gpu/drm/nouveau/nvc0_graph.h
index 40e26f9c56c4..f5d184e0689d 100644
--- a/drivers/gpu/drm/nouveau/nvc0_graph.h
+++ b/drivers/gpu/drm/nouveau/nvc0_graph.h
@@ -28,13 +28,25 @@
28#define GPC_MAX 4 28#define GPC_MAX 4
29#define TP_MAX 32 29#define TP_MAX 32
30 30
31#define ROP_BCAST(r) (0x408800 + (r)) 31#define ROP_BCAST(r) (0x408800 + (r))
32#define ROP_UNIT(u,r) (0x410000 + (u) * 0x400 + (r)) 32#define ROP_UNIT(u, r) (0x410000 + (u) * 0x400 + (r))
33#define GPC_BCAST(r) (0x418000 + (r)) 33#define GPC_BCAST(r) (0x418000 + (r))
34#define GPC_UNIT(t,r) (0x500000 + (t) * 0x8000 + (r)) 34#define GPC_UNIT(t, r) (0x500000 + (t) * 0x8000 + (r))
35#define TP_UNIT(t,m,r) (0x504000 + (t) * 0x8000 + (m) * 0x800 + (r)) 35#define TP_UNIT(t, m, r) (0x504000 + (t) * 0x8000 + (m) * 0x800 + (r))
36
37struct nvc0_graph_fuc {
38 u32 *data;
39 u32 size;
40};
36 41
37struct nvc0_graph_priv { 42struct nvc0_graph_priv {
43 struct nouveau_exec_engine base;
44
45 struct nvc0_graph_fuc fuc409c;
46 struct nvc0_graph_fuc fuc409d;
47 struct nvc0_graph_fuc fuc41ac;
48 struct nvc0_graph_fuc fuc41ad;
49
38 u8 gpc_nr; 50 u8 gpc_nr;
39 u8 rop_nr; 51 u8 rop_nr;
40 u8 tp_nr[GPC_MAX]; 52 u8 tp_nr[GPC_MAX];
@@ -46,15 +58,14 @@ struct nvc0_graph_priv {
46 struct nouveau_gpuobj *unk4188b8; 58 struct nouveau_gpuobj *unk4188b8;
47 59
48 u8 magic_not_rop_nr; 60 u8 magic_not_rop_nr;
49 u32 magicgpc980[4];
50 u32 magicgpc918; 61 u32 magicgpc918;
51}; 62};
52 63
53struct nvc0_graph_chan { 64struct nvc0_graph_chan {
54 struct nouveau_gpuobj *grctx; 65 struct nouveau_gpuobj *grctx;
55 struct nouveau_gpuobj *unk408004; // 0x418810 too 66 struct nouveau_gpuobj *unk408004; /* 0x418810 too */
56 struct nouveau_gpuobj *unk40800c; // 0x419004 too 67 struct nouveau_gpuobj *unk40800c; /* 0x419004 too */
57 struct nouveau_gpuobj *unk418810; // 0x419848 too 68 struct nouveau_gpuobj *unk418810; /* 0x419848 too */
58 struct nouveau_gpuobj *mmio; 69 struct nouveau_gpuobj *mmio;
59 int mmio_nr; 70 int mmio_nr;
60}; 71};
diff --git a/drivers/gpu/drm/nouveau/nvc0_grctx.c b/drivers/gpu/drm/nouveau/nvc0_grctx.c
index f880ff776db8..6df066114133 100644
--- a/drivers/gpu/drm/nouveau/nvc0_grctx.c
+++ b/drivers/gpu/drm/nouveau/nvc0_grctx.c
@@ -1623,7 +1623,7 @@ nvc0_grctx_generate_rop(struct drm_device *dev)
1623{ 1623{
1624 struct drm_nouveau_private *dev_priv = dev->dev_private; 1624 struct drm_nouveau_private *dev_priv = dev->dev_private;
1625 1625
1626 // ROPC_BROADCAST 1626 /* ROPC_BROADCAST */
1627 nv_wr32(dev, 0x408800, 0x02802a3c); 1627 nv_wr32(dev, 0x408800, 0x02802a3c);
1628 nv_wr32(dev, 0x408804, 0x00000040); 1628 nv_wr32(dev, 0x408804, 0x00000040);
1629 nv_wr32(dev, 0x408808, 0x0003e00d); 1629 nv_wr32(dev, 0x408808, 0x0003e00d);
@@ -1647,7 +1647,7 @@ nvc0_grctx_generate_gpc(struct drm_device *dev)
1647{ 1647{
1648 int i; 1648 int i;
1649 1649
1650 // GPC_BROADCAST 1650 /* GPC_BROADCAST */
1651 nv_wr32(dev, 0x418380, 0x00000016); 1651 nv_wr32(dev, 0x418380, 0x00000016);
1652 nv_wr32(dev, 0x418400, 0x38004e00); 1652 nv_wr32(dev, 0x418400, 0x38004e00);
1653 nv_wr32(dev, 0x418404, 0x71e0ffff); 1653 nv_wr32(dev, 0x418404, 0x71e0ffff);
@@ -1728,7 +1728,7 @@ nvc0_grctx_generate_tp(struct drm_device *dev)
1728{ 1728{
1729 struct drm_nouveau_private *dev_priv = dev->dev_private; 1729 struct drm_nouveau_private *dev_priv = dev->dev_private;
1730 1730
1731 // GPC_BROADCAST.TP_BROADCAST 1731 /* GPC_BROADCAST.TP_BROADCAST */
1732 nv_wr32(dev, 0x419848, 0x00000000); 1732 nv_wr32(dev, 0x419848, 0x00000000);
1733 nv_wr32(dev, 0x419864, 0x0000012a); 1733 nv_wr32(dev, 0x419864, 0x0000012a);
1734 nv_wr32(dev, 0x419888, 0x00000000); 1734 nv_wr32(dev, 0x419888, 0x00000000);
@@ -1741,7 +1741,7 @@ nvc0_grctx_generate_tp(struct drm_device *dev)
1741 nv_wr32(dev, 0x419a1c, 0x00000000); 1741 nv_wr32(dev, 0x419a1c, 0x00000000);
1742 nv_wr32(dev, 0x419a20, 0x00000800); 1742 nv_wr32(dev, 0x419a20, 0x00000800);
1743 if (dev_priv->chipset != 0xc0) 1743 if (dev_priv->chipset != 0xc0)
1744 nv_wr32(dev, 0x00419ac4, 0x0007f440); // 0xc3 1744 nv_wr32(dev, 0x00419ac4, 0x0007f440); /* 0xc3 */
1745 nv_wr32(dev, 0x419b00, 0x0a418820); 1745 nv_wr32(dev, 0x419b00, 0x0a418820);
1746 nv_wr32(dev, 0x419b04, 0x062080e6); 1746 nv_wr32(dev, 0x419b04, 0x062080e6);
1747 nv_wr32(dev, 0x419b08, 0x020398a4); 1747 nv_wr32(dev, 0x419b08, 0x020398a4);
@@ -1797,8 +1797,8 @@ int
1797nvc0_grctx_generate(struct nouveau_channel *chan) 1797nvc0_grctx_generate(struct nouveau_channel *chan)
1798{ 1798{
1799 struct drm_nouveau_private *dev_priv = chan->dev->dev_private; 1799 struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
1800 struct nvc0_graph_priv *priv = dev_priv->engine.graph.priv; 1800 struct nvc0_graph_priv *priv = nv_engine(chan->dev, NVOBJ_ENGINE_GR);
1801 struct nvc0_graph_chan *grch = chan->pgraph_ctx; 1801 struct nvc0_graph_chan *grch = chan->engctx[NVOBJ_ENGINE_GR];
1802 struct drm_device *dev = chan->dev; 1802 struct drm_device *dev = chan->dev;
1803 int i, gpc, tp, id; 1803 int i, gpc, tp, id;
1804 u32 r000260, tmp; 1804 u32 r000260, tmp;
@@ -1912,13 +1912,13 @@ nvc0_grctx_generate(struct nouveau_channel *chan)
1912 for (i = 1; i < 7; i++) 1912 for (i = 1; i < 7; i++)
1913 data2[1] |= ((1 << (i + 5)) % ntpcv) << ((i - 1) * 5); 1913 data2[1] |= ((1 << (i + 5)) % ntpcv) << ((i - 1) * 5);
1914 1914
1915 // GPC_BROADCAST 1915 /* GPC_BROADCAST */
1916 nv_wr32(dev, 0x418bb8, (priv->tp_total << 8) | 1916 nv_wr32(dev, 0x418bb8, (priv->tp_total << 8) |
1917 priv->magic_not_rop_nr); 1917 priv->magic_not_rop_nr);
1918 for (i = 0; i < 6; i++) 1918 for (i = 0; i < 6; i++)
1919 nv_wr32(dev, 0x418b08 + (i * 4), data[i]); 1919 nv_wr32(dev, 0x418b08 + (i * 4), data[i]);
1920 1920
1921 // GPC_BROADCAST.TP_BROADCAST 1921 /* GPC_BROADCAST.TP_BROADCAST */
1922 nv_wr32(dev, 0x419bd0, (priv->tp_total << 8) | 1922 nv_wr32(dev, 0x419bd0, (priv->tp_total << 8) |
1923 priv->magic_not_rop_nr | 1923 priv->magic_not_rop_nr |
1924 data2[0]); 1924 data2[0]);
@@ -1926,7 +1926,7 @@ nvc0_grctx_generate(struct nouveau_channel *chan)
1926 for (i = 0; i < 6; i++) 1926 for (i = 0; i < 6; i++)
1927 nv_wr32(dev, 0x419b00 + (i * 4), data[i]); 1927 nv_wr32(dev, 0x419b00 + (i * 4), data[i]);
1928 1928
1929 // UNK78xx 1929 /* UNK78xx */
1930 nv_wr32(dev, 0x4078bc, (priv->tp_total << 8) | 1930 nv_wr32(dev, 0x4078bc, (priv->tp_total << 8) |
1931 priv->magic_not_rop_nr); 1931 priv->magic_not_rop_nr);
1932 for (i = 0; i < 6; i++) 1932 for (i = 0; i < 6; i++)
@@ -1944,7 +1944,7 @@ nvc0_grctx_generate(struct nouveau_channel *chan)
1944 gpc = -1; 1944 gpc = -1;
1945 for (i = 0, gpc = -1; i < 32; i++) { 1945 for (i = 0, gpc = -1; i < 32; i++) {
1946 int ltp = i * (priv->tp_total - 1) / 32; 1946 int ltp = i * (priv->tp_total - 1) / 32;
1947 1947
1948 do { 1948 do {
1949 gpc = (gpc + 1) % priv->gpc_nr; 1949 gpc = (gpc + 1) % priv->gpc_nr;
1950 } while (!tpnr[gpc]); 1950 } while (!tpnr[gpc]);
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
index 7bd745689097..ebdb0fdb8348 100644
--- a/drivers/gpu/drm/radeon/atom.c
+++ b/drivers/gpu/drm/radeon/atom.c
@@ -652,12 +652,12 @@ static void atom_op_compare(atom_exec_context *ctx, int *ptr, int arg)
652 652
653static void atom_op_delay(atom_exec_context *ctx, int *ptr, int arg) 653static void atom_op_delay(atom_exec_context *ctx, int *ptr, int arg)
654{ 654{
655 uint8_t count = U8((*ptr)++); 655 unsigned count = U8((*ptr)++);
656 SDEBUG(" count: %d\n", count); 656 SDEBUG(" count: %d\n", count);
657 if (arg == ATOM_UNIT_MICROSEC) 657 if (arg == ATOM_UNIT_MICROSEC)
658 udelay(count); 658 udelay(count);
659 else 659 else
660 schedule_timeout_uninterruptible(msecs_to_jiffies(count)); 660 msleep(count);
661} 661}
662 662
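
Two things change in atom_op_delay: the counter widens from uint8_t to unsigned (the BIOS still only supplies a byte, but this matches the delay helpers' parameter types), and the millisecond branch switches to msleep(), the standard wrapper for exactly the set-state-and-schedule_timeout pattern it replaces. The kernel rule of thumb the change reflects, as a sketch:

	#include <linux/delay.h>

	/* Common kernel delay idiom: busy-wait only for short microsecond
	 * delays; sleep for anything measured in milliseconds. */
	static void atom_style_delay(unsigned count, bool microseconds)
	{
		if (microseconds)
			udelay(count);	/* spins; count here is at most 255 */
		else
			msleep(count);	/* sleeps, may overshoot by a jiffy */
	}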
663static void atom_op_div(atom_exec_context *ctx, int *ptr, int arg) 663static void atom_op_div(atom_exec_context *ctx, int *ptr, int arg)
diff --git a/drivers/gpu/drm/radeon/atombios.h b/drivers/gpu/drm/radeon/atombios.h
index 7fd88497b930..49611e2365d9 100644
--- a/drivers/gpu/drm/radeon/atombios.h
+++ b/drivers/gpu/drm/radeon/atombios.h
@@ -726,6 +726,7 @@ typedef struct _DIG_ENCODER_CONTROL_PARAMETERS_V2
726#define ATOM_ENCODER_CMD_DP_VIDEO_ON 0x0d 726#define ATOM_ENCODER_CMD_DP_VIDEO_ON 0x0d
727#define ATOM_ENCODER_CMD_QUERY_DP_LINK_TRAINING_STATUS 0x0e 727#define ATOM_ENCODER_CMD_QUERY_DP_LINK_TRAINING_STATUS 0x0e
728#define ATOM_ENCODER_CMD_SETUP 0x0f 728#define ATOM_ENCODER_CMD_SETUP 0x0f
729#define ATOM_ENCODER_CMD_SETUP_PANEL_MODE 0x10
729 730
730// ucStatus 731// ucStatus
731#define ATOM_ENCODER_STATUS_LINK_TRAINING_COMPLETE 0x10 732#define ATOM_ENCODER_STATUS_LINK_TRAINING_COMPLETE 0x10
@@ -765,13 +766,19 @@ typedef struct _DIG_ENCODER_CONTROL_PARAMETERS_V3
765 USHORT usPixelClock; // in 10KHz; for bios convenient 766 USHORT usPixelClock; // in 10KHz; for bios convenient
766 ATOM_DIG_ENCODER_CONFIG_V3 acConfig; 767 ATOM_DIG_ENCODER_CONFIG_V3 acConfig;
767 UCHAR ucAction; 768 UCHAR ucAction;
768 UCHAR ucEncoderMode; 769 union {
770 UCHAR ucEncoderMode;
769 // =0: DP encoder 771 // =0: DP encoder
770 // =1: LVDS encoder 772 // =1: LVDS encoder
771 // =2: DVI encoder 773 // =2: DVI encoder
772 // =3: HDMI encoder 774 // =3: HDMI encoder
773 // =4: SDVO encoder 775 // =4: SDVO encoder
774 // =5: DP audio 776 // =5: DP audio
777 UCHAR ucPanelMode; // only valid when ucAction == ATOM_ENCODER_CMD_SETUP_PANEL_MODE
778 // =0: external DP
779 // =1: internal DP2
780 // =0x11: internal DP1 for NutMeg/Travis DP translator
781 };
775 UCHAR ucLaneNum; // how many lanes to enable 782 UCHAR ucLaneNum; // how many lanes to enable
776 UCHAR ucBitPerColor; // only valid for DP mode when ucAction = ATOM_ENCODER_CMD_SETUP 783 UCHAR ucBitPerColor; // only valid for DP mode when ucAction = ATOM_ENCODER_CMD_SETUP
777 UCHAR ucReserved; 784 UCHAR ucReserved;
@@ -816,13 +823,19 @@ typedef struct _DIG_ENCODER_CONTROL_PARAMETERS_V4
816 UCHAR ucConfig; 823 UCHAR ucConfig;
817 }; 824 };
818 UCHAR ucAction; 825 UCHAR ucAction;
819 UCHAR ucEncoderMode; 826 union {
827 UCHAR ucEncoderMode;
820 // =0: DP encoder 828 // =0: DP encoder
821 // =1: LVDS encoder 829 // =1: LVDS encoder
822 // =2: DVI encoder 830 // =2: DVI encoder
823 // =3: HDMI encoder 831 // =3: HDMI encoder
824 // =4: SDVO encoder 832 // =4: SDVO encoder
825 // =5: DP audio 833 // =5: DP audio
834 UCHAR ucPanelMode; // only valid when ucAction == ATOM_ENCODER_CMD_SETUP_PANEL_MODE
835 // =0: external DP
836 // =1: internal DP2
837 // =0x11: internal DP1 for NutMeg/Travis DP translator
838 };
826 UCHAR ucLaneNum; // how many lanes to enable 839 UCHAR ucLaneNum; // how many lanes to enable
827 UCHAR ucBitPerColor; // only valid for DP mode when ucAction = ATOM_ENCODER_CMD_SETUP 840 UCHAR ucBitPerColor; // only valid for DP mode when ucAction = ATOM_ENCODER_CMD_SETUP
828 UCHAR ucHPD_ID; // HPD ID (1-6). =0 means to skip HDP programming. New comparing to previous version 841 UCHAR ucHPD_ID; // HPD ID (1-6). =0 means to skip HDP programming. New comparing to previous version
@@ -836,6 +849,11 @@ typedef struct _DIG_ENCODER_CONTROL_PARAMETERS_V4
836#define PANEL_12BIT_PER_COLOR 0x04 849#define PANEL_12BIT_PER_COLOR 0x04
837#define PANEL_16BIT_PER_COLOR 0x05 850#define PANEL_16BIT_PER_COLOR 0x05
838 851
852//define ucPanelMode
853#define DP_PANEL_MODE_EXTERNAL_DP_MODE 0x00
854#define DP_PANEL_MODE_INTERNAL_DP2_MODE 0x01
855#define DP_PANEL_MODE_INTERNAL_DP1_MODE 0x11
856
839/****************************************************************************/ 857/****************************************************************************/
840// Structures used by UNIPHYTransmitterControlTable 858// Structures used by UNIPHYTransmitterControlTable
841// LVTMATransmitterControlTable 859// LVTMATransmitterControlTable
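
The header change overlays ucPanelMode on ucEncoderMode through an anonymous union: the byte's meaning flips with ucAction, carrying an encoder mode normally and a DP panel mode (external DP, internal DP2, or internal DP1 for the NutMeg/Travis DP translators) when the action is ATOM_ENCODER_CMD_SETUP_PANEL_MODE. Illustrative use of the overlay, where args stands for a DIG_ENCODER_CONTROL_PARAMETERS_V3/V4 instance:

	/* Illustration of the action-dependent byte. */
	if (action == ATOM_ENCODER_CMD_SETUP_PANEL_MODE)
		args.ucPanelMode = DP_PANEL_MODE_INTERNAL_DP1_MODE; /* e.g. Travis */
	else
		args.ucEncoderMode = encoder_mode;
	args.ucAction = action;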
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 529a3a704731..ec848787d7d9 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -420,7 +420,7 @@ static void atombios_crtc_program_ss(struct drm_crtc *crtc,
420 420
421 if (ASIC_IS_DCE5(rdev)) { 421 if (ASIC_IS_DCE5(rdev)) {
422 args.v3.usSpreadSpectrumAmountFrac = cpu_to_le16(0); 422 args.v3.usSpreadSpectrumAmountFrac = cpu_to_le16(0);
423 args.v3.ucSpreadSpectrumType = ss->type; 423 args.v3.ucSpreadSpectrumType = ss->type & ATOM_SS_CENTRE_SPREAD_MODE_MASK;
424 switch (pll_id) { 424 switch (pll_id) {
425 case ATOM_PPLL1: 425 case ATOM_PPLL1:
426 args.v3.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_P1PLL; 426 args.v3.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_P1PLL;
@@ -440,10 +440,12 @@ static void atombios_crtc_program_ss(struct drm_crtc *crtc,
440 case ATOM_PPLL_INVALID: 440 case ATOM_PPLL_INVALID:
441 return; 441 return;
442 } 442 }
443 args.v2.ucEnable = enable; 443 args.v3.ucEnable = enable;
444 if ((ss->percentage == 0) || (ss->type & ATOM_EXTERNAL_SS_MASK))
445 args.v3.ucEnable = ATOM_DISABLE;
444 } else if (ASIC_IS_DCE4(rdev)) { 446 } else if (ASIC_IS_DCE4(rdev)) {
445 args.v2.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage); 447 args.v2.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage);
446 args.v2.ucSpreadSpectrumType = ss->type; 448 args.v2.ucSpreadSpectrumType = ss->type & ATOM_SS_CENTRE_SPREAD_MODE_MASK;
447 switch (pll_id) { 449 switch (pll_id) {
448 case ATOM_PPLL1: 450 case ATOM_PPLL1:
449 args.v2.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V2_P1PLL; 451 args.v2.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V2_P1PLL;
@@ -464,32 +466,36 @@ static void atombios_crtc_program_ss(struct drm_crtc *crtc,
464 return; 466 return;
465 } 467 }
466 args.v2.ucEnable = enable; 468 args.v2.ucEnable = enable;
469 if ((ss->percentage == 0) || (ss->type & ATOM_EXTERNAL_SS_MASK))
470 args.v2.ucEnable = ATOM_DISABLE;
467 } else if (ASIC_IS_DCE3(rdev)) { 471 } else if (ASIC_IS_DCE3(rdev)) {
468 args.v1.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage); 472 args.v1.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage);
469 args.v1.ucSpreadSpectrumType = ss->type; 473 args.v1.ucSpreadSpectrumType = ss->type & ATOM_SS_CENTRE_SPREAD_MODE_MASK;
470 args.v1.ucSpreadSpectrumStep = ss->step; 474 args.v1.ucSpreadSpectrumStep = ss->step;
471 args.v1.ucSpreadSpectrumDelay = ss->delay; 475 args.v1.ucSpreadSpectrumDelay = ss->delay;
472 args.v1.ucSpreadSpectrumRange = ss->range; 476 args.v1.ucSpreadSpectrumRange = ss->range;
473 args.v1.ucPpll = pll_id; 477 args.v1.ucPpll = pll_id;
474 args.v1.ucEnable = enable; 478 args.v1.ucEnable = enable;
475 } else if (ASIC_IS_AVIVO(rdev)) { 479 } else if (ASIC_IS_AVIVO(rdev)) {
476 if (enable == ATOM_DISABLE) { 480 if ((enable == ATOM_DISABLE) || (ss->percentage == 0) ||
481 (ss->type & ATOM_EXTERNAL_SS_MASK)) {
477 atombios_disable_ss(crtc); 482 atombios_disable_ss(crtc);
478 return; 483 return;
479 } 484 }
480 args.lvds_ss_2.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage); 485 args.lvds_ss_2.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage);
481 args.lvds_ss_2.ucSpreadSpectrumType = ss->type; 486 args.lvds_ss_2.ucSpreadSpectrumType = ss->type & ATOM_SS_CENTRE_SPREAD_MODE_MASK;
482 args.lvds_ss_2.ucSpreadSpectrumStep = ss->step; 487 args.lvds_ss_2.ucSpreadSpectrumStep = ss->step;
483 args.lvds_ss_2.ucSpreadSpectrumDelay = ss->delay; 488 args.lvds_ss_2.ucSpreadSpectrumDelay = ss->delay;
484 args.lvds_ss_2.ucSpreadSpectrumRange = ss->range; 489 args.lvds_ss_2.ucSpreadSpectrumRange = ss->range;
485 args.lvds_ss_2.ucEnable = enable; 490 args.lvds_ss_2.ucEnable = enable;
486 } else { 491 } else {
487 if (enable == ATOM_DISABLE) { 492 if ((enable == ATOM_DISABLE) || (ss->percentage == 0) ||
493 (ss->type & ATOM_EXTERNAL_SS_MASK)) {
488 atombios_disable_ss(crtc); 494 atombios_disable_ss(crtc);
489 return; 495 return;
490 } 496 }
491 args.lvds_ss.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage); 497 args.lvds_ss.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage);
492 args.lvds_ss.ucSpreadSpectrumType = ss->type; 498 args.lvds_ss.ucSpreadSpectrumType = ss->type & ATOM_SS_CENTRE_SPREAD_MODE_MASK;
493 args.lvds_ss.ucSpreadSpectrumStepSize_Delay = (ss->step & 3) << 2; 499 args.lvds_ss.ucSpreadSpectrumStepSize_Delay = (ss->step & 3) << 2;
494 args.lvds_ss.ucSpreadSpectrumStepSize_Delay |= (ss->delay & 7) << 4; 500 args.lvds_ss.ucSpreadSpectrumStepSize_Delay |= (ss->delay & 7) << 4;
495 args.lvds_ss.ucEnable = enable; 501 args.lvds_ss.ucEnable = enable;
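
The spread-spectrum changes in this function all enforce the same two rules: the raw ss->type byte evidently carries more than the spread mode, so only the ATOM_SS_CENTRE_SPREAD_MODE_MASK bits may be forwarded before OR-ing in the PLL selector; and when the percentage is zero, or ATOM_EXTERNAL_SS_MASK says an external clock chip provides the modulation, the internal SS block must be forced off rather than programmed anyway (the pixel-clock hunks below additionally select the reference-divider clock source in the external case). The recurring guard, extracted as a fragment:

	/* Sketch of the guard repeated across the SS paths above. */
	if (ss->percentage == 0 || (ss->type & ATOM_EXTERNAL_SS_MASK))
		enable = ATOM_DISABLE;	/* nothing for the internal SS block to do */
	args.ucSpreadSpectrumType = ss->type & ATOM_SS_CENTRE_SPREAD_MODE_MASK;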
@@ -512,6 +518,7 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
512 struct radeon_device *rdev = dev->dev_private; 518 struct radeon_device *rdev = dev->dev_private;
513 struct drm_encoder *encoder = NULL; 519 struct drm_encoder *encoder = NULL;
514 struct radeon_encoder *radeon_encoder = NULL; 520 struct radeon_encoder *radeon_encoder = NULL;
521 struct drm_connector *connector = NULL;
515 u32 adjusted_clock = mode->clock; 522 u32 adjusted_clock = mode->clock;
516 int encoder_mode = 0; 523 int encoder_mode = 0;
517 u32 dp_clock = mode->clock; 524 u32 dp_clock = mode->clock;
@@ -546,9 +553,12 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
546 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { 553 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
547 if (encoder->crtc == crtc) { 554 if (encoder->crtc == crtc) {
548 radeon_encoder = to_radeon_encoder(encoder); 555 radeon_encoder = to_radeon_encoder(encoder);
556 connector = radeon_get_connector_for_encoder(encoder);
557 if (connector)
558 bpc = connector->display_info.bpc;
549 encoder_mode = atombios_get_encoder_mode(encoder); 559 encoder_mode = atombios_get_encoder_mode(encoder);
550 if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) { 560 if ((radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) ||
551 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); 561 radeon_encoder_is_dp_bridge(encoder)) {
552 if (connector) { 562 if (connector) {
553 struct radeon_connector *radeon_connector = to_radeon_connector(connector); 563 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
554 struct radeon_connector_atom_dig *dig_connector = 564 struct radeon_connector_atom_dig *dig_connector =
@@ -612,7 +622,7 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
612 args.v1.usPixelClock = cpu_to_le16(mode->clock / 10); 622 args.v1.usPixelClock = cpu_to_le16(mode->clock / 10);
613 args.v1.ucTransmitterID = radeon_encoder->encoder_id; 623 args.v1.ucTransmitterID = radeon_encoder->encoder_id;
614 args.v1.ucEncodeMode = encoder_mode; 624 args.v1.ucEncodeMode = encoder_mode;
615 if (ss_enabled) 625 if (ss_enabled && ss->percentage)
616 args.v1.ucConfig |= 626 args.v1.ucConfig |=
617 ADJUST_DISPLAY_CONFIG_SS_ENABLE; 627 ADJUST_DISPLAY_CONFIG_SS_ENABLE;
618 628
@@ -625,10 +635,11 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
625 args.v3.sInput.ucTransmitterID = radeon_encoder->encoder_id; 635 args.v3.sInput.ucTransmitterID = radeon_encoder->encoder_id;
626 args.v3.sInput.ucEncodeMode = encoder_mode; 636 args.v3.sInput.ucEncodeMode = encoder_mode;
627 args.v3.sInput.ucDispPllConfig = 0; 637 args.v3.sInput.ucDispPllConfig = 0;
628 if (ss_enabled) 638 if (ss_enabled && ss->percentage)
629 args.v3.sInput.ucDispPllConfig |= 639 args.v3.sInput.ucDispPllConfig |=
630 DISPPLL_CONFIG_SS_ENABLE; 640 DISPPLL_CONFIG_SS_ENABLE;
631 if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) { 641 if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT) ||
642 radeon_encoder_is_dp_bridge(encoder)) {
632 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; 643 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
633 if (encoder_mode == ATOM_ENCODER_MODE_DP) { 644 if (encoder_mode == ATOM_ENCODER_MODE_DP) {
634 args.v3.sInput.ucDispPllConfig |= 645 args.v3.sInput.ucDispPllConfig |=
@@ -754,7 +765,10 @@ static void atombios_crtc_program_pll(struct drm_crtc *crtc,
754 u32 ref_div, 765 u32 ref_div,
755 u32 fb_div, 766 u32 fb_div,
756 u32 frac_fb_div, 767 u32 frac_fb_div,
757 u32 post_div) 768 u32 post_div,
769 int bpc,
770 bool ss_enabled,
771 struct radeon_atom_ss *ss)
758{ 772{
759 struct drm_device *dev = crtc->dev; 773 struct drm_device *dev = crtc->dev;
760 struct radeon_device *rdev = dev->dev_private; 774 struct radeon_device *rdev = dev->dev_private;
@@ -801,6 +815,8 @@ static void atombios_crtc_program_pll(struct drm_crtc *crtc,
801 args.v3.ucPostDiv = post_div; 815 args.v3.ucPostDiv = post_div;
802 args.v3.ucPpll = pll_id; 816 args.v3.ucPpll = pll_id;
803 args.v3.ucMiscInfo = (pll_id << 2); 817 args.v3.ucMiscInfo = (pll_id << 2);
818 if (ss_enabled && (ss->type & ATOM_EXTERNAL_SS_MASK))
819 args.v3.ucMiscInfo |= PIXEL_CLOCK_MISC_REF_DIV_SRC;
804 args.v3.ucTransmitterId = encoder_id; 820 args.v3.ucTransmitterId = encoder_id;
805 args.v3.ucEncoderMode = encoder_mode; 821 args.v3.ucEncoderMode = encoder_mode;
806 break; 822 break;
@@ -812,6 +828,17 @@ static void atombios_crtc_program_pll(struct drm_crtc *crtc,
812 args.v5.ulFbDivDecFrac = cpu_to_le32(frac_fb_div * 100000); 828 args.v5.ulFbDivDecFrac = cpu_to_le32(frac_fb_div * 100000);
813 args.v5.ucPostDiv = post_div; 829 args.v5.ucPostDiv = post_div;
814 args.v5.ucMiscInfo = 0; /* HDMI depth, etc. */ 830 args.v5.ucMiscInfo = 0; /* HDMI depth, etc. */
831 if (ss_enabled && (ss->type & ATOM_EXTERNAL_SS_MASK))
832 args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_REF_DIV_SRC;
833 switch (bpc) {
834 case 8:
835 default:
836 args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_HDMI_24BPP;
837 break;
838 case 10:
839 args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_HDMI_30BPP;
840 break;
841 }
815 args.v5.ucTransmitterID = encoder_id; 842 args.v5.ucTransmitterID = encoder_id;
816 args.v5.ucEncoderMode = encoder_mode; 843 args.v5.ucEncoderMode = encoder_mode;
817 args.v5.ucPpll = pll_id; 844 args.v5.ucPpll = pll_id;
@@ -824,6 +851,23 @@ static void atombios_crtc_program_pll(struct drm_crtc *crtc,
824 args.v6.ulFbDivDecFrac = cpu_to_le32(frac_fb_div * 100000); 851 args.v6.ulFbDivDecFrac = cpu_to_le32(frac_fb_div * 100000);
825 args.v6.ucPostDiv = post_div; 852 args.v6.ucPostDiv = post_div;
826 args.v6.ucMiscInfo = 0; /* HDMI depth, etc. */ 853 args.v6.ucMiscInfo = 0; /* HDMI depth, etc. */
854 if (ss_enabled && (ss->type & ATOM_EXTERNAL_SS_MASK))
855 args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_REF_DIV_SRC;
856 switch (bpc) {
857 case 8:
858 default:
859 args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_24BPP;
860 break;
861 case 10:
862 args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_30BPP;
863 break;
864 case 12:
865 args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_36BPP;
866 break;
867 case 16:
868 args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_48BPP;
869 break;
870 }
827 args.v6.ucTransmitterID = encoder_id; 871 args.v6.ucTransmitterID = encoder_id;
828 args.v6.ucEncoderMode = encoder_mode; 872 args.v6.ucEncoderMode = encoder_mode;
829 args.v6.ucPpll = pll_id; 873 args.v6.ucPpll = pll_id;
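
The new ucMiscInfo depth bits exist because HDMI deep color changes the TMDS clock: at 30/36/48 bpp the link runs at 1.25x/1.5x/2x the pixel clock, so SetPixelClock must know the depth to program the PLL correctly. The scaling itself, for reference:

	/* HDMI deep-color TMDS clock scaling implied by the bpc flags above. */
	static unsigned int hdmi_tmds_clock(unsigned int pix_clock_khz, int bpc)
	{
		switch (bpc) {
		case 10: return pix_clock_khz * 5 / 4;	/* 30 bpp */
		case 12: return pix_clock_khz * 3 / 2;	/* 36 bpp */
		case 16: return pix_clock_khz * 2;	/* 48 bpp */
		default: return pix_clock_khz;		/* 24 bpp */
		}
	}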
@@ -855,6 +899,7 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
855 int encoder_mode = 0; 899 int encoder_mode = 0;
856 struct radeon_atom_ss ss; 900 struct radeon_atom_ss ss;
857 bool ss_enabled = false; 901 bool ss_enabled = false;
902 int bpc = 8;
858 903
859 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { 904 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
860 if (encoder->crtc == crtc) { 905 if (encoder->crtc == crtc) {
@@ -891,41 +936,30 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
891 struct radeon_connector_atom_dig *dig_connector = 936 struct radeon_connector_atom_dig *dig_connector =
892 radeon_connector->con_priv; 937 radeon_connector->con_priv;
893 int dp_clock; 938 int dp_clock;
939 bpc = connector->display_info.bpc;
894 940
895 switch (encoder_mode) { 941 switch (encoder_mode) {
896 case ATOM_ENCODER_MODE_DP: 942 case ATOM_ENCODER_MODE_DP:
897 /* DP/eDP */ 943 /* DP/eDP */
898 dp_clock = dig_connector->dp_clock / 10; 944 dp_clock = dig_connector->dp_clock / 10;
899 if (radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT)) { 945 if (ASIC_IS_DCE4(rdev))
900 if (ASIC_IS_DCE4(rdev)) 946 ss_enabled =
901 ss_enabled = 947 radeon_atombios_get_asic_ss_info(rdev, &ss,
902 radeon_atombios_get_asic_ss_info(rdev, &ss, 948 ASIC_INTERNAL_SS_ON_DP,
903 dig->lcd_ss_id, 949 dp_clock);
904 dp_clock); 950 else {
905 else 951 if (dp_clock == 16200) {
906 ss_enabled = 952 ss_enabled =
907 radeon_atombios_get_ppll_ss_info(rdev, &ss, 953 radeon_atombios_get_ppll_ss_info(rdev, &ss,
908 dig->lcd_ss_id); 954 ATOM_DP_SS_ID2);
909 } else { 955 if (!ss_enabled)
910 if (ASIC_IS_DCE4(rdev))
911 ss_enabled =
912 radeon_atombios_get_asic_ss_info(rdev, &ss,
913 ASIC_INTERNAL_SS_ON_DP,
914 dp_clock);
915 else {
916 if (dp_clock == 16200) {
917 ss_enabled =
918 radeon_atombios_get_ppll_ss_info(rdev, &ss,
919 ATOM_DP_SS_ID2);
920 if (!ss_enabled)
921 ss_enabled =
922 radeon_atombios_get_ppll_ss_info(rdev, &ss,
923 ATOM_DP_SS_ID1);
924 } else
925 ss_enabled = 956 ss_enabled =
926 radeon_atombios_get_ppll_ss_info(rdev, &ss, 957 radeon_atombios_get_ppll_ss_info(rdev, &ss,
927 ATOM_DP_SS_ID1); 958 ATOM_DP_SS_ID1);
928 } 959 } else
960 ss_enabled =
961 radeon_atombios_get_ppll_ss_info(rdev, &ss,
962 ATOM_DP_SS_ID1);
929 } 963 }
930 break; 964 break;
931 case ATOM_ENCODER_MODE_LVDS: 965 case ATOM_ENCODER_MODE_LVDS:
@@ -974,7 +1008,7 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
974 1008
975 atombios_crtc_program_pll(crtc, radeon_crtc->crtc_id, radeon_crtc->pll_id, 1009 atombios_crtc_program_pll(crtc, radeon_crtc->crtc_id, radeon_crtc->pll_id,
976 encoder_mode, radeon_encoder->encoder_id, mode->clock, 1010 encoder_mode, radeon_encoder->encoder_id, mode->clock,
977 ref_div, fb_div, frac_fb_div, post_div); 1011 ref_div, fb_div, frac_fb_div, post_div, bpc, ss_enabled, &ss);
978 1012
979 if (ss_enabled) { 1013 if (ss_enabled) {
980 /* calculate ss amount and step size */ 1014 /* calculate ss amount and step size */
@@ -982,7 +1016,7 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
982 u32 step_size; 1016 u32 step_size;
983 u32 amount = (((fb_div * 10) + frac_fb_div) * ss.percentage) / 10000; 1017 u32 amount = (((fb_div * 10) + frac_fb_div) * ss.percentage) / 10000;
984 ss.amount = (amount / 10) & ATOM_PPLL_SS_AMOUNT_V2_FBDIV_MASK; 1018 ss.amount = (amount / 10) & ATOM_PPLL_SS_AMOUNT_V2_FBDIV_MASK;
985 ss.amount |= ((amount - (ss.amount * 10)) << ATOM_PPLL_SS_AMOUNT_V2_NFRAC_SHIFT) & 1019 ss.amount |= ((amount - (amount / 10)) << ATOM_PPLL_SS_AMOUNT_V2_NFRAC_SHIFT) &
986 ATOM_PPLL_SS_AMOUNT_V2_NFRAC_MASK; 1020 ATOM_PPLL_SS_AMOUNT_V2_NFRAC_MASK;
987 if (ss.type & ATOM_PPLL_SS_TYPE_V2_CENTRE_SPREAD) 1021 if (ss.type & ATOM_PPLL_SS_TYPE_V2_CENTRE_SPREAD)
988 step_size = (4 * amount * ref_div * (ss.rate * 2048)) / 1022 step_size = (4 * amount * ref_div * (ss.rate * 2048)) /
@@ -1395,11 +1429,19 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc)
1395 uint32_t pll_in_use = 0; 1429 uint32_t pll_in_use = 0;
1396 1430
1397 if (ASIC_IS_DCE4(rdev)) { 1431 if (ASIC_IS_DCE4(rdev)) {
1398 /* if crtc is driving DP and we have an ext clock, use that */
1399 list_for_each_entry(test_encoder, &dev->mode_config.encoder_list, head) { 1432 list_for_each_entry(test_encoder, &dev->mode_config.encoder_list, head) {
1400 if (test_encoder->crtc && (test_encoder->crtc == crtc)) { 1433 if (test_encoder->crtc && (test_encoder->crtc == crtc)) {
1434 /* in DP mode, the DP ref clock can come from PPLL, DCPLL, or ext clock,
1435 * depending on the asic:
1436 * DCE4: PPLL or ext clock
1437 * DCE5: DCPLL or ext clock
1438 *
1439 * Setting ATOM_PPLL_INVALID will cause SetPixelClock to skip
1440 * PPLL/DCPLL programming and only program the DP DTO for the
1441 * crtc virtual pixel clock.
1442 */
1401 if (atombios_get_encoder_mode(test_encoder) == ATOM_ENCODER_MODE_DP) { 1443 if (atombios_get_encoder_mode(test_encoder) == ATOM_ENCODER_MODE_DP) {
1402 if (rdev->clock.dp_extclk) 1444 if (ASIC_IS_DCE5(rdev) || rdev->clock.dp_extclk)
1403 return ATOM_PPLL_INVALID; 1445 return ATOM_PPLL_INVALID;
1404 } 1446 }
1405 } 1447 }
@@ -1515,6 +1557,8 @@ static void atombios_crtc_commit(struct drm_crtc *crtc)
1515static void atombios_crtc_disable(struct drm_crtc *crtc) 1557static void atombios_crtc_disable(struct drm_crtc *crtc)
1516{ 1558{
1517 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); 1559 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
1560 struct radeon_atom_ss ss;
1561
1518 atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); 1562 atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
1519 1563
1520 switch (radeon_crtc->pll_id) { 1564 switch (radeon_crtc->pll_id) {
@@ -1522,7 +1566,7 @@ static void atombios_crtc_disable(struct drm_crtc *crtc)
1522 case ATOM_PPLL2: 1566 case ATOM_PPLL2:
1523 /* disable the ppll */ 1567 /* disable the ppll */
1524 atombios_crtc_program_pll(crtc, radeon_crtc->crtc_id, radeon_crtc->pll_id, 1568 atombios_crtc_program_pll(crtc, radeon_crtc->crtc_id, radeon_crtc->pll_id,
1525 0, 0, ATOM_DISABLE, 0, 0, 0, 0); 1569 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
1526 break; 1570 break;
1527 default: 1571 default:
1528 break; 1572 break;
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 695de9a38506..8c0f9e36ff8e 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -43,158 +43,242 @@ static char *pre_emph_names[] = {
43 "0dB", "3.5dB", "6dB", "9.5dB" 43 "0dB", "3.5dB", "6dB", "9.5dB"
44}; 44};
45 45
46static const int dp_clocks[] = { 46/***** radeon AUX functions *****/
47 54000, /* 1 lane, 1.62 Ghz */ 47union aux_channel_transaction {
48 90000, /* 1 lane, 2.70 Ghz */ 48 PROCESS_AUX_CHANNEL_TRANSACTION_PS_ALLOCATION v1;
49 108000, /* 2 lane, 1.62 Ghz */ 49 PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS_V2 v2;
50 180000, /* 2 lane, 2.70 Ghz */
51 216000, /* 4 lane, 1.62 Ghz */
52 360000, /* 4 lane, 2.70 Ghz */
53}; 50};
54 51
55static const int num_dp_clocks = sizeof(dp_clocks) / sizeof(int); 52static int radeon_process_aux_ch(struct radeon_i2c_chan *chan,
53 u8 *send, int send_bytes,
54 u8 *recv, int recv_size,
55 u8 delay, u8 *ack)
56{
57 struct drm_device *dev = chan->dev;
58 struct radeon_device *rdev = dev->dev_private;
59 union aux_channel_transaction args;
60 int index = GetIndexIntoMasterTable(COMMAND, ProcessAuxChannelTransaction);
61 unsigned char *base;
62 int recv_bytes;
63
64 memset(&args, 0, sizeof(args));
56 65
57/* common helper functions */ 66 base = (unsigned char *)rdev->mode_info.atom_context->scratch;
58static int dp_lanes_for_mode_clock(u8 dpcd[DP_DPCD_SIZE], int mode_clock) 67
68 memcpy(base, send, send_bytes);
69
70 args.v1.lpAuxRequest = 0;
71 args.v1.lpDataOut = 16;
72 args.v1.ucDataOutLen = 0;
73 args.v1.ucChannelID = chan->rec.i2c_id;
74 args.v1.ucDelay = delay / 10;
75 if (ASIC_IS_DCE4(rdev))
76 args.v2.ucHPD_ID = chan->rec.hpd;
77
78 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
79
80 *ack = args.v1.ucReplyStatus;
81
82 /* timeout */
83 if (args.v1.ucReplyStatus == 1) {
84 DRM_DEBUG_KMS("dp_aux_ch timeout\n");
85 return -ETIMEDOUT;
86 }
87
88 /* flags not zero */
89 if (args.v1.ucReplyStatus == 2) {
90 DRM_DEBUG_KMS("dp_aux_ch flags not zero\n");
91 return -EBUSY;
92 }
93
94 /* error */
95 if (args.v1.ucReplyStatus == 3) {
96 DRM_DEBUG_KMS("dp_aux_ch error\n");
97 return -EIO;
98 }
99
100 recv_bytes = args.v1.ucDataOutLen;
101 if (recv_bytes > recv_size)
102 recv_bytes = recv_size;
103
104 if (recv && recv_size)
105 memcpy(recv, base + 16, recv_bytes);
106
107 return recv_bytes;
108}
109
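radeon_process_aux_ch() is now the single funnel for every AUX access in this file: the request is copied into the ATOM scratch buffer, the ProcessAuxChannelTransaction table is executed, and any reply data lands at scratch + 16 with the status in ucReplyStatus (values 1/2/3 mapping to -ETIMEDOUT/-EBUSY/-EIO above). A minimal caller sketch, mirroring the framing the native-read path below builds (chan is assumed to be a live radeon_i2c_chan):

	u8 req[4], val, ack;
	int ret;

	req[0] = DP_DPCD_REV & 0xff;	/* DPCD address, bits 7:0 */
	req[1] = DP_DPCD_REV >> 8;	/* DPCD address, bits 15:8 */
	req[2] = AUX_NATIVE_READ << 4;	/* command in the high nibble */
	req[3] = (4 << 4) | (1 - 1);	/* 4 request bytes, read 1 byte */
	ret = radeon_process_aux_ch(chan, req, 4, &val, 1, 0, &ack);
	if (ret == 1 && (ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK)
		/* val now holds the DPCD revision, e.g. 0x11 */;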
110static int radeon_dp_aux_native_write(struct radeon_connector *radeon_connector,
111 u16 address, u8 *send, u8 send_bytes, u8 delay)
59{ 112{
60 int i; 113 struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
61 u8 max_link_bw; 114 int ret;
62 u8 max_lane_count; 115 u8 msg[20];
116 int msg_bytes = send_bytes + 4;
117 u8 ack;
63 118
64 if (!dpcd) 119 if (send_bytes > 16)
65 return 0; 120 return -1;
66 121
67 max_link_bw = dpcd[DP_MAX_LINK_RATE]; 122 msg[0] = address;
68 max_lane_count = dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK; 123 msg[1] = address >> 8;
124 msg[2] = AUX_NATIVE_WRITE << 4;
125 msg[3] = (msg_bytes << 4) | (send_bytes - 1);
126 memcpy(&msg[4], send, send_bytes);
69 127
70 switch (max_link_bw) { 128 while (1) {
71 case DP_LINK_BW_1_62: 129 ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus,
72 default: 130 msg, msg_bytes, NULL, 0, delay, &ack);
73 for (i = 0; i < num_dp_clocks; i++) { 131 if (ret < 0)
74 if (i % 2) 132 return ret;
75 continue; 133 if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK)
76 switch (max_lane_count) { 134 break;
77 case 1: 135 else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
78 if (i > 1) 136 udelay(400);
79 return 0; 137 else
80 break; 138 return -EIO;
81 case 2:
82 if (i > 3)
83 return 0;
84 break;
85 case 4:
86 default:
87 break;
88 }
89 if (dp_clocks[i] > mode_clock) {
90 if (i < 2)
91 return 1;
92 else if (i < 4)
93 return 2;
94 else
95 return 4;
96 }
97 }
98 break;
99 case DP_LINK_BW_2_7:
100 for (i = 0; i < num_dp_clocks; i++) {
101 switch (max_lane_count) {
102 case 1:
103 if (i > 1)
104 return 0;
105 break;
106 case 2:
107 if (i > 3)
108 return 0;
109 break;
110 case 4:
111 default:
112 break;
113 }
114 if (dp_clocks[i] > mode_clock) {
115 if (i < 2)
116 return 1;
117 else if (i < 4)
118 return 2;
119 else
120 return 4;
121 }
122 }
123 break;
124 } 139 }
125 140
126 return 0; 141 return send_bytes;
127} 142}
128 143
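For reference, the framing radeon_dp_aux_native_write() builds — msg[3] packs the total message length in the high nibble and (data bytes - 1) in the low nibble — comes out like this for a hypothetical two-byte write starting at DP_LINK_BW_SET (0x100); a worked example, not driver code:

	msg[0] = 0x00;			/* address bits 7:0  */
	msg[1] = 0x01;			/* address bits 15:8 */
	msg[2] = AUX_NATIVE_WRITE << 4;
	msg[3] = (6 << 4) | (2 - 1);	/* msg_bytes = 4 + 2, length code 1 */
	msg[4] = DP_LINK_BW_2_7;	/* 0x0a, byte for 0x100 */
	msg[5] = 2;			/* byte for 0x101, two lanes */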
129static int dp_link_clock_for_mode_clock(u8 dpcd[DP_DPCD_SIZE], int mode_clock) 144static int radeon_dp_aux_native_read(struct radeon_connector *radeon_connector,
145 u16 address, u8 *recv, int recv_bytes, u8 delay)
130{ 146{
131 int i; 147 struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
132 u8 max_link_bw; 148 u8 msg[4];
133 u8 max_lane_count; 149 int msg_bytes = 4;
150 u8 ack;
151 int ret;
134 152
135 if (!dpcd) 153 msg[0] = address;
136 return 0; 154 msg[1] = address >> 8;
155 msg[2] = AUX_NATIVE_READ << 4;
156 msg[3] = (msg_bytes << 4) | (recv_bytes - 1);
157
158 while (1) {
159 ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus,
160 msg, msg_bytes, recv, recv_bytes, delay, &ack);
161 if (ret == 0)
162 return -EPROTO;
163 if (ret < 0)
164 return ret;
165 if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK)
166 return ret;
167 else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
168 udelay(400);
169 else
170 return -EIO;
171 }
172}
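One wrinkle in the read path above: an ACKed transfer that returns zero bytes is turned into -EPROTO, so callers can tell an empty reply from a real AUX failure — radeon_dp_getdpcd() below leans on this by testing ret > 0. Caller-side shape, sketched:

	ret = radeon_dp_aux_native_read(radeon_connector, DP_DPCD_REV, msg, 8, 0);
	if (ret > 0)
		/* got ret bytes of DPCD data */;
	else if (ret == -EPROTO)
		/* sink ACKed but sent nothing; treat as not ready */;
	else
		/* AUX failure: timeout, NACK, ... */;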
137 173
138 max_link_bw = dpcd[DP_MAX_LINK_RATE]; 174static void radeon_write_dpcd_reg(struct radeon_connector *radeon_connector,
139 max_lane_count = dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK; 175 u16 reg, u8 val)
176{
177 radeon_dp_aux_native_write(radeon_connector, reg, &val, 1, 0);
178}
140 179
141 switch (max_link_bw) { 180static u8 radeon_read_dpcd_reg(struct radeon_connector *radeon_connector,
142 case DP_LINK_BW_1_62: 181 u16 reg)
182{
183 u8 val = 0;
184
185 radeon_dp_aux_native_read(radeon_connector, reg, &val, 1, 0);
186
187 return val;
188}
189
190int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
191 u8 write_byte, u8 *read_byte)
192{
193 struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
194 struct radeon_i2c_chan *auxch = (struct radeon_i2c_chan *)adapter;
195 u16 address = algo_data->address;
196 u8 msg[5];
197 u8 reply[2];
198 unsigned retry;
199 int msg_bytes;
200 int reply_bytes = 1;
201 int ret;
202 u8 ack;
203
204 /* Set up the command byte */
205 if (mode & MODE_I2C_READ)
206 msg[2] = AUX_I2C_READ << 4;
207 else
208 msg[2] = AUX_I2C_WRITE << 4;
209
210 if (!(mode & MODE_I2C_STOP))
211 msg[2] |= AUX_I2C_MOT << 4;
212
213 msg[0] = address;
214 msg[1] = address >> 8;
215
216 switch (mode) {
217 case MODE_I2C_WRITE:
218 msg_bytes = 5;
219 msg[3] = msg_bytes << 4;
220 msg[4] = write_byte;
221 break;
222 case MODE_I2C_READ:
223 msg_bytes = 4;
224 msg[3] = msg_bytes << 4;
225 break;
143 default: 226 default:
144 for (i = 0; i < num_dp_clocks; i++) { 227 msg_bytes = 4;
145 if (i % 2) 228 msg[3] = 3 << 4;
146 continue;
147 switch (max_lane_count) {
148 case 1:
149 if (i > 1)
150 return 0;
151 break;
152 case 2:
153 if (i > 3)
154 return 0;
155 break;
156 case 4:
157 default:
158 break;
159 }
160 if (dp_clocks[i] > mode_clock)
161 return 162000;
162 }
163 break; 229 break;
164 case DP_LINK_BW_2_7:
165 for (i = 0; i < num_dp_clocks; i++) {
166 switch (max_lane_count) {
167 case 1:
168 if (i > 1)
169 return 0;
170 break;
171 case 2:
172 if (i > 3)
173 return 0;
174 break;
175 case 4:
176 default:
177 break;
178 }
179 if (dp_clocks[i] > mode_clock)
180 return (i % 2) ? 270000 : 162000;
181 }
182 } 230 }
183 231
184 return 0; 232 for (retry = 0; retry < 4; retry++) {
185} 233 ret = radeon_process_aux_ch(auxch,
234 msg, msg_bytes, reply, reply_bytes, 0, &ack);
235 if (ret < 0) {
236 DRM_DEBUG_KMS("aux_ch failed %d\n", ret);
237 return ret;
238 }
186 239
187int dp_mode_valid(u8 dpcd[DP_DPCD_SIZE], int mode_clock) 240 switch (ack & AUX_NATIVE_REPLY_MASK) {
188{ 241 case AUX_NATIVE_REPLY_ACK:
189 int lanes = dp_lanes_for_mode_clock(dpcd, mode_clock); 242 /* I2C-over-AUX Reply field is only valid
190 int dp_clock = dp_link_clock_for_mode_clock(dpcd, mode_clock); 243 * when paired with AUX ACK.
244 */
245 break;
246 case AUX_NATIVE_REPLY_NACK:
247 DRM_DEBUG_KMS("aux_ch native nack\n");
248 return -EREMOTEIO;
249 case AUX_NATIVE_REPLY_DEFER:
250 DRM_DEBUG_KMS("aux_ch native defer\n");
251 udelay(400);
252 continue;
253 default:
254 DRM_ERROR("aux_ch invalid native reply 0x%02x\n", ack);
255 return -EREMOTEIO;
256 }
191 257
192 if ((lanes == 0) || (dp_clock == 0)) 258 switch (ack & AUX_I2C_REPLY_MASK) {
193 return MODE_CLOCK_HIGH; 259 case AUX_I2C_REPLY_ACK:
260 if (mode == MODE_I2C_READ)
261 *read_byte = reply[0];
262 return ret;
263 case AUX_I2C_REPLY_NACK:
264 DRM_DEBUG_KMS("aux_i2c nack\n");
265 return -EREMOTEIO;
266 case AUX_I2C_REPLY_DEFER:
267 DRM_DEBUG_KMS("aux_i2c defer\n");
268 udelay(400);
269 break;
270 default:
271 DRM_ERROR("aux_i2c invalid reply 0x%02x\n", ack);
272 return -EREMOTEIO;
273 }
274 }
194 275
195 return MODE_OK; 276 DRM_ERROR("aux i2c too many retries, giving up\n");
277 return -EREMOTEIO;
196} 278}
197 279
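radeon_dp_i2c_aux_ch() is the aux_ch hook consumed by the kernel's i2c-algo-dp-aux helper of this era; the actual hookup lives in radeon_i2c.c, but the shape is roughly this (a sketch against the then-current i2c_algo_dp_aux_data, not a verbatim excerpt):

	static struct i2c_algo_dp_aux_data dp_algo = {
		.running = false,
		.address = 0,
		.aux_ch  = radeon_dp_i2c_aux_ch,	/* the function above */
	};

	adapter->algo_data = &dp_algo;
	ret = i2c_dp_aux_add_bus(adapter);	/* registers the adapter */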
280/***** general DP utility functions *****/
281
198static u8 dp_link_status(u8 link_status[DP_LINK_STATUS_SIZE], int r) 282static u8 dp_link_status(u8 link_status[DP_LINK_STATUS_SIZE], int r)
199{ 283{
200 return link_status[r - DP_LANE0_1_STATUS]; 284 return link_status[r - DP_LANE0_1_STATUS];
@@ -242,7 +326,7 @@ static bool dp_channel_eq_ok(u8 link_status[DP_LINK_STATUS_SIZE],
242 return true; 326 return true;
243} 327}
244 328
245static u8 dp_get_adjust_request_voltage(uint8_t link_status[DP_LINK_STATUS_SIZE], 329static u8 dp_get_adjust_request_voltage(u8 link_status[DP_LINK_STATUS_SIZE],
246 int lane) 330 int lane)
247 331
248{ 332{
@@ -255,7 +339,7 @@ static u8 dp_get_adjust_request_voltage(uint8_t link_status[DP_LINK_STATUS_SIZE]
255 return ((l >> s) & 0x3) << DP_TRAIN_VOLTAGE_SWING_SHIFT; 339 return ((l >> s) & 0x3) << DP_TRAIN_VOLTAGE_SWING_SHIFT;
256} 340}
257 341
258static u8 dp_get_adjust_request_pre_emphasis(uint8_t link_status[DP_LINK_STATUS_SIZE], 342static u8 dp_get_adjust_request_pre_emphasis(u8 link_status[DP_LINK_STATUS_SIZE],
259 int lane) 343 int lane)
260{ 344{
261 int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1); 345 int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
@@ -267,22 +351,8 @@ static u8 dp_get_adjust_request_pre_emphasis(uint8_t link_status[DP_LINK_STATUS_
267 return ((l >> s) & 0x3) << DP_TRAIN_PRE_EMPHASIS_SHIFT; 351 return ((l >> s) & 0x3) << DP_TRAIN_PRE_EMPHASIS_SHIFT;
268} 352}
269 353
270/* XXX fix me -- chip specific */
271#define DP_VOLTAGE_MAX DP_TRAIN_VOLTAGE_SWING_1200 354#define DP_VOLTAGE_MAX DP_TRAIN_VOLTAGE_SWING_1200
272static u8 dp_pre_emphasis_max(u8 voltage_swing) 355#define DP_PRE_EMPHASIS_MAX DP_TRAIN_PRE_EMPHASIS_9_5
273{
274 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
275 case DP_TRAIN_VOLTAGE_SWING_400:
276 return DP_TRAIN_PRE_EMPHASIS_6;
277 case DP_TRAIN_VOLTAGE_SWING_600:
278 return DP_TRAIN_PRE_EMPHASIS_6;
279 case DP_TRAIN_VOLTAGE_SWING_800:
280 return DP_TRAIN_PRE_EMPHASIS_3_5;
281 case DP_TRAIN_VOLTAGE_SWING_1200:
282 default:
283 return DP_TRAIN_PRE_EMPHASIS_0;
284 }
285}
286 356
287static void dp_get_adjust_train(u8 link_status[DP_LINK_STATUS_SIZE], 357static void dp_get_adjust_train(u8 link_status[DP_LINK_STATUS_SIZE],
288 int lane_count, 358 int lane_count,
@@ -308,10 +378,10 @@ static void dp_get_adjust_train(u8 link_status[DP_LINK_STATUS_SIZE],
308 } 378 }
309 379
310 if (v >= DP_VOLTAGE_MAX) 380 if (v >= DP_VOLTAGE_MAX)
311 v = DP_VOLTAGE_MAX | DP_TRAIN_MAX_SWING_REACHED; 381 v |= DP_TRAIN_MAX_SWING_REACHED;
312 382
313 if (p >= dp_pre_emphasis_max(v)) 383 if (p >= DP_PRE_EMPHASIS_MAX)
314 p = dp_pre_emphasis_max(v) | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED; 384 p |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
315 385
316 DRM_DEBUG_KMS("using signal parameters: voltage %s pre_emph %s\n", 386 DRM_DEBUG_KMS("using signal parameters: voltage %s pre_emph %s\n",
317 voltage_names[(v & DP_TRAIN_VOLTAGE_SWING_MASK) >> DP_TRAIN_VOLTAGE_SWING_SHIFT], 387 voltage_names[(v & DP_TRAIN_VOLTAGE_SWING_MASK) >> DP_TRAIN_VOLTAGE_SWING_SHIFT],
@@ -321,110 +391,109 @@ static void dp_get_adjust_train(u8 link_status[DP_LINK_STATUS_SIZE],
321 train_set[lane] = v | p; 391 train_set[lane] = v | p;
322} 392}
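The removed dp_pre_emphasis_max() table tied the pre-emphasis ceiling to the requested voltage swing; the new code uses the flat spec maxima and simply ORs the "reached" flags in. Worked bit values, using the drm_dp_helper.h constants of this era:

	/* sink requests the top swing level on the worst lane:
	 *   v  = DP_TRAIN_VOLTAGE_SWING_1200	(0x3)
	 *   v >= DP_VOLTAGE_MAX, so
	 *   v |= DP_TRAIN_MAX_SWING_REACHED	(1 << 2)
	 * train_set then advertises 0x7: level 3 plus "no higher level",
	 * which stops the sink from asking for more swing next round.
	 */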
323 393
324union aux_channel_transaction { 394/* convert bits per color to bits per pixel;
325 PROCESS_AUX_CHANNEL_TRANSACTION_PS_ALLOCATION v1; 395 * the bpc comes from the EDID, 0 meaning unknown (treated as 24 bpp) */
326 PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS_V2 v2; 396static int convert_bpc_to_bpp(int bpc)
327};
328
329/* radeon aux chan functions */
330bool radeon_process_aux_ch(struct radeon_i2c_chan *chan, u8 *req_bytes,
331 int num_bytes, u8 *read_byte,
332 u8 read_buf_len, u8 delay)
333{ 397{
334 struct drm_device *dev = chan->dev; 398 if (bpc == 0)
335 struct radeon_device *rdev = dev->dev_private; 399 return 24;
336 union aux_channel_transaction args; 400 else
337 int index = GetIndexIntoMasterTable(COMMAND, ProcessAuxChannelTransaction); 401 return bpc * 3;
338 unsigned char *base; 402}
339 int retry_count = 0;
340
341 memset(&args, 0, sizeof(args));
342
343 base = (unsigned char *)rdev->mode_info.atom_context->scratch;
344
345retry:
346 memcpy(base, req_bytes, num_bytes);
347
348 args.v1.lpAuxRequest = 0;
349 args.v1.lpDataOut = 16;
350 args.v1.ucDataOutLen = 0;
351 args.v1.ucChannelID = chan->rec.i2c_id;
352 args.v1.ucDelay = delay / 10;
353 if (ASIC_IS_DCE4(rdev))
354 args.v2.ucHPD_ID = chan->rec.hpd;
355 403
356 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); 404/* get the max pix clock supported by the link rate and lane num */
405static int dp_get_max_dp_pix_clock(int link_rate,
406 int lane_num,
407 int bpp)
408{
409 return (link_rate * lane_num * 8) / bpp;
410}
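dp_get_max_dp_pix_clock() is plain bandwidth arithmetic: after 8b/10b coding each lane delivers eight data bits per symbol clock, and link_rate is that symbol clock in kHz. A worked example:

	/* 2.7 GHz lane rate -> link_rate = 270000 (270 MHz symbol clock):
	 *   270000 kHz * 4 lanes * 8 bits / 24 bpp = 360000 kHz
	 * i.e. a 360 MHz maximum pixel clock at 4 lanes, 24 bpp
	 */
	max_pix_clock = dp_get_max_dp_pix_clock(270000, 4, 24);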
357 411
358 if (args.v1.ucReplyStatus && !args.v1.ucDataOutLen) { 412static int dp_get_max_link_rate(u8 dpcd[DP_DPCD_SIZE])
359 if (args.v1.ucReplyStatus == 0x20 && retry_count++ < 10) 413{
360 goto retry; 414 switch (dpcd[DP_MAX_LINK_RATE]) {
361 DRM_DEBUG_KMS("failed to get auxch %02x%02x %02x %02x 0x%02x %02x after %d retries\n", 415 case DP_LINK_BW_1_62:
362 req_bytes[1], req_bytes[0], req_bytes[2], req_bytes[3], 416 default:
363 chan->rec.i2c_id, args.v1.ucReplyStatus, retry_count); 417 return 162000;
364 return false; 418 case DP_LINK_BW_2_7:
419 return 270000;
420 case DP_LINK_BW_5_4:
421 return 540000;
365 } 422 }
423}
366 424
367 if (args.v1.ucDataOutLen && read_byte && read_buf_len) { 425static u8 dp_get_max_lane_number(u8 dpcd[DP_DPCD_SIZE])
368 if (read_buf_len < args.v1.ucDataOutLen) { 426{
369 DRM_ERROR("Buffer to small for return answer %d %d\n", 427 return dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK;
370 read_buf_len, args.v1.ucDataOutLen);
371 return false;
372 }
373 {
374 int len = min(read_buf_len, args.v1.ucDataOutLen);
375 memcpy(read_byte, base + 16, len);
376 }
377 }
378 return true;
379} 428}
380 429
381bool radeon_dp_aux_native_write(struct radeon_connector *radeon_connector, uint16_t address, 430static u8 dp_get_dp_link_rate_coded(int link_rate)
382 uint8_t send_bytes, uint8_t *send)
383{ 431{
384 struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv; 432 switch (link_rate) {
385 u8 msg[20]; 433 case 162000:
386 u8 msg_len, dp_msg_len; 434 default:
387 bool ret; 435 return DP_LINK_BW_1_62;
436 case 270000:
437 return DP_LINK_BW_2_7;
438 case 540000:
439 return DP_LINK_BW_5_4;
440 }
441}
388 442
389 dp_msg_len = 4; 443/***** radeon specific DP functions *****/
390 msg[0] = address;
391 msg[1] = address >> 8;
392 msg[2] = AUX_NATIVE_WRITE << 4;
393 dp_msg_len += send_bytes;
394 msg[3] = (dp_msg_len << 4) | (send_bytes - 1);
395 444
396 if (send_bytes > 16) 445 /* First find the minimum lane count that can carry the pixel clock
397 return false; 446 * at the sink's max link rate (prefer fewer lanes), then let the loop
 447 * bound cap it at the max lane count the DP panel reports.
 448 */
449static int radeon_dp_get_dp_lane_number(struct drm_connector *connector,
450 u8 dpcd[DP_DPCD_SIZE],
451 int pix_clock)
452{
453 int bpp = convert_bpc_to_bpp(connector->display_info.bpc);
454 int max_link_rate = dp_get_max_link_rate(dpcd);
455 int max_lane_num = dp_get_max_lane_number(dpcd);
456 int lane_num;
457 int max_dp_pix_clock;
458
459 for (lane_num = 1; lane_num < max_lane_num; lane_num <<= 1) {
460 max_dp_pix_clock = dp_get_max_dp_pix_clock(max_link_rate, lane_num, bpp);
461 if (pix_clock <= max_dp_pix_clock)
462 break;
463 }
398 464
399 memcpy(&msg[4], send, send_bytes); 465 return lane_num;
400 msg_len = 4 + send_bytes;
401 ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus, msg, msg_len, NULL, 0, 0);
402 return ret;
403} 466}
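Walking radeon_dp_get_dp_lane_number() through a common case — a 24 bpp mode on a sink that advertises DP_LINK_BW_2_7 and 4 lanes:

	/* pix_clock = 148500 (1920x1080@60):
	 *   lane_num = 1: 270000 * 1 * 8 / 24 =  90000  -- too slow
	 *   lane_num = 2: 270000 * 2 * 8 / 24 = 180000  -- fits
	 * returns 2: the fewest lanes that carry the mode at the max rate
	 */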
404 467
405bool radeon_dp_aux_native_read(struct radeon_connector *radeon_connector, uint16_t address, 468static int radeon_dp_get_dp_link_clock(struct drm_connector *connector,
406 uint8_t delay, uint8_t expected_bytes, 469 u8 dpcd[DP_DPCD_SIZE],
407 uint8_t *read_p) 470 int pix_clock)
408{ 471{
409 struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv; 472 int bpp = convert_bpc_to_bpp(connector->display_info.bpc);
410 u8 msg[20]; 473 int lane_num, max_pix_clock;
411 u8 msg_len, dp_msg_len; 474
412 bool ret = false; 475 if (radeon_connector_encoder_is_dp_bridge(connector))
413 msg_len = 4; 476 return 270000;
414 dp_msg_len = 4; 477
415 msg[0] = address; 478 lane_num = radeon_dp_get_dp_lane_number(connector, dpcd, pix_clock);
416 msg[1] = address >> 8; 479 max_pix_clock = dp_get_max_dp_pix_clock(162000, lane_num, bpp);
417 msg[2] = AUX_NATIVE_READ << 4; 480 if (pix_clock <= max_pix_clock)
418 msg[3] = (dp_msg_len) << 4; 481 return 162000;
419 msg[3] |= expected_bytes - 1; 482 max_pix_clock = dp_get_max_dp_pix_clock(270000, lane_num, bpp);
483 if (pix_clock <= max_pix_clock)
484 return 270000;
485 if (radeon_connector_is_dp12_capable(connector)) {
486 max_pix_clock = dp_get_max_dp_pix_clock(540000, lane_num, bpp);
487 if (pix_clock <= max_pix_clock)
488 return 540000;
489 }
420 490
421 ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus, msg, msg_len, read_p, expected_bytes, delay); 491 return dp_get_max_link_rate(dpcd);
422 return ret;
423} 492}
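radeon_dp_get_dp_link_clock() then picks the slowest rate that still carries the mode on that lane count; DP-bridge encoders are pinned to 270000, and 540000 is only tried on DP 1.2 capable boards. Continuing the example above:

	/* 2 lanes, pix_clock = 148500:
	 *   162000 * 2 * 8 / 24 = 108000  -- too slow
	 *   270000 * 2 * 8 / 24 = 180000  -- fits
	 * returns 270000
	 */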
424 493
425/* radeon dp functions */ 494static u8 radeon_dp_encoder_service(struct radeon_device *rdev,
426static u8 radeon_dp_encoder_service(struct radeon_device *rdev, int action, int dp_clock, 495 int action, int dp_clock,
427 uint8_t ucconfig, uint8_t lane_num) 496 u8 ucconfig, u8 lane_num)
428{ 497{
429 DP_ENCODER_SERVICE_PARAMETERS args; 498 DP_ENCODER_SERVICE_PARAMETERS args;
430 int index = GetIndexIntoMasterTable(COMMAND, DPEncoderService); 499 int index = GetIndexIntoMasterTable(COMMAND, DPEncoderService);
@@ -454,60 +523,86 @@ bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector)
454{ 523{
455 struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv; 524 struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
456 u8 msg[25]; 525 u8 msg[25];
457 int ret; 526 int ret, i;
458 527
459 ret = radeon_dp_aux_native_read(radeon_connector, DP_DPCD_REV, 0, 8, msg); 528 ret = radeon_dp_aux_native_read(radeon_connector, DP_DPCD_REV, msg, 8, 0);
460 if (ret) { 529 if (ret > 0) {
461 memcpy(dig_connector->dpcd, msg, 8); 530 memcpy(dig_connector->dpcd, msg, 8);
462 { 531 DRM_DEBUG_KMS("DPCD: ");
463 int i; 532 for (i = 0; i < 8; i++)
464 DRM_DEBUG_KMS("DPCD: "); 533 DRM_DEBUG_KMS("%02x ", msg[i]);
465 for (i = 0; i < 8; i++) 534 DRM_DEBUG_KMS("\n");
466 DRM_DEBUG_KMS("%02x ", msg[i]);
467 DRM_DEBUG_KMS("\n");
468 }
469 return true; 535 return true;
470 } 536 }
471 dig_connector->dpcd[0] = 0; 537 dig_connector->dpcd[0] = 0;
472 return false; 538 return false;
473} 539}
474 540
541static void radeon_dp_set_panel_mode(struct drm_encoder *encoder,
542 struct drm_connector *connector)
543{
544 struct drm_device *dev = encoder->dev;
545 struct radeon_device *rdev = dev->dev_private;
546 int panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE;
547
548 if (!ASIC_IS_DCE4(rdev))
549 return;
550
551 if (radeon_connector_encoder_is_dp_bridge(connector))
552 panel_mode = DP_PANEL_MODE_INTERNAL_DP1_MODE;
553
554 atombios_dig_encoder_setup(encoder,
555 ATOM_ENCODER_CMD_SETUP_PANEL_MODE,
556 panel_mode);
557}
558
475void radeon_dp_set_link_config(struct drm_connector *connector, 559void radeon_dp_set_link_config(struct drm_connector *connector,
476 struct drm_display_mode *mode) 560 struct drm_display_mode *mode)
477{ 561{
478 struct radeon_connector *radeon_connector; 562 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
479 struct radeon_connector_atom_dig *dig_connector; 563 struct radeon_connector_atom_dig *dig_connector;
480 564
481 if ((connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort) &&
482 (connector->connector_type != DRM_MODE_CONNECTOR_eDP))
483 return;
484
485 radeon_connector = to_radeon_connector(connector);
486 if (!radeon_connector->con_priv) 565 if (!radeon_connector->con_priv)
487 return; 566 return;
488 dig_connector = radeon_connector->con_priv; 567 dig_connector = radeon_connector->con_priv;
489 568
490 dig_connector->dp_clock = 569 if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
491 dp_link_clock_for_mode_clock(dig_connector->dpcd, mode->clock); 570 (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) {
492 dig_connector->dp_lane_count = 571 dig_connector->dp_clock =
493 dp_lanes_for_mode_clock(dig_connector->dpcd, mode->clock); 572 radeon_dp_get_dp_link_clock(connector, dig_connector->dpcd, mode->clock);
573 dig_connector->dp_lane_count =
574 radeon_dp_get_dp_lane_number(connector, dig_connector->dpcd, mode->clock);
575 }
494} 576}
495 577
496int radeon_dp_mode_valid_helper(struct radeon_connector *radeon_connector, 578int radeon_dp_mode_valid_helper(struct drm_connector *connector,
497 struct drm_display_mode *mode) 579 struct drm_display_mode *mode)
498{ 580{
499 struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv; 581 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
582 struct radeon_connector_atom_dig *dig_connector;
583 int dp_clock;
584
585 if (!radeon_connector->con_priv)
586 return MODE_CLOCK_HIGH;
587 dig_connector = radeon_connector->con_priv;
588
589 dp_clock =
590 radeon_dp_get_dp_link_clock(connector, dig_connector->dpcd, mode->clock);
591
592 if ((dp_clock == 540000) &&
593 (!radeon_connector_is_dp12_capable(connector)))
594 return MODE_CLOCK_HIGH;
500 595
501 return dp_mode_valid(dig_connector->dpcd, mode->clock); 596 return MODE_OK;
502} 597}
503 598
504static bool atom_dp_get_link_status(struct radeon_connector *radeon_connector, 599static bool radeon_dp_get_link_status(struct radeon_connector *radeon_connector,
505 u8 link_status[DP_LINK_STATUS_SIZE]) 600 u8 link_status[DP_LINK_STATUS_SIZE])
506{ 601{
507 int ret; 602 int ret;
508 ret = radeon_dp_aux_native_read(radeon_connector, DP_LANE0_1_STATUS, 100, 603 ret = radeon_dp_aux_native_read(radeon_connector, DP_LANE0_1_STATUS,
509 DP_LINK_STATUS_SIZE, link_status); 604 link_status, DP_LINK_STATUS_SIZE, 100);
510 if (!ret) { 605 if (ret <= 0) {
511 DRM_ERROR("displayport link status failed\n"); 606 DRM_ERROR("displayport link status failed\n");
512 return false; 607 return false;
513 } 608 }
@@ -518,292 +613,309 @@ static bool atom_dp_get_link_status(struct radeon_connector *radeon_connector,
518 return true; 613 return true;
519} 614}
520 615
521bool radeon_dp_needs_link_train(struct radeon_connector *radeon_connector) 616struct radeon_dp_link_train_info {
522{ 617 struct radeon_device *rdev;
523 struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv; 618 struct drm_encoder *encoder;
619 struct drm_connector *connector;
620 struct radeon_connector *radeon_connector;
621 int enc_id;
622 int dp_clock;
623 int dp_lane_count;
624 int rd_interval;
625 bool tp3_supported;
626 u8 dpcd[8];
627 u8 train_set[4];
524 u8 link_status[DP_LINK_STATUS_SIZE]; 628 u8 link_status[DP_LINK_STATUS_SIZE];
629 u8 tries;
630};
525 631
526 if (!atom_dp_get_link_status(radeon_connector, link_status)) 632static void radeon_dp_update_vs_emph(struct radeon_dp_link_train_info *dp_info)
527 return false; 633{
528 if (dp_channel_eq_ok(link_status, dig_connector->dp_lane_count)) 634 /* set the initial vs/emph on the source */
529 return false; 635 atombios_dig_transmitter_setup(dp_info->encoder,
530 return true; 636 ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH,
637 0, dp_info->train_set[0]); /* sets all lanes at once */
638
639 /* set the vs/emph on the sink */
640 radeon_dp_aux_native_write(dp_info->radeon_connector, DP_TRAINING_LANE0_SET,
641 dp_info->train_set, dp_info->dp_lane_count, 0);
531} 642}
532 643
533static void dp_set_power(struct radeon_connector *radeon_connector, u8 power_state) 644static void radeon_dp_set_tp(struct radeon_dp_link_train_info *dp_info, int tp)
534{ 645{
535 struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv; 646 int rtp = 0;
536 647
537 if (dig_connector->dpcd[0] >= 0x11) { 648 /* set training pattern on the source */
538 radeon_dp_aux_native_write(radeon_connector, DP_SET_POWER, 1, 649 if (ASIC_IS_DCE4(dp_info->rdev)) {
539 &power_state); 650 switch (tp) {
651 case DP_TRAINING_PATTERN_1:
652 rtp = ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN1;
653 break;
654 case DP_TRAINING_PATTERN_2:
655 rtp = ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN2;
656 break;
657 case DP_TRAINING_PATTERN_3:
658 rtp = ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN3;
659 break;
660 }
661 atombios_dig_encoder_setup(dp_info->encoder, rtp, 0);
662 } else {
663 switch (tp) {
664 case DP_TRAINING_PATTERN_1:
665 rtp = 0;
666 break;
667 case DP_TRAINING_PATTERN_2:
668 rtp = 1;
669 break;
670 }
671 radeon_dp_encoder_service(dp_info->rdev, ATOM_DP_ACTION_TRAINING_PATTERN_SEL,
672 dp_info->dp_clock, dp_info->enc_id, rtp);
540 } 673 }
541}
542 674
543static void dp_set_downspread(struct radeon_connector *radeon_connector, u8 downspread) 675 /* enable training pattern on the sink */
544{ 676 radeon_write_dpcd_reg(dp_info->radeon_connector, DP_TRAINING_PATTERN_SET, tp);
545 radeon_dp_aux_native_write(radeon_connector, DP_DOWNSPREAD_CTRL, 1,
546 &downspread);
547} 677}
548 678
549static void dp_set_link_bw_lanes(struct radeon_connector *radeon_connector, 679static int radeon_dp_link_train_init(struct radeon_dp_link_train_info *dp_info)
550 u8 link_configuration[DP_LINK_CONFIGURATION_SIZE])
551{ 680{
552 radeon_dp_aux_native_write(radeon_connector, DP_LINK_BW_SET, 2, 681 u8 tmp;
553 link_configuration);
554}
555 682
556static void dp_update_dpvs_emph(struct radeon_connector *radeon_connector, 683 /* power up the sink */
557 struct drm_encoder *encoder, 684 if (dp_info->dpcd[0] >= 0x11)
558 u8 train_set[4]) 685 radeon_write_dpcd_reg(dp_info->radeon_connector,
559{ 686 DP_SET_POWER, DP_SET_POWER_D0);
560 struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv; 687
561 int i; 688 /* possibly enable downspread on the sink */
689 if (dp_info->dpcd[3] & 0x1)
690 radeon_write_dpcd_reg(dp_info->radeon_connector,
691 DP_DOWNSPREAD_CTRL, DP_SPREAD_AMP_0_5);
692 else
693 radeon_write_dpcd_reg(dp_info->radeon_connector,
694 DP_DOWNSPREAD_CTRL, 0);
562 695
563 for (i = 0; i < dig_connector->dp_lane_count; i++) 696 radeon_dp_set_panel_mode(dp_info->encoder, dp_info->connector);
564 atombios_dig_transmitter_setup(encoder,
565 ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH,
566 i, train_set[i]);
567 697
568 radeon_dp_aux_native_write(radeon_connector, DP_TRAINING_LANE0_SET, 698 /* set the lane count on the sink */
569 dig_connector->dp_lane_count, train_set); 699 tmp = dp_info->dp_lane_count;
570} 700 if (dp_info->dpcd[0] >= 0x11)
701 tmp |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
702 radeon_write_dpcd_reg(dp_info->radeon_connector, DP_LANE_COUNT_SET, tmp);
571 703
572static void dp_set_training(struct radeon_connector *radeon_connector, 704 /* set the link rate on the sink */
573 u8 training) 705 tmp = dp_get_dp_link_rate_coded(dp_info->dp_clock);
574{ 706 radeon_write_dpcd_reg(dp_info->radeon_connector, DP_LINK_BW_SET, tmp);
575 radeon_dp_aux_native_write(radeon_connector, DP_TRAINING_PATTERN_SET,
576 1, &training);
577}
578 707
579void dp_link_train(struct drm_encoder *encoder, 708 /* start training on the source */
580 struct drm_connector *connector) 709 if (ASIC_IS_DCE4(dp_info->rdev))
581{ 710 atombios_dig_encoder_setup(dp_info->encoder,
582 struct drm_device *dev = encoder->dev; 711 ATOM_ENCODER_CMD_DP_LINK_TRAINING_START, 0);
583 struct radeon_device *rdev = dev->dev_private; 712 else
584 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 713 radeon_dp_encoder_service(dp_info->rdev, ATOM_DP_ACTION_TRAINING_START,
585 struct radeon_encoder_atom_dig *dig; 714 dp_info->dp_clock, dp_info->enc_id, 0);
586 struct radeon_connector *radeon_connector;
587 struct radeon_connector_atom_dig *dig_connector;
588 int enc_id = 0;
589 bool clock_recovery, channel_eq;
590 u8 link_status[DP_LINK_STATUS_SIZE];
591 u8 link_configuration[DP_LINK_CONFIGURATION_SIZE];
592 u8 tries, voltage;
593 u8 train_set[4];
594 int i;
595 715
596 if ((connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort) && 716 /* disable the training pattern on the sink */
597 (connector->connector_type != DRM_MODE_CONNECTOR_eDP)) 717 radeon_write_dpcd_reg(dp_info->radeon_connector,
598 return; 718 DP_TRAINING_PATTERN_SET,
719 DP_TRAINING_PATTERN_DISABLE);
599 720
600 if (!radeon_encoder->enc_priv) 721 return 0;
601 return; 722}
602 dig = radeon_encoder->enc_priv;
603 723
604 radeon_connector = to_radeon_connector(connector); 724static int radeon_dp_link_train_finish(struct radeon_dp_link_train_info *dp_info)
605 if (!radeon_connector->con_priv) 725{
606 return; 726 udelay(400);
607 dig_connector = radeon_connector->con_priv;
608 727
609 if (dig->dig_encoder) 728 /* disable the training pattern on the sink */
610 enc_id |= ATOM_DP_CONFIG_DIG2_ENCODER; 729 radeon_write_dpcd_reg(dp_info->radeon_connector,
611 else 730 DP_TRAINING_PATTERN_SET,
612 enc_id |= ATOM_DP_CONFIG_DIG1_ENCODER; 731 DP_TRAINING_PATTERN_DISABLE);
613 if (dig->linkb)
614 enc_id |= ATOM_DP_CONFIG_LINK_B;
615 else
616 enc_id |= ATOM_DP_CONFIG_LINK_A;
617 732
618 memset(link_configuration, 0, DP_LINK_CONFIGURATION_SIZE); 733 /* disable the training pattern on the source */
619 if (dig_connector->dp_clock == 270000) 734 if (ASIC_IS_DCE4(dp_info->rdev))
620 link_configuration[0] = DP_LINK_BW_2_7; 735 atombios_dig_encoder_setup(dp_info->encoder,
736 ATOM_ENCODER_CMD_DP_LINK_TRAINING_COMPLETE, 0);
621 else 737 else
622 link_configuration[0] = DP_LINK_BW_1_62; 738 radeon_dp_encoder_service(dp_info->rdev, ATOM_DP_ACTION_TRAINING_COMPLETE,
623 link_configuration[1] = dig_connector->dp_lane_count; 739 dp_info->dp_clock, dp_info->enc_id, 0);
624 if (dig_connector->dpcd[0] >= 0x11)
625 link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
626 740
627 /* power up the sink */ 741 return 0;
628 dp_set_power(radeon_connector, DP_SET_POWER_D0); 742}
629 /* disable the training pattern on the sink */
630 dp_set_training(radeon_connector, DP_TRAINING_PATTERN_DISABLE);
631 /* set link bw and lanes on the sink */
632 dp_set_link_bw_lanes(radeon_connector, link_configuration);
633 /* disable downspread on the sink */
634 dp_set_downspread(radeon_connector, 0);
635 if (ASIC_IS_DCE4(rdev)) {
636 /* start training on the source */
637 atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_LINK_TRAINING_START);
638 /* set training pattern 1 on the source */
639 atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN1);
640 } else {
641 /* start training on the source */
642 radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_START,
643 dig_connector->dp_clock, enc_id, 0);
644 /* set training pattern 1 on the source */
645 radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_PATTERN_SEL,
646 dig_connector->dp_clock, enc_id, 0);
647 }
648 743
649 /* set initial vs/emph */ 744static int radeon_dp_link_train_cr(struct radeon_dp_link_train_info *dp_info)
650 memset(train_set, 0, 4); 745{
651 udelay(400); 746 bool clock_recovery;
652 /* set training pattern 1 on the sink */ 747 u8 voltage;
653 dp_set_training(radeon_connector, DP_TRAINING_PATTERN_1); 748 int i;
654 749
655 dp_update_dpvs_emph(radeon_connector, encoder, train_set); 750 radeon_dp_set_tp(dp_info, DP_TRAINING_PATTERN_1);
751 memset(dp_info->train_set, 0, 4);
752 radeon_dp_update_vs_emph(dp_info);
753
754 udelay(400);
656 755
657 /* clock recovery loop */ 756 /* clock recovery loop */
658 clock_recovery = false; 757 clock_recovery = false;
659 tries = 0; 758 dp_info->tries = 0;
660 voltage = 0xff; 759 voltage = 0xff;
661 for (;;) { 760 while (1) {
662 udelay(100); 761 if (dp_info->rd_interval == 0)
663 if (!atom_dp_get_link_status(radeon_connector, link_status)) 762 udelay(100);
763 else
764 mdelay(dp_info->rd_interval * 4);
765
766 if (!radeon_dp_get_link_status(dp_info->radeon_connector, dp_info->link_status))
664 break; 767 break;
665 768
666 if (dp_clock_recovery_ok(link_status, dig_connector->dp_lane_count)) { 769 if (dp_clock_recovery_ok(dp_info->link_status, dp_info->dp_lane_count)) {
667 clock_recovery = true; 770 clock_recovery = true;
668 break; 771 break;
669 } 772 }
670 773
671 for (i = 0; i < dig_connector->dp_lane_count; i++) { 774 for (i = 0; i < dp_info->dp_lane_count; i++) {
672 if ((train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0) 775 if ((dp_info->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
673 break; 776 break;
674 } 777 }
675 if (i == dig_connector->dp_lane_count) { 778 if (i == dp_info->dp_lane_count) {
676 DRM_ERROR("clock recovery reached max voltage\n"); 779 DRM_ERROR("clock recovery reached max voltage\n");
677 break; 780 break;
678 } 781 }
679 782
680 if ((train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) { 783 if ((dp_info->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
681 ++tries; 784 ++dp_info->tries;
682 if (tries == 5) { 785 if (dp_info->tries == 5) {
683 DRM_ERROR("clock recovery tried 5 times\n"); 786 DRM_ERROR("clock recovery tried 5 times\n");
684 break; 787 break;
685 } 788 }
686 } else 789 } else
687 tries = 0; 790 dp_info->tries = 0;
688 791
689 voltage = train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK; 792 voltage = dp_info->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
690 793
691 /* Compute new train_set as requested by sink */ 794 /* Compute new train_set as requested by sink */
692 dp_get_adjust_train(link_status, dig_connector->dp_lane_count, train_set); 795 dp_get_adjust_train(dp_info->link_status, dp_info->dp_lane_count, dp_info->train_set);
693 dp_update_dpvs_emph(radeon_connector, encoder, train_set); 796
797 radeon_dp_update_vs_emph(dp_info);
694 } 798 }
695 if (!clock_recovery) 799 if (!clock_recovery) {
696 DRM_ERROR("clock recovery failed\n"); 800 DRM_ERROR("clock recovery failed\n");
697 else 801 return -1;
802 } else {
698 DRM_DEBUG_KMS("clock recovery at voltage %d pre-emphasis %d\n", 803 DRM_DEBUG_KMS("clock recovery at voltage %d pre-emphasis %d\n",
699 train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK, 804 dp_info->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK,
700 (train_set[0] & DP_TRAIN_PRE_EMPHASIS_MASK) >> 805 (dp_info->train_set[0] & DP_TRAIN_PRE_EMPHASIS_MASK) >>
701 DP_TRAIN_PRE_EMPHASIS_SHIFT); 806 DP_TRAIN_PRE_EMPHASIS_SHIFT);
807 return 0;
808 }
809}
702 810
811static int radeon_dp_link_train_ce(struct radeon_dp_link_train_info *dp_info)
812{
813 bool channel_eq;
703 814
704 /* set training pattern 2 on the sink */ 815 if (dp_info->tp3_supported)
705 dp_set_training(radeon_connector, DP_TRAINING_PATTERN_2); 816 radeon_dp_set_tp(dp_info, DP_TRAINING_PATTERN_3);
706 /* set training pattern 2 on the source */
707 if (ASIC_IS_DCE4(rdev))
708 atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN2);
709 else 817 else
710 radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_PATTERN_SEL, 818 radeon_dp_set_tp(dp_info, DP_TRAINING_PATTERN_2);
711 dig_connector->dp_clock, enc_id, 1);
712 819
713 /* channel equalization loop */ 820 /* channel equalization loop */
714 tries = 0; 821 dp_info->tries = 0;
715 channel_eq = false; 822 channel_eq = false;
716 for (;;) { 823 while (1) {
717 udelay(400); 824 if (dp_info->rd_interval == 0)
718 if (!atom_dp_get_link_status(radeon_connector, link_status)) 825 udelay(400);
826 else
827 mdelay(dp_info->rd_interval * 4);
828
829 if (!radeon_dp_get_link_status(dp_info->radeon_connector, dp_info->link_status))
719 break; 830 break;
720 831
721 if (dp_channel_eq_ok(link_status, dig_connector->dp_lane_count)) { 832 if (dp_channel_eq_ok(dp_info->link_status, dp_info->dp_lane_count)) {
722 channel_eq = true; 833 channel_eq = true;
723 break; 834 break;
724 } 835 }
725 836
726 /* Try 5 times */ 837 /* Try 5 times */
727 if (tries > 5) { 838 if (dp_info->tries > 5) {
728 DRM_ERROR("channel eq failed: 5 tries\n"); 839 DRM_ERROR("channel eq failed: 5 tries\n");
729 break; 840 break;
730 } 841 }
731 842
732 /* Compute new train_set as requested by sink */ 843 /* Compute new train_set as requested by sink */
733 dp_get_adjust_train(link_status, dig_connector->dp_lane_count, train_set); 844 dp_get_adjust_train(dp_info->link_status, dp_info->dp_lane_count, dp_info->train_set);
734 dp_update_dpvs_emph(radeon_connector, encoder, train_set);
735 845
736 tries++; 846 radeon_dp_update_vs_emph(dp_info);
847 dp_info->tries++;
737 } 848 }
738 849
739 if (!channel_eq) 850 if (!channel_eq) {
740 DRM_ERROR("channel eq failed\n"); 851 DRM_ERROR("channel eq failed\n");
741 else 852 return -1;
853 } else {
742 DRM_DEBUG_KMS("channel eq at voltage %d pre-emphasis %d\n", 854 DRM_DEBUG_KMS("channel eq at voltage %d pre-emphasis %d\n",
743 train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK, 855 dp_info->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK,
744 (train_set[0] & DP_TRAIN_PRE_EMPHASIS_MASK) 856 (dp_info->train_set[0] & DP_TRAIN_PRE_EMPHASIS_MASK)
745 >> DP_TRAIN_PRE_EMPHASIS_SHIFT); 857 >> DP_TRAIN_PRE_EMPHASIS_SHIFT);
746 858 return 0;
747 /* disable the training pattern on the sink */ 859 }
748 dp_set_training(radeon_connector, DP_TRAINING_PATTERN_DISABLE);
749
750 /* disable the training pattern on the source */
751 if (ASIC_IS_DCE4(rdev))
752 atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_LINK_TRAINING_COMPLETE);
753 else
754 radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_COMPLETE,
755 dig_connector->dp_clock, enc_id, 0);
756} 860}
757 861
758int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, 862void radeon_dp_link_train(struct drm_encoder *encoder,
759 uint8_t write_byte, uint8_t *read_byte) 863 struct drm_connector *connector)
760{ 864{
761 struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data; 865 struct drm_device *dev = encoder->dev;
762 struct radeon_i2c_chan *auxch = (struct radeon_i2c_chan *)adapter; 866 struct radeon_device *rdev = dev->dev_private;
763 int ret = 0; 867 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
764 uint16_t address = algo_data->address; 868 struct radeon_encoder_atom_dig *dig;
765 uint8_t msg[5]; 869 struct radeon_connector *radeon_connector;
766 uint8_t reply[2]; 870 struct radeon_connector_atom_dig *dig_connector;
767 int msg_len, dp_msg_len; 871 struct radeon_dp_link_train_info dp_info;
768 int reply_bytes; 872 u8 tmp;
769
770 /* Set up the command byte */
771 if (mode & MODE_I2C_READ)
772 msg[2] = AUX_I2C_READ << 4;
773 else
774 msg[2] = AUX_I2C_WRITE << 4;
775
776 if (!(mode & MODE_I2C_STOP))
777 msg[2] |= AUX_I2C_MOT << 4;
778 873
779 msg[0] = address; 874 if (!radeon_encoder->enc_priv)
780 msg[1] = address >> 8; 875 return;
876 dig = radeon_encoder->enc_priv;
781 877
782 reply_bytes = 1; 878 radeon_connector = to_radeon_connector(connector);
879 if (!radeon_connector->con_priv)
880 return;
881 dig_connector = radeon_connector->con_priv;
783 882
784 msg_len = 4; 883 if ((dig_connector->dp_sink_type != CONNECTOR_OBJECT_ID_DISPLAYPORT) &&
785 dp_msg_len = 3; 884 (dig_connector->dp_sink_type != CONNECTOR_OBJECT_ID_eDP))
786 switch (mode) { 885 return;
787 case MODE_I2C_WRITE:
788 msg[4] = write_byte;
789 msg_len++;
790 dp_msg_len += 2;
791 break;
792 case MODE_I2C_READ:
793 dp_msg_len += 1;
794 break;
795 default:
796 break;
797 }
798 886
799 msg[3] = (dp_msg_len) << 4; 887 dp_info.enc_id = 0;
800 ret = radeon_process_aux_ch(auxch, msg, msg_len, reply, reply_bytes, 0); 888 if (dig->dig_encoder)
889 dp_info.enc_id |= ATOM_DP_CONFIG_DIG2_ENCODER;
890 else
891 dp_info.enc_id |= ATOM_DP_CONFIG_DIG1_ENCODER;
892 if (dig->linkb)
893 dp_info.enc_id |= ATOM_DP_CONFIG_LINK_B;
894 else
895 dp_info.enc_id |= ATOM_DP_CONFIG_LINK_A;
801 896
802 if (ret) { 897 dp_info.rd_interval = radeon_read_dpcd_reg(radeon_connector, DP_TRAINING_AUX_RD_INTERVAL);
803 if (read_byte) 898 tmp = radeon_read_dpcd_reg(radeon_connector, DP_MAX_LANE_COUNT);
804 *read_byte = reply[0]; 899 if (ASIC_IS_DCE5(rdev) && (tmp & DP_TPS3_SUPPORTED))
805 return reply_bytes; 900 dp_info.tp3_supported = true;
806 } 901 else
807 return -EREMOTEIO; 902 dp_info.tp3_supported = false;
903
904 memcpy(dp_info.dpcd, dig_connector->dpcd, 8);
905 dp_info.rdev = rdev;
906 dp_info.encoder = encoder;
907 dp_info.connector = connector;
908 dp_info.radeon_connector = radeon_connector;
909 dp_info.dp_lane_count = dig_connector->dp_lane_count;
910 dp_info.dp_clock = dig_connector->dp_clock;
911
912 if (radeon_dp_link_train_init(&dp_info))
913 goto done;
914 if (radeon_dp_link_train_cr(&dp_info))
915 goto done;
916 if (radeon_dp_link_train_ce(&dp_info))
917 goto done;
918done:
919 if (radeon_dp_link_train_finish(&dp_info))
920 return;
808} 921}
809
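Taken together, the rewrite replaces the old monolithic dp_link_train() with a small state machine around radeon_dp_link_train_info; the flow, as wired up above:

	/* radeon_dp_link_train()
	 *  -> radeon_dp_link_train_init()	power up the sink, set
	 *					downspread, panel mode, lane
	 *					count and link rate, start
	 *					training on the source
	 *  -> radeon_dp_link_train_cr()	TPS1; adjust vs/emph from the
	 *					sink's requests until clock
	 *					recovery locks (5 tries max at
	 *					one voltage, abort at max swing)
	 *  -> radeon_dp_link_train_ce()	TPS2, or TPS3 when a DCE5 part
	 *					and the sink support it
	 *  -> radeon_dp_link_train_finish()	clear the pattern on sink and
	 *					source; the done: label runs on
	 *					success and failure alike
	 */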
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 9073e3bfb08c..7c37638095f7 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -1578,7 +1578,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
1578 u32 sq_stack_resource_mgmt_2; 1578 u32 sq_stack_resource_mgmt_2;
1579 u32 sq_stack_resource_mgmt_3; 1579 u32 sq_stack_resource_mgmt_3;
1580 u32 vgt_cache_invalidation; 1580 u32 vgt_cache_invalidation;
1581 u32 hdp_host_path_cntl; 1581 u32 hdp_host_path_cntl, tmp;
1582 int i, j, num_shader_engines, ps_thread_count; 1582 int i, j, num_shader_engines, ps_thread_count;
1583 1583
1584 switch (rdev->family) { 1584 switch (rdev->family) {
@@ -1936,8 +1936,12 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
1936 rdev->config.evergreen.tile_config |= (3 << 0); 1936 rdev->config.evergreen.tile_config |= (3 << 0);
1937 break; 1937 break;
1938 } 1938 }
1939 rdev->config.evergreen.tile_config |= 1939 /* num banks is 8 on all fusion asics */
1940 ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) << 4; 1940 if (rdev->flags & RADEON_IS_IGP)
1941 rdev->config.evergreen.tile_config |= 8 << 4;
1942 else
1943 rdev->config.evergreen.tile_config |=
1944 ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) << 4;
1941 rdev->config.evergreen.tile_config |= 1945 rdev->config.evergreen.tile_config |=
1942 ((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT) << 8; 1946 ((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT) << 8;
1943 rdev->config.evergreen.tile_config |= 1947 rdev->config.evergreen.tile_config |=
@@ -2141,6 +2145,10 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
2141 for (i = SQ_ALU_CONST_BUFFER_SIZE_HS_0; i < 0x29000; i += 4) 2145 for (i = SQ_ALU_CONST_BUFFER_SIZE_HS_0; i < 0x29000; i += 4)
2142 WREG32(i, 0); 2146 WREG32(i, 0);
2143 2147
2148 tmp = RREG32(HDP_MISC_CNTL);
2149 tmp |= HDP_FLUSH_INVALIDATE_CACHE;
2150 WREG32(HDP_MISC_CNTL, tmp);
2151
2144 hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL); 2152 hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL);
2145 WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl); 2153 WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl);
2146 2154
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index fc40e0cc3451..f37e91ee8a11 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -64,6 +64,8 @@
64#define GB_BACKEND_MAP 0x98FC 64#define GB_BACKEND_MAP 0x98FC
65#define DMIF_ADDR_CONFIG 0xBD4 65#define DMIF_ADDR_CONFIG 0xBD4
66#define HDP_ADDR_CONFIG 0x2F48 66#define HDP_ADDR_CONFIG 0x2F48
67#define HDP_MISC_CNTL 0x2F4C
68#define HDP_FLUSH_INVALIDATE_CACHE (1 << 0)
67 69
68#define CC_SYS_RB_BACKEND_DISABLE 0x3F88 70#define CC_SYS_RB_BACKEND_DISABLE 0x3F88
69#define GC_USER_RB_BACKEND_DISABLE 0x9B7C 71#define GC_USER_RB_BACKEND_DISABLE 0x9B7C
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 3d8a7634bbe9..b205ba1cdd8f 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -417,7 +417,7 @@ static u32 cayman_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
417 num_shader_engines = 1; 417 num_shader_engines = 1;
418 if (num_shader_engines > rdev->config.cayman.max_shader_engines) 418 if (num_shader_engines > rdev->config.cayman.max_shader_engines)
419 num_shader_engines = rdev->config.cayman.max_shader_engines; 419 num_shader_engines = rdev->config.cayman.max_shader_engines;
420 if (num_backends_per_asic > num_shader_engines) 420 if (num_backends_per_asic < num_shader_engines)
421 num_backends_per_asic = num_shader_engines; 421 num_backends_per_asic = num_shader_engines;
422 if (num_backends_per_asic > (rdev->config.cayman.max_backends_per_se * num_shader_engines)) 422 if (num_backends_per_asic > (rdev->config.cayman.max_backends_per_se * num_shader_engines))
423 num_backends_per_asic = rdev->config.cayman.max_backends_per_se * num_shader_engines; 423 num_backends_per_asic = rdev->config.cayman.max_backends_per_se * num_shader_engines;
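The flipped comparison in cayman_get_tile_pipe_to_backend_map() reads as a clamp-direction fix: the old test could only lower the backend count, while the surrounding clamps suggest the intent is a floor of one backend per shader engine. For instance:

	/* num_backends_per_asic = 1, num_shader_engines = 2:
	 *   old: 1 > 2 is false -> stays 1, one engine maps no backend
	 *   new: 1 < 2 is true  -> raised to 2, each engine keeps one
	 */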
@@ -829,7 +829,7 @@ static void cayman_gpu_init(struct radeon_device *rdev)
829 rdev->config.cayman.tile_config |= 829 rdev->config.cayman.tile_config |=
830 ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) << 4; 830 ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) << 4;
831 rdev->config.cayman.tile_config |= 831 rdev->config.cayman.tile_config |=
832 (gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT; 832 ((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8;
833 rdev->config.cayman.tile_config |= 833 rdev->config.cayman.tile_config |=
834 ((gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT) << 12; 834 ((gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT) << 12;
835 835
@@ -931,6 +931,10 @@ static void cayman_gpu_init(struct radeon_device *rdev)
931 WREG32(CB_PERF_CTR3_SEL_0, 0); 931 WREG32(CB_PERF_CTR3_SEL_0, 0);
932 WREG32(CB_PERF_CTR3_SEL_1, 0); 932 WREG32(CB_PERF_CTR3_SEL_1, 0);
933 933
934 tmp = RREG32(HDP_MISC_CNTL);
935 tmp |= HDP_FLUSH_INVALIDATE_CACHE;
936 WREG32(HDP_MISC_CNTL, tmp);
937
934 hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL); 938 hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL);
935 WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl); 939 WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl);
936 940
diff --git a/drivers/gpu/drm/radeon/nid.h b/drivers/gpu/drm/radeon/nid.h
index 0f9a08b53fbd..9736746da2d6 100644
--- a/drivers/gpu/drm/radeon/nid.h
+++ b/drivers/gpu/drm/radeon/nid.h
@@ -136,6 +136,8 @@
136#define HDP_NONSURFACE_INFO 0x2C08 136#define HDP_NONSURFACE_INFO 0x2C08
137#define HDP_NONSURFACE_SIZE 0x2C0C 137#define HDP_NONSURFACE_SIZE 0x2C0C
138#define HDP_ADDR_CONFIG 0x2F48 138#define HDP_ADDR_CONFIG 0x2F48
139#define HDP_MISC_CNTL 0x2F4C
140#define HDP_FLUSH_INVALIDATE_CACHE (1 << 0)
139 141
140#define CC_SYS_RB_BACKEND_DISABLE 0x3F88 142#define CC_SYS_RB_BACKEND_DISABLE 0x3F88
141#define GC_USER_SYS_RB_BACKEND_DISABLE 0x3F8C 143#define GC_USER_SYS_RB_BACKEND_DISABLE 0x3F8C
@@ -351,7 +353,7 @@
351#define MULTI_GPU_TILE_SIZE_MASK 0x03000000 353#define MULTI_GPU_TILE_SIZE_MASK 0x03000000
352#define MULTI_GPU_TILE_SIZE_SHIFT 24 354#define MULTI_GPU_TILE_SIZE_SHIFT 24
353#define ROW_SIZE(x) ((x) << 28) 355#define ROW_SIZE(x) ((x) << 28)
354#define ROW_SIZE_MASK 0x30000007 356#define ROW_SIZE_MASK 0x30000000
355#define ROW_SIZE_SHIFT 28 357#define ROW_SIZE_SHIFT 28
356#define NUM_LOWER_PIPES(x) ((x) << 30) 358#define NUM_LOWER_PIPES(x) ((x) << 30)
357#define NUM_LOWER_PIPES_MASK 0x40000000 359#define NUM_LOWER_PIPES_MASK 0x40000000
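The ROW_SIZE_MASK change above is a pure bitfield fix: ROW_SIZE sits in bits 29:28 (shift 28), so the mask must be 0x30000000; the old 0x30000007 also matched bits 2:0, which belong to an unrelated field at the bottom of GB_ADDR_CONFIG. Concretely:

	/* gb_addr_config = 0x10000004 (ROW_SIZE = 1, low field = 4):
	 *   old: 0x10000004 & 0x30000007 = 0x10000004  -- stray low bits
	 *   new: 0x10000004 & 0x30000000 = 0x10000000  -- clean extract
	 * the ">> 28" uses in ni.c shifted the strays away, but an
	 * unshifted use of the mask would not have
	 */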
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index ca576191d058..d948265db87e 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -782,6 +782,7 @@ static struct radeon_asic evergreen_asic = {
782 .hpd_fini = &evergreen_hpd_fini, 782 .hpd_fini = &evergreen_hpd_fini,
783 .hpd_sense = &evergreen_hpd_sense, 783 .hpd_sense = &evergreen_hpd_sense,
784 .hpd_set_polarity = &evergreen_hpd_set_polarity, 784 .hpd_set_polarity = &evergreen_hpd_set_polarity,
785 .ioctl_wait_idle = r600_ioctl_wait_idle,
785 .gui_idle = &r600_gui_idle, 786 .gui_idle = &r600_gui_idle,
786 .pm_misc = &evergreen_pm_misc, 787 .pm_misc = &evergreen_pm_misc,
787 .pm_prepare = &evergreen_pm_prepare, 788 .pm_prepare = &evergreen_pm_prepare,
@@ -828,6 +829,7 @@ static struct radeon_asic sumo_asic = {
828 .hpd_fini = &evergreen_hpd_fini, 829 .hpd_fini = &evergreen_hpd_fini,
829 .hpd_sense = &evergreen_hpd_sense, 830 .hpd_sense = &evergreen_hpd_sense,
830 .hpd_set_polarity = &evergreen_hpd_set_polarity, 831 .hpd_set_polarity = &evergreen_hpd_set_polarity,
832 .ioctl_wait_idle = r600_ioctl_wait_idle,
831 .gui_idle = &r600_gui_idle, 833 .gui_idle = &r600_gui_idle,
832 .pm_misc = &evergreen_pm_misc, 834 .pm_misc = &evergreen_pm_misc,
833 .pm_prepare = &evergreen_pm_prepare, 835 .pm_prepare = &evergreen_pm_prepare,
@@ -874,6 +876,7 @@ static struct radeon_asic btc_asic = {
874 .hpd_fini = &evergreen_hpd_fini, 876 .hpd_fini = &evergreen_hpd_fini,
875 .hpd_sense = &evergreen_hpd_sense, 877 .hpd_sense = &evergreen_hpd_sense,
876 .hpd_set_polarity = &evergreen_hpd_set_polarity, 878 .hpd_set_polarity = &evergreen_hpd_set_polarity,
879 .ioctl_wait_idle = r600_ioctl_wait_idle,
877 .gui_idle = &r600_gui_idle, 880 .gui_idle = &r600_gui_idle,
878 .pm_misc = &evergreen_pm_misc, 881 .pm_misc = &evergreen_pm_misc,
879 .pm_prepare = &evergreen_pm_prepare, 882 .pm_prepare = &evergreen_pm_prepare,
@@ -920,6 +923,7 @@ static struct radeon_asic cayman_asic = {
920 .hpd_fini = &evergreen_hpd_fini, 923 .hpd_fini = &evergreen_hpd_fini,
921 .hpd_sense = &evergreen_hpd_sense, 924 .hpd_sense = &evergreen_hpd_sense,
922 .hpd_set_polarity = &evergreen_hpd_set_polarity, 925 .hpd_set_polarity = &evergreen_hpd_set_polarity,
926 .ioctl_wait_idle = r600_ioctl_wait_idle,
923 .gui_idle = &r600_gui_idle, 927 .gui_idle = &r600_gui_idle,
924 .pm_misc = &evergreen_pm_misc, 928 .pm_misc = &evergreen_pm_misc,
925 .pm_prepare = &evergreen_pm_prepare, 929 .pm_prepare = &evergreen_pm_prepare,
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 8caf546c8e92..5b991f7c6e2a 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -505,12 +505,18 @@ static struct radeon_i2c_bus_rec combios_setup_i2c_bus(struct radeon_device *rde
505 * DDC_VGA = RADEON_GPIO_VGA_DDC 505 * DDC_VGA = RADEON_GPIO_VGA_DDC
506 * DDC_LCD = RADEON_GPIOPAD_MASK 506 * DDC_LCD = RADEON_GPIOPAD_MASK
507 * DDC_GPIO = RADEON_MDGPIO_MASK 507 * DDC_GPIO = RADEON_MDGPIO_MASK
508 * r1xx/r2xx 508 * r1xx
509 * DDC_MONID = RADEON_GPIO_MONID 509 * DDC_MONID = RADEON_GPIO_MONID
510 * DDC_CRT2 = RADEON_GPIO_CRT2_DDC 510 * DDC_CRT2 = RADEON_GPIO_CRT2_DDC
511 * r3xx 511 * r200
512 * DDC_MONID = RADEON_GPIO_MONID 512 * DDC_MONID = RADEON_GPIO_MONID
513 * DDC_CRT2 = RADEON_GPIO_DVI_DDC 513 * DDC_CRT2 = RADEON_GPIO_DVI_DDC
514 * r300/r350
515 * DDC_MONID = RADEON_GPIO_DVI_DDC
516 * DDC_CRT2 = RADEON_GPIO_DVI_DDC
517 * rv2xx/rv3xx
518 * DDC_MONID = RADEON_GPIO_MONID
519 * DDC_CRT2 = RADEON_GPIO_MONID
514 * rs3xx/rs4xx 520 * rs3xx/rs4xx
515 * DDC_MONID = RADEON_GPIOPAD_MASK 521 * DDC_MONID = RADEON_GPIOPAD_MASK
516 * DDC_CRT2 = RADEON_GPIO_MONID 522 * DDC_CRT2 = RADEON_GPIO_MONID
@@ -537,17 +543,26 @@ static struct radeon_i2c_bus_rec combios_setup_i2c_bus(struct radeon_device *rde
537 rdev->family == CHIP_RS400 || 543 rdev->family == CHIP_RS400 ||
538 rdev->family == CHIP_RS480) 544 rdev->family == CHIP_RS480)
539 ddc_line = RADEON_GPIOPAD_MASK; 545 ddc_line = RADEON_GPIOPAD_MASK;
540 else 546 else if (rdev->family == CHIP_R300 ||
547 rdev->family == CHIP_R350) {
548 ddc_line = RADEON_GPIO_DVI_DDC;
549 ddc = DDC_DVI;
550 } else
541 ddc_line = RADEON_GPIO_MONID; 551 ddc_line = RADEON_GPIO_MONID;
542 break; 552 break;
543 case DDC_CRT2: 553 case DDC_CRT2:
544 if (rdev->family == CHIP_RS300 || 554 if (rdev->family == CHIP_R200 ||
545 rdev->family == CHIP_RS400 || 555 rdev->family == CHIP_R300 ||
546 rdev->family == CHIP_RS480) 556 rdev->family == CHIP_R350) {
547 ddc_line = RADEON_GPIO_MONID;
548 else if (rdev->family >= CHIP_R300) {
549 ddc_line = RADEON_GPIO_DVI_DDC; 557 ddc_line = RADEON_GPIO_DVI_DDC;
550 ddc = DDC_DVI; 558 ddc = DDC_DVI;
559 } else if (rdev->family == CHIP_RS300 ||
560 rdev->family == CHIP_RS400 ||
561 rdev->family == CHIP_RS480)
562 ddc_line = RADEON_GPIO_MONID;
563 else if (rdev->family >= CHIP_RV350) {
564 ddc_line = RADEON_GPIO_MONID;
565 ddc = DDC_MONID;
551 } else 566 } else
552 ddc_line = RADEON_GPIO_CRT2_DDC; 567 ddc_line = RADEON_GPIO_CRT2_DDC;
553 break; 568 break;
@@ -709,26 +724,42 @@ void radeon_combios_i2c_init(struct radeon_device *rdev)
709 struct drm_device *dev = rdev->ddev; 724 struct drm_device *dev = rdev->ddev;
710 struct radeon_i2c_bus_rec i2c; 725 struct radeon_i2c_bus_rec i2c;
711 726
727 /* actual hw pads
728 * r1xx/rs2xx/rs3xx
729 * 0x60, 0x64, 0x68, 0x6c, gpiopads, mm
730 * r200
731 * 0x60, 0x64, 0x68, mm
732 * r300/r350
733 * 0x60, 0x64, mm
734 * rv2xx/rv3xx/rs4xx
735 * 0x60, 0x64, 0x68, gpiopads, mm
736 */
712 737
738 /* 0x60 */
713 i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0); 739 i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
714 rdev->i2c_bus[0] = radeon_i2c_create(dev, &i2c, "DVI_DDC"); 740 rdev->i2c_bus[0] = radeon_i2c_create(dev, &i2c, "DVI_DDC");
715 741 /* 0x64 */
716 i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0); 742 i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
717 rdev->i2c_bus[1] = radeon_i2c_create(dev, &i2c, "VGA_DDC"); 743 rdev->i2c_bus[1] = radeon_i2c_create(dev, &i2c, "VGA_DDC");
718 744
745 /* mm i2c */
719 i2c.valid = true; 746 i2c.valid = true;
720 i2c.hw_capable = true; 747 i2c.hw_capable = true;
721 i2c.mm_i2c = true; 748 i2c.mm_i2c = true;
722 i2c.i2c_id = 0xa0; 749 i2c.i2c_id = 0xa0;
723 rdev->i2c_bus[2] = radeon_i2c_create(dev, &i2c, "MM_I2C"); 750 rdev->i2c_bus[2] = radeon_i2c_create(dev, &i2c, "MM_I2C");
724 751
725 if (rdev->family == CHIP_RS300 || 752 if (rdev->family == CHIP_R300 ||
726 rdev->family == CHIP_RS400 || 753 rdev->family == CHIP_R350) {
727 rdev->family == CHIP_RS480) { 754 /* only 2 sw i2c pads */
755 } else if (rdev->family == CHIP_RS300 ||
756 rdev->family == CHIP_RS400 ||
757 rdev->family == CHIP_RS480) {
728 u16 offset; 758 u16 offset;
729 u8 id, blocks, clk, data; 759 u8 id, blocks, clk, data;
730 int i; 760 int i;
731 761
762 /* 0x68 */
732 i2c = combios_setup_i2c_bus(rdev, DDC_CRT2, 0, 0); 763 i2c = combios_setup_i2c_bus(rdev, DDC_CRT2, 0, 0);
733 rdev->i2c_bus[3] = radeon_i2c_create(dev, &i2c, "MONID"); 764 rdev->i2c_bus[3] = radeon_i2c_create(dev, &i2c, "MONID");
734 765
@@ -740,6 +771,7 @@ void radeon_combios_i2c_init(struct radeon_device *rdev)
740 if (id == 136) { 771 if (id == 136) {
741 clk = RBIOS8(offset + 3 + (i * 5) + 3); 772 clk = RBIOS8(offset + 3 + (i * 5) + 3);
742 data = RBIOS8(offset + 3 + (i * 5) + 4); 773 data = RBIOS8(offset + 3 + (i * 5) + 4);
774 /* gpiopad */
743 i2c = combios_setup_i2c_bus(rdev, DDC_MONID, 775 i2c = combios_setup_i2c_bus(rdev, DDC_MONID,
744 (1 << clk), (1 << data)); 776 (1 << clk), (1 << data));
745 rdev->i2c_bus[4] = radeon_i2c_create(dev, &i2c, "GPIOPAD_MASK"); 777 rdev->i2c_bus[4] = radeon_i2c_create(dev, &i2c, "GPIOPAD_MASK");
@@ -747,14 +779,15 @@ void radeon_combios_i2c_init(struct radeon_device *rdev)
747 } 779 }
748 } 780 }
749 } 781 }
750 782 } else if (rdev->family >= CHIP_R200) {
751 } else if (rdev->family >= CHIP_R300) { 783 /* 0x68 */
752 i2c = combios_setup_i2c_bus(rdev, DDC_MONID, 0, 0); 784 i2c = combios_setup_i2c_bus(rdev, DDC_MONID, 0, 0);
753 rdev->i2c_bus[3] = radeon_i2c_create(dev, &i2c, "MONID"); 785 rdev->i2c_bus[3] = radeon_i2c_create(dev, &i2c, "MONID");
754 } else { 786 } else {
787 /* 0x68 */
755 i2c = combios_setup_i2c_bus(rdev, DDC_MONID, 0, 0); 788 i2c = combios_setup_i2c_bus(rdev, DDC_MONID, 0, 0);
756 rdev->i2c_bus[3] = radeon_i2c_create(dev, &i2c, "MONID"); 789 rdev->i2c_bus[3] = radeon_i2c_create(dev, &i2c, "MONID");
757 790 /* 0x6c */
758 i2c = combios_setup_i2c_bus(rdev, DDC_CRT2, 0, 0); 791 i2c = combios_setup_i2c_bus(rdev, DDC_CRT2, 0, 0);
759 rdev->i2c_bus[4] = radeon_i2c_create(dev, &i2c, "CRT2_DDC"); 792 rdev->i2c_bus[4] = radeon_i2c_create(dev, &i2c, "CRT2_DDC");
760 } 793 }
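
Putting the family checks above together, the bus table radeon_combios_i2c_init() now builds looks like this (slot numbers are rdev->i2c_bus[] indices; this is assembled from the hunk, not new behavior):

    /* slot 0: 0x60 "DVI_DDC"  - all families
     * slot 1: 0x64 "VGA_DDC"  - all families
     * slot 2: mm i2c "MM_I2C" - all families (hw capable, i2c_id 0xa0)
     * slot 3: 0x68 "MONID"    - everything except r300/r350, which
     *                           only have the two sw pads
     * slot 4: "GPIOPAD_MASK"  - rs300/rs400/rs480, built from the OEM
     *                           table entry (id 136) if present, or
     *         0x6c "CRT2_DDC" - pre-r200 parts
     */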
@@ -2504,6 +2537,12 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
2504 return true; 2537 return true;
2505} 2538}
2506 2539
2540static const char *thermal_controller_names[] = {
2541 "NONE",
2542 "lm63",
2543 "adm1032",
2544};
2545
2507void radeon_combios_get_power_modes(struct radeon_device *rdev) 2546void radeon_combios_get_power_modes(struct radeon_device *rdev)
2508{ 2547{
2509 struct drm_device *dev = rdev->ddev; 2548 struct drm_device *dev = rdev->ddev;
@@ -2524,6 +2563,54 @@ void radeon_combios_get_power_modes(struct radeon_device *rdev)
2524 return; 2563 return;
2525 } 2564 }
2526 2565
2566 /* check for a thermal chip */
2567 offset = combios_get_table_offset(dev, COMBIOS_OVERDRIVE_INFO_TABLE);
2568 if (offset) {
2569 u8 thermal_controller = 0, gpio = 0, i2c_addr = 0, clk_bit = 0, data_bit = 0;
2570 struct radeon_i2c_bus_rec i2c_bus;
2571
2572 rev = RBIOS8(offset);
2573
2574 if (rev == 0) {
2575 thermal_controller = RBIOS8(offset + 3);
2576 gpio = RBIOS8(offset + 4) & 0x3f;
2577 i2c_addr = RBIOS8(offset + 5);
2578 } else if (rev == 1) {
2579 thermal_controller = RBIOS8(offset + 4);
2580 gpio = RBIOS8(offset + 5) & 0x3f;
2581 i2c_addr = RBIOS8(offset + 6);
2582 } else if (rev == 2) {
2583 thermal_controller = RBIOS8(offset + 4);
2584 gpio = RBIOS8(offset + 5) & 0x3f;
2585 i2c_addr = RBIOS8(offset + 6);
2586 clk_bit = RBIOS8(offset + 0xa);
2587 data_bit = RBIOS8(offset + 0xb);
2588 }
2589 if ((thermal_controller > 0) && (thermal_controller < 3)) {
2590 DRM_INFO("Possible %s thermal controller at 0x%02x\n",
2591 thermal_controller_names[thermal_controller],
2592 i2c_addr >> 1);
2593 if (gpio == DDC_LCD) {
2594 /* MM i2c */
2595 i2c_bus.valid = true;
2596 i2c_bus.hw_capable = true;
2597 i2c_bus.mm_i2c = true;
2598 i2c_bus.i2c_id = 0xa0;
2599 } else if (gpio == DDC_GPIO)
2600 i2c_bus = combios_setup_i2c_bus(rdev, gpio, 1 << clk_bit, 1 << data_bit);
2601 else
2602 i2c_bus = combios_setup_i2c_bus(rdev, gpio, 0, 0);
2603 rdev->pm.i2c_bus = radeon_i2c_lookup(rdev, &i2c_bus);
2604 if (rdev->pm.i2c_bus) {
2605 struct i2c_board_info info = { };
2606 const char *name = thermal_controller_names[thermal_controller];
2607 info.addr = i2c_addr >> 1;
2608 strlcpy(info.type, name, sizeof(info.type));
2609 i2c_new_device(&rdev->pm.i2c_bus->adapter, &info);
2610 }
2611 }
2612 }
2613
2527 if (rdev->flags & RADEON_IS_MOBILITY) { 2614 if (rdev->flags & RADEON_IS_MOBILITY) {
2528 offset = combios_get_table_offset(dev, COMBIOS_POWERPLAY_INFO_TABLE); 2615 offset = combios_get_table_offset(dev, COMBIOS_POWERPLAY_INFO_TABLE);
2529 if (offset) { 2616 if (offset) {
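
For reference, the COMBIOS overdrive-table fields the thermal block above consumes, keyed by table revision (offsets read straight from the parsing code; the names are just descriptions):

    /* rev 0: +3 thermal controller id, +4 gpio (& 0x3f), +5 i2c address
     * rev 1: +4 thermal controller id, +5 gpio (& 0x3f), +6 i2c address
     * rev 2: as rev 1, plus +0xa clk bit and +0xb data bit, used when
     *        gpio == DDC_GPIO to build the bus from the (1 << clk_bit)
     *        and (1 << data_bit) pin masks
     */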
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 5f45fa12bb8b..ee1dccb3fec9 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -50,20 +50,21 @@ void radeon_connector_hotplug(struct drm_connector *connector)
50 struct radeon_device *rdev = dev->dev_private; 50 struct radeon_device *rdev = dev->dev_private;
51 struct radeon_connector *radeon_connector = to_radeon_connector(connector); 51 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
52 52
53 if (radeon_connector->hpd.hpd != RADEON_HPD_NONE) 53 radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
54 radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd); 54
55 55 /* powering up/down the eDP panel generates hpd events which
56 if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) || 56 * can interfere with modesetting.
57 (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) { 57 */
58 if ((radeon_dp_getsinktype(radeon_connector) == CONNECTOR_OBJECT_ID_DISPLAYPORT) || 58 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
59 (radeon_dp_getsinktype(radeon_connector) == CONNECTOR_OBJECT_ID_eDP)) { 59 return;
60 if (radeon_dp_needs_link_train(radeon_connector)) {
61 if (connector->encoder)
62 dp_link_train(connector->encoder, connector);
63 }
64 }
65 }
66 60
61 /* pre-r600 did not always have the hpd pins mapped accurately to connectors */
62 if (rdev->family >= CHIP_R600) {
63 if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd))
64 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
65 else
66 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
67 }
67} 68}
68 69
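
Since the side-by-side rendering interleaves the two columns here, this is the resulting radeon_connector_hotplug() flattened from the right-hand column; the initial dev declaration is assumed from the unchanged context just above the hunk:

    void radeon_connector_hotplug(struct drm_connector *connector)
    {
        struct drm_device *dev = connector->dev; /* unchanged context */
        struct radeon_device *rdev = dev->dev_private;
        struct radeon_connector *radeon_connector = to_radeon_connector(connector);

        radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);

        /* powering up/down the eDP panel generates hpd events which
         * can interfere with modesetting.
         */
        if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
            return;

        /* pre-r600 did not always have the hpd pins mapped accurately
         * to connectors */
        if (rdev->family >= CHIP_R600) {
            if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd))
                drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
            else
                drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
        }
    }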
69static void radeon_property_change_mode(struct drm_encoder *encoder) 70static void radeon_property_change_mode(struct drm_encoder *encoder)
@@ -1054,23 +1055,124 @@ static int radeon_dp_get_modes(struct drm_connector *connector)
1054 int ret; 1055 int ret;
1055 1056
1056 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) { 1057 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
1058 struct drm_encoder *encoder;
1059 struct drm_display_mode *mode;
1060
1057 if (!radeon_dig_connector->edp_on) 1061 if (!radeon_dig_connector->edp_on)
1058 atombios_set_edp_panel_power(connector, 1062 atombios_set_edp_panel_power(connector,
1059 ATOM_TRANSMITTER_ACTION_POWER_ON); 1063 ATOM_TRANSMITTER_ACTION_POWER_ON);
1060 } 1064 ret = radeon_ddc_get_modes(radeon_connector);
1061 ret = radeon_ddc_get_modes(radeon_connector);
1062 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
1063 if (!radeon_dig_connector->edp_on) 1065 if (!radeon_dig_connector->edp_on)
1064 atombios_set_edp_panel_power(connector, 1066 atombios_set_edp_panel_power(connector,
1065 ATOM_TRANSMITTER_ACTION_POWER_OFF); 1067 ATOM_TRANSMITTER_ACTION_POWER_OFF);
1066 } 1068
1069 if (ret > 0) {
1070 encoder = radeon_best_single_encoder(connector);
1071 if (encoder) {
1072 radeon_fixup_lvds_native_mode(encoder, connector);
1073 /* add scaled modes */
1074 radeon_add_common_modes(encoder, connector);
1075 }
1076 return ret;
1077 }
1078
1079 encoder = radeon_best_single_encoder(connector);
1080 if (!encoder)
1081 return 0;
1082
1083 /* we have no EDID modes */
1084 mode = radeon_fp_native_mode(encoder);
1085 if (mode) {
1086 ret = 1;
1087 drm_mode_probed_add(connector, mode);
1088 /* add the width/height from vbios tables if available */
1089 connector->display_info.width_mm = mode->width_mm;
1090 connector->display_info.height_mm = mode->height_mm;
1091 /* add scaled modes */
1092 radeon_add_common_modes(encoder, connector);
1093 }
1094 } else
1095 ret = radeon_ddc_get_modes(radeon_connector);
1067 1096
1068 return ret; 1097 return ret;
1069} 1098}
1070 1099
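
The interleaving above makes the new eDP path hard to follow; flattened from the right-hand column, radeon_dp_get_modes() now powers the panel for the EDID read and falls back to the vbios native mode when no EDID modes turn up:

    if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
        struct drm_encoder *encoder;
        struct drm_display_mode *mode;

        if (!radeon_dig_connector->edp_on)
            atombios_set_edp_panel_power(connector,
                             ATOM_TRANSMITTER_ACTION_POWER_ON);
        ret = radeon_ddc_get_modes(radeon_connector);
        if (!radeon_dig_connector->edp_on)
            atombios_set_edp_panel_power(connector,
                             ATOM_TRANSMITTER_ACTION_POWER_OFF);

        if (ret > 0) {
            encoder = radeon_best_single_encoder(connector);
            if (encoder) {
                radeon_fixup_lvds_native_mode(encoder, connector);
                /* add scaled modes */
                radeon_add_common_modes(encoder, connector);
            }
            return ret;
        }

        encoder = radeon_best_single_encoder(connector);
        if (!encoder)
            return 0;

        /* we have no EDID modes */
        mode = radeon_fp_native_mode(encoder);
        if (mode) {
            ret = 1;
            drm_mode_probed_add(connector, mode);
            /* add the width/height from vbios tables if available */
            connector->display_info.width_mm = mode->width_mm;
            connector->display_info.height_mm = mode->height_mm;
            /* add scaled modes */
            radeon_add_common_modes(encoder, connector);
        }
    } else
        ret = radeon_ddc_get_modes(radeon_connector);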
1100bool radeon_connector_encoder_is_dp_bridge(struct drm_connector *connector)
1101{
1102 struct drm_mode_object *obj;
1103 struct drm_encoder *encoder;
1104 struct radeon_encoder *radeon_encoder;
1105 int i;
1106 bool found = false;
1107
1108 for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
1109 if (connector->encoder_ids[i] == 0)
1110 break;
1111
1112 obj = drm_mode_object_find(connector->dev, connector->encoder_ids[i], DRM_MODE_OBJECT_ENCODER);
1113 if (!obj)
1114 continue;
1115
1116 encoder = obj_to_encoder(obj);
1117 radeon_encoder = to_radeon_encoder(encoder);
1118
1119 switch (radeon_encoder->encoder_id) {
1120 case ENCODER_OBJECT_ID_TRAVIS:
1121 case ENCODER_OBJECT_ID_NUTMEG:
1122 found = true;
1123 break;
1124 default:
1125 break;
1126 }
1127 }
1128
1129 return found;
1130}
1131
1132bool radeon_connector_encoder_is_hbr2(struct drm_connector *connector)
1133{
1134 struct drm_mode_object *obj;
1135 struct drm_encoder *encoder;
1136 struct radeon_encoder *radeon_encoder;
1137 int i;
1138 bool found = false;
1139
1140 for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
1141 if (connector->encoder_ids[i] == 0)
1142 break;
1143
1144 obj = drm_mode_object_find(connector->dev, connector->encoder_ids[i], DRM_MODE_OBJECT_ENCODER);
1145 if (!obj)
1146 continue;
1147
1148 encoder = obj_to_encoder(obj);
1149 radeon_encoder = to_radeon_encoder(encoder);
1150 if (radeon_encoder->caps & ATOM_ENCODER_CAP_RECORD_HBR2)
1151 found = true;
1152 }
1153
1154 return found;
1155}
1156
1157bool radeon_connector_is_dp12_capable(struct drm_connector *connector)
1158{
1159 struct drm_device *dev = connector->dev;
1160 struct radeon_device *rdev = dev->dev_private;
1161
1162 if (ASIC_IS_DCE5(rdev) &&
1163 (rdev->clock.dp_extclk >= 53900) &&
1164 radeon_connector_encoder_is_hbr2(connector)) {
1165 return true;
1166 }
1167
1168 return false;
1169}
1170
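
A unit note on the 53900 test above, resting on the assumption (the usual radeon convention, not stated in this patch) that AtomBIOS clock values are stored in 10 kHz units:

    /* If dp_extclk is in 10 kHz units, 53900 is ~539 MHz: roughly the
     * external DP reference needed to source a 5.40 GHz (HBR2) link,
     * which is the capability radeon_connector_encoder_is_hbr2()
     * checks for above.
     */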
1071static enum drm_connector_status 1171static enum drm_connector_status
1072radeon_dp_detect(struct drm_connector *connector, bool force) 1172radeon_dp_detect(struct drm_connector *connector, bool force)
1073{ 1173{
1174 struct drm_device *dev = connector->dev;
1175 struct radeon_device *rdev = dev->dev_private;
1074 struct radeon_connector *radeon_connector = to_radeon_connector(connector); 1176 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
1075 enum drm_connector_status ret = connector_status_disconnected; 1177 enum drm_connector_status ret = connector_status_disconnected;
1076 struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv; 1178 struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv;
@@ -1081,6 +1183,15 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
1081 } 1183 }
1082 1184
1083 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) { 1185 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
1186 struct drm_encoder *encoder = radeon_best_single_encoder(connector);
1187 if (encoder) {
1188 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
1189 struct drm_display_mode *native_mode = &radeon_encoder->native_mode;
1190
1191 /* check if panel is valid */
1192 if (native_mode->hdisplay >= 320 && native_mode->vdisplay >= 240)
1193 ret = connector_status_connected;
1194 }
1084 /* eDP is always DP */ 1195 /* eDP is always DP */
1085 radeon_dig_connector->dp_sink_type = CONNECTOR_OBJECT_ID_DISPLAYPORT; 1196 radeon_dig_connector->dp_sink_type = CONNECTOR_OBJECT_ID_DISPLAYPORT;
1086 if (!radeon_dig_connector->edp_on) 1197 if (!radeon_dig_connector->edp_on)
@@ -1093,12 +1204,18 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
1093 ATOM_TRANSMITTER_ACTION_POWER_OFF); 1204 ATOM_TRANSMITTER_ACTION_POWER_OFF);
1094 } else { 1205 } else {
1095 radeon_dig_connector->dp_sink_type = radeon_dp_getsinktype(radeon_connector); 1206 radeon_dig_connector->dp_sink_type = radeon_dp_getsinktype(radeon_connector);
1096 if (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) { 1207 if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) {
1097 if (radeon_dp_getdpcd(radeon_connector)) 1208 ret = connector_status_connected;
1098 ret = connector_status_connected; 1209 if (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT)
1210 radeon_dp_getdpcd(radeon_connector);
1099 } else { 1211 } else {
1100 if (radeon_ddc_probe(radeon_connector)) 1212 if (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) {
1101 ret = connector_status_connected; 1213 if (radeon_dp_getdpcd(radeon_connector))
1214 ret = connector_status_connected;
1215 } else {
1216 if (radeon_ddc_probe(radeon_connector))
1217 ret = connector_status_connected;
1218 }
1102 } 1219 }
1103 } 1220 }
1104 1221
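
Flattened from the right-hand column, the non-eDP branch of radeon_dp_detect() now prefers a live HPD pin over probing:

    radeon_dig_connector->dp_sink_type = radeon_dp_getsinktype(radeon_connector);
    if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) {
        /* HPD asserted: trust the pin, fetch the DPCD for DP sinks */
        ret = connector_status_connected;
        if (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT)
            radeon_dp_getdpcd(radeon_connector);
    } else {
        /* no HPD: fall back to the DPCD (DP sinks) or a DDC probe */
        if (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) {
            if (radeon_dp_getdpcd(radeon_connector))
                ret = connector_status_connected;
        } else {
            if (radeon_ddc_probe(radeon_connector))
                ret = connector_status_connected;
        }
    }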
@@ -1114,11 +1231,38 @@ static int radeon_dp_mode_valid(struct drm_connector *connector,
1114 1231
1115 /* XXX check mode bandwidth */ 1232 /* XXX check mode bandwidth */
1116 1233
1117 if ((radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) || 1234 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
1118 (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) 1235 struct drm_encoder *encoder = radeon_best_single_encoder(connector);
1119 return radeon_dp_mode_valid_helper(radeon_connector, mode); 1236
1120 else 1237 if ((mode->hdisplay < 320) || (mode->vdisplay < 240))
1238 return MODE_PANEL;
1239
1240 if (encoder) {
1241 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
1242 struct drm_display_mode *native_mode = &radeon_encoder->native_mode;
1243
1244 /* AVIVO hardware supports downscaling modes larger than the panel
1245 * to the panel size, but I'm not sure this is desirable.
1246 */
1247 if ((mode->hdisplay > native_mode->hdisplay) ||
1248 (mode->vdisplay > native_mode->vdisplay))
1249 return MODE_PANEL;
1250
1251 /* if scaling is disabled, block non-native modes */
1252 if (radeon_encoder->rmx_type == RMX_OFF) {
1253 if ((mode->hdisplay != native_mode->hdisplay) ||
1254 (mode->vdisplay != native_mode->vdisplay))
1255 return MODE_PANEL;
1256 }
1257 }
1121 return MODE_OK; 1258 return MODE_OK;
1259 } else {
1260 if ((radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
1261 (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP))
1262 return radeon_dp_mode_valid_helper(connector, mode);
1263 else
1264 return MODE_OK;
1265 }
1122} 1266}
1123 1267
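
And the new eDP branch of radeon_dp_mode_valid(), flattened from the right-hand column: panel-size limits first, then the native-mode checks when scaling is off:

    if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
        struct drm_encoder *encoder = radeon_best_single_encoder(connector);

        if ((mode->hdisplay < 320) || (mode->vdisplay < 240))
            return MODE_PANEL;

        if (encoder) {
            struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
            struct drm_display_mode *native_mode = &radeon_encoder->native_mode;

            /* reject modes larger than the panel */
            if ((mode->hdisplay > native_mode->hdisplay) ||
                (mode->vdisplay > native_mode->vdisplay))
                return MODE_PANEL;

            /* if scaling is disabled, block non-native modes */
            if (radeon_encoder->rmx_type == RMX_OFF) {
                if ((mode->hdisplay != native_mode->hdisplay) ||
                    (mode->vdisplay != native_mode->vdisplay))
                    return MODE_PANEL;
            }
        }
        return MODE_OK;
    }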
1124struct drm_connector_helper_funcs radeon_dp_connector_helper_funcs = { 1268struct drm_connector_helper_funcs radeon_dp_connector_helper_funcs = {
@@ -1151,8 +1295,11 @@ radeon_add_atom_connector(struct drm_device *dev,
1151 struct drm_connector *connector; 1295 struct drm_connector *connector;
1152 struct radeon_connector *radeon_connector; 1296 struct radeon_connector *radeon_connector;
1153 struct radeon_connector_atom_dig *radeon_dig_connector; 1297 struct radeon_connector_atom_dig *radeon_dig_connector;
1298 struct drm_encoder *encoder;
1299 struct radeon_encoder *radeon_encoder;
1154 uint32_t subpixel_order = SubPixelNone; 1300 uint32_t subpixel_order = SubPixelNone;
1155 bool shared_ddc = false; 1301 bool shared_ddc = false;
1302 bool is_dp_bridge = false;
1156 1303
1157 if (connector_type == DRM_MODE_CONNECTOR_Unknown) 1304 if (connector_type == DRM_MODE_CONNECTOR_Unknown)
1158 return; 1305 return;
@@ -1184,6 +1331,21 @@ radeon_add_atom_connector(struct drm_device *dev,
1184 } 1331 }
1185 } 1332 }
1186 1333
1334 /* check if it's a dp bridge */
1335 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
1336 radeon_encoder = to_radeon_encoder(encoder);
1337 if (radeon_encoder->devices & supported_device) {
1338 switch (radeon_encoder->encoder_id) {
1339 case ENCODER_OBJECT_ID_TRAVIS:
1340 case ENCODER_OBJECT_ID_NUTMEG:
1341 is_dp_bridge = true;
1342 break;
1343 default:
1344 break;
1345 }
1346 }
1347 }
1348
1187 radeon_connector = kzalloc(sizeof(struct radeon_connector), GFP_KERNEL); 1349 radeon_connector = kzalloc(sizeof(struct radeon_connector), GFP_KERNEL);
1188 if (!radeon_connector) 1350 if (!radeon_connector)
1189 return; 1351 return;
@@ -1201,61 +1363,39 @@ radeon_add_atom_connector(struct drm_device *dev,
1201 if (!radeon_connector->router_bus) 1363 if (!radeon_connector->router_bus)
1202 DRM_ERROR("Failed to assign router i2c bus! Check dmesg for i2c errors.\n"); 1364 DRM_ERROR("Failed to assign router i2c bus! Check dmesg for i2c errors.\n");
1203 } 1365 }
1204 switch (connector_type) { 1366
1205 case DRM_MODE_CONNECTOR_VGA: 1367 if (is_dp_bridge) {
1206 drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
1207 drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
1208 if (i2c_bus->valid) {
1209 radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
1210 if (!radeon_connector->ddc_bus)
1211 DRM_ERROR("VGA: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
1212 }
1213 radeon_connector->dac_load_detect = true;
1214 drm_connector_attach_property(&radeon_connector->base,
1215 rdev->mode_info.load_detect_property,
1216 1);
1217 /* no HPD on analog connectors */
1218 radeon_connector->hpd.hpd = RADEON_HPD_NONE;
1219 connector->polled = DRM_CONNECTOR_POLL_CONNECT;
1220 connector->interlace_allowed = true;
1221 connector->doublescan_allowed = true;
1222 break;
1223 case DRM_MODE_CONNECTOR_DVIA:
1224 drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
1225 drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
1226 if (i2c_bus->valid) {
1227 radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
1228 if (!radeon_connector->ddc_bus)
1229 DRM_ERROR("DVIA: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
1230 }
1231 radeon_connector->dac_load_detect = true;
1232 drm_connector_attach_property(&radeon_connector->base,
1233 rdev->mode_info.load_detect_property,
1234 1);
1235 /* no HPD on analog connectors */
1236 radeon_connector->hpd.hpd = RADEON_HPD_NONE;
1237 connector->interlace_allowed = true;
1238 connector->doublescan_allowed = true;
1239 break;
1240 case DRM_MODE_CONNECTOR_DVII:
1241 case DRM_MODE_CONNECTOR_DVID:
1242 radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL); 1368 radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL);
1243 if (!radeon_dig_connector) 1369 if (!radeon_dig_connector)
1244 goto failed; 1370 goto failed;
1245 radeon_dig_connector->igp_lane_info = igp_lane_info; 1371 radeon_dig_connector->igp_lane_info = igp_lane_info;
1246 radeon_connector->con_priv = radeon_dig_connector; 1372 radeon_connector->con_priv = radeon_dig_connector;
1247 drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type); 1373 drm_connector_init(dev, &radeon_connector->base, &radeon_dp_connector_funcs, connector_type);
1248 drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs); 1374 drm_connector_helper_add(&radeon_connector->base, &radeon_dp_connector_helper_funcs);
1249 if (i2c_bus->valid) { 1375 if (i2c_bus->valid) {
1376 /* add DP i2c bus */
1377 if (connector_type == DRM_MODE_CONNECTOR_eDP)
1378 radeon_dig_connector->dp_i2c_bus = radeon_i2c_create_dp(dev, i2c_bus, "eDP-auxch");
1379 else
1380 radeon_dig_connector->dp_i2c_bus = radeon_i2c_create_dp(dev, i2c_bus, "DP-auxch");
1381 if (!radeon_dig_connector->dp_i2c_bus)
1382 DRM_ERROR("DP: Failed to assign dp ddc bus! Check dmesg for i2c errors.\n");
1250 radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus); 1383 radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
1251 if (!radeon_connector->ddc_bus) 1384 if (!radeon_connector->ddc_bus)
1252 DRM_ERROR("DVI: Failed to assign ddc bus! Check dmesg for i2c errors.\n"); 1385 DRM_ERROR("DP: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
1253 } 1386 }
1254 subpixel_order = SubPixelHorizontalRGB; 1387 switch (connector_type) {
1255 drm_connector_attach_property(&radeon_connector->base, 1388 case DRM_MODE_CONNECTOR_VGA:
1256 rdev->mode_info.coherent_mode_property, 1389 case DRM_MODE_CONNECTOR_DVIA:
1257 1); 1390 default:
1258 if (ASIC_IS_AVIVO(rdev)) { 1391 connector->interlace_allowed = true;
1392 connector->doublescan_allowed = true;
1393 break;
1394 case DRM_MODE_CONNECTOR_DVII:
1395 case DRM_MODE_CONNECTOR_DVID:
1396 case DRM_MODE_CONNECTOR_HDMIA:
1397 case DRM_MODE_CONNECTOR_HDMIB:
1398 case DRM_MODE_CONNECTOR_DisplayPort:
1259 drm_connector_attach_property(&radeon_connector->base, 1399 drm_connector_attach_property(&radeon_connector->base,
1260 rdev->mode_info.underscan_property, 1400 rdev->mode_info.underscan_property,
1261 UNDERSCAN_OFF); 1401 UNDERSCAN_OFF);
@@ -1265,131 +1405,234 @@ radeon_add_atom_connector(struct drm_device *dev,
1265 drm_connector_attach_property(&radeon_connector->base, 1405 drm_connector_attach_property(&radeon_connector->base,
1266 rdev->mode_info.underscan_vborder_property, 1406 rdev->mode_info.underscan_vborder_property,
1267 0); 1407 0);
1408 subpixel_order = SubPixelHorizontalRGB;
1409 connector->interlace_allowed = true;
1410 if (connector_type == DRM_MODE_CONNECTOR_HDMIB)
1411 connector->doublescan_allowed = true;
1412 else
1413 connector->doublescan_allowed = false;
1414 break;
1415 case DRM_MODE_CONNECTOR_LVDS:
1416 case DRM_MODE_CONNECTOR_eDP:
1417 drm_connector_attach_property(&radeon_connector->base,
1418 dev->mode_config.scaling_mode_property,
1419 DRM_MODE_SCALE_FULLSCREEN);
1420 subpixel_order = SubPixelHorizontalRGB;
1421 connector->interlace_allowed = false;
1422 connector->doublescan_allowed = false;
1423 break;
1268 } 1424 }
1269 if (connector_type == DRM_MODE_CONNECTOR_DVII) { 1425 } else {
1426 switch (connector_type) {
1427 case DRM_MODE_CONNECTOR_VGA:
1428 drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
1429 drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
1430 if (i2c_bus->valid) {
1431 radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
1432 if (!radeon_connector->ddc_bus)
1433 DRM_ERROR("VGA: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
1434 }
1270 radeon_connector->dac_load_detect = true; 1435 radeon_connector->dac_load_detect = true;
1271 drm_connector_attach_property(&radeon_connector->base, 1436 drm_connector_attach_property(&radeon_connector->base,
1272 rdev->mode_info.load_detect_property, 1437 rdev->mode_info.load_detect_property,
1273 1); 1438 1);
1274 } 1439 /* no HPD on analog connectors */
1275 connector->interlace_allowed = true; 1440 radeon_connector->hpd.hpd = RADEON_HPD_NONE;
1276 if (connector_type == DRM_MODE_CONNECTOR_DVII) 1441 connector->polled = DRM_CONNECTOR_POLL_CONNECT;
1442 connector->interlace_allowed = true;
1277 connector->doublescan_allowed = true; 1443 connector->doublescan_allowed = true;
1278 else 1444 break;
1279 connector->doublescan_allowed = false; 1445 case DRM_MODE_CONNECTOR_DVIA:
1280 break; 1446 drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
1281 case DRM_MODE_CONNECTOR_HDMIA: 1447 drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
1282 case DRM_MODE_CONNECTOR_HDMIB: 1448 if (i2c_bus->valid) {
1283 radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL); 1449 radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
1284 if (!radeon_dig_connector) 1450 if (!radeon_connector->ddc_bus)
1285 goto failed; 1451 DRM_ERROR("DVIA: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
1286 radeon_dig_connector->igp_lane_info = igp_lane_info; 1452 }
1287 radeon_connector->con_priv = radeon_dig_connector; 1453 radeon_connector->dac_load_detect = true;
1288 drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type);
1289 drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
1290 if (i2c_bus->valid) {
1291 radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
1292 if (!radeon_connector->ddc_bus)
1293 DRM_ERROR("HDMI: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
1294 }
1295 drm_connector_attach_property(&radeon_connector->base,
1296 rdev->mode_info.coherent_mode_property,
1297 1);
1298 if (ASIC_IS_AVIVO(rdev)) {
1299 drm_connector_attach_property(&radeon_connector->base, 1454 drm_connector_attach_property(&radeon_connector->base,
1300 rdev->mode_info.underscan_property, 1455 rdev->mode_info.load_detect_property,
1301 UNDERSCAN_OFF); 1456 1);
1457 /* no HPD on analog connectors */
1458 radeon_connector->hpd.hpd = RADEON_HPD_NONE;
1459 connector->interlace_allowed = true;
1460 connector->doublescan_allowed = true;
1461 break;
1462 case DRM_MODE_CONNECTOR_DVII:
1463 case DRM_MODE_CONNECTOR_DVID:
1464 radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL);
1465 if (!radeon_dig_connector)
1466 goto failed;
1467 radeon_dig_connector->igp_lane_info = igp_lane_info;
1468 radeon_connector->con_priv = radeon_dig_connector;
1469 drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type);
1470 drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
1471 if (i2c_bus->valid) {
1472 radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
1473 if (!radeon_connector->ddc_bus)
1474 DRM_ERROR("DVI: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
1475 }
1476 subpixel_order = SubPixelHorizontalRGB;
1302 drm_connector_attach_property(&radeon_connector->base, 1477 drm_connector_attach_property(&radeon_connector->base,
1303 rdev->mode_info.underscan_hborder_property, 1478 rdev->mode_info.coherent_mode_property,
1304 0); 1479 1);
1480 if (ASIC_IS_AVIVO(rdev)) {
1481 drm_connector_attach_property(&radeon_connector->base,
1482 rdev->mode_info.underscan_property,
1483 UNDERSCAN_OFF);
1484 drm_connector_attach_property(&radeon_connector->base,
1485 rdev->mode_info.underscan_hborder_property,
1486 0);
1487 drm_connector_attach_property(&radeon_connector->base,
1488 rdev->mode_info.underscan_vborder_property,
1489 0);
1490 }
1491 if (connector_type == DRM_MODE_CONNECTOR_DVII) {
1492 radeon_connector->dac_load_detect = true;
1493 drm_connector_attach_property(&radeon_connector->base,
1494 rdev->mode_info.load_detect_property,
1495 1);
1496 }
1497 connector->interlace_allowed = true;
1498 if (connector_type == DRM_MODE_CONNECTOR_DVII)
1499 connector->doublescan_allowed = true;
1500 else
1501 connector->doublescan_allowed = false;
1502 break;
1503 case DRM_MODE_CONNECTOR_HDMIA:
1504 case DRM_MODE_CONNECTOR_HDMIB:
1505 radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL);
1506 if (!radeon_dig_connector)
1507 goto failed;
1508 radeon_dig_connector->igp_lane_info = igp_lane_info;
1509 radeon_connector->con_priv = radeon_dig_connector;
1510 drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type);
1511 drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
1512 if (i2c_bus->valid) {
1513 radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
1514 if (!radeon_connector->ddc_bus)
1515 DRM_ERROR("HDMI: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
1516 }
1305 drm_connector_attach_property(&radeon_connector->base, 1517 drm_connector_attach_property(&radeon_connector->base,
1306 rdev->mode_info.underscan_vborder_property, 1518 rdev->mode_info.coherent_mode_property,
1307 0); 1519 1);
1308 } 1520 if (ASIC_IS_AVIVO(rdev)) {
1309 subpixel_order = SubPixelHorizontalRGB; 1521 drm_connector_attach_property(&radeon_connector->base,
1310 connector->interlace_allowed = true; 1522 rdev->mode_info.underscan_property,
1311 if (connector_type == DRM_MODE_CONNECTOR_HDMIB) 1523 UNDERSCAN_OFF);
1312 connector->doublescan_allowed = true; 1524 drm_connector_attach_property(&radeon_connector->base,
1313 else 1525 rdev->mode_info.underscan_hborder_property,
1314 connector->doublescan_allowed = false; 1526 0);
1315 break; 1527 drm_connector_attach_property(&radeon_connector->base,
1316 case DRM_MODE_CONNECTOR_DisplayPort: 1528 rdev->mode_info.underscan_vborder_property,
1317 case DRM_MODE_CONNECTOR_eDP: 1529 0);
1318 radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL); 1530 }
1319 if (!radeon_dig_connector) 1531 subpixel_order = SubPixelHorizontalRGB;
1320 goto failed; 1532 connector->interlace_allowed = true;
1321 radeon_dig_connector->igp_lane_info = igp_lane_info; 1533 if (connector_type == DRM_MODE_CONNECTOR_HDMIB)
1322 radeon_connector->con_priv = radeon_dig_connector; 1534 connector->doublescan_allowed = true;
1323 drm_connector_init(dev, &radeon_connector->base, &radeon_dp_connector_funcs, connector_type);
1324 drm_connector_helper_add(&radeon_connector->base, &radeon_dp_connector_helper_funcs);
1325 if (i2c_bus->valid) {
1326 /* add DP i2c bus */
1327 if (connector_type == DRM_MODE_CONNECTOR_eDP)
1328 radeon_dig_connector->dp_i2c_bus = radeon_i2c_create_dp(dev, i2c_bus, "eDP-auxch");
1329 else 1535 else
1536 connector->doublescan_allowed = false;
1537 break;
1538 case DRM_MODE_CONNECTOR_DisplayPort:
1539 radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL);
1540 if (!radeon_dig_connector)
1541 goto failed;
1542 radeon_dig_connector->igp_lane_info = igp_lane_info;
1543 radeon_connector->con_priv = radeon_dig_connector;
1544 drm_connector_init(dev, &radeon_connector->base, &radeon_dp_connector_funcs, connector_type);
1545 drm_connector_helper_add(&radeon_connector->base, &radeon_dp_connector_helper_funcs);
1546 if (i2c_bus->valid) {
1547 /* add DP i2c bus */
1330 radeon_dig_connector->dp_i2c_bus = radeon_i2c_create_dp(dev, i2c_bus, "DP-auxch"); 1548 radeon_dig_connector->dp_i2c_bus = radeon_i2c_create_dp(dev, i2c_bus, "DP-auxch");
1331 if (!radeon_dig_connector->dp_i2c_bus) 1549 if (!radeon_dig_connector->dp_i2c_bus)
1332 DRM_ERROR("DP: Failed to assign dp ddc bus! Check dmesg for i2c errors.\n"); 1550 DRM_ERROR("DP: Failed to assign dp ddc bus! Check dmesg for i2c errors.\n");
1333 radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus); 1551 radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
1334 if (!radeon_connector->ddc_bus) 1552 if (!radeon_connector->ddc_bus)
1335 DRM_ERROR("DP: Failed to assign ddc bus! Check dmesg for i2c errors.\n"); 1553 DRM_ERROR("DP: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
1336 } 1554 }
1337 subpixel_order = SubPixelHorizontalRGB; 1555 subpixel_order = SubPixelHorizontalRGB;
1338 drm_connector_attach_property(&radeon_connector->base,
1339 rdev->mode_info.coherent_mode_property,
1340 1);
1341 if (ASIC_IS_AVIVO(rdev)) {
1342 drm_connector_attach_property(&radeon_connector->base, 1556 drm_connector_attach_property(&radeon_connector->base,
1343 rdev->mode_info.underscan_property, 1557 rdev->mode_info.coherent_mode_property,
1344 UNDERSCAN_OFF); 1558 1);
1559 if (ASIC_IS_AVIVO(rdev)) {
1560 drm_connector_attach_property(&radeon_connector->base,
1561 rdev->mode_info.underscan_property,
1562 UNDERSCAN_OFF);
1563 drm_connector_attach_property(&radeon_connector->base,
1564 rdev->mode_info.underscan_hborder_property,
1565 0);
1566 drm_connector_attach_property(&radeon_connector->base,
1567 rdev->mode_info.underscan_vborder_property,
1568 0);
1569 }
1570 connector->interlace_allowed = true;
1571 /* in theory with a DP to VGA converter... */
1572 connector->doublescan_allowed = false;
1573 break;
1574 case DRM_MODE_CONNECTOR_eDP:
1575 radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL);
1576 if (!radeon_dig_connector)
1577 goto failed;
1578 radeon_dig_connector->igp_lane_info = igp_lane_info;
1579 radeon_connector->con_priv = radeon_dig_connector;
1580 drm_connector_init(dev, &radeon_connector->base, &radeon_dp_connector_funcs, connector_type);
1581 drm_connector_helper_add(&radeon_connector->base, &radeon_dp_connector_helper_funcs);
1582 if (i2c_bus->valid) {
1583 /* add DP i2c bus */
1584 radeon_dig_connector->dp_i2c_bus = radeon_i2c_create_dp(dev, i2c_bus, "eDP-auxch");
1585 if (!radeon_dig_connector->dp_i2c_bus)
1586 DRM_ERROR("DP: Failed to assign dp ddc bus! Check dmesg for i2c errors.\n");
1587 radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
1588 if (!radeon_connector->ddc_bus)
1589 DRM_ERROR("DP: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
1590 }
1345 drm_connector_attach_property(&radeon_connector->base, 1591 drm_connector_attach_property(&radeon_connector->base,
1346 rdev->mode_info.underscan_hborder_property, 1592 dev->mode_config.scaling_mode_property,
1347 0); 1593 DRM_MODE_SCALE_FULLSCREEN);
1594 subpixel_order = SubPixelHorizontalRGB;
1595 connector->interlace_allowed = false;
1596 connector->doublescan_allowed = false;
1597 break;
1598 case DRM_MODE_CONNECTOR_SVIDEO:
1599 case DRM_MODE_CONNECTOR_Composite:
1600 case DRM_MODE_CONNECTOR_9PinDIN:
1601 drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type);
1602 drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs);
1603 radeon_connector->dac_load_detect = true;
1348 drm_connector_attach_property(&radeon_connector->base, 1604 drm_connector_attach_property(&radeon_connector->base,
1349 rdev->mode_info.underscan_vborder_property, 1605 rdev->mode_info.load_detect_property,
1350 0); 1606 1);
1351 } 1607 drm_connector_attach_property(&radeon_connector->base,
1352 connector->interlace_allowed = true; 1608 rdev->mode_info.tv_std_property,
1353 /* in theory with a DP to VGA converter... */ 1609 radeon_atombios_get_tv_info(rdev));
1354 connector->doublescan_allowed = false; 1610 /* no HPD on analog connectors */
1355 break; 1611 radeon_connector->hpd.hpd = RADEON_HPD_NONE;
1356 case DRM_MODE_CONNECTOR_SVIDEO: 1612 connector->interlace_allowed = false;
1357 case DRM_MODE_CONNECTOR_Composite: 1613 connector->doublescan_allowed = false;
1358 case DRM_MODE_CONNECTOR_9PinDIN: 1614 break;
1359 drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type); 1615 case DRM_MODE_CONNECTOR_LVDS:
1360 drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs); 1616 radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL);
1361 radeon_connector->dac_load_detect = true; 1617 if (!radeon_dig_connector)
1362 drm_connector_attach_property(&radeon_connector->base, 1618 goto failed;
1363 rdev->mode_info.load_detect_property, 1619 radeon_dig_connector->igp_lane_info = igp_lane_info;
1364 1); 1620 radeon_connector->con_priv = radeon_dig_connector;
1365 drm_connector_attach_property(&radeon_connector->base, 1621 drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type);
1366 rdev->mode_info.tv_std_property, 1622 drm_connector_helper_add(&radeon_connector->base, &radeon_lvds_connector_helper_funcs);
1367 radeon_atombios_get_tv_info(rdev)); 1623 if (i2c_bus->valid) {
1368 /* no HPD on analog connectors */ 1624 radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
1369 radeon_connector->hpd.hpd = RADEON_HPD_NONE; 1625 if (!radeon_connector->ddc_bus)
1370 connector->interlace_allowed = false; 1626 DRM_ERROR("LVDS: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
1371 connector->doublescan_allowed = false; 1627 }
1372 break; 1628 drm_connector_attach_property(&radeon_connector->base,
1373 case DRM_MODE_CONNECTOR_LVDS: 1629 dev->mode_config.scaling_mode_property,
1374 radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL); 1630 DRM_MODE_SCALE_FULLSCREEN);
1375 if (!radeon_dig_connector) 1631 subpixel_order = SubPixelHorizontalRGB;
1376 goto failed; 1632 connector->interlace_allowed = false;
1377 radeon_dig_connector->igp_lane_info = igp_lane_info; 1633 connector->doublescan_allowed = false;
1378 radeon_connector->con_priv = radeon_dig_connector; 1634 break;
1379 drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type);
1380 drm_connector_helper_add(&radeon_connector->base, &radeon_lvds_connector_helper_funcs);
1381 if (i2c_bus->valid) {
1382 radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
1383 if (!radeon_connector->ddc_bus)
1384 DRM_ERROR("LVDS: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
1385 } 1635 }
1386 drm_connector_attach_property(&radeon_connector->base,
1387 dev->mode_config.scaling_mode_property,
1388 DRM_MODE_SCALE_FULLSCREEN);
1389 subpixel_order = SubPixelHorizontalRGB;
1390 connector->interlace_allowed = false;
1391 connector->doublescan_allowed = false;
1392 break;
1393 } 1636 }
1394 1637
1395 if (radeon_connector->hpd.hpd == RADEON_HPD_NONE) { 1638 if (radeon_connector->hpd.hpd == RADEON_HPD_NONE) {
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 890217e678d3..5b61364e31f4 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -923,6 +923,9 @@ int radeon_resume_kms(struct drm_device *dev)
923 radeon_fbdev_set_suspend(rdev, 0); 923 radeon_fbdev_set_suspend(rdev, 0);
924 console_unlock(); 924 console_unlock();
925 925
926 /* init dig PHYs */
927 if (rdev->is_atom_bios)
928 radeon_atom_encoder_init(rdev);
926 /* reset hpd state */ 929 /* reset hpd state */
927 radeon_hpd_init(rdev); 930 radeon_hpd_init(rdev);
928 /* blat the mode back in */ 931 /* blat the mode back in */
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index bdbab5c43bdc..ae247eec87c0 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -1087,8 +1087,9 @@ void radeon_compute_pll_legacy(struct radeon_pll *pll,
1087 *frac_fb_div_p = best_frac_feedback_div; 1087 *frac_fb_div_p = best_frac_feedback_div;
1088 *ref_div_p = best_ref_div; 1088 *ref_div_p = best_ref_div;
1089 *post_div_p = best_post_div; 1089 *post_div_p = best_post_div;
1090 DRM_DEBUG_KMS("%d %d, pll dividers - fb: %d.%d ref: %d, post %d\n", 1090 DRM_DEBUG_KMS("%lld %d, pll dividers - fb: %d.%d ref: %d, post %d\n",
1091 freq, best_freq / 1000, best_feedback_div, best_frac_feedback_div, 1091 (long long)freq,
1092 best_freq / 1000, best_feedback_div, best_frac_feedback_div,
1092 best_ref_div, best_post_div); 1093 best_ref_div, best_post_div);
1093 1094
1094} 1095}
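
Why the format-string change above matters (that freq is a 64-bit quantity here is inferred from the cast the patch adds):

    /* "%d" with a 64-bit freq reads the wrong bytes off the varargs
     * on 32-bit kernels; casting to (long long) and printing with
     * "%lld" is the portable idiom.
     */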
@@ -1344,6 +1345,11 @@ int radeon_modeset_init(struct radeon_device *rdev)
1344 if (!ret) { 1345 if (!ret) {
1345 return ret; 1346 return ret;
1346 } 1347 }
1348
1349 /* init dig PHYs */
1350 if (rdev->is_atom_bios)
1351 radeon_atom_encoder_init(rdev);
1352
1347 /* initialize hpd */ 1353 /* initialize hpd */
1348 radeon_hpd_init(rdev); 1354 radeon_hpd_init(rdev);
1349 1355
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 63d2de8771dc..1d330606292f 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -50,9 +50,10 @@
50 * 2.7.0 - fixups for r600 2D tiling support. (no external ABI change), add eg dyn gpr regs 50 * 2.7.0 - fixups for r600 2D tiling support. (no external ABI change), add eg dyn gpr regs
51 * 2.8.0 - pageflip support, r500 US_FORMAT regs. r500 ARGB2101010 colorbuf, r300->r500 CMASK, clock crystal query 51 * 2.8.0 - pageflip support, r500 US_FORMAT regs. r500 ARGB2101010 colorbuf, r300->r500 CMASK, clock crystal query
52 * 2.9.0 - r600 tiling (s3tc,rgtc) working, SET_PREDICATION packet 3 on r600 + eg, backend query 52 * 2.9.0 - r600 tiling (s3tc,rgtc) working, SET_PREDICATION packet 3 on r600 + eg, backend query
53 * 2.10.0 - fusion 2D tiling
53 */ 54 */
54#define KMS_DRIVER_MAJOR 2 55#define KMS_DRIVER_MAJOR 2
55#define KMS_DRIVER_MINOR 9 56#define KMS_DRIVER_MINOR 10
56#define KMS_DRIVER_PATCHLEVEL 0 57#define KMS_DRIVER_PATCHLEVEL 0
57int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); 58int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
58int radeon_driver_unload_kms(struct drm_device *dev); 59int radeon_driver_unload_kms(struct drm_device *dev);
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index b4274883227f..1b557554696e 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -229,6 +229,22 @@ radeon_get_connector_for_encoder(struct drm_encoder *encoder)
229 return NULL; 229 return NULL;
230} 230}
231 231
232static struct drm_connector *
233radeon_get_connector_for_encoder_init(struct drm_encoder *encoder)
234{
235 struct drm_device *dev = encoder->dev;
236 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
237 struct drm_connector *connector;
238 struct radeon_connector *radeon_connector;
239
240 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
241 radeon_connector = to_radeon_connector(connector);
242 if (radeon_encoder->devices & radeon_connector->devices)
243 return connector;
244 }
245 return NULL;
246}
247
232struct drm_encoder *radeon_atom_get_external_encoder(struct drm_encoder *encoder) 248struct drm_encoder *radeon_atom_get_external_encoder(struct drm_encoder *encoder)
233{ 249{
234 struct drm_device *dev = encoder->dev; 250 struct drm_device *dev = encoder->dev;
@@ -250,6 +266,25 @@ struct drm_encoder *radeon_atom_get_external_encoder(struct drm_encoder *encoder
250 return NULL; 266 return NULL;
251} 267}
252 268
269bool radeon_encoder_is_dp_bridge(struct drm_encoder *encoder)
270{
271 struct drm_encoder *other_encoder = radeon_atom_get_external_encoder(encoder);
272
273 if (other_encoder) {
274 struct radeon_encoder *radeon_encoder = to_radeon_encoder(other_encoder);
275
276 switch (radeon_encoder->encoder_id) {
277 case ENCODER_OBJECT_ID_TRAVIS:
278 case ENCODER_OBJECT_ID_NUTMEG:
279 return true;
280 default:
281 return false;
282 }
283 }
284
285 return false;
286}
287
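
Context for the two encoder IDs matched above (board-level detail stated from general knowledge of these parts, not from this patch):

    /* TRAVIS (DP->LVDS) and NUTMEG (DP->VGA) are external DisplayPort
     * bridge chips used on DCE4.1-era APU boards; the GPU-side link to
     * them is always DP, which is why atombios_get_encoder_mode()
     * below forces ATOM_ENCODER_MODE_DP whenever such a bridge is
     * attached.
     */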
253void radeon_panel_mode_fixup(struct drm_encoder *encoder, 288void radeon_panel_mode_fixup(struct drm_encoder *encoder,
254 struct drm_display_mode *adjusted_mode) 289 struct drm_display_mode *adjusted_mode)
255{ 290{
@@ -621,6 +656,10 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
621 struct radeon_connector *radeon_connector; 656 struct radeon_connector *radeon_connector;
622 struct radeon_connector_atom_dig *dig_connector; 657 struct radeon_connector_atom_dig *dig_connector;
623 658
659 /* dp bridges are always DP */
660 if (radeon_encoder_is_dp_bridge(encoder))
661 return ATOM_ENCODER_MODE_DP;
662
624 connector = radeon_get_connector_for_encoder(encoder); 663 connector = radeon_get_connector_for_encoder(encoder);
625 if (!connector) { 664 if (!connector) {
626 switch (radeon_encoder->encoder_id) { 665 switch (radeon_encoder->encoder_id) {
@@ -668,7 +707,6 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
668 return ATOM_ENCODER_MODE_LVDS; 707 return ATOM_ENCODER_MODE_LVDS;
669 break; 708 break;
670 case DRM_MODE_CONNECTOR_DisplayPort: 709 case DRM_MODE_CONNECTOR_DisplayPort:
671 case DRM_MODE_CONNECTOR_eDP:
672 dig_connector = radeon_connector->con_priv; 710 dig_connector = radeon_connector->con_priv;
673 if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) || 711 if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
674 (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) 712 (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP))
@@ -682,6 +720,8 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
682 } else 720 } else
683 return ATOM_ENCODER_MODE_DVI; 721 return ATOM_ENCODER_MODE_DVI;
684 break; 722 break;
723 case DRM_MODE_CONNECTOR_eDP:
724 return ATOM_ENCODER_MODE_DP;
685 case DRM_MODE_CONNECTOR_DVIA: 725 case DRM_MODE_CONNECTOR_DVIA:
686 case DRM_MODE_CONNECTOR_VGA: 726 case DRM_MODE_CONNECTOR_VGA:
687 return ATOM_ENCODER_MODE_CRT; 727 return ATOM_ENCODER_MODE_CRT;
@@ -747,7 +787,7 @@ union dig_encoder_control {
747}; 787};
748 788
749void 789void
750atombios_dig_encoder_setup(struct drm_encoder *encoder, int action) 790atombios_dig_encoder_setup(struct drm_encoder *encoder, int action, int panel_mode)
751{ 791{
752 struct drm_device *dev = encoder->dev; 792 struct drm_device *dev = encoder->dev;
753 struct radeon_device *rdev = dev->dev_private; 793 struct radeon_device *rdev = dev->dev_private;
@@ -760,6 +800,7 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action)
760 int dp_clock = 0; 800 int dp_clock = 0;
761 int dp_lane_count = 0; 801 int dp_lane_count = 0;
762 int hpd_id = RADEON_HPD_NONE; 802 int hpd_id = RADEON_HPD_NONE;
803 int bpc = 8;
763 804
764 if (connector) { 805 if (connector) {
765 struct radeon_connector *radeon_connector = to_radeon_connector(connector); 806 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
@@ -769,6 +810,7 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action)
769 dp_clock = dig_connector->dp_clock; 810 dp_clock = dig_connector->dp_clock;
770 dp_lane_count = dig_connector->dp_lane_count; 811 dp_lane_count = dig_connector->dp_lane_count;
771 hpd_id = radeon_connector->hpd.hpd; 812 hpd_id = radeon_connector->hpd.hpd;
813 bpc = connector->display_info.bpc;
772 } 814 }
773 815
774 /* no dig encoder assigned */ 816 /* no dig encoder assigned */
@@ -791,7 +833,10 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action)
791 833
792 args.v1.ucAction = action; 834 args.v1.ucAction = action;
793 args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); 835 args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
794 args.v1.ucEncoderMode = atombios_get_encoder_mode(encoder); 836 if (action == ATOM_ENCODER_CMD_SETUP_PANEL_MODE)
837 args.v3.ucPanelMode = panel_mode;
838 else
839 args.v1.ucEncoderMode = atombios_get_encoder_mode(encoder);
795 840
796 if ((args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP) || 841 if ((args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP) ||
797 (args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP_MST)) 842 (args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP_MST))
@@ -810,7 +855,27 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action)
810 args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V4_DPLINKRATE_5_40GHZ; 855 args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V4_DPLINKRATE_5_40GHZ;
811 } 856 }
812 args.v4.acConfig.ucDigSel = dig->dig_encoder; 857 args.v4.acConfig.ucDigSel = dig->dig_encoder;
813 args.v4.ucBitPerColor = PANEL_8BIT_PER_COLOR; 858 switch (bpc) {
859 case 0:
860 args.v4.ucBitPerColor = PANEL_BPC_UNDEFINE;
861 break;
862 case 6:
863 args.v4.ucBitPerColor = PANEL_6BIT_PER_COLOR;
864 break;
865 case 8:
866 default:
867 args.v4.ucBitPerColor = PANEL_8BIT_PER_COLOR;
868 break;
869 case 10:
870 args.v4.ucBitPerColor = PANEL_10BIT_PER_COLOR;
871 break;
872 case 12:
873 args.v4.ucBitPerColor = PANEL_12BIT_PER_COLOR;
874 break;
875 case 16:
876 args.v4.ucBitPerColor = PANEL_16BIT_PER_COLOR;
877 break;
878 }
814 if (hpd_id == RADEON_HPD_NONE) 879 if (hpd_id == RADEON_HPD_NONE)
815 args.v4.ucHPD_ID = 0; 880 args.v4.ucHPD_ID = 0;
816 else 881 else
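
The bpc-to-ucBitPerColor switch added here is repeated verbatim for the v3 encoder args just below and again for the external-encoder path further down; a hypothetical helper (the name is invented, not part of the patch) shows how the mapping could be shared:

    static u8 radeon_atom_panel_bpc(int bpc)
    {
        switch (bpc) {
        case 0:  return PANEL_BPC_UNDEFINE;
        case 6:  return PANEL_6BIT_PER_COLOR;
        case 10: return PANEL_10BIT_PER_COLOR;
        case 12: return PANEL_12BIT_PER_COLOR;
        case 16: return PANEL_16BIT_PER_COLOR;
        case 8:
        default: return PANEL_8BIT_PER_COLOR;
        }
    }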
@@ -819,7 +884,27 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action)
819 if ((args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP) && (dp_clock == 270000)) 884 if ((args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP) && (dp_clock == 270000))
820 args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V3_DPLINKRATE_2_70GHZ; 885 args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V3_DPLINKRATE_2_70GHZ;
821 args.v3.acConfig.ucDigSel = dig->dig_encoder; 886 args.v3.acConfig.ucDigSel = dig->dig_encoder;
822 args.v3.ucBitPerColor = PANEL_8BIT_PER_COLOR; 887 switch (bpc) {
888 case 0:
889 args.v3.ucBitPerColor = PANEL_BPC_UNDEFINE;
890 break;
891 case 6:
892 args.v3.ucBitPerColor = PANEL_6BIT_PER_COLOR;
893 break;
894 case 8:
895 default:
896 args.v3.ucBitPerColor = PANEL_8BIT_PER_COLOR;
897 break;
898 case 10:
899 args.v3.ucBitPerColor = PANEL_10BIT_PER_COLOR;
900 break;
901 case 12:
902 args.v3.ucBitPerColor = PANEL_12BIT_PER_COLOR;
903 break;
904 case 16:
905 args.v3.ucBitPerColor = PANEL_16BIT_PER_COLOR;
906 break;
907 }
823 } else { 908 } else {
824 if ((args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP) && (dp_clock == 270000)) 909 if ((args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP) && (dp_clock == 270000))
825 args.v1.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ; 910 args.v1.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ;
@@ -859,7 +944,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
859 struct radeon_device *rdev = dev->dev_private; 944 struct radeon_device *rdev = dev->dev_private;
860 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 945 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
861 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; 946 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
862 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); 947 struct drm_connector *connector;
863 union dig_transmitter_control args; 948 union dig_transmitter_control args;
864 int index = 0; 949 int index = 0;
865 uint8_t frev, crev; 950 uint8_t frev, crev;
@@ -870,6 +955,11 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
870 int connector_object_id = 0; 955 int connector_object_id = 0;
871 int igp_lane_info = 0; 956 int igp_lane_info = 0;
872 957
958 if (action == ATOM_TRANSMITTER_ACTION_INIT)
959 connector = radeon_get_connector_for_encoder_init(encoder);
960 else
961 connector = radeon_get_connector_for_encoder(encoder);
962
873 if (connector) { 963 if (connector) {
874 struct radeon_connector *radeon_connector = to_radeon_connector(connector); 964 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
875 struct radeon_connector_atom_dig *dig_connector = 965 struct radeon_connector_atom_dig *dig_connector =
@@ -931,10 +1021,10 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
931 else 1021 else
932 args.v3.ucLaneNum = 4; 1022 args.v3.ucLaneNum = 4;
933 1023
934 if (dig->linkb) { 1024 if (dig->linkb)
935 args.v3.acConfig.ucLinkSel = 1; 1025 args.v3.acConfig.ucLinkSel = 1;
1026 if (dig->dig_encoder & 1)
936 args.v3.acConfig.ucEncoderSel = 1; 1027 args.v3.acConfig.ucEncoderSel = 1;
937 }
938 1028
939 /* Select the PLL for the PHY 1029 /* Select the PLL for the PHY
940 * DP PHY should be clocked from external src if there is 1030 * DP PHY should be clocked from external src if there is
@@ -946,11 +1036,16 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
946 } 1036 }
947 1037
948 if (ASIC_IS_DCE5(rdev)) { 1038 if (ASIC_IS_DCE5(rdev)) {
949 if (is_dp && rdev->clock.dp_extclk) 1039 /* On DCE5 DCPLL usually generates the DP ref clock */
950 args.v4.acConfig.ucRefClkSource = 3; /* external src */ 1040 if (is_dp) {
951 else 1041 if (rdev->clock.dp_extclk)
1042 args.v4.acConfig.ucRefClkSource = ENCODER_REFCLK_SRC_EXTCLK;
1043 else
1044 args.v4.acConfig.ucRefClkSource = ENCODER_REFCLK_SRC_DCPLL;
1045 } else
952 args.v4.acConfig.ucRefClkSource = pll_id; 1046 args.v4.acConfig.ucRefClkSource = pll_id;
953 } else { 1047 } else {
1048 /* On DCE4, if there is an external clock, it generates the DP ref clock */
954 if (is_dp && rdev->clock.dp_extclk) 1049 if (is_dp && rdev->clock.dp_extclk)
955 args.v3.acConfig.ucRefClkSource = 2; /* external src */ 1050 args.v3.acConfig.ucRefClkSource = 2; /* external src */
956 else 1051 else
@@ -1047,7 +1142,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
1047 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); 1142 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
1048} 1143}
1049 1144
1050void 1145bool
1051atombios_set_edp_panel_power(struct drm_connector *connector, int action) 1146atombios_set_edp_panel_power(struct drm_connector *connector, int action)
1052{ 1147{
1053 struct radeon_connector *radeon_connector = to_radeon_connector(connector); 1148 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
@@ -1058,23 +1153,37 @@ atombios_set_edp_panel_power(struct drm_connector *connector, int action)
1058 uint8_t frev, crev; 1153 uint8_t frev, crev;
1059 1154
1060 if (connector->connector_type != DRM_MODE_CONNECTOR_eDP) 1155 if (connector->connector_type != DRM_MODE_CONNECTOR_eDP)
1061 return; 1156 goto done;
1062 1157
1063 if (!ASIC_IS_DCE4(rdev)) 1158 if (!ASIC_IS_DCE4(rdev))
1064 return; 1159 goto done;
1065 1160
1066 if ((action != ATOM_TRANSMITTER_ACTION_POWER_ON) && 1161 if ((action != ATOM_TRANSMITTER_ACTION_POWER_ON) &&
1067 (action != ATOM_TRANSMITTER_ACTION_POWER_OFF)) 1162 (action != ATOM_TRANSMITTER_ACTION_POWER_OFF))
1068 return; 1163 goto done;
1069 1164
1070 if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev)) 1165 if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
1071 return; 1166 goto done;
1072 1167
1073 memset(&args, 0, sizeof(args)); 1168 memset(&args, 0, sizeof(args));
1074 1169
1075 args.v1.ucAction = action; 1170 args.v1.ucAction = action;
1076 1171
1077 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); 1172 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
1173
1174 /* wait for the panel to power up */
1175 if (action == ATOM_TRANSMITTER_ACTION_POWER_ON) {
1176 int i;
1177
1178 for (i = 0; i < 300; i++) {
1179 if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd))
1180 return true;
1181 mdelay(1);
1182 }
1183 return false;
1184 }
1185done:
1186 return true;
1078} 1187}
1079 1188
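
atombios_set_edp_panel_power() now polls HPD for up to 300 ms after powering the panel on and reports the outcome; a hypothetical caller (a sketch, not taken from this patch) would use the new bool like so:

    /* hypothetical usage sketch */
    if (!atombios_set_edp_panel_power(connector,
                      ATOM_TRANSMITTER_ACTION_POWER_ON))
        DRM_ERROR("eDP panel never asserted HPD; link training may fail\n");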
1080union external_encoder_control { 1189union external_encoder_control {
@@ -1092,13 +1201,19 @@ atombios_external_encoder_setup(struct drm_encoder *encoder,
1092 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 1201 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
1093 struct radeon_encoder *ext_radeon_encoder = to_radeon_encoder(ext_encoder); 1202 struct radeon_encoder *ext_radeon_encoder = to_radeon_encoder(ext_encoder);
1094 union external_encoder_control args; 1203 union external_encoder_control args;
1095 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); 1204 struct drm_connector *connector;
1096 int index = GetIndexIntoMasterTable(COMMAND, ExternalEncoderControl); 1205 int index = GetIndexIntoMasterTable(COMMAND, ExternalEncoderControl);
1097 u8 frev, crev; 1206 u8 frev, crev;
1098 int dp_clock = 0; 1207 int dp_clock = 0;
1099 int dp_lane_count = 0; 1208 int dp_lane_count = 0;
1100 int connector_object_id = 0; 1209 int connector_object_id = 0;
1101 u32 ext_enum = (ext_radeon_encoder->encoder_enum & ENUM_ID_MASK) >> ENUM_ID_SHIFT; 1210 u32 ext_enum = (ext_radeon_encoder->encoder_enum & ENUM_ID_MASK) >> ENUM_ID_SHIFT;
1211 int bpc = 8;
1212
1213 if (action == EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT)
1214 connector = radeon_get_connector_for_encoder_init(encoder);
1215 else
1216 connector = radeon_get_connector_for_encoder(encoder);
1102 1217
1103 if (connector) { 1218 if (connector) {
1104 struct radeon_connector *radeon_connector = to_radeon_connector(connector); 1219 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
@@ -1109,6 +1224,7 @@ atombios_external_encoder_setup(struct drm_encoder *encoder,
1109 dp_lane_count = dig_connector->dp_lane_count; 1224 dp_lane_count = dig_connector->dp_lane_count;
1110 connector_object_id = 1225 connector_object_id =
1111 (radeon_connector->connector_object_id & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT; 1226 (radeon_connector->connector_object_id & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
1227 bpc = connector->display_info.bpc;
1112 } 1228 }
1113 1229
1114 memset(&args, 0, sizeof(args)); 1230 memset(&args, 0, sizeof(args));
@@ -1166,7 +1282,27 @@ atombios_external_encoder_setup(struct drm_encoder *encoder,
1166 args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_ENCODER3; 1282 args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_ENCODER3;
1167 break; 1283 break;
1168 } 1284 }
1169 args.v3.sExtEncoder.ucBitPerColor = PANEL_8BIT_PER_COLOR; 1285 switch (bpc) {
1286 case 0:
1287 args.v3.sExtEncoder.ucBitPerColor = PANEL_BPC_UNDEFINE;
1288 break;
1289 case 6:
1290 args.v3.sExtEncoder.ucBitPerColor = PANEL_6BIT_PER_COLOR;
1291 break;
1292 case 8:
1293 default:
1294 args.v3.sExtEncoder.ucBitPerColor = PANEL_8BIT_PER_COLOR;
1295 break;
1296 case 10:
1297 args.v3.sExtEncoder.ucBitPerColor = PANEL_10BIT_PER_COLOR;
1298 break;
1299 case 12:
1300 args.v3.sExtEncoder.ucBitPerColor = PANEL_12BIT_PER_COLOR;
1301 break;
1302 case 16:
1303 args.v3.sExtEncoder.ucBitPerColor = PANEL_16BIT_PER_COLOR;
1304 break;
1305 }
1170 break; 1306 break;
1171 default: 1307 default:
1172 DRM_ERROR("Unknown table version: %d, %d\n", frev, crev); 1308 DRM_ERROR("Unknown table version: %d, %d\n", frev, crev);
@@ -1307,9 +1443,11 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
1307 ATOM_TRANSMITTER_ACTION_POWER_ON); 1443 ATOM_TRANSMITTER_ACTION_POWER_ON);
1308 radeon_dig_connector->edp_on = true; 1444 radeon_dig_connector->edp_on = true;
1309 } 1445 }
1310 dp_link_train(encoder, connector);
1311 if (ASIC_IS_DCE4(rdev)) 1446 if (ASIC_IS_DCE4(rdev))
1312 atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_ON); 1447 atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_OFF, 0);
1448 radeon_dp_link_train(encoder, connector);
1449 if (ASIC_IS_DCE4(rdev))
1450 atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_ON, 0);
1313 } 1451 }
1314 if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) 1452 if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
1315 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_LCD_BLON, 0, 0); 1453 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_LCD_BLON, 0, 0);
@@ -1322,7 +1460,7 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
1322 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); 1460 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
1323 1461
1324 if (ASIC_IS_DCE4(rdev)) 1462 if (ASIC_IS_DCE4(rdev))
1325 atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_OFF); 1463 atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_OFF, 0);
1326 if (connector && 1464 if (connector &&
1327 (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) { 1465 (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
1328 struct radeon_connector *radeon_connector = to_radeon_connector(connector); 1466 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
@@ -1601,12 +1739,9 @@ static int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder)
1601 /* DCE4/5 */ 1739 /* DCE4/5 */
1602 if (ASIC_IS_DCE4(rdev)) { 1740 if (ASIC_IS_DCE4(rdev)) {
1603 dig = radeon_encoder->enc_priv; 1741 dig = radeon_encoder->enc_priv;
1604 if (ASIC_IS_DCE41(rdev)) { 1742 if (ASIC_IS_DCE41(rdev))
1605 if (dig->linkb) 1743 return radeon_crtc->crtc_id;
1606 return 1; 1744 else {
1607 else
1608 return 0;
1609 } else {
1610 switch (radeon_encoder->encoder_id) { 1745 switch (radeon_encoder->encoder_id) {
1611 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: 1746 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
1612 if (dig->linkb) 1747 if (dig->linkb)
@@ -1662,6 +1797,34 @@ static int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder)
1662 return 1; 1797 return 1;
1663} 1798}
1664 1799
1800/* This only needs to be called once at startup */
1801void
1802radeon_atom_encoder_init(struct radeon_device *rdev)
1803{
1804 struct drm_device *dev = rdev->ddev;
1805 struct drm_encoder *encoder;
1806
1807 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
1808 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
1809 struct drm_encoder *ext_encoder = radeon_atom_get_external_encoder(encoder);
1810
1811 switch (radeon_encoder->encoder_id) {
1812 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
1813 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
1814 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
1815 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
1816 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_INIT, 0, 0);
1817 break;
1818 default:
1819 break;
1820 }
1821
1822 if (ext_encoder && ASIC_IS_DCE41(rdev))
1823 atombios_external_encoder_setup(encoder, ext_encoder,
1824 EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT);
1825 }
1826}
1827
1665static void 1828static void
1666radeon_atom_encoder_mode_set(struct drm_encoder *encoder, 1829radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
1667 struct drm_display_mode *mode, 1830 struct drm_display_mode *mode,
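radeon_atom_encoder_init() above hoists the one-shot ATOM_TRANSMITTER_ACTION_INIT (and, on DCE4.1, the external-encoder ENCODER_INIT) out of the modeset path; the hunks below delete the matching per-modeset calls. A hedged sketch of the intended call site, which is outside this diff and therefore assumed:

	/* once, during modeset init, after all encoders are registered */
	radeon_atom_encoder_init(rdev);

Running INIT exactly once at startup avoids re-initializing the transmitters on every mode set.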
@@ -1696,19 +1859,17 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
1696 /* disable the transmitter */ 1859 /* disable the transmitter */
1697 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0); 1860 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
1698 /* setup and enable the encoder */ 1861 /* setup and enable the encoder */
1699 atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP); 1862 atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP, 0);
1700 1863
1701 /* init and enable the transmitter */ 1864 /* enable the transmitter */
1702 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_INIT, 0, 0);
1703 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0); 1865 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
1704 } else { 1866 } else {
1705 /* disable the encoder and transmitter */ 1867 /* disable the encoder and transmitter */
1706 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0); 1868 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
1707 atombios_dig_encoder_setup(encoder, ATOM_DISABLE); 1869 atombios_dig_encoder_setup(encoder, ATOM_DISABLE, 0);
1708 1870
1709 /* setup and enable the encoder and transmitter */ 1871 /* setup and enable the encoder and transmitter */
1710 atombios_dig_encoder_setup(encoder, ATOM_ENABLE); 1872 atombios_dig_encoder_setup(encoder, ATOM_ENABLE, 0);
1711 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_INIT, 0, 0);
1712 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP, 0, 0); 1873 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP, 0, 0);
1713 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0); 1874 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
1714 } 1875 }
@@ -1733,12 +1894,10 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
1733 } 1894 }
1734 1895
1735 if (ext_encoder) { 1896 if (ext_encoder) {
1736 if (ASIC_IS_DCE41(rdev)) { 1897 if (ASIC_IS_DCE41(rdev))
1737 atombios_external_encoder_setup(encoder, ext_encoder,
1738 EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT);
1739 atombios_external_encoder_setup(encoder, ext_encoder, 1898 atombios_external_encoder_setup(encoder, ext_encoder,
1740 EXTERNAL_ENCODER_ACTION_V3_ENCODER_SETUP); 1899 EXTERNAL_ENCODER_ACTION_V3_ENCODER_SETUP);
1741 } else 1900 else
1742 atombios_external_encoder_setup(encoder, ext_encoder, ATOM_ENABLE); 1901 atombios_external_encoder_setup(encoder, ext_encoder, ATOM_ENABLE);
1743 } 1902 }
1744 1903
@@ -1845,8 +2004,9 @@ static void radeon_atom_encoder_prepare(struct drm_encoder *encoder)
1845 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 2004 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
1846 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); 2005 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
1847 2006
1848 if (radeon_encoder->active_device & 2007 if ((radeon_encoder->active_device &
1849 (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) { 2008 (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) ||
2009 radeon_encoder_is_dp_bridge(encoder)) {
1850 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; 2010 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
1851 if (dig) 2011 if (dig)
1852 dig->dig_encoder = radeon_atom_pick_dig_encoder(encoder); 2012 dig->dig_encoder = radeon_atom_pick_dig_encoder(encoder);
@@ -1855,11 +2015,17 @@ static void radeon_atom_encoder_prepare(struct drm_encoder *encoder)
1855 radeon_atom_output_lock(encoder, true); 2015 radeon_atom_output_lock(encoder, true);
1856 radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF); 2016 radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
1857 2017
1858 /* select the clock/data port if it uses a router */
1859 if (connector) { 2018 if (connector) {
1860 struct radeon_connector *radeon_connector = to_radeon_connector(connector); 2019 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
2020
2021 /* select the clock/data port if it uses a router */
1861 if (radeon_connector->router.cd_valid) 2022 if (radeon_connector->router.cd_valid)
1862 radeon_router_select_cd_port(radeon_connector); 2023 radeon_router_select_cd_port(radeon_connector);
2024
2025 /* turn eDP panel on for mode set */
2026 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
2027 atombios_set_edp_panel_power(connector,
2028 ATOM_TRANSMITTER_ACTION_POWER_ON);
1863 } 2029 }
1864 2030
1865 /* this is needed for the pll/ss setup to work correctly in some cases */ 2031 /* this is needed for the pll/ss setup to work correctly in some cases */
@@ -1914,7 +2080,7 @@ static void radeon_atom_encoder_disable(struct drm_encoder *encoder)
1914 else { 2080 else {
1915 /* disable the encoder and transmitter */ 2081 /* disable the encoder and transmitter */
1916 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0); 2082 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
1917 atombios_dig_encoder_setup(encoder, ATOM_DISABLE); 2083 atombios_dig_encoder_setup(encoder, ATOM_DISABLE, 0);
1918 } 2084 }
1919 break; 2085 break;
1920 case ENCODER_OBJECT_ID_INTERNAL_DDI: 2086 case ENCODER_OBJECT_ID_INTERNAL_DDI:
@@ -2116,8 +2282,6 @@ radeon_add_atom_encoder(struct drm_device *dev,
2116 } else { 2282 } else {
2117 drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TMDS); 2283 drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TMDS);
2118 radeon_encoder->enc_priv = radeon_atombios_set_dig_info(radeon_encoder); 2284 radeon_encoder->enc_priv = radeon_atombios_set_dig_info(radeon_encoder);
2119 if (ASIC_IS_AVIVO(rdev))
2120 radeon_encoder->underscan_type = UNDERSCAN_AUTO;
2121 } 2285 }
2122 drm_encoder_helper_add(encoder, &radeon_atom_dig_helper_funcs); 2286 drm_encoder_helper_add(encoder, &radeon_atom_dig_helper_funcs);
2123 break; 2287 break;
@@ -2150,8 +2314,6 @@ radeon_add_atom_encoder(struct drm_device *dev,
2150 } else { 2314 } else {
2151 drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TMDS); 2315 drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TMDS);
2152 radeon_encoder->enc_priv = radeon_atombios_set_dig_info(radeon_encoder); 2316 radeon_encoder->enc_priv = radeon_atombios_set_dig_info(radeon_encoder);
2153 if (ASIC_IS_AVIVO(rdev))
2154 radeon_encoder->underscan_type = UNDERSCAN_AUTO;
2155 } 2317 }
2156 drm_encoder_helper_add(encoder, &radeon_atom_dig_helper_funcs); 2318 drm_encoder_helper_add(encoder, &radeon_atom_dig_helper_funcs);
2157 break; 2319 break;
diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c
index 983cbac75af0..781196db792f 100644
--- a/drivers/gpu/drm/radeon/radeon_i2c.c
+++ b/drivers/gpu/drm/radeon/radeon_i2c.c
@@ -888,6 +888,7 @@ struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev,
888 888
889 i2c->rec = *rec; 889 i2c->rec = *rec;
890 i2c->adapter.owner = THIS_MODULE; 890 i2c->adapter.owner = THIS_MODULE;
891 i2c->adapter.class = I2C_CLASS_DDC;
891 i2c->dev = dev; 892 i2c->dev = dev;
892 i2c_set_adapdata(&i2c->adapter, i2c); 893 i2c_set_adapdata(&i2c->adapter, i2c);
893 if (rec->mm_i2c || 894 if (rec->mm_i2c ||
@@ -947,6 +948,7 @@ struct radeon_i2c_chan *radeon_i2c_create_dp(struct drm_device *dev,
947 948
948 i2c->rec = *rec; 949 i2c->rec = *rec;
949 i2c->adapter.owner = THIS_MODULE; 950 i2c->adapter.owner = THIS_MODULE;
951 i2c->adapter.class = I2C_CLASS_DDC;
950 i2c->dev = dev; 952 i2c->dev = dev;
951 snprintf(i2c->adapter.name, sizeof(i2c->adapter.name), 953 snprintf(i2c->adapter.name, sizeof(i2c->adapter.name),
952 "Radeon aux bus %s", name); 954 "Radeon aux bus %s", name);
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 9c57538231d5..977a341266b6 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -464,22 +464,27 @@ radeon_atombios_get_tv_info(struct radeon_device *rdev);
464extern struct drm_connector * 464extern struct drm_connector *
465radeon_get_connector_for_encoder(struct drm_encoder *encoder); 465radeon_get_connector_for_encoder(struct drm_encoder *encoder);
466 466
467extern bool radeon_encoder_is_dp_bridge(struct drm_encoder *encoder);
468extern bool radeon_connector_encoder_is_dp_bridge(struct drm_connector *connector);
469extern bool radeon_connector_encoder_is_hbr2(struct drm_connector *connector);
470extern bool radeon_connector_is_dp12_capable(struct drm_connector *connector);
471
467extern void radeon_connector_hotplug(struct drm_connector *connector); 472extern void radeon_connector_hotplug(struct drm_connector *connector);
468extern bool radeon_dp_needs_link_train(struct radeon_connector *radeon_connector); 473extern int radeon_dp_mode_valid_helper(struct drm_connector *connector,
469extern int radeon_dp_mode_valid_helper(struct radeon_connector *radeon_connector,
470 struct drm_display_mode *mode); 474 struct drm_display_mode *mode);
471extern void radeon_dp_set_link_config(struct drm_connector *connector, 475extern void radeon_dp_set_link_config(struct drm_connector *connector,
472 struct drm_display_mode *mode); 476 struct drm_display_mode *mode);
473extern void dp_link_train(struct drm_encoder *encoder, 477extern void radeon_dp_link_train(struct drm_encoder *encoder,
474 struct drm_connector *connector); 478 struct drm_connector *connector);
475extern u8 radeon_dp_getsinktype(struct radeon_connector *radeon_connector); 479extern u8 radeon_dp_getsinktype(struct radeon_connector *radeon_connector);
476extern bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector); 480extern bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector);
477extern void atombios_dig_encoder_setup(struct drm_encoder *encoder, int action); 481extern void atombios_dig_encoder_setup(struct drm_encoder *encoder, int action, int panel_mode);
482extern void radeon_atom_encoder_init(struct radeon_device *rdev);
478extern void atombios_dig_transmitter_setup(struct drm_encoder *encoder, 483extern void atombios_dig_transmitter_setup(struct drm_encoder *encoder,
479 int action, uint8_t lane_num, 484 int action, uint8_t lane_num,
480 uint8_t lane_set); 485 uint8_t lane_set);
481extern int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, 486extern int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
482 uint8_t write_byte, uint8_t *read_byte); 487 u8 write_byte, u8 *read_byte);
483 488
484extern void radeon_i2c_init(struct radeon_device *rdev); 489extern void radeon_i2c_init(struct radeon_device *rdev);
485extern void radeon_i2c_fini(struct radeon_device *rdev); 490extern void radeon_i2c_fini(struct radeon_device *rdev);
@@ -545,7 +550,7 @@ struct drm_encoder *radeon_encoder_legacy_tmds_ext_add(struct drm_device *dev, i
545extern void atombios_dvo_setup(struct drm_encoder *encoder, int action); 550extern void atombios_dvo_setup(struct drm_encoder *encoder, int action);
546extern void atombios_digital_setup(struct drm_encoder *encoder, int action); 551extern void atombios_digital_setup(struct drm_encoder *encoder, int action);
547extern int atombios_get_encoder_mode(struct drm_encoder *encoder); 552extern int atombios_get_encoder_mode(struct drm_encoder *encoder);
548extern void atombios_set_edp_panel_power(struct drm_connector *connector, int action); 553extern bool atombios_set_edp_panel_power(struct drm_connector *connector, int action);
549extern void radeon_encoder_set_active_device(struct drm_encoder *encoder); 554extern void radeon_encoder_set_active_device(struct drm_encoder *encoder);
550 555
551extern void radeon_crtc_load_lut(struct drm_crtc *crtc); 556extern void radeon_crtc_load_lut(struct drm_crtc *crtc);
diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c
index 498b284e5ef9..58434e804d91 100644
--- a/drivers/gpu/vga/vga_switcheroo.c
+++ b/drivers/gpu/vga/vga_switcheroo.c
@@ -215,7 +215,6 @@ static int vga_switchoff(struct vga_switcheroo_client *client)
215/* stage one happens before delay */ 215/* stage one happens before delay */
216static int vga_switchto_stage1(struct vga_switcheroo_client *new_client) 216static int vga_switchto_stage1(struct vga_switcheroo_client *new_client)
217{ 217{
218 int ret;
219 int i; 218 int i;
220 struct vga_switcheroo_client *active = NULL; 219 struct vga_switcheroo_client *active = NULL;
221 220
@@ -228,11 +227,6 @@ static int vga_switchto_stage1(struct vga_switcheroo_client *new_client)
228 if (!active) 227 if (!active)
229 return 0; 228 return 0;
230 229
231 /* power up the first device */
232 ret = pci_enable_device(new_client->pdev);
233 if (ret)
234 return ret;
235
236 if (new_client->pwr_state == VGA_SWITCHEROO_OFF) 230 if (new_client->pwr_state == VGA_SWITCHEROO_OFF)
237 vga_switchon(new_client); 231 vga_switchon(new_client);
238 232
diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c
index be8d4cb5861c..8a1021f2e319 100644
--- a/drivers/gpu/vga/vgaarb.c
+++ b/drivers/gpu/vga/vgaarb.c
@@ -61,7 +61,7 @@ struct vga_device {
61 unsigned int mem_lock_cnt; /* legacy MEM lock count */ 61 unsigned int mem_lock_cnt; /* legacy MEM lock count */
62 unsigned int io_norm_cnt; /* normal IO count */ 62 unsigned int io_norm_cnt; /* normal IO count */
63 unsigned int mem_norm_cnt; /* normal MEM count */ 63 unsigned int mem_norm_cnt; /* normal MEM count */
64 64 bool bridge_has_one_vga;
65 /* allow IRQ enable/disable hook */ 65 /* allow IRQ enable/disable hook */
66 void *cookie; 66 void *cookie;
67 void (*irq_set_state)(void *cookie, bool enable); 67 void (*irq_set_state)(void *cookie, bool enable);
@@ -165,6 +165,8 @@ static struct vga_device *__vga_tryget(struct vga_device *vgadev,
165 unsigned int wants, legacy_wants, match; 165 unsigned int wants, legacy_wants, match;
166 struct vga_device *conflict; 166 struct vga_device *conflict;
167 unsigned int pci_bits; 167 unsigned int pci_bits;
168 u32 flags = 0;
169
168 /* Account for "normal" resources to lock. If we decode the legacy, 170 /* Account for "normal" resources to lock. If we decode the legacy,
169 * counterpart, we need to request it as well 171 * counterpart, we need to request it as well
170 */ 172 */
@@ -237,16 +239,23 @@ static struct vga_device *__vga_tryget(struct vga_device *vgadev,
237 /* looks like he doesn't have a lock, we can steal 239 /* looks like he doesn't have a lock, we can steal
238 * them from him 240 * them from him
239 */ 241 */
240 vga_irq_set_state(conflict, false);
241 242
243 flags = 0;
242 pci_bits = 0; 244 pci_bits = 0;
243 if (lwants & (VGA_RSRC_LEGACY_MEM|VGA_RSRC_NORMAL_MEM))
244 pci_bits |= PCI_COMMAND_MEMORY;
245 if (lwants & (VGA_RSRC_LEGACY_IO|VGA_RSRC_NORMAL_IO))
246 pci_bits |= PCI_COMMAND_IO;
247 245
248 pci_set_vga_state(conflict->pdev, false, pci_bits, 246 if (!conflict->bridge_has_one_vga) {
249 change_bridge); 247 vga_irq_set_state(conflict, false);
248 flags |= PCI_VGA_STATE_CHANGE_DECODES;
249 if (lwants & (VGA_RSRC_LEGACY_MEM|VGA_RSRC_NORMAL_MEM))
250 pci_bits |= PCI_COMMAND_MEMORY;
251 if (lwants & (VGA_RSRC_LEGACY_IO|VGA_RSRC_NORMAL_IO))
252 pci_bits |= PCI_COMMAND_IO;
253 }
254
255 if (change_bridge)
256 flags |= PCI_VGA_STATE_CHANGE_BRIDGE;
257
258 pci_set_vga_state(conflict->pdev, false, pci_bits, flags);
250 conflict->owns &= ~lwants; 259 conflict->owns &= ~lwants;
251 /* If he also owned non-legacy, that is no longer the case */ 260 /* If he also owned non-legacy, that is no longer the case */
252 if (lwants & VGA_RSRC_LEGACY_MEM) 261 if (lwants & VGA_RSRC_LEGACY_MEM)
@@ -261,14 +270,24 @@ enable_them:
261 * also have in "decodes". We can lock resources we don't decode but 270 * also have in "decodes". We can lock resources we don't decode but
262 * not own them. 271 * not own them.
263 */ 272 */
273 flags = 0;
264 pci_bits = 0; 274 pci_bits = 0;
265 if (wants & (VGA_RSRC_LEGACY_MEM|VGA_RSRC_NORMAL_MEM))
266 pci_bits |= PCI_COMMAND_MEMORY;
267 if (wants & (VGA_RSRC_LEGACY_IO|VGA_RSRC_NORMAL_IO))
268 pci_bits |= PCI_COMMAND_IO;
269 pci_set_vga_state(vgadev->pdev, true, pci_bits, !!(wants & VGA_RSRC_LEGACY_MASK));
270 275
271 vga_irq_set_state(vgadev, true); 276 if (!vgadev->bridge_has_one_vga) {
277 flags |= PCI_VGA_STATE_CHANGE_DECODES;
278 if (wants & (VGA_RSRC_LEGACY_MEM|VGA_RSRC_NORMAL_MEM))
279 pci_bits |= PCI_COMMAND_MEMORY;
280 if (wants & (VGA_RSRC_LEGACY_IO|VGA_RSRC_NORMAL_IO))
281 pci_bits |= PCI_COMMAND_IO;
282 }
283 if (!!(wants & VGA_RSRC_LEGACY_MASK))
284 flags |= PCI_VGA_STATE_CHANGE_BRIDGE;
285
286 pci_set_vga_state(vgadev->pdev, true, pci_bits, flags);
287
288 if (!vgadev->bridge_has_one_vga) {
289 vga_irq_set_state(vgadev, true);
290 }
272 vgadev->owns |= (wants & vgadev->decodes); 291 vgadev->owns |= (wants & vgadev->decodes);
273lock_them: 292lock_them:
274 vgadev->locks |= (rsrc & VGA_RSRC_LEGACY_MASK); 293 vgadev->locks |= (rsrc & VGA_RSRC_LEGACY_MASK);
@@ -421,6 +440,62 @@ bail:
421} 440}
422EXPORT_SYMBOL(vga_put); 441EXPORT_SYMBOL(vga_put);
423 442
443/* Rules for using a bridge to control a VGA descendant's decoding:
444 if a bridge has only one VGA descendant then it can be used
445 to control the VGA routing for that device.
446 We should always use the bridge closest to the device to control it.
447 If a bridge has a direct VGA descendant, but also has a sub-bridge
448 VGA descendant then we cannot use that bridge to control the direct VGA descendant.
449 So for every device we register, we need to iterate all its parent bridges
450 so we can properly invalidate any other devices that use them.
451*/
452static void vga_arbiter_check_bridge_sharing(struct vga_device *vgadev)
453{
454 struct vga_device *same_bridge_vgadev;
455 struct pci_bus *new_bus, *bus;
456 struct pci_dev *new_bridge, *bridge;
457
458 vgadev->bridge_has_one_vga = true;
459
460 if (list_empty(&vga_list))
461 return;
462
463 /* okay, iterate the new device's bridge hierarchy */
464 new_bus = vgadev->pdev->bus;
465 while (new_bus) {
466 new_bridge = new_bus->self;
467
468 if (new_bridge) {
469 /* go through list of devices already registered */
470 list_for_each_entry(same_bridge_vgadev, &vga_list, list) {
471 bus = same_bridge_vgadev->pdev->bus;
472 bridge = bus->self;
473
474 /* see if they share a bridge with this device */
475 if (new_bridge == bridge) {
476 /* if their direct parent bridge is the same
477 as any bridge of this device then it can't be used
478 for that device */
479 same_bridge_vgadev->bridge_has_one_vga = false;
480 }
481
482 /* now iterate the previous device's bridge hierarchy */
483 /* if the new device's parent bridge is in the other device's
484 hierarchy then we can't use it to control this device */
485 while (bus) {
486 bridge = bus->self;
487 if (bridge) {
488 if (bridge == vgadev->pdev->bus->self)
489 vgadev->bridge_has_one_vga = false;
490 }
491 bus = bus->parent;
492 }
493 }
494 }
495 new_bus = new_bus->parent;
496 }
497}
498
424/* 499/*
425 * Currently, we assume that the "initial" setup of the system is 500 * Currently, we assume that the "initial" setup of the system is
426 * not sane, that is we come up with conflicting devices and let 501 * not sane, that is we come up with conflicting devices and let
@@ -500,6 +575,8 @@ static bool vga_arbiter_add_pci_device(struct pci_dev *pdev)
500 vga_default = pci_dev_get(pdev); 575 vga_default = pci_dev_get(pdev);
501#endif 576#endif
502 577
578 vga_arbiter_check_bridge_sharing(vgadev);
579
503 /* Add to the list */ 580 /* Add to the list */
504 list_add(&vgadev->list, &vga_list); 581 list_add(&vgadev->list, &vga_list);
505 vga_count++; 582 vga_count++;
@@ -1222,6 +1299,7 @@ static int __init vga_arb_device_init(void)
1222{ 1299{
1223 int rc; 1300 int rc;
1224 struct pci_dev *pdev; 1301 struct pci_dev *pdev;
1302 struct vga_device *vgadev;
1225 1303
1226 rc = misc_register(&vga_arb_device); 1304 rc = misc_register(&vga_arb_device);
1227 if (rc < 0) 1305 if (rc < 0)
@@ -1238,6 +1316,13 @@ static int __init vga_arb_device_init(void)
1238 vga_arbiter_add_pci_device(pdev); 1316 vga_arbiter_add_pci_device(pdev);
1239 1317
1240 pr_info("vgaarb: loaded\n"); 1318 pr_info("vgaarb: loaded\n");
1319
1320 list_for_each_entry(vgadev, &vga_list, list) {
1321 if (vgadev->bridge_has_one_vga)
1322 pr_info("vgaarb: bridge control possible %s\n", pci_name(vgadev->pdev));
1323 else
1324 pr_info("vgaarb: no bridge control possible %s\n", pci_name(vgadev->pdev));
1325 }
1241 return rc; 1326 return rc;
1242} 1327}
1243subsys_initcall(vga_arb_device_init); 1328subsys_initcall(vga_arb_device_init);
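The arbiter changes above reduce to one test: does some bridge above one VGA device also sit in the bridge chain of another? A condensed sketch of that walk, using only the struct pci_dev/struct pci_bus linkage seen in vga_arbiter_check_bridge_sharing() (the helper name is hypothetical):

	/* true if a bridge above 'dev' is also the direct parent bridge
	 * of 'other'; such a bridge routes VGA for more than one device
	 * and cannot be used for per-device control */
	static bool vga_bridge_is_shared(struct pci_dev *dev,
					 struct pci_dev *other)
	{
		struct pci_bus *bus;

		for (bus = dev->bus; bus; bus = bus->parent) {
			if (bus->self && bus->self == other->bus->self)
				return true;
		}
		return false;
	}

When no sharing is found, bridge_has_one_vga stays true and __vga_tryget() can leave the device's PCI command bits and legacy IRQ state alone, switching routing at the bridge via PCI_VGA_STATE_CHANGE_BRIDGE instead.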
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 326652f673f7..646068e5100b 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -79,6 +79,7 @@ config I2C_AMD8111
79config I2C_I801 79config I2C_I801
80 tristate "Intel 82801 (ICH/PCH)" 80 tristate "Intel 82801 (ICH/PCH)"
81 depends on PCI 81 depends on PCI
82 select CHECK_SIGNATURE if X86 && DMI
82 help 83 help
83 If you say yes to this option, support will be included for the Intel 84 If you say yes to this option, support will be included for the Intel
84 801 family of mainboard I2C interfaces. Specifically, the following 85 801 family of mainboard I2C interfaces. Specifically, the following
@@ -101,6 +102,7 @@ config I2C_I801
101 6 Series (PCH) 102 6 Series (PCH)
102 Patsburg (PCH) 103 Patsburg (PCH)
103 DH89xxCC (PCH) 104 DH89xxCC (PCH)
105 Panther Point (PCH)
104 106
105 This driver can also be built as a module. If so, the module 107 This driver can also be built as a module. If so, the module
106 will be called i2c-i801. 108 will be called i2c-i801.
@@ -671,15 +673,19 @@ config I2C_XILINX
671 will be called xilinx_i2c. 673 will be called xilinx_i2c.
672 674
673config I2C_EG20T 675config I2C_EG20T
674 tristate "Intel EG20T PCH/OKI SEMICONDUCTOR ML7213 IOH" 676 tristate "Intel EG20T PCH / OKI SEMICONDUCTOR IOH(ML7213/ML7223)"
675 depends on PCI 677 depends on PCI
676 help 678 help
677 This driver is for PCH(Platform controller Hub) I2C of EG20T which 679 This driver is for PCH(Platform controller Hub) I2C of EG20T which
678 is an IOH(Input/Output Hub) for x86 embedded processor. 680 is an IOH(Input/Output Hub) for x86 embedded processor.
679 This driver can access PCH I2C bus device. 681 This driver can access PCH I2C bus device.
680 682
681 This driver also supports the ML7213, a companion chip for the 683 This driver can also be used for the OKI SEMICONDUCTOR IOH(Input/
682 Atom E6xx series and compatible with the Intel EG20T PCH. 684 Output Hub) devices ML7213 and ML7223.
685 The ML7213 IOH is for IVI(In-Vehicle Infotainment) use and the ML7223 IOH is
686 for MP(Media Phone) use.
687 The ML7213/ML7223 are companion chips for the Intel Atom E6xx series.
688 The ML7213/ML7223 are fully compatible with the Intel EG20T PCH.
683 689
684comment "External I2C/SMBus adapter drivers" 690comment "External I2C/SMBus adapter drivers"
685 691
diff --git a/drivers/i2c/busses/i2c-eg20t.c b/drivers/i2c/busses/i2c-eg20t.c
index 878a12026af2..8abfa4a03ce1 100644
--- a/drivers/i2c/busses/i2c-eg20t.c
+++ b/drivers/i2c/busses/i2c-eg20t.c
@@ -182,10 +182,12 @@ static DEFINE_MUTEX(pch_mutex);
182/* Definition for ML7213 by OKI SEMICONDUCTOR */ 182/* Definition for ML7213 by OKI SEMICONDUCTOR */
183#define PCI_VENDOR_ID_ROHM 0x10DB 183#define PCI_VENDOR_ID_ROHM 0x10DB
184#define PCI_DEVICE_ID_ML7213_I2C 0x802D 184#define PCI_DEVICE_ID_ML7213_I2C 0x802D
185#define PCI_DEVICE_ID_ML7223_I2C 0x8010
185 186
186static struct pci_device_id __devinitdata pch_pcidev_id[] = { 187static struct pci_device_id __devinitdata pch_pcidev_id[] = {
187 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_PCH_I2C), 1, }, 188 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_PCH_I2C), 1, },
188 { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_I2C), 2, }, 189 { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_I2C), 2, },
190 { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_I2C), 1, },
189 {0,} 191 {0,}
190}; 192};
191 193
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index ec36208c9977..ab26840d0c70 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -50,6 +50,7 @@
50 Patsburg (PCH) IDF 0x1d71 32 hard yes yes yes 50 Patsburg (PCH) IDF 0x1d71 32 hard yes yes yes
51 Patsburg (PCH) IDF 0x1d72 32 hard yes yes yes 51 Patsburg (PCH) IDF 0x1d72 32 hard yes yes yes
52 DH89xxCC (PCH) 0x2330 32 hard yes yes yes 52 DH89xxCC (PCH) 0x2330 32 hard yes yes yes
53 Panther Point (PCH) 0x1e22 32 hard yes yes yes
53 54
54 Features supported by this driver: 55 Features supported by this driver:
55 Software PEC no 56 Software PEC no
@@ -137,11 +138,11 @@
137/* Older devices have their ID defined in <linux/pci_ids.h> */ 138/* Older devices have their ID defined in <linux/pci_ids.h> */
138#define PCI_DEVICE_ID_INTEL_COUGARPOINT_SMBUS 0x1c22 139#define PCI_DEVICE_ID_INTEL_COUGARPOINT_SMBUS 0x1c22
139#define PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS 0x1d22 140#define PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS 0x1d22
140#define PCI_DEVICE_ID_INTEL_PANTHERPOINT_SMBUS 0x1e22
141/* Patsburg also has three 'Integrated Device Function' SMBus controllers */ 141/* Patsburg also has three 'Integrated Device Function' SMBus controllers */
142#define PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF0 0x1d70 142#define PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF0 0x1d70
143#define PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF1 0x1d71 143#define PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF1 0x1d71
144#define PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF2 0x1d72 144#define PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF2 0x1d72
145#define PCI_DEVICE_ID_INTEL_PANTHERPOINT_SMBUS 0x1e22
145#define PCI_DEVICE_ID_INTEL_DH89XXCC_SMBUS 0x2330 146#define PCI_DEVICE_ID_INTEL_DH89XXCC_SMBUS 0x2330
146#define PCI_DEVICE_ID_INTEL_5_3400_SERIES_SMBUS 0x3b30 147#define PCI_DEVICE_ID_INTEL_5_3400_SERIES_SMBUS 0x3b30
147 148
@@ -159,6 +160,8 @@ static struct pci_driver i801_driver;
159#define FEATURE_BLOCK_BUFFER (1 << 1) 160#define FEATURE_BLOCK_BUFFER (1 << 1)
160#define FEATURE_BLOCK_PROC (1 << 2) 161#define FEATURE_BLOCK_PROC (1 << 2)
161#define FEATURE_I2C_BLOCK_READ (1 << 3) 162#define FEATURE_I2C_BLOCK_READ (1 << 3)
163/* Not really a feature, but it's convenient to handle it as such */
164#define FEATURE_IDF (1 << 15)
162 165
163static const char *i801_feature_names[] = { 166static const char *i801_feature_names[] = {
164 "SMBus PEC", 167 "SMBus PEC",
@@ -629,12 +632,13 @@ static const struct pci_device_id i801_ids[] = {
629 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF1) }, 632 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF1) },
630 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF2) }, 633 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF2) },
631 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_DH89XXCC_SMBUS) }, 634 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_DH89XXCC_SMBUS) },
635 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PANTHERPOINT_SMBUS) },
632 { 0, } 636 { 0, }
633}; 637};
634 638
635MODULE_DEVICE_TABLE(pci, i801_ids); 639MODULE_DEVICE_TABLE(pci, i801_ids);
636 640
637#if defined CONFIG_INPUT_APANEL || defined CONFIG_INPUT_APANEL_MODULE 641#if defined CONFIG_X86 && defined CONFIG_DMI
638static unsigned char apanel_addr; 642static unsigned char apanel_addr;
639 643
640/* Scan the system ROM for the signature "FJKEYINF" */ 644/* Scan the system ROM for the signature "FJKEYINF" */
@@ -664,11 +668,7 @@ static void __init input_apanel_init(void)
664 } 668 }
665 iounmap(bios); 669 iounmap(bios);
666} 670}
667#else
668static void __init input_apanel_init(void) {}
669#endif
670 671
671#if defined CONFIG_SENSORS_FSCHMD || defined CONFIG_SENSORS_FSCHMD_MODULE
672struct dmi_onboard_device_info { 672struct dmi_onboard_device_info {
673 const char *name; 673 const char *name;
674 u8 type; 674 u8 type;
@@ -734,7 +734,30 @@ static void __devinit dmi_check_onboard_devices(const struct dmi_header *dm,
734 dmi_check_onboard_device(type, name, adap); 734 dmi_check_onboard_device(type, name, adap);
735 } 735 }
736} 736}
737#endif 737
738/* Register optional slaves */
739static void __devinit i801_probe_optional_slaves(struct i801_priv *priv)
740{
741 /* Only register slaves on main SMBus channel */
742 if (priv->features & FEATURE_IDF)
743 return;
744
745 if (apanel_addr) {
746 struct i2c_board_info info;
747
748 memset(&info, 0, sizeof(struct i2c_board_info));
749 info.addr = apanel_addr;
750 strlcpy(info.type, "fujitsu_apanel", I2C_NAME_SIZE);
751 i2c_new_device(&priv->adapter, &info);
752 }
753
754 if (dmi_name_in_vendors("FUJITSU"))
755 dmi_walk(dmi_check_onboard_devices, &priv->adapter);
756}
757#else
758static void __init input_apanel_init(void) {}
759static void __devinit i801_probe_optional_slaves(struct i801_priv *priv) {}
760#endif /* CONFIG_X86 && CONFIG_DMI */
738 761
739static int __devinit i801_probe(struct pci_dev *dev, 762static int __devinit i801_probe(struct pci_dev *dev,
740 const struct pci_device_id *id) 763 const struct pci_device_id *id)
@@ -754,6 +777,11 @@ static int __devinit i801_probe(struct pci_dev *dev,
754 777
755 priv->pci_dev = dev; 778 priv->pci_dev = dev;
756 switch (dev->device) { 779 switch (dev->device) {
780 case PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF0:
781 case PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF1:
782 case PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF2:
783 priv->features |= FEATURE_IDF;
784 /* fall through */
757 default: 785 default:
758 priv->features |= FEATURE_I2C_BLOCK_READ; 786 priv->features |= FEATURE_I2C_BLOCK_READ;
759 /* fall through */ 787 /* fall through */
@@ -839,21 +867,7 @@ static int __devinit i801_probe(struct pci_dev *dev,
839 goto exit_release; 867 goto exit_release;
840 } 868 }
841 869
842 /* Register optional slaves */ 870 i801_probe_optional_slaves(priv);
843#if defined CONFIG_INPUT_APANEL || defined CONFIG_INPUT_APANEL_MODULE
844 if (apanel_addr) {
845 struct i2c_board_info info;
846
847 memset(&info, 0, sizeof(struct i2c_board_info));
848 info.addr = apanel_addr;
849 strlcpy(info.type, "fujitsu_apanel", I2C_NAME_SIZE);
850 i2c_new_device(&priv->adapter, &info);
851 }
852#endif
853#if defined CONFIG_SENSORS_FSCHMD || defined CONFIG_SENSORS_FSCHMD_MODULE
854 if (dmi_name_in_vendors("FUJITSU"))
855 dmi_walk(dmi_check_onboard_devices, &priv->adapter);
856#endif
857 871
858 pci_set_drvdata(dev, priv); 872 pci_set_drvdata(dev, priv);
859 return 0; 873 return 0;
@@ -913,7 +927,8 @@ static struct pci_driver i801_driver = {
913 927
914static int __init i2c_i801_init(void) 928static int __init i2c_i801_init(void)
915{ 929{
916 input_apanel_init(); 930 if (dmi_name_in_vendors("FUJITSU"))
931 input_apanel_init();
917 return pci_register_driver(&i801_driver); 932 return pci_register_driver(&i801_driver);
918} 933}
919 934
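Slave registration is now keyed off a pseudo-feature bit so the three Patsburg IDF channels never instantiate board-level devices that belong on the main SMBus. A stripped-down sketch of the gating (only FEATURE_IDF and struct i801_priv come from the driver; the rest is illustrative):

	#define FEATURE_IDF	(1 << 15)	/* secondary IDF channel */

	static void register_board_slaves(struct i801_priv *priv)
	{
		/* motherboard devices hang off the main channel only */
		if (priv->features & FEATURE_IDF)
			return;

		/* ... i2c_new_device() calls, DMI walk, etc. ... */
	}

In i801_probe() the switch sets FEATURE_IDF from the PCI device ID and then falls through to pick up the common feature bits.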
diff --git a/drivers/i2c/busses/i2c-nomadik.c b/drivers/i2c/busses/i2c-nomadik.c
index e10e5cf3751a..0c731ca69f15 100644
--- a/drivers/i2c/busses/i2c-nomadik.c
+++ b/drivers/i2c/busses/i2c-nomadik.c
@@ -15,13 +15,14 @@
15#include <linux/init.h> 15#include <linux/init.h>
16#include <linux/module.h> 16#include <linux/module.h>
17#include <linux/platform_device.h> 17#include <linux/platform_device.h>
18#include <linux/delay.h>
19#include <linux/slab.h> 18#include <linux/slab.h>
20#include <linux/interrupt.h> 19#include <linux/interrupt.h>
21#include <linux/i2c.h> 20#include <linux/i2c.h>
22#include <linux/err.h> 21#include <linux/err.h>
23#include <linux/clk.h> 22#include <linux/clk.h>
24#include <linux/io.h> 23#include <linux/io.h>
24#include <linux/regulator/consumer.h>
25#include <linux/pm_runtime.h>
25 26
26#include <plat/i2c.h> 27#include <plat/i2c.h>
27 28
@@ -103,9 +104,6 @@
103/* maximum threshold value */ 104/* maximum threshold value */
104#define MAX_I2C_FIFO_THRESHOLD 15 105#define MAX_I2C_FIFO_THRESHOLD 15
105 106
106/* per-transfer delay, required for the hardware to stabilize */
107#define I2C_DELAY 150
108
109enum i2c_status { 107enum i2c_status {
110 I2C_NOP, 108 I2C_NOP,
111 I2C_ON_GOING, 109 I2C_ON_GOING,
@@ -120,9 +118,6 @@ enum i2c_operation {
120 I2C_READ = 0x01 118 I2C_READ = 0x01
121}; 119};
122 120
123/* controller response timeout in ms */
124#define I2C_TIMEOUT_MS 2000
125
126/** 121/**
127 * struct i2c_nmk_client - client specific data 122 * struct i2c_nmk_client - client specific data
128 * @slave_adr: 7-bit slave address 123 * @slave_adr: 7-bit slave address
@@ -151,6 +146,7 @@ struct i2c_nmk_client {
151 * @stop: stop condition 146 * @stop: stop condition
152 * @xfer_complete: acknowledge completion for a I2C message 147 * @xfer_complete: acknowledge completion for a I2C message
153 * @result: controller propagated result 148 * @result: controller propagated result
149 * @busy: Busy doing transfer
154 */ 150 */
155struct nmk_i2c_dev { 151struct nmk_i2c_dev {
156 struct platform_device *pdev; 152 struct platform_device *pdev;
@@ -163,6 +159,8 @@ struct nmk_i2c_dev {
163 int stop; 159 int stop;
164 struct completion xfer_complete; 160 struct completion xfer_complete;
165 int result; 161 int result;
162 struct regulator *regulator;
163 bool busy;
166}; 164};
167 165
168/* controller's abort causes */ 166/* controller's abort causes */
@@ -209,7 +207,7 @@ static int flush_i2c_fifo(struct nmk_i2c_dev *dev)
209 writel((I2C_CR_FTX | I2C_CR_FRX), dev->virtbase + I2C_CR); 207 writel((I2C_CR_FTX | I2C_CR_FRX), dev->virtbase + I2C_CR);
210 208
211 for (i = 0; i < LOOP_ATTEMPTS; i++) { 209 for (i = 0; i < LOOP_ATTEMPTS; i++) {
212 timeout = jiffies + msecs_to_jiffies(I2C_TIMEOUT_MS); 210 timeout = jiffies + dev->adap.timeout;
213 211
214 while (!time_after(jiffies, timeout)) { 212 while (!time_after(jiffies, timeout)) {
215 if ((readl(dev->virtbase + I2C_CR) & 213 if ((readl(dev->virtbase + I2C_CR) &
@@ -253,11 +251,9 @@ static int init_hw(struct nmk_i2c_dev *dev)
253{ 251{
254 int stat; 252 int stat;
255 253
256 clk_enable(dev->clk);
257
258 stat = flush_i2c_fifo(dev); 254 stat = flush_i2c_fifo(dev);
259 if (stat) 255 if (stat)
260 return stat; 256 goto exit;
261 257
262 /* disable the controller */ 258 /* disable the controller */
263 i2c_clr_bit(dev->virtbase + I2C_CR , I2C_CR_PE); 259 i2c_clr_bit(dev->virtbase + I2C_CR , I2C_CR_PE);
@@ -268,10 +264,8 @@ static int init_hw(struct nmk_i2c_dev *dev)
268 264
269 dev->cli.operation = I2C_NO_OPERATION; 265 dev->cli.operation = I2C_NO_OPERATION;
270 266
271 clk_disable(dev->clk); 267exit:
272 268 return stat;
273 udelay(I2C_DELAY);
274 return 0;
275} 269}
276 270
277/* enable peripheral, master mode operation */ 271/* enable peripheral, master mode operation */
@@ -424,7 +418,7 @@ static int read_i2c(struct nmk_i2c_dev *dev)
424 dev->virtbase + I2C_IMSCR); 418 dev->virtbase + I2C_IMSCR);
425 419
426 timeout = wait_for_completion_interruptible_timeout( 420 timeout = wait_for_completion_interruptible_timeout(
427 &dev->xfer_complete, msecs_to_jiffies(I2C_TIMEOUT_MS)); 421 &dev->xfer_complete, dev->adap.timeout);
428 422
429 if (timeout < 0) { 423 if (timeout < 0) {
430 dev_err(&dev->pdev->dev, 424 dev_err(&dev->pdev->dev,
@@ -434,14 +428,32 @@ static int read_i2c(struct nmk_i2c_dev *dev)
434 } 428 }
435 429
436 if (timeout == 0) { 430 if (timeout == 0) {
437 /* controller has timedout, re-init the h/w */ 431 /* Controller timed out */
438 dev_err(&dev->pdev->dev, "controller timed out, re-init h/w\n"); 432 dev_err(&dev->pdev->dev, "read from slave 0x%x timed out\n",
439 (void) init_hw(dev); 433 dev->cli.slave_adr);
440 status = -ETIMEDOUT; 434 status = -ETIMEDOUT;
441 } 435 }
442 return status; 436 return status;
443} 437}
444 438
439static void fill_tx_fifo(struct nmk_i2c_dev *dev, int no_bytes)
440{
441 int count;
442
443 for (count = (no_bytes - 2);
444 (count > 0) &&
445 (dev->cli.count != 0);
446 count--) {
447 /* write to the Tx FIFO */
448 writeb(*dev->cli.buffer,
449 dev->virtbase + I2C_TFR);
450 dev->cli.buffer++;
451 dev->cli.count--;
452 dev->cli.xfer_bytes++;
453 }
454
455}
456
445/** 457/**
446 * write_i2c() - Write data to I2C client. 458 * write_i2c() - Write data to I2C client.
447 * @dev: private data of I2C Driver 459 * @dev: private data of I2C Driver
@@ -469,8 +481,13 @@ static int write_i2c(struct nmk_i2c_dev *dev)
469 init_completion(&dev->xfer_complete); 481 init_completion(&dev->xfer_complete);
470 482
471 /* enable interrupts by setting the masks */ 483 irq_mask = (I2C_IT_TXFOVR | I2C_IT_MAL | I2C_IT_BERR);
472 irq_mask = (I2C_IT_TXFNE | I2C_IT_TXFOVR | 484 irq_mask = (I2C_IT_TXFOVR | I2C_IT_MAL | I2C_IT_BERR);
473 I2C_IT_MAL | I2C_IT_BERR); 485
486 /* Fill the TX FIFO with transmit data */
487 fill_tx_fifo(dev, MAX_I2C_FIFO_THRESHOLD);
488
489 if (dev->cli.count != 0)
490 irq_mask |= I2C_IT_TXFNE;
474 491
475 /* 492 /*
476 * check if we want to transfer a single or multiple bytes, if so 493 * check if we want to transfer a single or multiple bytes, if so
@@ -488,7 +505,7 @@ static int write_i2c(struct nmk_i2c_dev *dev)
488 dev->virtbase + I2C_IMSCR); 505 dev->virtbase + I2C_IMSCR);
489 506
490 timeout = wait_for_completion_interruptible_timeout( 507 timeout = wait_for_completion_interruptible_timeout(
491 &dev->xfer_complete, msecs_to_jiffies(I2C_TIMEOUT_MS)); 508 &dev->xfer_complete, dev->adap.timeout);
492 509
493 if (timeout < 0) { 510 if (timeout < 0) {
494 dev_err(&dev->pdev->dev, 511 dev_err(&dev->pdev->dev,
@@ -498,9 +515,9 @@ static int write_i2c(struct nmk_i2c_dev *dev)
498 } 515 }
499 516
500 if (timeout == 0) { 517 if (timeout == 0) {
501 /* controller has timedout, re-init the h/w */ 518 /* Controller timed out */
502 dev_err(&dev->pdev->dev, "controller timed out, re-init h/w\n"); 519 dev_err(&dev->pdev->dev, "write to slave 0x%x timed out\n",
503 (void) init_hw(dev); 520 dev->cli.slave_adr);
504 status = -ETIMEDOUT; 521 status = -ETIMEDOUT;
505 } 522 }
506 523
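write_i2c() now primes the TX FIFO before unmasking interrupts, so TXFNE is only requested when more data remains than one FIFO can hold. The ordering, condensed from the hunk above:

	irq_mask = I2C_IT_TXFOVR | I2C_IT_MAL | I2C_IT_BERR;

	/* preload up to one FIFO's worth of data ... */
	fill_tx_fifo(dev, MAX_I2C_FIFO_THRESHOLD);

	/* ... and take refill interrupts only if anything is left */
	if (dev->cli.count != 0)
		irq_mask |= I2C_IT_TXFNE;

Short writes therefore complete without a single TXFNE interrupt.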
@@ -508,6 +525,51 @@ static int write_i2c(struct nmk_i2c_dev *dev)
508} 525}
509 526
510/** 527/**
528 * nmk_i2c_xfer_one() - transmit a single I2C message
529 * @dev: device with a message encoded into it
530 * @flags: message flags
531 */
532static int nmk_i2c_xfer_one(struct nmk_i2c_dev *dev, u16 flags)
533{
534 int status;
535
536 if (flags & I2C_M_RD) {
537 /* read operation */
538 dev->cli.operation = I2C_READ;
539 status = read_i2c(dev);
540 } else {
541 /* write operation */
542 dev->cli.operation = I2C_WRITE;
543 status = write_i2c(dev);
544 }
545
546 if (status || (dev->result)) {
547 u32 i2c_sr;
548 u32 cause;
549
550 i2c_sr = readl(dev->virtbase + I2C_SR);
551 /*
552 * Check if the controller I2C operation status
553 * is set to ABORT(11b).
554 */
555 if (((i2c_sr >> 2) & 0x3) == 0x3) {
556 /* get the abort cause */
557 cause = (i2c_sr >> 4) & 0x7;
558 dev_err(&dev->pdev->dev, "%s\n", cause
559 >= ARRAY_SIZE(abort_causes) ?
560 "unknown reason" :
561 abort_causes[cause]);
562 }
563
564 (void) init_hw(dev);
565
566 status = status ? status : dev->result;
567 }
568
569 return status;
570}
571
572/**
511 * nmk_i2c_xfer() - I2C transfer function used by kernel framework 573 * nmk_i2c_xfer() - I2C transfer function used by kernel framework
512 * @i2c_adap: Adapter pointer to the controller 574 * @i2c_adap: Adapter pointer to the controller
513 * @msgs: Pointer to data to be written. 575 * @msgs: Pointer to data to be written.
@@ -559,53 +621,55 @@ static int nmk_i2c_xfer(struct i2c_adapter *i2c_adap,
559{ 621{
560 int status; 622 int status;
561 int i; 623 int i;
562 u32 cause;
563 struct nmk_i2c_dev *dev = i2c_get_adapdata(i2c_adap); 624 struct nmk_i2c_dev *dev = i2c_get_adapdata(i2c_adap);
625 int j;
626
627 dev->busy = true;
628
629 if (dev->regulator)
630 regulator_enable(dev->regulator);
631 pm_runtime_get_sync(&dev->pdev->dev);
632
633 clk_enable(dev->clk);
564 634
565 status = init_hw(dev); 635 status = init_hw(dev);
566 if (status) 636 if (status)
567 return status; 637 goto out;
568 638
569 clk_enable(dev->clk); 639 /* Attempt three times to send the message queue */
640 for (j = 0; j < 3; j++) {
641 /* setup the i2c controller */
642 setup_i2c_controller(dev);
570 643
571 /* setup the i2c controller */ 644 for (i = 0; i < num_msgs; i++) {
572 setup_i2c_controller(dev); 645 if (unlikely(msgs[i].flags & I2C_M_TEN)) {
646 dev_err(&dev->pdev->dev, "10 bit addressing"
647 "not supported\n");
573 648
574 for (i = 0; i < num_msgs; i++) { 649 status = -EINVAL;
575 if (unlikely(msgs[i].flags & I2C_M_TEN)) { 650 goto out;
576 dev_err(&dev->pdev->dev, "10 bit addressing" 651 }
577 "not supported\n"); 652 dev->cli.slave_adr = msgs[i].addr;
578 return -EINVAL; 653 dev->cli.buffer = msgs[i].buf;
579 } 654 dev->cli.count = msgs[i].len;
580 dev->cli.slave_adr = msgs[i].addr; 655 dev->stop = (i < (num_msgs - 1)) ? 0 : 1;
581 dev->cli.buffer = msgs[i].buf; 656 dev->result = 0;
582 dev->cli.count = msgs[i].len; 657
583 dev->stop = (i < (num_msgs - 1)) ? 0 : 1; 658 status = nmk_i2c_xfer_one(dev, msgs[i].flags);
584 dev->result = 0; 659 if (status != 0)
585 660 break;
586 if (msgs[i].flags & I2C_M_RD) {
587 /* it is a read operation */
588 dev->cli.operation = I2C_READ;
589 status = read_i2c(dev);
590 } else {
591 /* write operation */
592 dev->cli.operation = I2C_WRITE;
593 status = write_i2c(dev);
594 }
595 if (status || (dev->result)) {
596 /* get the abort cause */
597 cause = (readl(dev->virtbase + I2C_SR) >> 4) & 0x7;
598 dev_err(&dev->pdev->dev, "error during I2C"
599 "message xfer: %d\n", cause);
600 dev_err(&dev->pdev->dev, "%s\n",
601 cause >= ARRAY_SIZE(abort_causes)
602 ? "unknown reason" : abort_causes[cause]);
603 clk_disable(dev->clk);
604 return status;
605 } 661 }
606 udelay(I2C_DELAY); 662 if (status == 0)
663 break;
607 } 664 }
665
666out:
608 clk_disable(dev->clk); 667 clk_disable(dev->clk);
668 pm_runtime_put_sync(&dev->pdev->dev);
669 if (dev->regulator)
670 regulator_disable(dev->regulator);
671
672 dev->busy = false;
609 673
610 /* return the no. messages processed */ 674 /* return the no. messages processed */
611 if (status) 675 if (status)
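nmk_i2c_xfer() now retries the whole message queue instead of failing the transaction on the first error. A reduced sketch of the control flow, with the per-message client setup elided (nmk_i2c_xfer_one() is the helper added earlier in this diff):

	/* attempt the complete message queue up to three times */
	for (j = 0; j < 3; j++) {
		setup_i2c_controller(dev);

		for (i = 0; i < num_msgs; i++) {
			/* load slave address, buffer and count here */
			status = nmk_i2c_xfer_one(dev, msgs[i].flags);
			if (status != 0)
				break;	/* abort this pass */
		}
		if (status == 0)
			break;		/* whole queue went through */
	}

Each pass reprograms the controller first, since nmk_i2c_xfer_one() re-initializes the hardware after a failure.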
@@ -666,17 +730,7 @@ static irqreturn_t i2c_irq_handler(int irq, void *arg)
666 */ 730 */
667 disable_interrupts(dev, I2C_IT_TXFNE); 731 disable_interrupts(dev, I2C_IT_TXFNE);
668 } else { 732 } else {
669 for (count = (MAX_I2C_FIFO_THRESHOLD - tft - 2); 733 fill_tx_fifo(dev, (MAX_I2C_FIFO_THRESHOLD - tft));
670 (count > 0) &&
671 (dev->cli.count != 0);
672 count--) {
673 /* write to the Tx FIFO */
674 writeb(*dev->cli.buffer,
675 dev->virtbase + I2C_TFR);
676 dev->cli.buffer++;
677 dev->cli.count--;
678 dev->cli.xfer_bytes++;
679 }
680 /* 734 /*
681 * if done, close the transfer by disabling the 735 * if done, close the transfer by disabling the
682 * corresponding TXFNE interrupt 736 * corresponding TXFNE interrupt
@@ -729,16 +783,11 @@ static irqreturn_t i2c_irq_handler(int irq, void *arg)
729 } 783 }
730 } 784 }
731 785
732 i2c_set_bit(dev->virtbase + I2C_ICR, I2C_IT_MTD); 786 disable_all_interrupts(dev);
733 i2c_set_bit(dev->virtbase + I2C_ICR, I2C_IT_MTDWS); 787 clear_all_interrupts(dev);
734
735 disable_interrupts(dev,
736 (I2C_IT_TXFNE | I2C_IT_TXFE | I2C_IT_TXFF
737 | I2C_IT_TXFOVR | I2C_IT_RXFNF
738 | I2C_IT_RXFF | I2C_IT_RXFE));
739 788
740 if (dev->cli.count) { 789 if (dev->cli.count) {
741 dev->result = -1; 790 dev->result = -EIO;
742 dev_err(&dev->pdev->dev, "%lu bytes still remain to be" 791 dev_err(&dev->pdev->dev, "%lu bytes still remain to be"
743 "xfered\n", dev->cli.count); 792 "xfered\n", dev->cli.count);
744 (void) init_hw(dev); 793 (void) init_hw(dev);
@@ -749,7 +798,7 @@ static irqreturn_t i2c_irq_handler(int irq, void *arg)
749 798
750 /* Master Arbitration lost interrupt */ 799 /* Master Arbitration lost interrupt */
751 case I2C_IT_MAL: 800 case I2C_IT_MAL:
752 dev->result = -1; 801 dev->result = -EIO;
753 (void) init_hw(dev); 802 (void) init_hw(dev);
754 803
755 i2c_set_bit(dev->virtbase + I2C_ICR, I2C_IT_MAL); 804 i2c_set_bit(dev->virtbase + I2C_ICR, I2C_IT_MAL);
@@ -763,7 +812,7 @@ static irqreturn_t i2c_irq_handler(int irq, void *arg)
763 * during the transaction. 812 * during the transaction.
764 */ 813 */
765 case I2C_IT_BERR: 814 case I2C_IT_BERR:
766 dev->result = -1; 815 dev->result = -EIO;
767 /* get the status */ 816 /* get the status */
768 if (((readl(dev->virtbase + I2C_SR) >> 2) & 0x3) == I2C_ABORT) 817 if (((readl(dev->virtbase + I2C_SR) >> 2) & 0x3) == I2C_ABORT)
769 (void) init_hw(dev); 818 (void) init_hw(dev);
@@ -779,7 +828,7 @@ static irqreturn_t i2c_irq_handler(int irq, void *arg)
779 * the Tx FIFO is full. 828 * the Tx FIFO is full.
780 */ 829 */
781 case I2C_IT_TXFOVR: 830 case I2C_IT_TXFOVR:
782 dev->result = -1; 831 dev->result = -EIO;
783 (void) init_hw(dev); 832 (void) init_hw(dev);
784 833
785 dev_err(&dev->pdev->dev, "Tx Fifo Over run\n"); 834 dev_err(&dev->pdev->dev, "Tx Fifo Over run\n");
@@ -805,6 +854,38 @@ static irqreturn_t i2c_irq_handler(int irq, void *arg)
805 return IRQ_HANDLED; 854 return IRQ_HANDLED;
806} 855}
807 856
857
858#ifdef CONFIG_PM
859static int nmk_i2c_suspend(struct device *dev)
860{
861 struct platform_device *pdev = to_platform_device(dev);
862 struct nmk_i2c_dev *nmk_i2c = platform_get_drvdata(pdev);
863
864 if (nmk_i2c->busy)
865 return -EBUSY;
866
867 return 0;
868}
869
870static int nmk_i2c_resume(struct device *dev)
871{
872 return 0;
873}
874#else
875#define nmk_i2c_suspend NULL
876#define nmk_i2c_resume NULL
877#endif
878
879/*
880 * We use noirq so that we suspend late and resume before the wakeup interrupt
881 * to ensure that we do the !pm_runtime_suspended() check in resume before
882 * there has been a regular pm runtime resume (via pm_runtime_get_sync()).
883 */
884static const struct dev_pm_ops nmk_i2c_pm = {
885 .suspend_noirq = nmk_i2c_suspend,
886 .resume_noirq = nmk_i2c_resume,
887};
888
808static unsigned int nmk_i2c_functionality(struct i2c_adapter *adap) 889static unsigned int nmk_i2c_functionality(struct i2c_adapter *adap)
809{ 890{
810 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; 891 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
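The new dev_pm_ops veto a system suspend while a transfer is in flight; the busy flag is set and cleared around the transfer in nmk_i2c_xfer(). Condensed, the guard is just:

	static int nmk_i2c_suspend(struct device *dev)
	{
		struct platform_device *pdev = to_platform_device(dev);
		struct nmk_i2c_dev *nmk_i2c = platform_get_drvdata(pdev);

		/* refuse to suspend mid-transfer */
		return nmk_i2c->busy ? -EBUSY : 0;
	}

Hooking it in as suspend_noirq makes the check run late in the suspend sequence, per the comment above nmk_i2c_pm.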
@@ -830,7 +911,7 @@ static int __devinit nmk_i2c_probe(struct platform_device *pdev)
830 ret = -ENOMEM; 911 ret = -ENOMEM;
831 goto err_no_mem; 912 goto err_no_mem;
832 } 913 }
833 914 dev->busy = false;
834 dev->pdev = pdev; 915 dev->pdev = pdev;
835 platform_set_drvdata(pdev, dev); 916 platform_set_drvdata(pdev, dev);
836 917
@@ -860,6 +941,15 @@ static int __devinit nmk_i2c_probe(struct platform_device *pdev)
860 goto err_irq; 941 goto err_irq;
861 } 942 }
862 943
944 dev->regulator = regulator_get(&pdev->dev, "v-i2c");
945 if (IS_ERR(dev->regulator)) {
946 dev_warn(&pdev->dev, "could not get i2c regulator\n");
947 dev->regulator = NULL;
948 }
949
950 pm_suspend_ignore_children(&pdev->dev, true);
951 pm_runtime_enable(&pdev->dev);
952
863 dev->clk = clk_get(&pdev->dev, NULL); 953 dev->clk = clk_get(&pdev->dev, NULL);
864 if (IS_ERR(dev->clk)) { 954 if (IS_ERR(dev->clk)) {
865 dev_err(&pdev->dev, "could not get i2c clock\n"); 955 dev_err(&pdev->dev, "could not get i2c clock\n");
@@ -872,6 +962,8 @@ static int __devinit nmk_i2c_probe(struct platform_device *pdev)
872 adap->owner = THIS_MODULE; 962 adap->owner = THIS_MODULE;
873 adap->class = I2C_CLASS_HWMON | I2C_CLASS_SPD; 963 adap->class = I2C_CLASS_HWMON | I2C_CLASS_SPD;
874 adap->algo = &nmk_i2c_algo; 964 adap->algo = &nmk_i2c_algo;
965 adap->timeout = pdata->timeout ? msecs_to_jiffies(pdata->timeout) :
966 msecs_to_jiffies(20000);
875 snprintf(adap->name, sizeof(adap->name), 967 snprintf(adap->name, sizeof(adap->name),
876 "Nomadik I2C%d at %lx", pdev->id, (unsigned long)res->start); 968 "Nomadik I2C%d at %lx", pdev->id, (unsigned long)res->start);
877 969
@@ -887,12 +979,6 @@ static int __devinit nmk_i2c_probe(struct platform_device *pdev)
887 979
888 i2c_set_adapdata(adap, dev); 980 i2c_set_adapdata(adap, dev);
889 981
890 ret = init_hw(dev);
891 if (ret != 0) {
892 dev_err(&pdev->dev, "error in initializing i2c hardware\n");
893 goto err_init_hw;
894 }
895
896 dev_info(&pdev->dev, "initialize %s on virtual " 982 dev_info(&pdev->dev, "initialize %s on virtual "
897 "base %p\n", adap->name, dev->virtbase); 983 "base %p\n", adap->name, dev->virtbase);
898 984
@@ -904,10 +990,12 @@ static int __devinit nmk_i2c_probe(struct platform_device *pdev)
904 990
905 return 0; 991 return 0;
906 992
907 err_init_hw:
908 err_add_adap: 993 err_add_adap:
909 clk_put(dev->clk); 994 clk_put(dev->clk);
910 err_no_clk: 995 err_no_clk:
996 if (dev->regulator)
997 regulator_put(dev->regulator);
998 pm_runtime_disable(&pdev->dev);
911 free_irq(dev->irq, dev); 999 free_irq(dev->irq, dev);
912 err_irq: 1000 err_irq:
913 iounmap(dev->virtbase); 1001 iounmap(dev->virtbase);
@@ -938,6 +1026,9 @@ static int __devexit nmk_i2c_remove(struct platform_device *pdev)
938 if (res) 1026 if (res)
939 release_mem_region(res->start, resource_size(res)); 1027 release_mem_region(res->start, resource_size(res));
940 clk_put(dev->clk); 1028 clk_put(dev->clk);
1029 if (dev->regulator)
1030 regulator_put(dev->regulator);
1031 pm_runtime_disable(&pdev->dev);
941 platform_set_drvdata(pdev, NULL); 1032 platform_set_drvdata(pdev, NULL);
942 kfree(dev); 1033 kfree(dev);
943 1034
@@ -948,6 +1039,7 @@ static struct platform_driver nmk_i2c_driver = {
948 .driver = { 1039 .driver = {
949 .owner = THIS_MODULE, 1040 .owner = THIS_MODULE,
950 .name = DRIVER_NAME, 1041 .name = DRIVER_NAME,
1042 .pm = &nmk_i2c_pm,
951 }, 1043 },
952 .probe = nmk_i2c_probe, 1044 .probe = nmk_i2c_probe,
953 .remove = __devexit_p(nmk_i2c_remove), 1045 .remove = __devexit_p(nmk_i2c_remove),
diff --git a/drivers/i2c/busses/i2c-parport-light.c b/drivers/i2c/busses/i2c-parport-light.c
index fc5fbd1012c9..4b95f7a63a3b 100644
--- a/drivers/i2c/busses/i2c-parport-light.c
+++ b/drivers/i2c/busses/i2c-parport-light.c
@@ -2,13 +2,13 @@
2 * i2c-parport-light.c I2C bus over parallel port * 2 * i2c-parport-light.c I2C bus over parallel port *
3 * ------------------------------------------------------------------------ * 3 * ------------------------------------------------------------------------ *
4 Copyright (C) 2003-2010 Jean Delvare <khali@linux-fr.org> 4 Copyright (C) 2003-2010 Jean Delvare <khali@linux-fr.org>
5 5
6 Based on older i2c-velleman.c driver 6 Based on older i2c-velleman.c driver
7 Copyright (C) 1995-2000 Simon G. Vogl 7 Copyright (C) 1995-2000 Simon G. Vogl
8 With some changes from: 8 With some changes from:
9 Frodo Looijaard <frodol@dds.nl> 9 Frodo Looijaard <frodol@dds.nl>
10 Kyösti Mälkki <kmalkki@cc.hut.fi> 10 Kyösti Mälkki <kmalkki@cc.hut.fi>
11 11
12 This program is free software; you can redistribute it and/or modify 12 This program is free software; you can redistribute it and/or modify
13 it under the terms of the GNU General Public License as published by 13 it under the terms of the GNU General Public License as published by
14 the Free Software Foundation; either version 2 of the License, or 14 the Free Software Foundation; either version 2 of the License, or
@@ -114,7 +114,7 @@ static struct i2c_algo_bit_data parport_algo_data = {
114 .getscl = parport_getscl, 114 .getscl = parport_getscl,
115 .udelay = 50, 115 .udelay = 50,
116 .timeout = HZ, 116 .timeout = HZ,
117}; 117};
118 118
119/* ----- Driver registration ---------------------------------------------- */ 119/* ----- Driver registration ---------------------------------------------- */
120 120
@@ -132,7 +132,7 @@ static struct i2c_smbus_alert_setup alert_data = {
132static struct i2c_client *ara; 132static struct i2c_client *ara;
133static struct lineop parport_ctrl_irq = { 133static struct lineop parport_ctrl_irq = {
134 .val = (1 << 4), 134 .val = (1 << 4),
135 .port = CTRL, 135 .port = PORT_CTRL,
136}; 136};
137 137
138static int __devinit i2c_parport_probe(struct platform_device *pdev) 138static int __devinit i2c_parport_probe(struct platform_device *pdev)
@@ -245,7 +245,7 @@ static int __init i2c_parport_init(void)
245 if (irq != 0) 245 if (irq != 0)
246 pr_info(DRVNAME ": using irq %d\n", irq); 246 pr_info(DRVNAME ": using irq %d\n", irq);
247 247
248 if (!adapter_parm[type].getscl.val) 248 if (!adapter_parm[type].getscl.val)
249 parport_algo_data.getscl = NULL; 249 parport_algo_data.getscl = NULL;
250 250
251 /* Sets global pdev as a side effect */ 251 /* Sets global pdev as a side effect */
diff --git a/drivers/i2c/busses/i2c-parport.c b/drivers/i2c/busses/i2c-parport.c
index 2dbba163b102..24565687ac9b 100644
--- a/drivers/i2c/busses/i2c-parport.c
+++ b/drivers/i2c/busses/i2c-parport.c
@@ -2,13 +2,13 @@
2 * i2c-parport.c I2C bus over parallel port * 2 * i2c-parport.c I2C bus over parallel port *
3 * ------------------------------------------------------------------------ * 3 * ------------------------------------------------------------------------ *
4 Copyright (C) 2003-2011 Jean Delvare <khali@linux-fr.org> 4 Copyright (C) 2003-2011 Jean Delvare <khali@linux-fr.org>
5 5
6 Based on older i2c-philips-par.c driver 6 Based on older i2c-philips-par.c driver
7 Copyright (C) 1995-2000 Simon G. Vogl 7 Copyright (C) 1995-2000 Simon G. Vogl
8 With some changes from: 8 With some changes from:
9 Frodo Looijaard <frodol@dds.nl> 9 Frodo Looijaard <frodol@dds.nl>
10 Kyösti Mälkki <kmalkki@cc.hut.fi> 10 Kyösti Mälkki <kmalkki@cc.hut.fi>
11 11
12 This program is free software; you can redistribute it and/or modify 12 This program is free software; you can redistribute it and/or modify
13 it under the terms of the GNU General Public License as published by 13 it under the terms of the GNU General Public License as published by
14 the Free Software Foundation; either version 2 of the License, or 14 the Free Software Foundation; either version 2 of the License, or
@@ -78,13 +78,13 @@ static unsigned char port_read_control(struct parport *p)
78 return parport_read_control(p); 78 return parport_read_control(p);
79} 79}
80 80
81static void (*port_write[])(struct parport *, unsigned char) = { 81static void (* const port_write[])(struct parport *, unsigned char) = {
82 port_write_data, 82 port_write_data,
83 NULL, 83 NULL,
84 port_write_control, 84 port_write_control,
85}; 85};
86 86
87static unsigned char (*port_read[])(struct parport *) = { 87static unsigned char (* const port_read[])(struct parport *) = {
88 port_read_data, 88 port_read_data,
89 port_read_status, 89 port_read_status,
90 port_read_control, 90 port_read_control,
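
The two arrays above form a small dispatch table indexed by the PORT_* register ids; PORT_STAT has no write handler because the status register is read-only. A minimal user-space sketch of the same pattern (names here are illustrative, not the driver's):

    #include <stdio.h>

    enum { PORT_DATA, PORT_STAT, PORT_CTRL };

    static void write_data(unsigned char v) { printf("data <- %02x\n", v); }
    static void write_ctrl(unsigned char v) { printf("ctrl <- %02x\n", v); }

    /* dispatch table indexed by port id; the status register is
     * read-only, hence the NULL slot */
    static void (* const port_write_tbl[])(unsigned char) = {
            [PORT_DATA] = write_data,
            [PORT_STAT] = NULL,
            [PORT_CTRL] = write_ctrl,
    };

    int main(void)
    {
            if (port_write_tbl[PORT_CTRL])
                    port_write_tbl[PORT_CTRL](0x10);
            return 0;
    }
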
@@ -147,7 +147,7 @@ static const struct i2c_algo_bit_data parport_algo_data = {
147 .getscl = parport_getscl, 147 .getscl = parport_getscl,
148 .udelay = 10, /* ~50 kbps */ 148 .udelay = 10, /* ~50 kbps */
149 .timeout = HZ, 149 .timeout = HZ,
150}; 150};
151 151
152/* ----- I2c and parallel port call-back functions and structures --------- */ 152/* ----- I2c and parallel port call-back functions and structures --------- */
153 153
@@ -164,10 +164,10 @@ void i2c_parport_irq(void *data)
164 "SMBus alert received but no ARA client!\n"); 164 "SMBus alert received but no ARA client!\n");
165} 165}
166 166
167static void i2c_parport_attach (struct parport *port) 167static void i2c_parport_attach(struct parport *port)
168{ 168{
169 struct i2c_par *adapter; 169 struct i2c_par *adapter;
170 170
171 adapter = kzalloc(sizeof(struct i2c_par), GFP_KERNEL); 171 adapter = kzalloc(sizeof(struct i2c_par), GFP_KERNEL);
172 if (adapter == NULL) { 172 if (adapter == NULL) {
173 printk(KERN_ERR "i2c-parport: Failed to kzalloc\n"); 173 printk(KERN_ERR "i2c-parport: Failed to kzalloc\n");
@@ -180,7 +180,7 @@ static void i2c_parport_attach (struct parport *port)
180 NULL, NULL, i2c_parport_irq, PARPORT_FLAG_EXCL, adapter); 180 NULL, NULL, i2c_parport_irq, PARPORT_FLAG_EXCL, adapter);
181 if (!adapter->pdev) { 181 if (!adapter->pdev) {
182 printk(KERN_ERR "i2c-parport: Unable to register with parport\n"); 182 printk(KERN_ERR "i2c-parport: Unable to register with parport\n");
183 goto ERROR0; 183 goto err_free;
184 } 184 }
185 185
186 /* Fill the rest of the structure */ 186 /* Fill the rest of the structure */
@@ -200,7 +200,7 @@ static void i2c_parport_attach (struct parport *port)
200 200
201 if (parport_claim_or_block(adapter->pdev) < 0) { 201 if (parport_claim_or_block(adapter->pdev) < 0) {
202 printk(KERN_ERR "i2c-parport: Could not claim parallel port\n"); 202 printk(KERN_ERR "i2c-parport: Could not claim parallel port\n");
203 goto ERROR1; 203 goto err_unregister;
204 } 204 }
205 205
206 /* Reset hardware to a sane state (SCL and SDA high) */ 206 /* Reset hardware to a sane state (SCL and SDA high) */
@@ -215,7 +215,7 @@ static void i2c_parport_attach (struct parport *port)
215 215
216 if (i2c_bit_add_bus(&adapter->adapter) < 0) { 216 if (i2c_bit_add_bus(&adapter->adapter) < 0) {
217 printk(KERN_ERR "i2c-parport: Unable to register with I2C\n"); 217 printk(KERN_ERR "i2c-parport: Unable to register with I2C\n");
218 goto ERROR1; 218 goto err_unregister;
219 } 219 }
220 220
221 /* Setup SMBus alert if supported */ 221 /* Setup SMBus alert if supported */
@@ -234,16 +234,16 @@ static void i2c_parport_attach (struct parport *port)
234 mutex_lock(&adapter_list_lock); 234 mutex_lock(&adapter_list_lock);
235 list_add_tail(&adapter->node, &adapter_list); 235 list_add_tail(&adapter->node, &adapter_list);
236 mutex_unlock(&adapter_list_lock); 236 mutex_unlock(&adapter_list_lock);
237 return; 237 return;
238 238
239ERROR1: 239 err_unregister:
240 parport_release(adapter->pdev); 240 parport_release(adapter->pdev);
241 parport_unregister_device(adapter->pdev); 241 parport_unregister_device(adapter->pdev);
242ERROR0: 242 err_free:
243 kfree(adapter); 243 kfree(adapter);
244} 244}
245 245
246static void i2c_parport_detach (struct parport *port) 246static void i2c_parport_detach(struct parport *port)
247{ 247{
248 struct i2c_par *adapter, *_n; 248 struct i2c_par *adapter, *_n;
249 249
@@ -260,7 +260,7 @@ static void i2c_parport_detach (struct parport *port)
260 /* Un-init if needed (power off...) */ 260 /* Un-init if needed (power off...) */
261 if (adapter_parm[type].init.val) 261 if (adapter_parm[type].init.val)
262 line_set(port, 0, &adapter_parm[type].init); 262 line_set(port, 0, &adapter_parm[type].init);
263 263
264 parport_release(adapter->pdev); 264 parport_release(adapter->pdev);
265 parport_unregister_device(adapter->pdev); 265 parport_unregister_device(adapter->pdev);
266 list_del(&adapter->node); 266 list_del(&adapter->node);
diff --git a/drivers/i2c/busses/i2c-parport.h b/drivers/i2c/busses/i2c-parport.h
index a9f66816546c..3fe652302ea7 100644
--- a/drivers/i2c/busses/i2c-parport.h
+++ b/drivers/i2c/busses/i2c-parport.h
@@ -2,7 +2,7 @@
2 * i2c-parport.h I2C bus over parallel port * 2 * i2c-parport.h I2C bus over parallel port *
3 * ------------------------------------------------------------------------ * 3 * ------------------------------------------------------------------------ *
4 Copyright (C) 2003-2010 Jean Delvare <khali@linux-fr.org> 4 Copyright (C) 2003-2010 Jean Delvare <khali@linux-fr.org>
5 5
6 This program is free software; you can redistribute it and/or modify 6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by 7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or 8 the Free Software Foundation; either version 2 of the License, or
@@ -18,13 +18,9 @@
18 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 18 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19 * ------------------------------------------------------------------------ */ 19 * ------------------------------------------------------------------------ */
20 20
21#ifdef DATA 21#define PORT_DATA 0
22#undef DATA 22#define PORT_STAT 1
23#endif 23#define PORT_CTRL 2
24
25#define DATA 0
26#define STAT 1
27#define CTRL 2
28 24
29struct lineop { 25struct lineop {
30 u8 val; 26 u8 val;
@@ -41,61 +37,61 @@ struct adapter_parm {
41 unsigned int smbus_alert:1; 37 unsigned int smbus_alert:1;
42}; 38};
43 39
44static struct adapter_parm adapter_parm[] = { 40static const struct adapter_parm adapter_parm[] = {
45 /* type 0: Philips adapter */ 41 /* type 0: Philips adapter */
46 { 42 {
47 .setsda = { 0x80, DATA, 1 }, 43 .setsda = { 0x80, PORT_DATA, 1 },
48 .setscl = { 0x08, CTRL, 0 }, 44 .setscl = { 0x08, PORT_CTRL, 0 },
49 .getsda = { 0x80, STAT, 0 }, 45 .getsda = { 0x80, PORT_STAT, 0 },
50 .getscl = { 0x08, STAT, 0 }, 46 .getscl = { 0x08, PORT_STAT, 0 },
51 }, 47 },
52 /* type 1: home brew teletext adapter */ 48 /* type 1: home brew teletext adapter */
53 { 49 {
54 .setsda = { 0x02, DATA, 0 }, 50 .setsda = { 0x02, PORT_DATA, 0 },
55 .setscl = { 0x01, DATA, 0 }, 51 .setscl = { 0x01, PORT_DATA, 0 },
56 .getsda = { 0x80, STAT, 1 }, 52 .getsda = { 0x80, PORT_STAT, 1 },
57 }, 53 },
58 /* type 2: Velleman K8000 adapter */ 54 /* type 2: Velleman K8000 adapter */
59 { 55 {
60 .setsda = { 0x02, CTRL, 1 }, 56 .setsda = { 0x02, PORT_CTRL, 1 },
61 .setscl = { 0x08, CTRL, 1 }, 57 .setscl = { 0x08, PORT_CTRL, 1 },
62 .getsda = { 0x10, STAT, 0 }, 58 .getsda = { 0x10, PORT_STAT, 0 },
63 }, 59 },
64 /* type 3: ELV adapter */ 60 /* type 3: ELV adapter */
65 { 61 {
66 .setsda = { 0x02, DATA, 1 }, 62 .setsda = { 0x02, PORT_DATA, 1 },
67 .setscl = { 0x01, DATA, 1 }, 63 .setscl = { 0x01, PORT_DATA, 1 },
68 .getsda = { 0x40, STAT, 1 }, 64 .getsda = { 0x40, PORT_STAT, 1 },
69 .getscl = { 0x08, STAT, 1 }, 65 .getscl = { 0x08, PORT_STAT, 1 },
70 }, 66 },
71 /* type 4: ADM1032 evaluation board */ 67 /* type 4: ADM1032 evaluation board */
72 { 68 {
73 .setsda = { 0x02, DATA, 1 }, 69 .setsda = { 0x02, PORT_DATA, 1 },
74 .setscl = { 0x01, DATA, 1 }, 70 .setscl = { 0x01, PORT_DATA, 1 },
75 .getsda = { 0x10, STAT, 1 }, 71 .getsda = { 0x10, PORT_STAT, 1 },
76 .init = { 0xf0, DATA, 0 }, 72 .init = { 0xf0, PORT_DATA, 0 },
77 .smbus_alert = 1, 73 .smbus_alert = 1,
78 }, 74 },
79 /* type 5: ADM1025, ADM1030 and ADM1031 evaluation boards */ 75 /* type 5: ADM1025, ADM1030 and ADM1031 evaluation boards */
80 { 76 {
81 .setsda = { 0x02, DATA, 1 }, 77 .setsda = { 0x02, PORT_DATA, 1 },
82 .setscl = { 0x01, DATA, 1 }, 78 .setscl = { 0x01, PORT_DATA, 1 },
83 .getsda = { 0x10, STAT, 1 }, 79 .getsda = { 0x10, PORT_STAT, 1 },
84 }, 80 },
85 /* type 6: Barco LPT->DVI (K5800236) adapter */ 81 /* type 6: Barco LPT->DVI (K5800236) adapter */
86 { 82 {
87 .setsda = { 0x02, DATA, 1 }, 83 .setsda = { 0x02, PORT_DATA, 1 },
88 .setscl = { 0x01, DATA, 1 }, 84 .setscl = { 0x01, PORT_DATA, 1 },
89 .getsda = { 0x20, STAT, 0 }, 85 .getsda = { 0x20, PORT_STAT, 0 },
90 .getscl = { 0x40, STAT, 0 }, 86 .getscl = { 0x40, PORT_STAT, 0 },
91 .init = { 0xfc, DATA, 0 }, 87 .init = { 0xfc, PORT_DATA, 0 },
92 }, 88 },
93 /* type 7: One For All JP1 parallel port adapter */ 89 /* type 7: One For All JP1 parallel port adapter */
94 { 90 {
95 .setsda = { 0x01, DATA, 0 }, 91 .setsda = { 0x01, PORT_DATA, 0 },
96 .setscl = { 0x02, DATA, 0 }, 92 .setscl = { 0x02, PORT_DATA, 0 },
97 .getsda = { 0x80, STAT, 1 }, 93 .getsda = { 0x80, PORT_STAT, 1 },
98 .init = { 0x04, DATA, 1 }, 94 .init = { 0x04, PORT_DATA, 1 },
99 }, 95 },
100}; 96};
101 97
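
Each lineop entry in the table above names a bit mask, the port register it lives in, and whether the adapter inverts the line, so one generic helper can drive every adapter type. A minimal sketch of such a helper, with hypothetical names:

    #include <stdio.h>
    #include <stdbool.h>

    struct lineop_sketch {
            unsigned char val; /* bit mask within the register */
            int port;          /* PORT_DATA / PORT_STAT / PORT_CTRL */
            bool inverted;     /* adapter inverts the electrical level */
    };

    /* Apply a logical line state through one descriptor: set or
     * clear the masked bit, honouring the inversion flag. */
    static unsigned char line_apply(unsigned char reg,
                                    const struct lineop_sketch *op,
                                    bool state)
    {
            if (state ^ op->inverted)
                    return reg | op->val;
            return reg & ~op->val;
    }

    int main(void)
    {
            struct lineop_sketch setscl = { 0x08, 2 /* PORT_CTRL */, false };

            printf("%02x\n", line_apply(0x00, &setscl, true));  /* 08 */
            printf("%02x\n", line_apply(0xff, &setscl, false)); /* f7 */
            return 0;
    }
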
diff --git a/drivers/i2c/busses/i2c-sh_mobile.c b/drivers/i2c/busses/i2c-sh_mobile.c
index 81ccd7875627..f633a53b6dbe 100644
--- a/drivers/i2c/busses/i2c-sh_mobile.c
+++ b/drivers/i2c/busses/i2c-sh_mobile.c
@@ -32,6 +32,7 @@
32#include <linux/clk.h> 32#include <linux/clk.h>
33#include <linux/io.h> 33#include <linux/io.h>
34#include <linux/slab.h> 34#include <linux/slab.h>
35#include <linux/i2c/i2c-sh_mobile.h>
35 36
36/* Transmit operation: */ 37/* Transmit operation: */
37/* */ 38/* */
@@ -117,7 +118,7 @@ struct sh_mobile_i2c_data {
117 struct device *dev; 118 struct device *dev;
118 void __iomem *reg; 119 void __iomem *reg;
119 struct i2c_adapter adap; 120 struct i2c_adapter adap;
120 121 unsigned long bus_speed;
121 struct clk *clk; 122 struct clk *clk;
122 u_int8_t icic; 123 u_int8_t icic;
123 u_int8_t iccl; 124 u_int8_t iccl;
@@ -205,7 +206,7 @@ static void activate_ch(struct sh_mobile_i2c_data *pd)
205 * We also round off the result. 206 * We also round off the result.
206 */ 207 */
207 num = i2c_clk * 5; 208 num = i2c_clk * 5;
208 denom = NORMAL_SPEED * 9; 209 denom = pd->bus_speed * 9;
209 tmp = num * 10 / denom; 210 tmp = num * 10 / denom;
210 if (tmp % 10 >= 5) 211 if (tmp % 10 >= 5)
211 pd->iccl = (u_int8_t)((num/denom) + 1); 212 pd->iccl = (u_int8_t)((num/denom) + 1);
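
The ICCL computation above is round-to-nearest integer division of i2c_clk * 5 by bus_speed * 9; a standalone sketch of the same rounding (function name and example values are illustrative):

    #include <stdio.h>

    /* iccl = round(i2c_clk * 5 / (bus_speed * 9)); tmp carries one
     * extra decimal digit so the remainder decides rounding up. */
    static unsigned char calc_iccl(unsigned long i2c_clk,
                                   unsigned long bus_speed)
    {
            unsigned long num = i2c_clk * 5;
            unsigned long denom = bus_speed * 9;

            if ((num * 10 / denom) % 10 >= 5)
                    return (unsigned char)(num / denom + 1);
            return (unsigned char)(num / denom);
    }

    int main(void)
    {
            /* e.g. a 33 MHz peripheral clock at 100 kHz bus speed */
            printf("iccl = %u\n", calc_iccl(33000000UL, 100000UL));
            return 0;
    }
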
@@ -574,10 +575,10 @@ static int sh_mobile_i2c_hook_irqs(struct platform_device *dev, int hook)
574 575
575static int sh_mobile_i2c_probe(struct platform_device *dev) 576static int sh_mobile_i2c_probe(struct platform_device *dev)
576{ 577{
578 struct i2c_sh_mobile_platform_data *pdata = dev->dev.platform_data;
577 struct sh_mobile_i2c_data *pd; 579 struct sh_mobile_i2c_data *pd;
578 struct i2c_adapter *adap; 580 struct i2c_adapter *adap;
579 struct resource *res; 581 struct resource *res;
580 char clk_name[8];
581 int size; 582 int size;
582 int ret; 583 int ret;
583 584
@@ -587,10 +588,9 @@ static int sh_mobile_i2c_probe(struct platform_device *dev)
587 return -ENOMEM; 588 return -ENOMEM;
588 } 589 }
589 590
590 snprintf(clk_name, sizeof(clk_name), "i2c%d", dev->id); 591 pd->clk = clk_get(&dev->dev, NULL);
591 pd->clk = clk_get(&dev->dev, clk_name);
592 if (IS_ERR(pd->clk)) { 592 if (IS_ERR(pd->clk)) {
593 dev_err(&dev->dev, "cannot get clock \"%s\"\n", clk_name); 593 dev_err(&dev->dev, "cannot get clock\n");
594 ret = PTR_ERR(pd->clk); 594 ret = PTR_ERR(pd->clk);
595 goto err; 595 goto err;
596 } 596 }
@@ -620,6 +620,11 @@ static int sh_mobile_i2c_probe(struct platform_device *dev)
620 goto err_irq; 620 goto err_irq;
621 } 621 }
622 622
 623	/* Use platform data bus speed or NORMAL_SPEED */
624 pd->bus_speed = NORMAL_SPEED;
625 if (pdata && pdata->bus_speed)
626 pd->bus_speed = pdata->bus_speed;
627
623 /* The IIC blocks on SH-Mobile ARM processors 628 /* The IIC blocks on SH-Mobile ARM processors
624 * come with two new bits in ICIC. 629 * come with two new bits in ICIC.
625 */ 630 */
@@ -660,6 +665,8 @@ static int sh_mobile_i2c_probe(struct platform_device *dev)
660 goto err_all; 665 goto err_all;
661 } 666 }
662 667
668 dev_info(&dev->dev, "I2C adapter %d with bus speed %lu Hz\n",
669 adap->nr, pd->bus_speed);
663 return 0; 670 return 0;
664 671
665 err_all: 672 err_all:
diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
index b4ab39b741eb..4d9319665e32 100644
--- a/drivers/i2c/busses/i2c-tegra.c
+++ b/drivers/i2c/busses/i2c-tegra.c
@@ -35,8 +35,10 @@
35#define BYTES_PER_FIFO_WORD 4 35#define BYTES_PER_FIFO_WORD 4
36 36
37#define I2C_CNFG 0x000 37#define I2C_CNFG 0x000
38#define I2C_CNFG_DEBOUNCE_CNT_SHIFT 12
38#define I2C_CNFG_PACKET_MODE_EN (1<<10) 39#define I2C_CNFG_PACKET_MODE_EN (1<<10)
39#define I2C_CNFG_NEW_MASTER_FSM (1<<11) 40#define I2C_CNFG_NEW_MASTER_FSM (1<<11)
41#define I2C_STATUS 0x01C
40#define I2C_SL_CNFG 0x020 42#define I2C_SL_CNFG 0x020
41#define I2C_SL_CNFG_NEWSL (1<<2) 43#define I2C_SL_CNFG_NEWSL (1<<2)
42#define I2C_SL_ADDR1 0x02c 44#define I2C_SL_ADDR1 0x02c
@@ -77,6 +79,7 @@
77#define I2C_ERR_NONE 0x00 79#define I2C_ERR_NONE 0x00
78#define I2C_ERR_NO_ACK 0x01 80#define I2C_ERR_NO_ACK 0x01
79#define I2C_ERR_ARBITRATION_LOST 0x02 81#define I2C_ERR_ARBITRATION_LOST 0x02
82#define I2C_ERR_UNKNOWN_INTERRUPT 0x04
80 83
81#define PACKET_HEADER0_HEADER_SIZE_SHIFT 28 84#define PACKET_HEADER0_HEADER_SIZE_SHIFT 28
82#define PACKET_HEADER0_PACKET_ID_SHIFT 16 85#define PACKET_HEADER0_PACKET_ID_SHIFT 16
@@ -121,6 +124,7 @@ struct tegra_i2c_dev {
121 void __iomem *base; 124 void __iomem *base;
122 int cont_id; 125 int cont_id;
123 int irq; 126 int irq;
127 bool irq_disabled;
124 int is_dvc; 128 int is_dvc;
125 struct completion msg_complete; 129 struct completion msg_complete;
126 int msg_err; 130 int msg_err;
@@ -325,11 +329,17 @@ static int tegra_i2c_init(struct tegra_i2c_dev *i2c_dev)
325 if (i2c_dev->is_dvc) 329 if (i2c_dev->is_dvc)
326 tegra_dvc_init(i2c_dev); 330 tegra_dvc_init(i2c_dev);
327 331
328 val = I2C_CNFG_NEW_MASTER_FSM | I2C_CNFG_PACKET_MODE_EN; 332 val = I2C_CNFG_NEW_MASTER_FSM | I2C_CNFG_PACKET_MODE_EN |
333 (0x2 << I2C_CNFG_DEBOUNCE_CNT_SHIFT);
329 i2c_writel(i2c_dev, val, I2C_CNFG); 334 i2c_writel(i2c_dev, val, I2C_CNFG);
330 i2c_writel(i2c_dev, 0, I2C_INT_MASK); 335 i2c_writel(i2c_dev, 0, I2C_INT_MASK);
331 clk_set_rate(i2c_dev->clk, i2c_dev->bus_clk_rate * 8); 336 clk_set_rate(i2c_dev->clk, i2c_dev->bus_clk_rate * 8);
332 337
338 if (!i2c_dev->is_dvc) {
339 u32 sl_cfg = i2c_readl(i2c_dev, I2C_SL_CNFG);
340 i2c_writel(i2c_dev, sl_cfg | I2C_SL_CNFG_NEWSL, I2C_SL_CNFG);
341 }
342
333 val = 7 << I2C_FIFO_CONTROL_TX_TRIG_SHIFT | 343 val = 7 << I2C_FIFO_CONTROL_TX_TRIG_SHIFT |
334 0 << I2C_FIFO_CONTROL_RX_TRIG_SHIFT; 344 0 << I2C_FIFO_CONTROL_RX_TRIG_SHIFT;
335 i2c_writel(i2c_dev, val, I2C_FIFO_CONTROL); 345 i2c_writel(i2c_dev, val, I2C_FIFO_CONTROL);
@@ -338,6 +348,12 @@ static int tegra_i2c_init(struct tegra_i2c_dev *i2c_dev)
338 err = -ETIMEDOUT; 348 err = -ETIMEDOUT;
339 349
340 clk_disable(i2c_dev->clk); 350 clk_disable(i2c_dev->clk);
351
352 if (i2c_dev->irq_disabled) {
353 i2c_dev->irq_disabled = 0;
354 enable_irq(i2c_dev->irq);
355 }
356
341 return err; 357 return err;
342} 358}
343 359
@@ -350,8 +366,19 @@ static irqreturn_t tegra_i2c_isr(int irq, void *dev_id)
350 status = i2c_readl(i2c_dev, I2C_INT_STATUS); 366 status = i2c_readl(i2c_dev, I2C_INT_STATUS);
351 367
352 if (status == 0) { 368 if (status == 0) {
353 dev_warn(i2c_dev->dev, "interrupt with no status\n"); 369 dev_warn(i2c_dev->dev, "irq status 0 %08x %08x %08x\n",
354 return IRQ_NONE; 370 i2c_readl(i2c_dev, I2C_PACKET_TRANSFER_STATUS),
371 i2c_readl(i2c_dev, I2C_STATUS),
372 i2c_readl(i2c_dev, I2C_CNFG));
373 i2c_dev->msg_err |= I2C_ERR_UNKNOWN_INTERRUPT;
374
375 if (!i2c_dev->irq_disabled) {
376 disable_irq_nosync(i2c_dev->irq);
377 i2c_dev->irq_disabled = 1;
378 }
379
380 complete(&i2c_dev->msg_complete);
381 goto err;
355 } 382 }
356 383
357 if (unlikely(status & status_err)) { 384 if (unlikely(status & status_err)) {
@@ -391,6 +418,8 @@ err:
391 I2C_INT_PACKET_XFER_COMPLETE | I2C_INT_TX_FIFO_DATA_REQ | 418 I2C_INT_PACKET_XFER_COMPLETE | I2C_INT_TX_FIFO_DATA_REQ |
392 I2C_INT_RX_FIFO_DATA_REQ); 419 I2C_INT_RX_FIFO_DATA_REQ);
393 i2c_writel(i2c_dev, status, I2C_INT_STATUS); 420 i2c_writel(i2c_dev, status, I2C_INT_STATUS);
421 if (i2c_dev->is_dvc)
422 dvc_writel(i2c_dev, DVC_STATUS_I2C_DONE_INTR, DVC_STATUS);
394 return IRQ_HANDLED; 423 return IRQ_HANDLED;
395} 424}
396 425
@@ -424,12 +453,12 @@ static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev,
424 453
425 packet_header = msg->addr << I2C_HEADER_SLAVE_ADDR_SHIFT; 454 packet_header = msg->addr << I2C_HEADER_SLAVE_ADDR_SHIFT;
426 packet_header |= I2C_HEADER_IE_ENABLE; 455 packet_header |= I2C_HEADER_IE_ENABLE;
456 if (!stop)
457 packet_header |= I2C_HEADER_REPEAT_START;
427 if (msg->flags & I2C_M_TEN) 458 if (msg->flags & I2C_M_TEN)
428 packet_header |= I2C_HEADER_10BIT_ADDR; 459 packet_header |= I2C_HEADER_10BIT_ADDR;
429 if (msg->flags & I2C_M_IGNORE_NAK) 460 if (msg->flags & I2C_M_IGNORE_NAK)
430 packet_header |= I2C_HEADER_CONT_ON_NAK; 461 packet_header |= I2C_HEADER_CONT_ON_NAK;
431 if (msg->flags & I2C_M_NOSTART)
432 packet_header |= I2C_HEADER_REPEAT_START;
433 if (msg->flags & I2C_M_RD) 462 if (msg->flags & I2C_M_RD)
434 packet_header |= I2C_HEADER_READ; 463 packet_header |= I2C_HEADER_READ;
435 i2c_writel(i2c_dev, packet_header, I2C_TX_FIFO); 464 i2c_writel(i2c_dev, packet_header, I2C_TX_FIFO);
diff --git a/drivers/ide/ide-cs.c b/drivers/ide/ide-cs.c
index 404843e8611b..d2f3db3cf3ed 100644
--- a/drivers/ide/ide-cs.c
+++ b/drivers/ide/ide-cs.c
@@ -272,7 +272,7 @@ static void ide_release(struct pcmcia_device *link)
272} /* ide_release */ 272} /* ide_release */
273 273
274 274
275static struct pcmcia_device_id ide_ids[] = { 275static const struct pcmcia_device_id ide_ids[] = {
276 PCMCIA_DEVICE_FUNC_ID(4), 276 PCMCIA_DEVICE_FUNC_ID(4),
277 PCMCIA_DEVICE_MANF_CARD(0x0000, 0x0000), /* Corsair */ 277 PCMCIA_DEVICE_MANF_CARD(0x0000, 0x0000), /* Corsair */
278 PCMCIA_DEVICE_MANF_CARD(0x0007, 0x0000), /* Hitachi */ 278 PCMCIA_DEVICE_MANF_CARD(0x0007, 0x0000), /* Hitachi */
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
index 88d8e4cb419a..be0921ef6b52 100644
--- a/drivers/input/evdev.c
+++ b/drivers/input/evdev.c
@@ -41,6 +41,7 @@ struct evdev {
41struct evdev_client { 41struct evdev_client {
42 unsigned int head; 42 unsigned int head;
43 unsigned int tail; 43 unsigned int tail;
44 unsigned int packet_head; /* [future] position of the first element of next packet */
44 spinlock_t buffer_lock; /* protects access to buffer, head and tail */ 45 spinlock_t buffer_lock; /* protects access to buffer, head and tail */
45 struct fasync_struct *fasync; 46 struct fasync_struct *fasync;
46 struct evdev *evdev; 47 struct evdev *evdev;
@@ -72,12 +73,16 @@ static void evdev_pass_event(struct evdev_client *client,
72 client->buffer[client->tail].type = EV_SYN; 73 client->buffer[client->tail].type = EV_SYN;
73 client->buffer[client->tail].code = SYN_DROPPED; 74 client->buffer[client->tail].code = SYN_DROPPED;
74 client->buffer[client->tail].value = 0; 75 client->buffer[client->tail].value = 0;
75 }
76 76
77 spin_unlock(&client->buffer_lock); 77 client->packet_head = client->tail;
78 }
78 79
79 if (event->type == EV_SYN) 80 if (event->type == EV_SYN && event->code == SYN_REPORT) {
81 client->packet_head = client->head;
80 kill_fasync(&client->fasync, SIGIO, POLL_IN); 82 kill_fasync(&client->fasync, SIGIO, POLL_IN);
83 }
84
85 spin_unlock(&client->buffer_lock);
81} 86}
82 87
83/* 88/*
@@ -159,7 +164,6 @@ static int evdev_grab(struct evdev *evdev, struct evdev_client *client)
159 return error; 164 return error;
160 165
161 rcu_assign_pointer(evdev->grab, client); 166 rcu_assign_pointer(evdev->grab, client);
162 synchronize_rcu();
163 167
164 return 0; 168 return 0;
165} 169}
@@ -182,7 +186,6 @@ static void evdev_attach_client(struct evdev *evdev,
182 spin_lock(&evdev->client_lock); 186 spin_lock(&evdev->client_lock);
183 list_add_tail_rcu(&client->node, &evdev->client_list); 187 list_add_tail_rcu(&client->node, &evdev->client_list);
184 spin_unlock(&evdev->client_lock); 188 spin_unlock(&evdev->client_lock);
185 synchronize_rcu();
186} 189}
187 190
188static void evdev_detach_client(struct evdev *evdev, 191static void evdev_detach_client(struct evdev *evdev,
@@ -387,12 +390,12 @@ static ssize_t evdev_read(struct file *file, char __user *buffer,
387 if (count < input_event_size()) 390 if (count < input_event_size())
388 return -EINVAL; 391 return -EINVAL;
389 392
390 if (client->head == client->tail && evdev->exist && 393 if (client->packet_head == client->tail && evdev->exist &&
391 (file->f_flags & O_NONBLOCK)) 394 (file->f_flags & O_NONBLOCK))
392 return -EAGAIN; 395 return -EAGAIN;
393 396
394 retval = wait_event_interruptible(evdev->wait, 397 retval = wait_event_interruptible(evdev->wait,
395 client->head != client->tail || !evdev->exist); 398 client->packet_head != client->tail || !evdev->exist);
396 if (retval) 399 if (retval)
397 return retval; 400 return retval;
398 401
@@ -421,7 +424,7 @@ static unsigned int evdev_poll(struct file *file, poll_table *wait)
421 poll_wait(file, &evdev->wait, wait); 424 poll_wait(file, &evdev->wait, wait);
422 425
423 mask = evdev->exist ? POLLOUT | POLLWRNORM : POLLHUP | POLLERR; 426 mask = evdev->exist ? POLLOUT | POLLWRNORM : POLLHUP | POLLERR;
424 if (client->head != client->tail) 427 if (client->packet_head != client->tail)
425 mask |= POLLIN | POLLRDNORM; 428 mask |= POLLIN | POLLRDNORM;
426 429
427 return mask; 430 return mask;
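
The packet_head change above makes readers wait for a complete event packet, terminated by EV_SYN/SYN_REPORT, instead of waking on every single event. A toy, single-threaded ring buffer showing the idea (a sketch, not the evdev implementation):

    #include <stdio.h>
    #include <stdbool.h>

    #define BUF_SIZE 64 /* power of two */

    struct pkt_buf {
            int ev[BUF_SIZE];
            unsigned int head;        /* next write slot */
            unsigned int tail;        /* next read slot */
            unsigned int packet_head; /* end of the last complete packet */
    };

    /* Store one event; only a terminator (EV_SYN/SYN_REPORT in
     * evdev) publishes the buffered events to readers. */
    static void pkt_write(struct pkt_buf *b, int ev, bool terminator)
    {
            b->ev[b->head++ & (BUF_SIZE - 1)] = ev;
            if (terminator)
                    b->packet_head = b->head;
    }

    /* Readers test packet_head, not head, so a half-written packet
     * never satisfies poll() or a blocking read(). */
    static bool pkt_readable(const struct pkt_buf *b)
    {
            return b->packet_head != b->tail;
    }

    int main(void)
    {
            struct pkt_buf b = { { 0 }, 0, 0, 0 };

            pkt_write(&b, 1, false);
            printf("readable: %d\n", pkt_readable(&b)); /* 0 */
            pkt_write(&b, 0, true);                     /* SYN_REPORT */
            printf("readable: %d\n", pkt_readable(&b)); /* 1 */
            return 0;
    }
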
diff --git a/drivers/input/input-polldev.c b/drivers/input/input-polldev.c
index 3037842a60d8..b1aabde87523 100644
--- a/drivers/input/input-polldev.c
+++ b/drivers/input/input-polldev.c
@@ -13,6 +13,7 @@
13#include <linux/jiffies.h> 13#include <linux/jiffies.h>
14#include <linux/slab.h> 14#include <linux/slab.h>
15#include <linux/mutex.h> 15#include <linux/mutex.h>
16#include <linux/workqueue.h>
16#include <linux/input-polldev.h> 17#include <linux/input-polldev.h>
17 18
18MODULE_AUTHOR("Dmitry Torokhov <dtor@mail.ru>"); 19MODULE_AUTHOR("Dmitry Torokhov <dtor@mail.ru>");
@@ -20,44 +21,6 @@ MODULE_DESCRIPTION("Generic implementation of a polled input device");
20MODULE_LICENSE("GPL v2"); 21MODULE_LICENSE("GPL v2");
21MODULE_VERSION("0.1"); 22MODULE_VERSION("0.1");
22 23
23static DEFINE_MUTEX(polldev_mutex);
24static int polldev_users;
25static struct workqueue_struct *polldev_wq;
26
27static int input_polldev_start_workqueue(void)
28{
29 int retval;
30
31 retval = mutex_lock_interruptible(&polldev_mutex);
32 if (retval)
33 return retval;
34
35 if (!polldev_users) {
36 polldev_wq = create_singlethread_workqueue("ipolldevd");
37 if (!polldev_wq) {
38 pr_err("failed to create ipolldevd workqueue\n");
39 retval = -ENOMEM;
40 goto out;
41 }
42 }
43
44 polldev_users++;
45
46 out:
47 mutex_unlock(&polldev_mutex);
48 return retval;
49}
50
51static void input_polldev_stop_workqueue(void)
52{
53 mutex_lock(&polldev_mutex);
54
55 if (!--polldev_users)
56 destroy_workqueue(polldev_wq);
57
58 mutex_unlock(&polldev_mutex);
59}
60
61static void input_polldev_queue_work(struct input_polled_dev *dev) 24static void input_polldev_queue_work(struct input_polled_dev *dev)
62{ 25{
63 unsigned long delay; 26 unsigned long delay;
@@ -66,7 +29,7 @@ static void input_polldev_queue_work(struct input_polled_dev *dev)
66 if (delay >= HZ) 29 if (delay >= HZ)
67 delay = round_jiffies_relative(delay); 30 delay = round_jiffies_relative(delay);
68 31
69 queue_delayed_work(polldev_wq, &dev->work, delay); 32 queue_delayed_work(system_freezable_wq, &dev->work, delay);
70} 33}
71 34
72static void input_polled_device_work(struct work_struct *work) 35static void input_polled_device_work(struct work_struct *work)
@@ -81,18 +44,13 @@ static void input_polled_device_work(struct work_struct *work)
81static int input_open_polled_device(struct input_dev *input) 44static int input_open_polled_device(struct input_dev *input)
82{ 45{
83 struct input_polled_dev *dev = input_get_drvdata(input); 46 struct input_polled_dev *dev = input_get_drvdata(input);
84 int error;
85
86 error = input_polldev_start_workqueue();
87 if (error)
88 return error;
89 47
90 if (dev->open) 48 if (dev->open)
91 dev->open(dev); 49 dev->open(dev);
92 50
93 /* Only start polling if polling is enabled */ 51 /* Only start polling if polling is enabled */
94 if (dev->poll_interval > 0) 52 if (dev->poll_interval > 0)
95 queue_delayed_work(polldev_wq, &dev->work, 0); 53 queue_delayed_work(system_freezable_wq, &dev->work, 0);
96 54
97 return 0; 55 return 0;
98} 56}
@@ -102,13 +60,6 @@ static void input_close_polled_device(struct input_dev *input)
102 struct input_polled_dev *dev = input_get_drvdata(input); 60 struct input_polled_dev *dev = input_get_drvdata(input);
103 61
104 cancel_delayed_work_sync(&dev->work); 62 cancel_delayed_work_sync(&dev->work);
105 /*
106 * Clean up work struct to remove references to the workqueue.
107 * It may be destroyed by the next call. This causes problems
108 * at next device open-close in case of poll_interval == 0.
109 */
110 INIT_DELAYED_WORK(&dev->work, dev->work.work.func);
111 input_polldev_stop_workqueue();
112 63
113 if (dev->close) 64 if (dev->close)
114 dev->close(dev); 65 dev->close(dev);
@@ -295,4 +246,3 @@ void input_unregister_polled_device(struct input_polled_dev *dev)
295 input_unregister_device(dev->input); 246 input_unregister_device(dev->input);
296} 247}
297EXPORT_SYMBOL(input_unregister_polled_device); 248EXPORT_SYMBOL(input_unregister_polled_device);
298
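
With the private ipolldevd workqueue gone, polled devices simply queue their delayed work on the shared system_freezable_wq. A minimal sketch of a hypothetical driver doing the same (assumes a kernel that provides system_freezable_wq, as this series does):

    #include <linux/kernel.h>
    #include <linux/workqueue.h>
    #include <linux/jiffies.h>

    struct poll_ctx {
            struct delayed_work work;
            unsigned int poll_interval;     /* ms */
    };

    static void poll_fn(struct work_struct *work)
    {
            struct poll_ctx *ctx = container_of(to_delayed_work(work),
                                                struct poll_ctx, work);
            unsigned long delay = msecs_to_jiffies(ctx->poll_interval);

            /* ... sample the hardware and report input events here ... */

            /* long intervals are rounded so wakeups can batch together */
            if (delay >= HZ)
                    delay = round_jiffies_relative(delay);
            queue_delayed_work(system_freezable_wq, &ctx->work, delay);
    }
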
diff --git a/drivers/input/input.c b/drivers/input/input.c
index ebbceedc92f4..75e11c7b70fd 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -451,7 +451,6 @@ int input_grab_device(struct input_handle *handle)
451 } 451 }
452 452
453 rcu_assign_pointer(dev->grab, handle); 453 rcu_assign_pointer(dev->grab, handle);
454 synchronize_rcu();
455 454
456 out: 455 out:
457 mutex_unlock(&dev->mutex); 456 mutex_unlock(&dev->mutex);
diff --git a/drivers/input/joydev.c b/drivers/input/joydev.c
index 5688b5c88f24..c24ec2d5f926 100644
--- a/drivers/input/joydev.c
+++ b/drivers/input/joydev.c
@@ -180,7 +180,6 @@ static void joydev_attach_client(struct joydev *joydev,
180 spin_lock(&joydev->client_lock); 180 spin_lock(&joydev->client_lock);
181 list_add_tail_rcu(&client->node, &joydev->client_list); 181 list_add_tail_rcu(&client->node, &joydev->client_list);
182 spin_unlock(&joydev->client_lock); 182 spin_unlock(&joydev->client_lock);
183 synchronize_rcu();
184} 183}
185 184
186static void joydev_detach_client(struct joydev *joydev, 185static void joydev_detach_client(struct joydev *joydev,
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index b16bed038f72..69badb4e06aa 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -32,6 +32,16 @@ config KEYBOARD_ADP5588
32 To compile this driver as a module, choose M here: the 32 To compile this driver as a module, choose M here: the
33 module will be called adp5588-keys. 33 module will be called adp5588-keys.
34 34
35config KEYBOARD_ADP5589
36 tristate "ADP5589 I2C QWERTY Keypad and IO Expander"
37 depends on I2C
38 help
 39	  Say Y here if you want to use an ADP5589 attached to your
40 system I2C bus.
41
42 To compile this driver as a module, choose M here: the
43 module will be called adp5589-keys.
44
35config KEYBOARD_AMIGA 45config KEYBOARD_AMIGA
36 tristate "Amiga keyboard" 46 tristate "Amiga keyboard"
37 depends on AMIGA 47 depends on AMIGA
@@ -325,6 +335,18 @@ config KEYBOARD_MCS
325 To compile this driver as a module, choose M here: the 335 To compile this driver as a module, choose M here: the
326 module will be called mcs_touchkey. 336 module will be called mcs_touchkey.
327 337
338config KEYBOARD_MPR121
339 tristate "Freescale MPR121 Touchkey"
340 depends on I2C
341 help
 342	  Say Y here if you have a Freescale MPR121 touchkey
343 chip in your system.
344
345 If unsure, say N.
346
347 To compile this driver as a module, choose M here: the
348 module will be called mpr121_touchkey.
349
328config KEYBOARD_IMX 350config KEYBOARD_IMX
329 tristate "IMX keypad support" 351 tristate "IMX keypad support"
330 depends on ARCH_MXC 352 depends on ARCH_MXC
diff --git a/drivers/input/keyboard/Makefile b/drivers/input/keyboard/Makefile
index 878e6c20deb0..c49cf8e04cd7 100644
--- a/drivers/input/keyboard/Makefile
+++ b/drivers/input/keyboard/Makefile
@@ -6,6 +6,7 @@
6 6
7obj-$(CONFIG_KEYBOARD_ADP5520) += adp5520-keys.o 7obj-$(CONFIG_KEYBOARD_ADP5520) += adp5520-keys.o
8obj-$(CONFIG_KEYBOARD_ADP5588) += adp5588-keys.o 8obj-$(CONFIG_KEYBOARD_ADP5588) += adp5588-keys.o
9obj-$(CONFIG_KEYBOARD_ADP5589) += adp5589-keys.o
9obj-$(CONFIG_KEYBOARD_AMIGA) += amikbd.o 10obj-$(CONFIG_KEYBOARD_AMIGA) += amikbd.o
10obj-$(CONFIG_KEYBOARD_ATARI) += atakbd.o 11obj-$(CONFIG_KEYBOARD_ATARI) += atakbd.o
11obj-$(CONFIG_KEYBOARD_ATKBD) += atkbd.o 12obj-$(CONFIG_KEYBOARD_ATKBD) += atkbd.o
@@ -27,6 +28,7 @@ obj-$(CONFIG_KEYBOARD_MAPLE) += maple_keyb.o
27obj-$(CONFIG_KEYBOARD_MATRIX) += matrix_keypad.o 28obj-$(CONFIG_KEYBOARD_MATRIX) += matrix_keypad.o
28obj-$(CONFIG_KEYBOARD_MAX7359) += max7359_keypad.o 29obj-$(CONFIG_KEYBOARD_MAX7359) += max7359_keypad.o
29obj-$(CONFIG_KEYBOARD_MCS) += mcs_touchkey.o 30obj-$(CONFIG_KEYBOARD_MCS) += mcs_touchkey.o
31obj-$(CONFIG_KEYBOARD_MPR121) += mpr121_touchkey.o
30obj-$(CONFIG_KEYBOARD_NEWTON) += newtonkbd.o 32obj-$(CONFIG_KEYBOARD_NEWTON) += newtonkbd.o
31obj-$(CONFIG_KEYBOARD_NOMADIK) += nomadik-ske-keypad.o 33obj-$(CONFIG_KEYBOARD_NOMADIK) += nomadik-ske-keypad.o
32obj-$(CONFIG_KEYBOARD_OMAP) += omap-keypad.o 34obj-$(CONFIG_KEYBOARD_OMAP) += omap-keypad.o
diff --git a/drivers/input/keyboard/adp5589-keys.c b/drivers/input/keyboard/adp5589-keys.c
new file mode 100644
index 000000000000..631598663aab
--- /dev/null
+++ b/drivers/input/keyboard/adp5589-keys.c
@@ -0,0 +1,771 @@
1/*
2 * Description: keypad driver for ADP5589
3 * I2C QWERTY Keypad and IO Expander
4 * Bugs: Enter bugs at http://blackfin.uclinux.org/
5 *
6 * Copyright (C) 2010-2011 Analog Devices Inc.
7 * Licensed under the GPL-2.
8 */
9
10#include <linux/module.h>
11#include <linux/version.h>
12#include <linux/init.h>
13#include <linux/interrupt.h>
14#include <linux/irq.h>
15#include <linux/workqueue.h>
16#include <linux/errno.h>
17#include <linux/pm.h>
18#include <linux/platform_device.h>
19#include <linux/input.h>
20#include <linux/i2c.h>
21#include <linux/gpio.h>
22#include <linux/slab.h>
23
24#include <linux/input/adp5589.h>
25
26/* GENERAL_CFG Register */
27#define OSC_EN (1 << 7)
28#define CORE_CLK(x) (((x) & 0x3) << 5)
29#define LCK_TRK_LOGIC (1 << 4)
30#define LCK_TRK_GPI (1 << 3)
31#define INT_CFG (1 << 1)
32#define RST_CFG (1 << 0)
33
34/* INT_EN Register */
35#define LOGIC2_IEN (1 << 5)
36#define LOGIC1_IEN (1 << 4)
37#define LOCK_IEN (1 << 3)
38#define OVRFLOW_IEN (1 << 2)
39#define GPI_IEN (1 << 1)
40#define EVENT_IEN (1 << 0)
41
42/* Interrupt Status Register */
43#define LOGIC2_INT (1 << 5)
44#define LOGIC1_INT (1 << 4)
45#define LOCK_INT (1 << 3)
46#define OVRFLOW_INT (1 << 2)
47#define GPI_INT (1 << 1)
48#define EVENT_INT (1 << 0)
49
50/* STATUS Register */
51
52#define LOGIC2_STAT (1 << 7)
53#define LOGIC1_STAT (1 << 6)
54#define LOCK_STAT (1 << 5)
55#define KEC 0xF
56
57/* PIN_CONFIG_D Register */
58#define C4_EXTEND_CFG (1 << 6) /* RESET2 */
59#define R4_EXTEND_CFG (1 << 5) /* RESET1 */
60
61/* LOCK_CFG */
62#define LOCK_EN (1 << 0)
63
64#define PTIME_MASK 0x3
65#define LTIME_MASK 0x3
66
67/* Key Event Register xy */
68#define KEY_EV_PRESSED (1 << 7)
69#define KEY_EV_MASK (0x7F)
70
71#define KEYP_MAX_EVENT 16
72
73#define MAXGPIO 19
74#define ADP_BANK(offs) ((offs) >> 3)
75#define ADP_BIT(offs) (1u << ((offs) & 0x7))
76
77struct adp5589_kpad {
78 struct i2c_client *client;
79 struct input_dev *input;
80 unsigned short keycode[ADP5589_KEYMAPSIZE];
81 const struct adp5589_gpi_map *gpimap;
82 unsigned short gpimapsize;
83 unsigned extend_cfg;
84#ifdef CONFIG_GPIOLIB
85 unsigned char gpiomap[MAXGPIO];
86 bool export_gpio;
87 struct gpio_chip gc;
88 struct mutex gpio_lock; /* Protect cached dir, dat_out */
89 u8 dat_out[3];
90 u8 dir[3];
91#endif
92};
93
94static int adp5589_read(struct i2c_client *client, u8 reg)
95{
96 int ret = i2c_smbus_read_byte_data(client, reg);
97
98 if (ret < 0)
99 dev_err(&client->dev, "Read Error\n");
100
101 return ret;
102}
103
104static int adp5589_write(struct i2c_client *client, u8 reg, u8 val)
105{
106 return i2c_smbus_write_byte_data(client, reg, val);
107}
108
109#ifdef CONFIG_GPIOLIB
110static int adp5589_gpio_get_value(struct gpio_chip *chip, unsigned off)
111{
112 struct adp5589_kpad *kpad = container_of(chip, struct adp5589_kpad, gc);
113 unsigned int bank = ADP_BANK(kpad->gpiomap[off]);
114 unsigned int bit = ADP_BIT(kpad->gpiomap[off]);
115
116 return !!(adp5589_read(kpad->client, ADP5589_GPI_STATUS_A + bank) &
117 bit);
118}
119
120static void adp5589_gpio_set_value(struct gpio_chip *chip,
121 unsigned off, int val)
122{
123 struct adp5589_kpad *kpad = container_of(chip, struct adp5589_kpad, gc);
124 unsigned int bank = ADP_BANK(kpad->gpiomap[off]);
125 unsigned int bit = ADP_BIT(kpad->gpiomap[off]);
126
127 mutex_lock(&kpad->gpio_lock);
128
129 if (val)
130 kpad->dat_out[bank] |= bit;
131 else
132 kpad->dat_out[bank] &= ~bit;
133
134 adp5589_write(kpad->client, ADP5589_GPO_DATA_OUT_A + bank,
135 kpad->dat_out[bank]);
136
137 mutex_unlock(&kpad->gpio_lock);
138}
139
140static int adp5589_gpio_direction_input(struct gpio_chip *chip, unsigned off)
141{
142 struct adp5589_kpad *kpad = container_of(chip, struct adp5589_kpad, gc);
143 unsigned int bank = ADP_BANK(kpad->gpiomap[off]);
144 unsigned int bit = ADP_BIT(kpad->gpiomap[off]);
145 int ret;
146
147 mutex_lock(&kpad->gpio_lock);
148
149 kpad->dir[bank] &= ~bit;
150 ret = adp5589_write(kpad->client, ADP5589_GPIO_DIRECTION_A + bank,
151 kpad->dir[bank]);
152
153 mutex_unlock(&kpad->gpio_lock);
154
155 return ret;
156}
157
158static int adp5589_gpio_direction_output(struct gpio_chip *chip,
159 unsigned off, int val)
160{
161 struct adp5589_kpad *kpad = container_of(chip, struct adp5589_kpad, gc);
162 unsigned int bank = ADP_BANK(kpad->gpiomap[off]);
163 unsigned int bit = ADP_BIT(kpad->gpiomap[off]);
164 int ret;
165
166 mutex_lock(&kpad->gpio_lock);
167
168 kpad->dir[bank] |= bit;
169
170 if (val)
171 kpad->dat_out[bank] |= bit;
172 else
173 kpad->dat_out[bank] &= ~bit;
174
175 ret = adp5589_write(kpad->client, ADP5589_GPO_DATA_OUT_A + bank,
176 kpad->dat_out[bank]);
177 ret |= adp5589_write(kpad->client, ADP5589_GPIO_DIRECTION_A + bank,
178 kpad->dir[bank]);
179
180 mutex_unlock(&kpad->gpio_lock);
181
182 return ret;
183}
184
185static int __devinit adp5589_build_gpiomap(struct adp5589_kpad *kpad,
186 const struct adp5589_kpad_platform_data *pdata)
187{
188 bool pin_used[MAXGPIO];
189 int n_unused = 0;
190 int i;
191
192 memset(pin_used, false, sizeof(pin_used));
193
194 for (i = 0; i < MAXGPIO; i++)
195 if (pdata->keypad_en_mask & (1 << i))
196 pin_used[i] = true;
197
198 for (i = 0; i < kpad->gpimapsize; i++)
199 pin_used[kpad->gpimap[i].pin - ADP5589_GPI_PIN_BASE] = true;
200
201 if (kpad->extend_cfg & R4_EXTEND_CFG)
202 pin_used[4] = true;
203
204 if (kpad->extend_cfg & C4_EXTEND_CFG)
205 pin_used[12] = true;
206
207 for (i = 0; i < MAXGPIO; i++)
208 if (!pin_used[i])
209 kpad->gpiomap[n_unused++] = i;
210
211 return n_unused;
212}
213
214static int __devinit adp5589_gpio_add(struct adp5589_kpad *kpad)
215{
216 struct device *dev = &kpad->client->dev;
217 const struct adp5589_kpad_platform_data *pdata = dev->platform_data;
218 const struct adp5589_gpio_platform_data *gpio_data = pdata->gpio_data;
219 int i, error;
220
221 if (!gpio_data)
222 return 0;
223
224 kpad->gc.ngpio = adp5589_build_gpiomap(kpad, pdata);
225 if (kpad->gc.ngpio == 0) {
226 dev_info(dev, "No unused gpios left to export\n");
227 return 0;
228 }
229
230 kpad->export_gpio = true;
231
232 kpad->gc.direction_input = adp5589_gpio_direction_input;
233 kpad->gc.direction_output = adp5589_gpio_direction_output;
234 kpad->gc.get = adp5589_gpio_get_value;
235 kpad->gc.set = adp5589_gpio_set_value;
236 kpad->gc.can_sleep = 1;
237
238 kpad->gc.base = gpio_data->gpio_start;
239 kpad->gc.label = kpad->client->name;
240 kpad->gc.owner = THIS_MODULE;
241
242 mutex_init(&kpad->gpio_lock);
243
244 error = gpiochip_add(&kpad->gc);
245 if (error) {
246 dev_err(dev, "gpiochip_add failed, err: %d\n", error);
247 return error;
248 }
249
250 for (i = 0; i <= ADP_BANK(MAXGPIO); i++) {
251 kpad->dat_out[i] = adp5589_read(kpad->client,
252 ADP5589_GPO_DATA_OUT_A + i);
253 kpad->dir[i] = adp5589_read(kpad->client,
254 ADP5589_GPIO_DIRECTION_A + i);
255 }
256
257 if (gpio_data->setup) {
258 error = gpio_data->setup(kpad->client,
259 kpad->gc.base, kpad->gc.ngpio,
260 gpio_data->context);
261 if (error)
262 dev_warn(dev, "setup failed, %d\n", error);
263 }
264
265 return 0;
266}
267
268static void __devexit adp5589_gpio_remove(struct adp5589_kpad *kpad)
269{
270 struct device *dev = &kpad->client->dev;
271 const struct adp5589_kpad_platform_data *pdata = dev->platform_data;
272 const struct adp5589_gpio_platform_data *gpio_data = pdata->gpio_data;
273 int error;
274
275 if (!kpad->export_gpio)
276 return;
277
278 if (gpio_data->teardown) {
279 error = gpio_data->teardown(kpad->client,
280 kpad->gc.base, kpad->gc.ngpio,
281 gpio_data->context);
282 if (error)
283 dev_warn(dev, "teardown failed %d\n", error);
284 }
285
286 error = gpiochip_remove(&kpad->gc);
287 if (error)
288 dev_warn(dev, "gpiochip_remove failed %d\n", error);
289}
290#else
291static inline int adp5589_gpio_add(struct adp5589_kpad *kpad)
292{
293 return 0;
294}
295
296static inline void adp5589_gpio_remove(struct adp5589_kpad *kpad)
297{
298}
299#endif
300
301static void adp5589_report_switches(struct adp5589_kpad *kpad,
302 int key, int key_val)
303{
304 int i;
305
306 for (i = 0; i < kpad->gpimapsize; i++) {
307 if (key_val == kpad->gpimap[i].pin) {
308 input_report_switch(kpad->input,
309 kpad->gpimap[i].sw_evt,
310 key & KEY_EV_PRESSED);
311 break;
312 }
313 }
314}
315
316static void adp5589_report_events(struct adp5589_kpad *kpad, int ev_cnt)
317{
318 int i;
319
320 for (i = 0; i < ev_cnt; i++) {
321 int key = adp5589_read(kpad->client, ADP5589_FIFO_1 + i);
322 int key_val = key & KEY_EV_MASK;
323
324 if (key_val >= ADP5589_GPI_PIN_BASE &&
325 key_val <= ADP5589_GPI_PIN_END) {
326 adp5589_report_switches(kpad, key, key_val);
327 } else {
328 input_report_key(kpad->input,
329 kpad->keycode[key_val - 1],
330 key & KEY_EV_PRESSED);
331 }
332 }
333}
334
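
Each FIFO byte read above encodes one event: bit 7 is the press/release flag, bits 6:0 the event number (a 1-based key index or a GPI pin number). A standalone decoding sketch:

    #include <stdio.h>

    #define EV_PRESSED (1 << 7)
    #define EV_MASK    0x7F

    /* Decode one ADP5589 FIFO byte (sketch only). */
    static void decode_fifo_byte(unsigned char ev)
    {
            printf("event %u %s\n", ev & EV_MASK,
                   (ev & EV_PRESSED) ? "pressed" : "released");
    }

    int main(void)
    {
            decode_fifo_byte(0x85); /* event 5, pressed */
            decode_fifo_byte(0x05); /* event 5, released */
            return 0;
    }
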
335static irqreturn_t adp5589_irq(int irq, void *handle)
336{
337 struct adp5589_kpad *kpad = handle;
338 struct i2c_client *client = kpad->client;
339 int status, ev_cnt;
340
341 status = adp5589_read(client, ADP5589_INT_STATUS);
342
343 if (status & OVRFLOW_INT) /* Unlikely and should never happen */
344 dev_err(&client->dev, "Event Overflow Error\n");
345
346 if (status & EVENT_INT) {
347 ev_cnt = adp5589_read(client, ADP5589_STATUS) & KEC;
348 if (ev_cnt) {
349 adp5589_report_events(kpad, ev_cnt);
350 input_sync(kpad->input);
351 }
352 }
353
354 adp5589_write(client, ADP5589_INT_STATUS, status); /* Status is W1C */
355
356 return IRQ_HANDLED;
357}
358
359static int __devinit adp5589_get_evcode(struct adp5589_kpad *kpad, unsigned short key)
360{
361 int i;
362
363 for (i = 0; i < ADP5589_KEYMAPSIZE; i++)
364 if (key == kpad->keycode[i])
365 return (i + 1) | KEY_EV_PRESSED;
366
367 dev_err(&kpad->client->dev, "RESET/UNLOCK key not in keycode map\n");
368
369 return -EINVAL;
370}
371
372static int __devinit adp5589_setup(struct adp5589_kpad *kpad)
373{
374 struct i2c_client *client = kpad->client;
375 const struct adp5589_kpad_platform_data *pdata =
376 client->dev.platform_data;
377 int i, ret;
378 unsigned char evt_mode1 = 0, evt_mode2 = 0, evt_mode3 = 0;
379 unsigned char pull_mask = 0;
380
381 ret = adp5589_write(client, ADP5589_PIN_CONFIG_A,
382 pdata->keypad_en_mask & 0xFF);
383 ret |= adp5589_write(client, ADP5589_PIN_CONFIG_B,
384 (pdata->keypad_en_mask >> 8) & 0xFF);
385 ret |= adp5589_write(client, ADP5589_PIN_CONFIG_C,
386 (pdata->keypad_en_mask >> 16) & 0xFF);
387
388 if (pdata->en_keylock) {
389 ret |= adp5589_write(client, ADP5589_UNLOCK1,
390 pdata->unlock_key1);
391 ret |= adp5589_write(client, ADP5589_UNLOCK2,
392 pdata->unlock_key2);
393 ret |= adp5589_write(client, ADP5589_UNLOCK_TIMERS,
394 pdata->unlock_timer & LTIME_MASK);
395 ret |= adp5589_write(client, ADP5589_LOCK_CFG, LOCK_EN);
396 }
397
398 for (i = 0; i < KEYP_MAX_EVENT; i++)
399 ret |= adp5589_read(client, ADP5589_FIFO_1 + i);
400
401 for (i = 0; i < pdata->gpimapsize; i++) {
402 unsigned short pin = pdata->gpimap[i].pin;
403
404 if (pin <= ADP5589_GPI_PIN_ROW_END) {
405 evt_mode1 |= (1 << (pin - ADP5589_GPI_PIN_ROW_BASE));
406 } else {
407 evt_mode2 |=
408 ((1 << (pin - ADP5589_GPI_PIN_COL_BASE)) & 0xFF);
409 evt_mode3 |=
410 ((1 << (pin - ADP5589_GPI_PIN_COL_BASE)) >> 8);
411 }
412 }
413
414 if (pdata->gpimapsize) {
415 ret |= adp5589_write(client, ADP5589_GPI_EVENT_EN_A, evt_mode1);
416 ret |= adp5589_write(client, ADP5589_GPI_EVENT_EN_B, evt_mode2);
417 ret |= adp5589_write(client, ADP5589_GPI_EVENT_EN_C, evt_mode3);
418 }
419
420 if (pdata->pull_dis_mask & pdata->pullup_en_100k &
421 pdata->pullup_en_300k & pdata->pulldown_en_300k)
422 dev_warn(&client->dev, "Conflicting pull resistor config\n");
423
424 for (i = 0; i < MAXGPIO; i++) {
425 unsigned val = 0;
426
427 if (pdata->pullup_en_300k & (1 << i))
428 val = 0;
429 else if (pdata->pulldown_en_300k & (1 << i))
430 val = 1;
431 else if (pdata->pullup_en_100k & (1 << i))
432 val = 2;
433 else if (pdata->pull_dis_mask & (1 << i))
434 val = 3;
435
436 pull_mask |= val << (2 * (i & 0x3));
437
438 if ((i & 0x3) == 0x3 || i == MAXGPIO - 1) {
439 ret |= adp5589_write(client,
440 ADP5589_RPULL_CONFIG_A + (i >> 2),
441 pull_mask);
442 pull_mask = 0;
443 }
444 }
445
446 if (pdata->reset1_key_1 && pdata->reset1_key_2 && pdata->reset1_key_3) {
447 ret |= adp5589_write(client, ADP5589_RESET1_EVENT_A,
448 adp5589_get_evcode(kpad,
449 pdata->reset1_key_1));
450 ret |= adp5589_write(client, ADP5589_RESET1_EVENT_B,
451 adp5589_get_evcode(kpad,
452 pdata->reset1_key_2));
453 ret |= adp5589_write(client, ADP5589_RESET1_EVENT_C,
454 adp5589_get_evcode(kpad,
455 pdata->reset1_key_3));
456 kpad->extend_cfg |= R4_EXTEND_CFG;
457 }
458
459 if (pdata->reset2_key_1 && pdata->reset2_key_2) {
460 ret |= adp5589_write(client, ADP5589_RESET2_EVENT_A,
461 adp5589_get_evcode(kpad,
462 pdata->reset2_key_1));
463 ret |= adp5589_write(client, ADP5589_RESET2_EVENT_B,
464 adp5589_get_evcode(kpad,
465 pdata->reset2_key_2));
466 kpad->extend_cfg |= C4_EXTEND_CFG;
467 }
468
469 if (kpad->extend_cfg) {
470 ret |= adp5589_write(client, ADP5589_RESET_CFG,
471 pdata->reset_cfg);
472 ret |= adp5589_write(client, ADP5589_PIN_CONFIG_D,
473 kpad->extend_cfg);
474 }
475
476 for (i = 0; i <= ADP_BANK(MAXGPIO); i++)
477 ret |= adp5589_write(client, ADP5589_DEBOUNCE_DIS_A + i,
478 pdata->debounce_dis_mask >> (i * 8));
479
480 ret |= adp5589_write(client, ADP5589_POLL_PTIME_CFG,
481 pdata->scan_cycle_time & PTIME_MASK);
482 ret |= adp5589_write(client, ADP5589_INT_STATUS, LOGIC2_INT |
483 LOGIC1_INT | OVRFLOW_INT | LOCK_INT |
484 GPI_INT | EVENT_INT); /* Status is W1C */
485
486 ret |= adp5589_write(client, ADP5589_GENERAL_CFG,
487 INT_CFG | OSC_EN | CORE_CLK(3));
488 ret |= adp5589_write(client, ADP5589_INT_EN,
489 OVRFLOW_IEN | GPI_IEN | EVENT_IEN);
490
491 if (ret < 0) {
492 dev_err(&client->dev, "Write Error\n");
493 return ret;
494 }
495
496 return 0;
497}
498
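
The pull-resistor loop in adp5589_setup above packs one 2-bit code per pin, four pins per RPULL_CONFIG register. The same packing as a standalone sketch:

    #include <stdio.h>

    /* Pack four 2-bit pull codes into one RPULL_CONFIG byte, as the
     * setup loop does incrementally (0 = 300k pull-up, 1 = 300k
     * pull-down, 2 = 100k pull-up, 3 = disabled). */
    static unsigned char pack_rpull(const unsigned char code[4])
    {
            unsigned char mask = 0;
            int i;

            for (i = 0; i < 4; i++)
                    mask |= (code[i] & 0x3) << (2 * i);
            return mask;
    }

    int main(void)
    {
            unsigned char codes[4] = { 0, 1, 2, 3 };

            printf("%02x\n", pack_rpull(codes)); /* e4 */
            return 0;
    }
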
499static void __devinit adp5589_report_switch_state(struct adp5589_kpad *kpad)
500{
501 int gpi_stat1 = adp5589_read(kpad->client, ADP5589_GPI_STATUS_A);
502 int gpi_stat2 = adp5589_read(kpad->client, ADP5589_GPI_STATUS_B);
503 int gpi_stat3 = adp5589_read(kpad->client, ADP5589_GPI_STATUS_C);
504 int gpi_stat_tmp, pin_loc;
505 int i;
506
507 for (i = 0; i < kpad->gpimapsize; i++) {
508 unsigned short pin = kpad->gpimap[i].pin;
509
510 if (pin <= ADP5589_GPI_PIN_ROW_END) {
511 gpi_stat_tmp = gpi_stat1;
512 pin_loc = pin - ADP5589_GPI_PIN_ROW_BASE;
513 } else if ((pin - ADP5589_GPI_PIN_COL_BASE) < 8) {
514 gpi_stat_tmp = gpi_stat2;
515 pin_loc = pin - ADP5589_GPI_PIN_COL_BASE;
516 } else {
517 gpi_stat_tmp = gpi_stat3;
518 pin_loc = pin - ADP5589_GPI_PIN_COL_BASE - 8;
519 }
520
521 if (gpi_stat_tmp < 0) {
522 dev_err(&kpad->client->dev,
523 "Can't read GPIO_DAT_STAT switch"
524 " %d default to OFF\n", pin);
525 gpi_stat_tmp = 0;
526 }
527
528 input_report_switch(kpad->input,
529 kpad->gpimap[i].sw_evt,
530 !(gpi_stat_tmp & (1 << pin_loc)));
531 }
532
533 input_sync(kpad->input);
534}
535
536static int __devinit adp5589_probe(struct i2c_client *client,
537 const struct i2c_device_id *id)
538{
539 struct adp5589_kpad *kpad;
540 const struct adp5589_kpad_platform_data *pdata;
541 struct input_dev *input;
542 unsigned int revid;
543 int ret, i;
544 int error;
545
546 if (!i2c_check_functionality(client->adapter,
547 I2C_FUNC_SMBUS_BYTE_DATA)) {
548 dev_err(&client->dev, "SMBUS Byte Data not Supported\n");
549 return -EIO;
550 }
551
552 pdata = client->dev.platform_data;
553 if (!pdata) {
554 dev_err(&client->dev, "no platform data?\n");
555 return -EINVAL;
556 }
557
558 if (!((pdata->keypad_en_mask & 0xFF) &&
559 (pdata->keypad_en_mask >> 8)) || !pdata->keymap) {
560 dev_err(&client->dev, "no rows, cols or keymap from pdata\n");
561 return -EINVAL;
562 }
563
564 if (pdata->keymapsize != ADP5589_KEYMAPSIZE) {
565 dev_err(&client->dev, "invalid keymapsize\n");
566 return -EINVAL;
567 }
568
569 if (!pdata->gpimap && pdata->gpimapsize) {
570 dev_err(&client->dev, "invalid gpimap from pdata\n");
571 return -EINVAL;
572 }
573
574 if (pdata->gpimapsize > ADP5589_GPIMAPSIZE_MAX) {
575 dev_err(&client->dev, "invalid gpimapsize\n");
576 return -EINVAL;
577 }
578
579 for (i = 0; i < pdata->gpimapsize; i++) {
580 unsigned short pin = pdata->gpimap[i].pin;
581
582 if (pin < ADP5589_GPI_PIN_BASE || pin > ADP5589_GPI_PIN_END) {
583 dev_err(&client->dev, "invalid gpi pin data\n");
584 return -EINVAL;
585 }
586
587 if ((1 << (pin - ADP5589_GPI_PIN_ROW_BASE)) &
588 pdata->keypad_en_mask) {
589 dev_err(&client->dev, "invalid gpi row/col data\n");
590 return -EINVAL;
591 }
592 }
593
594 if (!client->irq) {
595 dev_err(&client->dev, "no IRQ?\n");
596 return -EINVAL;
597 }
598
599 kpad = kzalloc(sizeof(*kpad), GFP_KERNEL);
600 input = input_allocate_device();
601 if (!kpad || !input) {
602 error = -ENOMEM;
603 goto err_free_mem;
604 }
605
606 kpad->client = client;
607 kpad->input = input;
608
609 ret = adp5589_read(client, ADP5589_ID);
610 if (ret < 0) {
611 error = ret;
612 goto err_free_mem;
613 }
614
615 revid = (u8) ret & ADP5589_DEVICE_ID_MASK;
616
617 input->name = client->name;
618 input->phys = "adp5589-keys/input0";
619 input->dev.parent = &client->dev;
620
621 input_set_drvdata(input, kpad);
622
623 input->id.bustype = BUS_I2C;
624 input->id.vendor = 0x0001;
625 input->id.product = 0x0001;
626 input->id.version = revid;
627
628 input->keycodesize = sizeof(kpad->keycode[0]);
629 input->keycodemax = pdata->keymapsize;
630 input->keycode = kpad->keycode;
631
632 memcpy(kpad->keycode, pdata->keymap,
633 pdata->keymapsize * input->keycodesize);
634
635 kpad->gpimap = pdata->gpimap;
636 kpad->gpimapsize = pdata->gpimapsize;
637
638 /* setup input device */
639 __set_bit(EV_KEY, input->evbit);
640
641 if (pdata->repeat)
642 __set_bit(EV_REP, input->evbit);
643
644 for (i = 0; i < input->keycodemax; i++)
645 __set_bit(kpad->keycode[i] & KEY_MAX, input->keybit);
646 __clear_bit(KEY_RESERVED, input->keybit);
647
648 if (kpad->gpimapsize)
649 __set_bit(EV_SW, input->evbit);
650 for (i = 0; i < kpad->gpimapsize; i++)
651 __set_bit(kpad->gpimap[i].sw_evt, input->swbit);
652
653 error = input_register_device(input);
654 if (error) {
655 dev_err(&client->dev, "unable to register input device\n");
656 goto err_free_mem;
657 }
658
659 error = request_threaded_irq(client->irq, NULL, adp5589_irq,
660 IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
661 client->dev.driver->name, kpad);
662 if (error) {
663 dev_err(&client->dev, "irq %d busy?\n", client->irq);
664 goto err_unreg_dev;
665 }
666
667 error = adp5589_setup(kpad);
668 if (error)
669 goto err_free_irq;
670
671 if (kpad->gpimapsize)
672 adp5589_report_switch_state(kpad);
673
674 error = adp5589_gpio_add(kpad);
675 if (error)
676 goto err_free_irq;
677
678 device_init_wakeup(&client->dev, 1);
679 i2c_set_clientdata(client, kpad);
680
681 dev_info(&client->dev, "Rev.%d keypad, irq %d\n", revid, client->irq);
682 return 0;
683
684err_free_irq:
685 free_irq(client->irq, kpad);
686err_unreg_dev:
687 input_unregister_device(input);
688 input = NULL;
689err_free_mem:
690 input_free_device(input);
691 kfree(kpad);
692
693 return error;
694}
695
696static int __devexit adp5589_remove(struct i2c_client *client)
697{
698 struct adp5589_kpad *kpad = i2c_get_clientdata(client);
699
700 adp5589_write(client, ADP5589_GENERAL_CFG, 0);
701 free_irq(client->irq, kpad);
702 input_unregister_device(kpad->input);
703 adp5589_gpio_remove(kpad);
704 kfree(kpad);
705
706 return 0;
707}
708
709#ifdef CONFIG_PM_SLEEP
710static int adp5589_suspend(struct device *dev)
711{
712 struct adp5589_kpad *kpad = dev_get_drvdata(dev);
713 struct i2c_client *client = kpad->client;
714
715 disable_irq(client->irq);
716
717 if (device_may_wakeup(&client->dev))
718 enable_irq_wake(client->irq);
719
720 return 0;
721}
722
723static int adp5589_resume(struct device *dev)
724{
725 struct adp5589_kpad *kpad = dev_get_drvdata(dev);
726 struct i2c_client *client = kpad->client;
727
728 if (device_may_wakeup(&client->dev))
729 disable_irq_wake(client->irq);
730
731 enable_irq(client->irq);
732
733 return 0;
734}
735#endif
736
737static SIMPLE_DEV_PM_OPS(adp5589_dev_pm_ops, adp5589_suspend, adp5589_resume);
738
739static const struct i2c_device_id adp5589_id[] = {
740 {"adp5589-keys", 0},
741 {}
742};
743
744MODULE_DEVICE_TABLE(i2c, adp5589_id);
745
746static struct i2c_driver adp5589_driver = {
747 .driver = {
748 .name = KBUILD_MODNAME,
749 .owner = THIS_MODULE,
750 .pm = &adp5589_dev_pm_ops,
751 },
752 .probe = adp5589_probe,
753 .remove = __devexit_p(adp5589_remove),
754 .id_table = adp5589_id,
755};
756
757static int __init adp5589_init(void)
758{
759 return i2c_add_driver(&adp5589_driver);
760}
761module_init(adp5589_init);
762
763static void __exit adp5589_exit(void)
764{
765 i2c_del_driver(&adp5589_driver);
766}
767module_exit(adp5589_exit);
768
769MODULE_LICENSE("GPL");
770MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
771MODULE_DESCRIPTION("ADP5589 Keypad driver");
diff --git a/drivers/input/keyboard/gpio_keys.c b/drivers/input/keyboard/gpio_keys.c
index eb3006361ee4..6e6145b9a4c1 100644
--- a/drivers/input/keyboard/gpio_keys.c
+++ b/drivers/input/keyboard/gpio_keys.c
@@ -324,7 +324,12 @@ static void gpio_keys_report_event(struct gpio_button_data *bdata)
324 unsigned int type = button->type ?: EV_KEY; 324 unsigned int type = button->type ?: EV_KEY;
325 int state = (gpio_get_value_cansleep(button->gpio) ? 1 : 0) ^ button->active_low; 325 int state = (gpio_get_value_cansleep(button->gpio) ? 1 : 0) ^ button->active_low;
326 326
327 input_event(input, type, button->code, !!state); 327 if (type == EV_ABS) {
328 if (state)
329 input_event(input, type, button->code, button->value);
330 } else {
331 input_event(input, type, button->code, !!state);
332 }
328 input_sync(input); 333 input_sync(input);
329} 334}
330 335
@@ -363,7 +368,7 @@ static int __devinit gpio_keys_setup_key(struct platform_device *pdev,
363 struct gpio_button_data *bdata, 368 struct gpio_button_data *bdata,
364 struct gpio_keys_button *button) 369 struct gpio_keys_button *button)
365{ 370{
366 char *desc = button->desc ? button->desc : "gpio_keys"; 371 const char *desc = button->desc ? button->desc : "gpio_keys";
367 struct device *dev = &pdev->dev; 372 struct device *dev = &pdev->dev;
368 unsigned long irqflags; 373 unsigned long irqflags;
369 int irq, error; 374 int irq, error;
@@ -468,7 +473,7 @@ static int __devinit gpio_keys_probe(struct platform_device *pdev)
468 platform_set_drvdata(pdev, ddata); 473 platform_set_drvdata(pdev, ddata);
469 input_set_drvdata(input, ddata); 474 input_set_drvdata(input, ddata);
470 475
471 input->name = pdev->name; 476 input->name = pdata->name ? : pdev->name;
472 input->phys = "gpio-keys/input0"; 477 input->phys = "gpio-keys/input0";
473 input->dev.parent = &pdev->dev; 478 input->dev.parent = &pdev->dev;
474 input->open = gpio_keys_open; 479 input->open = gpio_keys_open;
diff --git a/drivers/input/keyboard/mpr121_touchkey.c b/drivers/input/keyboard/mpr121_touchkey.c
new file mode 100644
index 000000000000..0a9e81194888
--- /dev/null
+++ b/drivers/input/keyboard/mpr121_touchkey.c
@@ -0,0 +1,339 @@
1/*
 2 * Touchkey driver for Freescale MPR121 Controller
3 *
4 * Copyright (C) 2011 Freescale Semiconductor, Inc.
5 * Author: Zhang Jiejing <jiejing.zhang@freescale.com>
6 *
7 * Based on mcs_touchkey.c
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 *
13 */
14
15#include <linux/module.h>
16#include <linux/init.h>
17#include <linux/input.h>
18#include <linux/i2c.h>
19#include <linux/slab.h>
20#include <linux/delay.h>
21#include <linux/bitops.h>
22#include <linux/interrupt.h>
23#include <linux/i2c/mpr121_touchkey.h>
24
25/* Register definitions */
26#define ELE_TOUCH_STATUS_0_ADDR 0x0
 27#define ELE_TOUCH_STATUS_1_ADDR	0x1
28#define MHD_RISING_ADDR 0x2b
29#define NHD_RISING_ADDR 0x2c
30#define NCL_RISING_ADDR 0x2d
31#define FDL_RISING_ADDR 0x2e
32#define MHD_FALLING_ADDR 0x2f
33#define NHD_FALLING_ADDR 0x30
34#define NCL_FALLING_ADDR 0x31
35#define FDL_FALLING_ADDR 0x32
36#define ELE0_TOUCH_THRESHOLD_ADDR 0x41
37#define ELE0_RELEASE_THRESHOLD_ADDR 0x42
38#define AFE_CONF_ADDR 0x5c
39#define FILTER_CONF_ADDR 0x5d
40
41/*
42 * ELECTRODE_CONF_ADDR: This register configures the number of
43 * enabled capacitance sensing inputs and its run/suspend mode.
44 */
45#define ELECTRODE_CONF_ADDR 0x5e
46#define AUTO_CONFIG_CTRL_ADDR 0x7b
47#define AUTO_CONFIG_USL_ADDR 0x7d
48#define AUTO_CONFIG_LSL_ADDR 0x7e
49#define AUTO_CONFIG_TL_ADDR 0x7f
50
51/* Threshold of touch/release trigger */
52#define TOUCH_THRESHOLD 0x0f
53#define RELEASE_THRESHOLD 0x0a
54/* Masks for touch and release triggers */
55#define TOUCH_STATUS_MASK 0xfff
56/* MPR121 has 12 keys */
57#define MPR121_MAX_KEY_COUNT 12
58
59struct mpr121_touchkey {
60 struct i2c_client *client;
61 struct input_dev *input_dev;
62 unsigned int key_val;
63 unsigned int statusbits;
64 unsigned int keycount;
65 u16 keycodes[MPR121_MAX_KEY_COUNT];
66};
67
68struct mpr121_init_register {
69 int addr;
70 u8 val;
71};
72
73static const struct mpr121_init_register init_reg_table[] __devinitconst = {
74 { MHD_RISING_ADDR, 0x1 },
75 { NHD_RISING_ADDR, 0x1 },
76 { MHD_FALLING_ADDR, 0x1 },
77 { NHD_FALLING_ADDR, 0x1 },
78 { NCL_FALLING_ADDR, 0xff },
79 { FDL_FALLING_ADDR, 0x02 },
80 { FILTER_CONF_ADDR, 0x04 },
81 { AFE_CONF_ADDR, 0x0b },
82 { AUTO_CONFIG_CTRL_ADDR, 0x0b },
83};
84
85static irqreturn_t mpr_touchkey_interrupt(int irq, void *dev_id)
86{
87 struct mpr121_touchkey *mpr121 = dev_id;
88 struct i2c_client *client = mpr121->client;
89 struct input_dev *input = mpr121->input_dev;
90 unsigned int key_num, key_val, pressed;
91 int reg;
92
93 reg = i2c_smbus_read_byte_data(client, ELE_TOUCH_STATUS_1_ADDR);
94 if (reg < 0) {
95 dev_err(&client->dev, "i2c read error [%d]\n", reg);
96 goto out;
97 }
98
99 reg <<= 8;
100 reg |= i2c_smbus_read_byte_data(client, ELE_TOUCH_STATUS_0_ADDR);
101 if (reg < 0) {
102 dev_err(&client->dev, "i2c read error [%d]\n", reg);
103 goto out;
104 }
105
106 reg &= TOUCH_STATUS_MASK;
107 /* use old press bit to figure out which bit changed */
108 key_num = ffs(reg ^ mpr121->statusbits) - 1;
109 pressed = reg & (1 << key_num);
110 mpr121->statusbits = reg;
111
112 key_val = mpr121->keycodes[key_num];
113
114 input_event(input, EV_MSC, MSC_SCAN, key_num);
115 input_report_key(input, key_val, pressed);
116 input_sync(input);
117
118 dev_dbg(&client->dev, "key %d %d %s\n", key_num, key_val,
119 pressed ? "pressed" : "released");
120
121out:
122 return IRQ_HANDLED;
123}
124
125static int __devinit mpr121_phys_init(const struct mpr121_platform_data *pdata,
126 struct mpr121_touchkey *mpr121,
127 struct i2c_client *client)
128{
129 const struct mpr121_init_register *reg;
130 unsigned char usl, lsl, tl;
131 int i, t, vdd, ret;
132
133 /* Set up touch/release threshold for ele0-ele11 */
134 for (i = 0; i <= MPR121_MAX_KEY_COUNT; i++) {
135 t = ELE0_TOUCH_THRESHOLD_ADDR + (i * 2);
136 ret = i2c_smbus_write_byte_data(client, t, TOUCH_THRESHOLD);
137 if (ret < 0)
138 goto err_i2c_write;
139 ret = i2c_smbus_write_byte_data(client, t + 1,
140 RELEASE_THRESHOLD);
141 if (ret < 0)
142 goto err_i2c_write;
143 }
144
145 /* Set up init register */
146 for (i = 0; i < ARRAY_SIZE(init_reg_table); i++) {
147 reg = &init_reg_table[i];
148 ret = i2c_smbus_write_byte_data(client, reg->addr, reg->val);
149 if (ret < 0)
150 goto err_i2c_write;
151 }
152
153
154 /*
155 * Capacitance on sensing input varies and needs to be compensated.
156 * The internal MPR121-auto-configuration can do this if it's
157 * registers are set properly (based on pdata->vdd_uv).
158 */
159 vdd = pdata->vdd_uv / 1000;
160 usl = ((vdd - 700) * 256) / vdd;
161 lsl = (usl * 65) / 100;
162 tl = (usl * 90) / 100;
163 ret = i2c_smbus_write_byte_data(client, AUTO_CONFIG_USL_ADDR, usl);
164 ret |= i2c_smbus_write_byte_data(client, AUTO_CONFIG_LSL_ADDR, lsl);
165 ret |= i2c_smbus_write_byte_data(client, AUTO_CONFIG_TL_ADDR, tl);
166 ret |= i2c_smbus_write_byte_data(client, ELECTRODE_CONF_ADDR,
167 mpr121->keycount);
168 if (ret != 0)
169 goto err_i2c_write;
170
171 dev_dbg(&client->dev, "set up with %x keys.\n", mpr121->keycount);
172
173 return 0;
174
175err_i2c_write:
176 dev_err(&client->dev, "i2c write error: %d\n", ret);
177 return ret;
178}
179
180static int __devinit mpr_touchkey_probe(struct i2c_client *client,
181 const struct i2c_device_id *id)
182{
183 const struct mpr121_platform_data *pdata = client->dev.platform_data;
184 struct mpr121_touchkey *mpr121;
185 struct input_dev *input_dev;
186 int error;
187 int i;
188
189 if (!pdata) {
190 dev_err(&client->dev, "no platform data defined\n");
191 return -EINVAL;
192 }
193
194 if (!pdata->keymap || !pdata->keymap_size) {
195 dev_err(&client->dev, "missing keymap data\n");
196 return -EINVAL;
197 }
198
199 if (pdata->keymap_size > MPR121_MAX_KEY_COUNT) {
200 dev_err(&client->dev, "too many keys defined\n");
201 return -EINVAL;
202 }
203
204 if (!client->irq) {
205 dev_err(&client->dev, "irq number should not be zero\n");
206 return -EINVAL;
207 }
208
209 mpr121 = kzalloc(sizeof(struct mpr121_touchkey), GFP_KERNEL);
210 input_dev = input_allocate_device();
211 if (!mpr121 || !input_dev) {
212 dev_err(&client->dev, "Failed to allocate memory\n");
213 error = -ENOMEM;
214 goto err_free_mem;
215 }
216
217 mpr121->client = client;
218 mpr121->input_dev = input_dev;
219 mpr121->keycount = pdata->keymap_size;
220
221 input_dev->name = "Freescale MPR121 Touchkey";
222 input_dev->id.bustype = BUS_I2C;
223 input_dev->dev.parent = &client->dev;
224 input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP);
225
226 input_dev->keycode = mpr121->keycodes;
227 input_dev->keycodesize = sizeof(mpr121->keycodes[0]);
228 input_dev->keycodemax = mpr121->keycount;
229
230 for (i = 0; i < pdata->keymap_size; i++) {
231 input_set_capability(input_dev, EV_KEY, pdata->keymap[i]);
232 mpr121->keycodes[i] = pdata->keymap[i];
233 }
234
235 error = mpr121_phys_init(pdata, mpr121, client);
236 if (error) {
237 dev_err(&client->dev, "Failed to init register\n");
238 goto err_free_mem;
239 }
240
241 error = request_threaded_irq(client->irq, NULL,
242 mpr_touchkey_interrupt,
243 IRQF_TRIGGER_FALLING,
244 client->dev.driver->name, mpr121);
245 if (error) {
246 dev_err(&client->dev, "Failed to register interrupt\n");
247 goto err_free_mem;
248 }
249
250 error = input_register_device(input_dev);
251 if (error)
252 goto err_free_irq;
253
254 i2c_set_clientdata(client, mpr121);
255 device_init_wakeup(&client->dev, pdata->wakeup);
256
257 return 0;
258
259err_free_irq:
260 free_irq(client->irq, mpr121);
261err_free_mem:
262 input_free_device(input_dev);
263 kfree(mpr121);
264 return error;
265}
266
267static int __devexit mpr_touchkey_remove(struct i2c_client *client)
268{
269 struct mpr121_touchkey *mpr121 = i2c_get_clientdata(client);
270
271 free_irq(client->irq, mpr121);
272 input_unregister_device(mpr121->input_dev);
273 kfree(mpr121);
274
275 return 0;
276}
277
278#ifdef CONFIG_PM_SLEEP
279static int mpr_suspend(struct device *dev)
280{
281 struct i2c_client *client = to_i2c_client(dev);
282
283 if (device_may_wakeup(&client->dev))
284 enable_irq_wake(client->irq);
285
286 i2c_smbus_write_byte_data(client, ELECTRODE_CONF_ADDR, 0x00);
287
288 return 0;
289}
290
291static int mpr_resume(struct device *dev)
292{
293 struct i2c_client *client = to_i2c_client(dev);
294 struct mpr121_touchkey *mpr121 = i2c_get_clientdata(client);
295
296 if (device_may_wakeup(&client->dev))
297 disable_irq_wake(client->irq);
298
299 i2c_smbus_write_byte_data(client, ELECTRODE_CONF_ADDR,
300 mpr121->keycount);
301
302 return 0;
303}
304#endif
305
306static SIMPLE_DEV_PM_OPS(mpr121_touchkey_pm_ops, mpr_suspend, mpr_resume);
307
308static const struct i2c_device_id mpr121_id[] = {
309 { "mpr121_touchkey", 0 },
310 { }
311};
312MODULE_DEVICE_TABLE(i2c, mpr121_id);
313
314static struct i2c_driver mpr_touchkey_driver = {
315 .driver = {
316 .name = "mpr121",
317 .owner = THIS_MODULE,
318 .pm = &mpr121_touchkey_pm_ops,
319 },
320 .id_table = mpr121_id,
321 .probe = mpr_touchkey_probe,
322 .remove = __devexit_p(mpr_touchkey_remove),
323};
324
325static int __init mpr_touchkey_init(void)
326{
327 return i2c_add_driver(&mpr_touchkey_driver);
328}
329module_init(mpr_touchkey_init);
330
331static void __exit mpr_touchkey_exit(void)
332{
333 i2c_del_driver(&mpr_touchkey_driver);
334}
335module_exit(mpr_touchkey_exit);
336
337MODULE_LICENSE("GPL");
338MODULE_AUTHOR("Zhang Jiejing <jiejing.zhang@freescale.com>");
339MODULE_DESCRIPTION("Touch Key driver for Freescale MPR121 Chip");
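
Note: the auto-configuration levels written in mpr121_phys_init() follow the MPR121 datasheet rule USL = 256 * (Vdd - 0.7 V) / Vdd, LSL = 0.65 * USL, TL = 0.9 * USL. A standalone sketch of the same arithmetic; the 3.3 V supply (vdd_uv = 3300000) is an assumed example value, normally supplied by board code through pdata->vdd_uv:

    /* Minimal demo of the MPR121 auto-config level math. */
    #include <stdio.h>

    int main(void)
    {
            int vdd = 3300000 / 1000;                      /* microvolts to millivolts */
            unsigned char usl = ((vdd - 700) * 256) / vdd; /* upper side limit */
            unsigned char lsl = (usl * 65) / 100;          /* lower side limit */
            unsigned char tl = (usl * 90) / 100;           /* target level */

            printf("USL=%u LSL=%u TL=%u\n", usl, lsl, tl); /* USL=201 LSL=130 TL=180 */
            return 0;
    }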
diff --git a/drivers/input/keyboard/omap-keypad.c b/drivers/input/keyboard/omap-keypad.c
index 0e2a19cb43d8..f23a743817db 100644
--- a/drivers/input/keyboard/omap-keypad.c
+++ b/drivers/input/keyboard/omap-keypad.c
@@ -413,7 +413,7 @@ static int __devinit omap_kp_probe(struct platform_device *pdev)
 	return 0;
 err5:
 	for (i = irq_idx - 1; i >= 0; i--)
-		free_irq(row_gpios[i], NULL);
+		free_irq(row_gpios[i], omap_kp);
 err4:
 	input_unregister_device(omap_kp->input);
 	input_dev = NULL;
@@ -444,11 +444,11 @@ static int __devexit omap_kp_remove(struct platform_device *pdev)
 			gpio_free(col_gpios[i]);
 		for (i = 0; i < omap_kp->rows; i++) {
 			gpio_free(row_gpios[i]);
-			free_irq(gpio_to_irq(row_gpios[i]), NULL);
+			free_irq(gpio_to_irq(row_gpios[i]), omap_kp);
 		}
 	} else {
 		omap_writew(1, OMAP1_MPUIO_BASE + OMAP_MPUIO_KBD_MASKIT);
-		free_irq(omap_kp->irq, NULL);
+		free_irq(omap_kp->irq, omap_kp);
 	}
 
 	del_timer_sync(&omap_kp->timer);
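
Note: this fix matters because free_irq() uses its second argument to identify which handler to remove; it must be the same dev_id cookie that was passed to request_irq(), and NULL only happens to work on non-shared lines. A minimal sketch of the pairing rule; struct example_dev and example_isr are hypothetical names, not part of the driver:

    #include <linux/interrupt.h>

    static int example_setup(struct example_dev *dev, unsigned int irq)
    {
            /* dev is the per-device cookie for this handler */
            return request_irq(irq, example_isr, 0, "example", dev);
    }

    static void example_teardown(struct example_dev *dev, unsigned int irq)
    {
            free_irq(irq, dev);     /* must be the identical cookie, never NULL */
    }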
diff --git a/drivers/input/keyboard/qt1070.c b/drivers/input/keyboard/qt1070.c
index fba8404c7297..ca7b89196ab7 100644
--- a/drivers/input/keyboard/qt1070.c
+++ b/drivers/input/keyboard/qt1070.c
@@ -248,6 +248,7 @@ static const struct i2c_device_id qt1070_id[] = {
 	{ "qt1070", 0 },
 	{ },
 };
+MODULE_DEVICE_TABLE(i2c, qt1070_id);
 
 static struct i2c_driver qt1070_driver = {
 	.driver	= {
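
Note: MODULE_DEVICE_TABLE() exports the id table into the module's alias section, so udev/modprobe can autoload the driver when an i2c device named "qt1070" is instantiated. The general pattern, sketched with a hypothetical driver name:

    #include <linux/i2c.h>
    #include <linux/module.h>

    static const struct i2c_device_id example_id[] = {
            { "example", 0 },
            { }
    };
    MODULE_DEVICE_TABLE(i2c, example_id);   /* emits the "i2c:example" modalias */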
diff --git a/drivers/input/keyboard/sh_keysc.c b/drivers/input/keyboard/sh_keysc.c
index d7dafd9425b6..834cf98e7efb 100644
--- a/drivers/input/keyboard/sh_keysc.c
+++ b/drivers/input/keyboard/sh_keysc.c
@@ -20,7 +20,7 @@
 #include <linux/input.h>
 #include <linux/input/sh_keysc.h>
 #include <linux/bitmap.h>
-#include <linux/clk.h>
+#include <linux/pm_runtime.h>
 #include <linux/io.h>
 #include <linux/slab.h>
 
@@ -37,7 +37,6 @@ static const struct {
 
 struct sh_keysc_priv {
 	void __iomem *iomem_base;
-	struct clk *clk;
 	DECLARE_BITMAP(last_keys, SH_KEYSC_MAXKEYS);
 	struct input_dev *input;
 	struct sh_keysc_info pdata;
@@ -169,7 +168,6 @@ static int __devinit sh_keysc_probe(struct platform_device *pdev)
 	struct sh_keysc_info *pdata;
 	struct resource *res;
 	struct input_dev *input;
-	char clk_name[8];
 	int i;
 	int irq, error;
 
@@ -210,19 +208,11 @@ static int __devinit sh_keysc_probe(struct platform_device *pdev)
 		goto err1;
 	}
 
-	snprintf(clk_name, sizeof(clk_name), "keysc%d", pdev->id);
-	priv->clk = clk_get(&pdev->dev, clk_name);
-	if (IS_ERR(priv->clk)) {
-		dev_err(&pdev->dev, "cannot get clock \"%s\"\n", clk_name);
-		error = PTR_ERR(priv->clk);
-		goto err2;
-	}
-
 	priv->input = input_allocate_device();
 	if (!priv->input) {
 		dev_err(&pdev->dev, "failed to allocate input device\n");
 		error = -ENOMEM;
-		goto err3;
+		goto err2;
 	}
 
 	input = priv->input;
@@ -241,10 +231,11 @@ static int __devinit sh_keysc_probe(struct platform_device *pdev)
 	input->keycodesize = sizeof(pdata->keycodes[0]);
 	input->keycodemax = ARRAY_SIZE(pdata->keycodes);
 
-	error = request_irq(irq, sh_keysc_isr, 0, pdev->name, pdev);
+	error = request_threaded_irq(irq, NULL, sh_keysc_isr, IRQF_ONESHOT,
+				     dev_name(&pdev->dev), pdev);
 	if (error) {
 		dev_err(&pdev->dev, "failed to request IRQ\n");
-		goto err4;
+		goto err3;
 	}
 
 	for (i = 0; i < SH_KEYSC_MAXKEYS; i++)
@@ -254,10 +245,11 @@ static int __devinit sh_keysc_probe(struct platform_device *pdev)
 	error = input_register_device(input);
 	if (error) {
 		dev_err(&pdev->dev, "failed to register input device\n");
-		goto err5;
+		goto err4;
 	}
 
-	clk_enable(priv->clk);
+	pm_runtime_enable(&pdev->dev);
+	pm_runtime_get_sync(&pdev->dev);
 
 	sh_keysc_write(priv, KYCR1, (sh_keysc_mode[pdata->mode].kymd << 8) |
 		       pdata->scan_timing);
@@ -267,12 +259,10 @@ static int __devinit sh_keysc_probe(struct platform_device *pdev)
 
 	return 0;
 
- err5:
-	free_irq(irq, pdev);
  err4:
-	input_free_device(input);
+	free_irq(irq, pdev);
  err3:
-	clk_put(priv->clk);
+	input_free_device(input);
  err2:
 	iounmap(priv->iomem_base);
  err1:
@@ -292,8 +282,8 @@ static int __devexit sh_keysc_remove(struct platform_device *pdev)
 	free_irq(platform_get_irq(pdev, 0), pdev);
 	iounmap(priv->iomem_base);
 
-	clk_disable(priv->clk);
-	clk_put(priv->clk);
+	pm_runtime_put_sync(&pdev->dev);
+	pm_runtime_disable(&pdev->dev);
 
 	platform_set_drvdata(pdev, NULL);
 	kfree(priv);
@@ -301,6 +291,7 @@ static int __devexit sh_keysc_remove(struct platform_device *pdev)
 	return 0;
 }
 
+#ifdef CONFIG_PM_SLEEP
 static int sh_keysc_suspend(struct device *dev)
 {
 	struct platform_device *pdev = to_platform_device(dev);
@@ -311,14 +302,13 @@ static int sh_keysc_suspend(struct device *dev)
 	value = sh_keysc_read(priv, KYCR1);
 
 	if (device_may_wakeup(dev)) {
-		value |= 0x80;
+		sh_keysc_write(priv, KYCR1, value | 0x80);
 		enable_irq_wake(irq);
 	} else {
-		value &= ~0x80;
+		sh_keysc_write(priv, KYCR1, value & ~0x80);
+		pm_runtime_put_sync(dev);
 	}
 
-	sh_keysc_write(priv, KYCR1, value);
-
 	return 0;
 }
 
@@ -329,16 +319,17 @@ static int sh_keysc_resume(struct device *dev)
 
 	if (device_may_wakeup(dev))
 		disable_irq_wake(irq);
+	else
+		pm_runtime_get_sync(dev);
 
 	return 0;
 }
+#endif
 
-static const struct dev_pm_ops sh_keysc_dev_pm_ops = {
-	.suspend = sh_keysc_suspend,
-	.resume = sh_keysc_resume,
-};
+static SIMPLE_DEV_PM_OPS(sh_keysc_dev_pm_ops,
+			 sh_keysc_suspend, sh_keysc_resume);
 
-struct platform_driver sh_keysc_device_driver = {
+static struct platform_driver sh_keysc_device_driver = {
 	.probe		= sh_keysc_probe,
 	.remove		= __devexit_p(sh_keysc_remove),
 	.driver		= {
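
Note: the sh_keysc conversion drops open-coded clk_get()/clk_enable() handling in favour of runtime PM, letting the platform's PM domain gate the KEYSC clock. A minimal sketch of the probe/remove pattern adopted here; example_probe and example_remove are hypothetical names:

    #include <linux/platform_device.h>
    #include <linux/pm_runtime.h>

    static int example_probe(struct platform_device *pdev)
    {
            pm_runtime_enable(&pdev->dev);          /* allow runtime PM callbacks */
            pm_runtime_get_sync(&pdev->dev);        /* power up before touching hw */
            return 0;
    }

    static int example_remove(struct platform_device *pdev)
    {
            pm_runtime_put_sync(&pdev->dev);        /* drop our usage reference */
            pm_runtime_disable(&pdev->dev);
            return 0;
    }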
diff --git a/drivers/input/keyboard/tegra-kbc.c b/drivers/input/keyboard/tegra-kbc.c
index 99ce9032d08c..2b3b73ec6689 100644
--- a/drivers/input/keyboard/tegra-kbc.c
+++ b/drivers/input/keyboard/tegra-kbc.c
@@ -66,12 +66,11 @@ struct tegra_kbc {
 	void __iomem *mmio;
 	struct input_dev *idev;
 	unsigned int irq;
-	unsigned int wake_enable_rows;
-	unsigned int wake_enable_cols;
 	spinlock_t lock;
 	unsigned int repoll_dly;
 	unsigned long cp_dly_jiffies;
 	bool use_fn_map;
+	bool use_ghost_filter;
 	const struct tegra_kbc_platform_data *pdata;
 	unsigned short keycode[KBC_MAX_KEY * 2];
 	unsigned short current_keys[KBC_MAX_KPENT];
@@ -260,6 +259,8 @@ static void tegra_kbc_report_keys(struct tegra_kbc *kbc)
 	unsigned int num_down = 0;
 	unsigned long flags;
 	bool fn_keypress = false;
+	bool key_in_same_row = false;
+	bool key_in_same_col = false;
 
 	spin_lock_irqsave(&kbc->lock, flags);
 	for (i = 0; i < KBC_MAX_KPENT; i++) {
@@ -285,6 +286,34 @@ static void tegra_kbc_report_keys(struct tegra_kbc *kbc)
 	}
 
 	/*
+	 * Matrix keyboard designs are prone to keyboard ghosting.
+	 * Ghosting occurs if there are 3 keys such that -
+	 * any 2 of the 3 keys share a row, and any 2 of them share a column.
+	 * If so ignore the key presses for this iteration.
+	 */
+	if ((kbc->use_ghost_filter) && (num_down >= 3)) {
+		for (i = 0; i < num_down; i++) {
+			unsigned int j;
+			u8 curr_col = scancodes[i] & 0x07;
+			u8 curr_row = scancodes[i] >> KBC_ROW_SHIFT;
+
+			/*
+			 * Find 2 keys such that one key is in the same row
+			 * and the other is in the same column as the i-th key.
+			 */
+			for (j = i + 1; j < num_down; j++) {
+				u8 col = scancodes[j] & 0x07;
+				u8 row = scancodes[j] >> KBC_ROW_SHIFT;
+
+				if (col == curr_col)
+					key_in_same_col = true;
+				if (row == curr_row)
+					key_in_same_row = true;
+			}
+		}
+	}
+
+	/*
 	 * If the platform uses Fn keymaps, translate keys on a Fn keypress.
 	 * Function keycodes are KBC_MAX_KEY apart from the plain keycodes.
 	 */
@@ -297,6 +326,10 @@ static void tegra_kbc_report_keys(struct tegra_kbc *kbc)
 
 	spin_unlock_irqrestore(&kbc->lock, flags);
 
+	/* Ignore the key presses for this iteration? */
+	if (key_in_same_col && key_in_same_row)
+		return;
+
 	tegra_kbc_report_released_keys(kbc->idev,
 				       kbc->current_keys, kbc->num_pressed_keys,
 				       keycodes, num_down);
@@ -383,21 +416,11 @@ static void tegra_kbc_setup_wakekeys(struct tegra_kbc *kbc, bool filter)
 	int i;
 	unsigned int rst_val;
 
-	BUG_ON(pdata->wake_cnt > KBC_MAX_KEY);
-	rst_val = (filter && pdata->wake_cnt) ? ~0 : 0;
+	/* Either mask all keys or none. */
+	rst_val = (filter && !pdata->wakeup) ? ~0 : 0;
 
 	for (i = 0; i < KBC_MAX_ROW; i++)
 		writel(rst_val, kbc->mmio + KBC_ROW0_MASK_0 + i * 4);
-
-	if (filter) {
-		for (i = 0; i < pdata->wake_cnt; i++) {
-			u32 val, addr;
-			addr = pdata->wake_cfg[i].row * 4 + KBC_ROW0_MASK_0;
-			val = readl(kbc->mmio + addr);
-			val &= ~(1 << pdata->wake_cfg[i].col);
-			writel(val, kbc->mmio + addr);
-		}
-	}
 }
 
 static void tegra_kbc_config_pins(struct tegra_kbc *kbc)
@@ -559,7 +582,6 @@ static int __devinit tegra_kbc_probe(struct platform_device *pdev)
 	struct resource *res;
 	int irq;
 	int err;
-	int i;
 	int num_rows = 0;
 	unsigned int debounce_cnt;
 	unsigned int scan_time_rows;
@@ -616,13 +638,6 @@ static int __devinit tegra_kbc_probe(struct platform_device *pdev)
 		goto err_iounmap;
 	}
 
-	kbc->wake_enable_rows = 0;
-	kbc->wake_enable_cols = 0;
-	for (i = 0; i < pdata->wake_cnt; i++) {
-		kbc->wake_enable_rows |= (1 << pdata->wake_cfg[i].row);
-		kbc->wake_enable_cols |= (1 << pdata->wake_cfg[i].col);
-	}
-
 	/*
 	 * The time delay between two consecutive reads of the FIFO is
 	 * the sum of the repeat time and the time taken for scanning
@@ -652,6 +667,7 @@ static int __devinit tegra_kbc_probe(struct platform_device *pdev)
 	input_dev->keycodemax *= 2;
 
 	kbc->use_fn_map = pdata->use_fn_map;
+	kbc->use_ghost_filter = pdata->use_ghost_filter;
 	keymap_data = pdata->keymap_data ?: &tegra_kbc_default_keymap_data;
 	matrix_keypad_build_keymap(keymap_data, KBC_ROW_SHIFT,
 				   input_dev->keycode, input_dev->keybit);
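
Note: the ghost filter works because three pressed keys that pairwise share a row and a column make a fourth, phantom key electrically indistinguishable on a passive matrix. A standalone demo of the same check; the scancode layout (low 3 bits column, row above KBC_ROW_SHIFT) is assumed to match the driver, and the sample keys are invented:

    #include <stdbool.h>
    #include <stdio.h>

    #define KBC_ROW_SHIFT 3

    static bool is_ghost(const unsigned char *scancodes, int num_down)
    {
            bool same_row = false, same_col = false;
            int i, j;

            if (num_down < 3)
                    return false;

            for (i = 0; i < num_down; i++) {
                    unsigned char col = scancodes[i] & 0x07;
                    unsigned char row = scancodes[i] >> KBC_ROW_SHIFT;

                    for (j = i + 1; j < num_down; j++) {
                            if ((scancodes[j] & 0x07) == col)
                                    same_col = true;
                            if ((scancodes[j] >> KBC_ROW_SHIFT) == row)
                                    same_row = true;
                    }
            }
            return same_row && same_col;    /* possible phantom key */
    }

    int main(void)
    {
            unsigned char keys[] = { 0x00, 0x01, 0x08 }; /* (0,0) (0,1) (1,0) */

            printf("ghost: %d\n", is_ghost(keys, 3));    /* prints 1 */
            return 0;
    }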
diff --git a/drivers/input/misc/ad714x.c b/drivers/input/misc/ad714x.c
index c431d09e401a..c3a62c42cd28 100644
--- a/drivers/input/misc/ad714x.c
+++ b/drivers/input/misc/ad714x.c
@@ -79,13 +79,7 @@ struct ad714x_slider_drv {
 struct ad714x_wheel_drv {
 	int abs_pos;
 	int flt_pos;
-	int pre_mean_value;
 	int pre_highest_stage;
-	int pre_mean_value_no_offset;
-	int mean_value;
-	int mean_value_no_offset;
-	int pos_offset;
-	int pos_ratio;
 	int highest_stage;
 	enum ad714x_device_state state;
 	struct input_dev *input;
@@ -158,10 +152,10 @@ static void ad714x_use_com_int(struct ad714x_chip *ad714x,
 	unsigned short data;
 	unsigned short mask;
 
-	mask = ((1 << (end_stage + 1)) - 1) - (1 << start_stage);
+	mask = ((1 << (end_stage + 1)) - 1) - ((1 << start_stage) - 1);
 
 	ad714x->read(ad714x->dev, STG_COM_INT_EN_REG, &data);
-	data |= 1 << start_stage;
+	data |= 1 << end_stage;
 	ad714x->write(ad714x->dev, STG_COM_INT_EN_REG, data);
 
 	ad714x->read(ad714x->dev, STG_HIGH_INT_EN_REG, &data);
@@ -175,10 +169,10 @@ static void ad714x_use_thr_int(struct ad714x_chip *ad714x,
 	unsigned short data;
 	unsigned short mask;
 
-	mask = ((1 << (end_stage + 1)) - 1) - (1 << start_stage);
+	mask = ((1 << (end_stage + 1)) - 1) - ((1 << start_stage) - 1);
 
 	ad714x->read(ad714x->dev, STG_COM_INT_EN_REG, &data);
-	data &= ~(1 << start_stage);
+	data &= ~(1 << end_stage);
 	ad714x->write(ad714x->dev, STG_COM_INT_EN_REG, data);
 
 	ad714x->read(ad714x->dev, STG_HIGH_INT_EN_REG, &data);
@@ -404,7 +398,6 @@ static void ad714x_slider_state_machine(struct ad714x_chip *ad714x, int idx)
 		ad714x_slider_cal_highest_stage(ad714x, idx);
 		ad714x_slider_cal_abs_pos(ad714x, idx);
 		ad714x_slider_cal_flt_pos(ad714x, idx);
-
 		input_report_abs(sw->input, ABS_X, sw->flt_pos);
 		input_report_key(sw->input, BTN_TOUCH, 1);
 	} else {
@@ -468,104 +461,41 @@ static void ad714x_wheel_cal_sensor_val(struct ad714x_chip *ad714x, int idx)
 /*
  * When the scroll wheel is activated, we compute the absolute position based
  * on the sensor values. To calculate the position, we first determine the
- * sensor that has the greatest response among the 8 sensors that constitutes
- * the scrollwheel. Then we determined the 2 sensors on either sides of the
+ * sensor that has the greatest response among the sensors that constitute
+ * the scrollwheel. Then we determine the sensors on either side of the
  * sensor with the highest response and we apply weights to these sensors. The
- * result of this computation gives us the mean value which defined by the
- * following formula:
- * For i= second_before_highest_stage to i= second_after_highest_stage
- *         v += Sensor response(i)*WEIGHT*(i+3)
- *         w += Sensor response(i)
- * Mean_Value=v/w
- * pos_on_scrollwheel = (Mean_Value - position_offset) / position_ratio
+ * result of this computation gives us the mean value.
  */
 
-#define WEIGHT_FACTOR 30
-/* This constant prevents the "PositionOffset" from reaching a big value */
-#define OFFSET_POSITION_CLAMP	120
 static void ad714x_wheel_cal_abs_pos(struct ad714x_chip *ad714x, int idx)
 {
 	struct ad714x_wheel_plat *hw = &ad714x->hw->wheel[idx];
 	struct ad714x_wheel_drv *sw = &ad714x->sw->wheel[idx];
 	int stage_num = hw->end_stage - hw->start_stage + 1;
-	int second_before, first_before, highest, first_after, second_after;
+	int first_before, highest, first_after;
 	int a_param, b_param;
 
-	/* Calculate Mean value */
-
-	second_before = (sw->highest_stage + stage_num - 2) % stage_num;
 	first_before = (sw->highest_stage + stage_num - 1) % stage_num;
 	highest = sw->highest_stage;
 	first_after = (sw->highest_stage + stage_num + 1) % stage_num;
-	second_after = (sw->highest_stage + stage_num + 2) % stage_num;
-
-	if (((sw->highest_stage - hw->start_stage) > 1) &&
-	    ((hw->end_stage - sw->highest_stage) > 1)) {
-		a_param = ad714x->sensor_val[second_before] *
-			(second_before - hw->start_stage + 3) +
-			ad714x->sensor_val[first_before] *
-			(second_before - hw->start_stage + 3) +
-			ad714x->sensor_val[highest] *
-			(second_before - hw->start_stage + 3) +
-			ad714x->sensor_val[first_after] *
-			(first_after - hw->start_stage + 3) +
-			ad714x->sensor_val[second_after] *
-			(second_after - hw->start_stage + 3);
-	} else {
-		a_param = ad714x->sensor_val[second_before] *
-			(second_before - hw->start_stage + 1) +
-			ad714x->sensor_val[first_before] *
-			(second_before - hw->start_stage + 2) +
-			ad714x->sensor_val[highest] *
-			(second_before - hw->start_stage + 3) +
-			ad714x->sensor_val[first_after] *
-			(first_after - hw->start_stage + 4) +
-			ad714x->sensor_val[second_after] *
-			(second_after - hw->start_stage + 5);
-	}
-	a_param *= WEIGHT_FACTOR;
 
-	b_param = ad714x->sensor_val[second_before] +
+	a_param = ad714x->sensor_val[highest] *
+		(highest - hw->start_stage) +
+		ad714x->sensor_val[first_before] *
+		(highest - hw->start_stage - 1) +
+		ad714x->sensor_val[first_after] *
+		(highest - hw->start_stage + 1);
+	b_param = ad714x->sensor_val[highest] +
 		ad714x->sensor_val[first_before] +
-		ad714x->sensor_val[highest] +
-		ad714x->sensor_val[first_after] +
-		ad714x->sensor_val[second_after];
-
-	sw->pre_mean_value = sw->mean_value;
-	sw->mean_value = a_param / b_param;
-
-	/* Calculate the offset */
-
-	if ((sw->pre_highest_stage == hw->end_stage) &&
-			(sw->highest_stage == hw->start_stage))
-		sw->pos_offset = sw->mean_value;
-	else if ((sw->pre_highest_stage == hw->start_stage) &&
-			(sw->highest_stage == hw->end_stage))
-		sw->pos_offset = sw->pre_mean_value;
-
-	if (sw->pos_offset > OFFSET_POSITION_CLAMP)
-		sw->pos_offset = OFFSET_POSITION_CLAMP;
-
-	/* Calculate the mean value without the offset */
-
-	sw->pre_mean_value_no_offset = sw->mean_value_no_offset;
-	sw->mean_value_no_offset = sw->mean_value - sw->pos_offset;
-	if (sw->mean_value_no_offset < 0)
-		sw->mean_value_no_offset = 0;
-
-	/* Calculate ratio to scale down to NUMBER_OF_WANTED_POSITIONS */
-
-	if ((sw->pre_highest_stage == hw->end_stage) &&
-			(sw->highest_stage == hw->start_stage))
-		sw->pos_ratio = (sw->pre_mean_value_no_offset * 100) /
-				hw->max_coord;
-	else if ((sw->pre_highest_stage == hw->start_stage) &&
-			(sw->highest_stage == hw->end_stage))
-		sw->pos_ratio = (sw->mean_value_no_offset * 100) /
-				hw->max_coord;
-	sw->abs_pos = (sw->mean_value_no_offset * 100) / sw->pos_ratio;
+		ad714x->sensor_val[first_after];
+
+	sw->abs_pos = ((hw->max_coord / (hw->end_stage - hw->start_stage)) *
+			a_param) / b_param;
+
 	if (sw->abs_pos > hw->max_coord)
 		sw->abs_pos = hw->max_coord;
+	else if (sw->abs_pos < 0)
+		sw->abs_pos = 0;
 }
 
@@ -639,9 +569,8 @@ static void ad714x_wheel_state_machine(struct ad714x_chip *ad714x, int idx)
 		ad714x_wheel_cal_highest_stage(ad714x, idx);
 		ad714x_wheel_cal_abs_pos(ad714x, idx);
 		ad714x_wheel_cal_flt_pos(ad714x, idx);
-
 		input_report_abs(sw->input, ABS_WHEEL,
-			sw->abs_pos);
+			sw->flt_pos);
 		input_report_key(sw->input, BTN_TOUCH, 1);
 	} else {
 		/* When the user lifts off the sensor, configure
@@ -1149,6 +1078,8 @@ struct ad714x_chip *ad714x_probe(struct device *dev, u16 bus_type, int irq,
 		input[alloc_idx]->id.bustype = bus_type;
 		input[alloc_idx]->id.product = ad714x->product;
 		input[alloc_idx]->id.version = ad714x->version;
+		input[alloc_idx]->name = "ad714x_captouch_slider";
+		input[alloc_idx]->dev.parent = dev;
 
 		error = input_register_device(input[alloc_idx]);
 		if (error)
@@ -1179,6 +1110,8 @@ struct ad714x_chip *ad714x_probe(struct device *dev, u16 bus_type, int irq,
 		input[alloc_idx]->id.bustype = bus_type;
 		input[alloc_idx]->id.product = ad714x->product;
 		input[alloc_idx]->id.version = ad714x->version;
+		input[alloc_idx]->name = "ad714x_captouch_wheel";
+		input[alloc_idx]->dev.parent = dev;
 
 		error = input_register_device(input[alloc_idx]);
 		if (error)
@@ -1212,6 +1145,8 @@ struct ad714x_chip *ad714x_probe(struct device *dev, u16 bus_type, int irq,
 		input[alloc_idx]->id.bustype = bus_type;
 		input[alloc_idx]->id.product = ad714x->product;
 		input[alloc_idx]->id.version = ad714x->version;
+		input[alloc_idx]->name = "ad714x_captouch_pad";
+		input[alloc_idx]->dev.parent = dev;
 
 		error = input_register_device(input[alloc_idx]);
 		if (error)
@@ -1240,6 +1175,8 @@ struct ad714x_chip *ad714x_probe(struct device *dev, u16 bus_type, int irq,
 		input[alloc_idx]->id.bustype = bus_type;
 		input[alloc_idx]->id.product = ad714x->product;
 		input[alloc_idx]->id.version = ad714x->version;
+		input[alloc_idx]->name = "ad714x_captouch_button";
+		input[alloc_idx]->dev.parent = dev;
 
 		error = input_register_device(input[alloc_idx]);
 		if (error)
@@ -1249,7 +1186,9 @@ struct ad714x_chip *ad714x_probe(struct device *dev, u16 bus_type, int irq,
 	}
 
 	error = request_threaded_irq(ad714x->irq, NULL, ad714x_interrupt_thread,
-			IRQF_TRIGGER_FALLING, "ad714x_captouch", ad714x);
+			plat_data->irqflags ?
+				plat_data->irqflags : IRQF_TRIGGER_FALLING,
+			"ad714x_captouch", ad714x);
 	if (error) {
 		dev_err(dev, "can't allocate irq %d\n", ad714x->irq);
 		goto err_unreg_dev;
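
Note: the simplified wheel estimate is a weighted mean over the highest-responding stage and its two neighbours, scaled to the wheel's coordinate range. A standalone sketch of the same arithmetic; the sensor values, stage geometry and max_coord below are invented sample data:

    #include <stdio.h>

    int main(void)
    {
            int sensor_val[8] = { 0, 10, 80, 200, 90, 12, 0, 0 };
            int start_stage = 0, end_stage = 7, max_coord = 4096;
            int stage_num = end_stage - start_stage + 1;
            int highest = 3;    /* stage with the largest response */
            int before = (highest + stage_num - 1) % stage_num;
            int after = (highest + stage_num + 1) % stage_num;
            int a, b, abs_pos;

            /* weight each neighbour by its stage index relative to the start */
            a = sensor_val[highest] * (highest - start_stage) +
                sensor_val[before] * (highest - start_stage - 1) +
                sensor_val[after] * (highest - start_stage + 1);
            b = sensor_val[highest] + sensor_val[before] + sensor_val[after];

            abs_pos = ((max_coord / (end_stage - start_stage)) * a) / b;
            if (abs_pos > max_coord)
                    abs_pos = max_coord;
            else if (abs_pos < 0)
                    abs_pos = 0;

            printf("abs_pos = %d\n", abs_pos);  /* prints 1771 */
            return 0;
    }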
diff --git a/drivers/input/misc/ati_remote2.c b/drivers/input/misc/ati_remote2.c
index 9ccdb82d869a..1de58e8a1b71 100644
--- a/drivers/input/misc/ati_remote2.c
+++ b/drivers/input/misc/ati_remote2.c
@@ -737,14 +737,17 @@ static ssize_t ati_remote2_store_channel_mask(struct device *dev,
 
 	mutex_lock(&ati_remote2_mutex);
 
-	if (mask != ar2->channel_mask && !ati_remote2_setup(ar2, mask))
-		ar2->channel_mask = mask;
+	if (mask != ar2->channel_mask) {
+		r = ati_remote2_setup(ar2, mask);
+		if (!r)
+			ar2->channel_mask = mask;
+	}
 
 	mutex_unlock(&ati_remote2_mutex);
 
 	usb_autopm_put_interface(ar2->intf[0]);
 
-	return count;
+	return r ? r : count;
 }
 
 static ssize_t ati_remote2_show_mode_mask(struct device *dev,
diff --git a/drivers/input/misc/rotary_encoder.c b/drivers/input/misc/rotary_encoder.c
index 7e64d01da2be..2c8b84dd9dac 100644
--- a/drivers/input/misc/rotary_encoder.c
+++ b/drivers/input/misc/rotary_encoder.c
@@ -2,6 +2,7 @@
  * rotary_encoder.c
  *
  * (c) 2009 Daniel Mack <daniel@caiaq.de>
+ * Copyright (C) 2011 Johan Hovold <jhovold@gmail.com>
  *
  * state machine code inspired by code from Tim Ruetz
  *
@@ -38,52 +39,66 @@ struct rotary_encoder {
 
 	bool armed;
 	unsigned char dir;	/* 0 - clockwise, 1 - CCW */
+
+	char last_stable;
 };
 
-static irqreturn_t rotary_encoder_irq(int irq, void *dev_id)
+static int rotary_encoder_get_state(struct rotary_encoder_platform_data *pdata)
 {
-	struct rotary_encoder *encoder = dev_id;
-	struct rotary_encoder_platform_data *pdata = encoder->pdata;
 	int a = !!gpio_get_value(pdata->gpio_a);
 	int b = !!gpio_get_value(pdata->gpio_b);
-	int state;
 
 	a ^= pdata->inverted_a;
 	b ^= pdata->inverted_b;
-	state = (a << 1) | b;
 
-	switch (state) {
+	return ((a << 1) | b);
+}
 
-	case 0x0:
-		if (!encoder->armed)
-			break;
+static void rotary_encoder_report_event(struct rotary_encoder *encoder)
+{
+	struct rotary_encoder_platform_data *pdata = encoder->pdata;
 
-		if (pdata->relative_axis) {
-			input_report_rel(encoder->input, pdata->axis,
-					 encoder->dir ? -1 : 1);
-		} else {
-			unsigned int pos = encoder->pos;
+	if (pdata->relative_axis) {
+		input_report_rel(encoder->input,
+				 pdata->axis, encoder->dir ? -1 : 1);
+	} else {
+		unsigned int pos = encoder->pos;
 
-			if (encoder->dir) {
-				/* turning counter-clockwise */
-				if (pdata->rollover)
-					pos += pdata->steps;
-				if (pos)
-					pos--;
-			} else {
-				/* turning clockwise */
-				if (pdata->rollover || pos < pdata->steps)
-					pos++;
-			}
+		if (encoder->dir) {
+			/* turning counter-clockwise */
 			if (pdata->rollover)
-				pos %= pdata->steps;
-			encoder->pos = pos;
-			input_report_abs(encoder->input, pdata->axis,
-					 encoder->pos);
+				pos += pdata->steps;
+			if (pos)
+				pos--;
+		} else {
+			/* turning clockwise */
+			if (pdata->rollover || pos < pdata->steps)
+				pos++;
 		}
-		input_sync(encoder->input);
 
-		encoder->armed = false;
+		if (pdata->rollover)
+			pos %= pdata->steps;
+
+		encoder->pos = pos;
+		input_report_abs(encoder->input, pdata->axis, encoder->pos);
+	}
+
+	input_sync(encoder->input);
+}
+
+static irqreturn_t rotary_encoder_irq(int irq, void *dev_id)
+{
+	struct rotary_encoder *encoder = dev_id;
+	int state;
+
+	state = rotary_encoder_get_state(encoder->pdata);
+
+	switch (state) {
+	case 0x0:
+		if (encoder->armed) {
+			rotary_encoder_report_event(encoder);
+			encoder->armed = false;
+		}
 		break;
 
 	case 0x1:
@@ -100,11 +115,37 @@ static irqreturn_t rotary_encoder_irq(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
+static irqreturn_t rotary_encoder_half_period_irq(int irq, void *dev_id)
+{
+	struct rotary_encoder *encoder = dev_id;
+	int state;
+
+	state = rotary_encoder_get_state(encoder->pdata);
+
+	switch (state) {
+	case 0x00:
+	case 0x03:
+		if (state != encoder->last_stable) {
+			rotary_encoder_report_event(encoder);
+			encoder->last_stable = state;
+		}
+		break;
+
+	case 0x01:
+	case 0x02:
+		encoder->dir = (encoder->last_stable + state) & 0x01;
+		break;
+	}
+
+	return IRQ_HANDLED;
+}
+
 static int __devinit rotary_encoder_probe(struct platform_device *pdev)
 {
 	struct rotary_encoder_platform_data *pdata = pdev->dev.platform_data;
 	struct rotary_encoder *encoder;
 	struct input_dev *input;
+	irq_handler_t handler;
 	int err;
 
 	if (!pdata) {
@@ -175,7 +216,14 @@ static int __devinit rotary_encoder_probe(struct platform_device *pdev)
 	}
 
 	/* request the IRQs */
-	err = request_irq(encoder->irq_a, &rotary_encoder_irq,
+	if (pdata->half_period) {
+		handler = &rotary_encoder_half_period_irq;
+		encoder->last_stable = rotary_encoder_get_state(pdata);
+	} else {
+		handler = &rotary_encoder_irq;
+	}
+
+	err = request_irq(encoder->irq_a, handler,
 			  IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
 			  DRV_NAME, encoder);
 	if (err) {
@@ -184,7 +232,7 @@ static int __devinit rotary_encoder_probe(struct platform_device *pdev)
 		goto exit_free_gpio_b;
 	}
 
-	err = request_irq(encoder->irq_b, &rotary_encoder_irq,
+	err = request_irq(encoder->irq_b, handler,
 			  IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
 			  DRV_NAME, encoder);
 	if (err) {
@@ -252,6 +300,5 @@ module_exit(rotary_encoder_exit);
 
 MODULE_ALIAS("platform:" DRV_NAME);
 MODULE_DESCRIPTION("GPIO rotary encoder driver");
-MODULE_AUTHOR("Daniel Mack <daniel@caiaq.de>");
+MODULE_AUTHOR("Daniel Mack <daniel@caiaq.de>, Johan Hovold");
 MODULE_LICENSE("GPL v2");
-
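
Note: with half_period set, both GPIO edges run the new handler, every transition between the two stable quadrature states (0x0 and 0x3) is one reported step, and the direction falls out of the parity of last_stable plus the transient state. A standalone decode of an example sample sequence (one full quadrature period, so two half-period events in the same direction):

    #include <stdio.h>

    int main(void)
    {
            /* quadrature samples, encoded as (a << 1) | b */
            int seq[] = { 0x0, 0x1, 0x3, 0x2, 0x0 };
            int last_stable = 0x0, dir = 0, pos = 0, i;

            for (i = 1; i < 5; i++) {
                    switch (seq[i]) {
                    case 0x0:
                    case 0x3:
                            if (seq[i] != last_stable) {
                                    pos += dir ? -1 : 1;    /* report one step */
                                    last_stable = seq[i];
                            }
                            break;
                    case 0x1:
                    case 0x2:
                            dir = (last_stable + seq[i]) & 0x01;
                            break;
                    }
            }
            printf("pos = %d\n", pos);  /* prints -2 */
            return 0;
    }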
diff --git a/drivers/input/misc/twl4030-pwrbutton.c b/drivers/input/misc/twl4030-pwrbutton.c
index f16972bddca4..38e4b507b94c 100644
--- a/drivers/input/misc/twl4030-pwrbutton.c
+++ b/drivers/input/misc/twl4030-pwrbutton.c
@@ -89,7 +89,7 @@ static int __init twl4030_pwrbutton_probe(struct platform_device *pdev)
 	return 0;
 
 free_irq:
-	free_irq(irq, NULL);
+	free_irq(irq, pwr);
 free_input_dev:
 	input_free_device(pwr);
 	return err;
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index 04d9bf320a4f..32503565faf9 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -16,6 +16,7 @@
 #include <linux/slab.h>
 #include <linux/module.h>
 #include <linux/input.h>
+#include <linux/input/mt.h>
 #include <linux/serio.h>
 #include <linux/libps2.h>
 #include "psmouse.h"
@@ -242,15 +243,37 @@ static void elantech_report_absolute_v1(struct psmouse *psmouse)
 	input_sync(dev);
 }
 
+static void elantech_set_slot(struct input_dev *dev, int slot, bool active,
+			      unsigned int x, unsigned int y)
+{
+	input_mt_slot(dev, slot);
+	input_mt_report_slot_state(dev, MT_TOOL_FINGER, active);
+	if (active) {
+		input_report_abs(dev, ABS_MT_POSITION_X, x);
+		input_report_abs(dev, ABS_MT_POSITION_Y, y);
+	}
+}
+
+/* x1 < x2 and y1 < y2 when two fingers, x = y = 0 when not pressed */
+static void elantech_report_semi_mt_data(struct input_dev *dev,
+					 unsigned int num_fingers,
+					 unsigned int x1, unsigned int y1,
+					 unsigned int x2, unsigned int y2)
+{
+	elantech_set_slot(dev, 0, num_fingers != 0, x1, y1);
+	elantech_set_slot(dev, 1, num_fingers == 2, x2, y2);
+}
+
 /*
  * Interpret complete data packets and report absolute mode input events for
  * hardware version 2. (6 byte packets)
  */
 static void elantech_report_absolute_v2(struct psmouse *psmouse)
 {
+	struct elantech_data *etd = psmouse->private;
 	struct input_dev *dev = psmouse->dev;
 	unsigned char *packet = psmouse->packet;
-	int fingers, x1, y1, x2, y2;
+	unsigned int fingers, x1 = 0, y1 = 0, x2 = 0, y2 = 0, width = 0, pres = 0;
 
 	/* byte 0: n1  n0   .   .   .   .   R   L */
 	fingers = (packet[0] & 0xc0) >> 6;
@@ -270,14 +293,18 @@ static void elantech_report_absolute_v2(struct psmouse *psmouse)
 		 * byte 1:  .   .   .   .   .  x10  x9   x8
 		 * byte 2: x7   x6   x5   x4   x3   x2   x1   x0
 		 */
-		input_report_abs(dev, ABS_X,
-			((packet[1] & 0x07) << 8) | packet[2]);
+		x1 = ((packet[1] & 0x07) << 8) | packet[2];
 		/*
 		 * byte 4:  .   .   .   .   .   .   y9   y8
 		 * byte 5: y7   y6   y5   y4   y3   y2   y1   y0
 		 */
-		input_report_abs(dev, ABS_Y,
-			ETP_YMAX_V2 - (((packet[4] & 0x03) << 8) | packet[5]));
+		y1 = ETP_YMAX_V2 - (((packet[4] & 0x03) << 8) | packet[5]);
+
+		input_report_abs(dev, ABS_X, x1);
+		input_report_abs(dev, ABS_Y, y1);
+
+		pres = (packet[1] & 0xf0) | ((packet[4] & 0xf0) >> 4);
+		width = ((packet[0] & 0x30) >> 2) | ((packet[3] & 0x30) >> 4);
 		break;
 
 	case 2:
@@ -303,23 +330,24 @@ static void elantech_report_absolute_v2(struct psmouse *psmouse)
 		 */
 		input_report_abs(dev, ABS_X, x1 << 2);
 		input_report_abs(dev, ABS_Y, y1 << 2);
-		/*
-		 * For compatibility with the proprietary X Elantech driver
-		 * report both coordinates as hat coordinates
-		 */
-		input_report_abs(dev, ABS_HAT0X, x1);
-		input_report_abs(dev, ABS_HAT0Y, y1);
-		input_report_abs(dev, ABS_HAT1X, x2);
-		input_report_abs(dev, ABS_HAT1Y, y2);
+
+		/* Unknown so just report sensible values */
+		pres = 127;
+		width = 7;
 		break;
 	}
 
+	elantech_report_semi_mt_data(dev, fingers, x1, y1, x2, y2);
 	input_report_key(dev, BTN_TOOL_FINGER, fingers == 1);
 	input_report_key(dev, BTN_TOOL_DOUBLETAP, fingers == 2);
 	input_report_key(dev, BTN_TOOL_TRIPLETAP, fingers == 3);
 	input_report_key(dev, BTN_TOOL_QUADTAP, fingers == 4);
 	input_report_key(dev, BTN_LEFT, packet[0] & 0x01);
 	input_report_key(dev, BTN_RIGHT, packet[0] & 0x02);
+	if (etd->reports_pressure) {
+		input_report_abs(dev, ABS_PRESSURE, pres);
+		input_report_abs(dev, ABS_TOOL_WIDTH, width);
+	}
 
 	input_sync(dev);
 }
@@ -478,10 +506,16 @@ static void elantech_set_input_params(struct psmouse *psmouse)
 		__set_bit(BTN_TOOL_QUADTAP, dev->keybit);
 		input_set_abs_params(dev, ABS_X, ETP_XMIN_V2, ETP_XMAX_V2, 0, 0);
 		input_set_abs_params(dev, ABS_Y, ETP_YMIN_V2, ETP_YMAX_V2, 0, 0);
-		input_set_abs_params(dev, ABS_HAT0X, ETP_2FT_XMIN, ETP_2FT_XMAX, 0, 0);
-		input_set_abs_params(dev, ABS_HAT0Y, ETP_2FT_YMIN, ETP_2FT_YMAX, 0, 0);
-		input_set_abs_params(dev, ABS_HAT1X, ETP_2FT_XMIN, ETP_2FT_XMAX, 0, 0);
-		input_set_abs_params(dev, ABS_HAT1Y, ETP_2FT_YMIN, ETP_2FT_YMAX, 0, 0);
+		if (etd->reports_pressure) {
+			input_set_abs_params(dev, ABS_PRESSURE, ETP_PMIN_V2,
+					     ETP_PMAX_V2, 0, 0);
+			input_set_abs_params(dev, ABS_TOOL_WIDTH, ETP_WMIN_V2,
+					     ETP_WMAX_V2, 0, 0);
+		}
+		__set_bit(INPUT_PROP_SEMI_MT, dev->propbit);
+		input_mt_init_slots(dev, 2);
+		input_set_abs_params(dev, ABS_MT_POSITION_X, ETP_XMIN_V2, ETP_XMAX_V2, 0, 0);
+		input_set_abs_params(dev, ABS_MT_POSITION_Y, ETP_YMIN_V2, ETP_YMAX_V2, 0, 0);
		break;
 	}
 }
@@ -725,6 +759,10 @@ int elantech_init(struct psmouse *psmouse)
 		etd->debug = 1;
 		/* Don't know how to do parity checking for version 2 */
 		etd->paritycheck = 0;
+
+		if (etd->fw_version >= 0x020800)
+			etd->reports_pressure = true;
+
 	} else {
 		etd->hw_version = 1;
 		etd->paritycheck = 1;
diff --git a/drivers/input/mouse/elantech.h b/drivers/input/mouse/elantech.h
index aa4aac5d2198..fabb2b99615c 100644
--- a/drivers/input/mouse/elantech.h
+++ b/drivers/input/mouse/elantech.h
@@ -77,6 +77,11 @@
 #define ETP_YMIN_V2			(  0 + ETP_EDGE_FUZZ_V2)
 #define ETP_YMAX_V2			(768 - ETP_EDGE_FUZZ_V2)
 
+#define ETP_PMIN_V2			0
+#define ETP_PMAX_V2			255
+#define ETP_WMIN_V2			0
+#define ETP_WMAX_V2			15
+
 /*
  * For two finger touches the coordinate of each finger gets reported
  * separately but with reduced resolution.
@@ -102,6 +107,7 @@ struct elantech_data {
 	unsigned char capabilities;
 	bool paritycheck;
 	bool jumpy_cursor;
+	bool reports_pressure;
 	unsigned char hw_version;
 	unsigned int fw_version;
 	unsigned int single_finger_reports;
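
Note: the elantech v2 hardware only reports a bounding rectangle for two fingers, so the driver exposes it as a semi-MT device: two MT slots carrying the opposite corners, with INPUT_PROP_SEMI_MT telling userspace not to treat them as true finger positions. A sketch of the reporting pattern; example_report_corners is a hypothetical name and dev is assumed to be an input device already set up with input_mt_init_slots(dev, 2):

    #include <linux/input/mt.h>

    static void example_report_corners(struct input_dev *dev, int nfingers,
                                       int x1, int y1, int x2, int y2)
    {
            input_mt_slot(dev, 0);
            input_mt_report_slot_state(dev, MT_TOOL_FINGER, nfingers != 0);
            if (nfingers) {
                    input_report_abs(dev, ABS_MT_POSITION_X, x1);
                    input_report_abs(dev, ABS_MT_POSITION_Y, y1);
            }
            input_mt_slot(dev, 1);
            input_mt_report_slot_state(dev, MT_TOOL_FINGER, nfingers == 2);
            if (nfingers == 2) {
                    input_report_abs(dev, ABS_MT_POSITION_X, x2);
                    input_report_abs(dev, ABS_MT_POSITION_Y, y2);
            }
            input_sync(dev);
    }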
diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
index 7630273e9474..257e033986e4 100644
--- a/drivers/input/mousedev.c
+++ b/drivers/input/mousedev.c
@@ -508,7 +508,6 @@ static void mousedev_attach_client(struct mousedev *mousedev,
 	spin_lock(&mousedev->client_lock);
 	list_add_tail_rcu(&client->node, &mousedev->client_list);
 	spin_unlock(&mousedev->client_lock);
-	synchronize_rcu();
 }
 
 static void mousedev_detach_client(struct mousedev *mousedev,
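
Note: dropping synchronize_rcu() after list addition is safe because list_add_tail_rcu() publishes the entry with the required memory barrier; concurrent readers either see the new client or miss one event, and both outcomes are acceptable. A grace period is only needed between unpublishing and freeing, sketched here under the assumption of a mousedev-like client list (example_detach is a hypothetical name; in the real driver this logic lives in the detach path):

    static void example_detach(struct mousedev *mousedev,
                               struct mousedev_client *client)
    {
            spin_lock(&mousedev->client_lock);
            list_del_rcu(&client->node);    /* unpublish the entry */
            spin_unlock(&mousedev->client_lock);
            synchronize_rcu();              /* wait out readers still in flight */
            kfree(client);                  /* now no reader can hold it */
    }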
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index 434fd800cd24..cabd9e54863f 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -248,6 +248,18 @@ config TOUCHSCREEN_LPC32XX
 	  To compile this driver as a module, choose M here: the
 	  module will be called lpc32xx_ts.
 
+config TOUCHSCREEN_MAX11801
+	tristate "MAX11801 based touchscreens"
+	depends on I2C
+	help
+	  Say Y here if you have a MAX11801 based touchscreen
+	  controller.
+
+	  If unsure, say N.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called max11801_ts.
+
 config TOUCHSCREEN_MCS5000
 	tristate "MELFAS MCS-5000 touchscreen"
 	depends on I2C
diff --git a/drivers/input/touchscreen/Makefile b/drivers/input/touchscreen/Makefile
index ca94098d4c92..282d6f76ae26 100644
--- a/drivers/input/touchscreen/Makefile
+++ b/drivers/input/touchscreen/Makefile
@@ -27,6 +27,7 @@ obj-$(CONFIG_TOUCHSCREEN_FUJITSU) += fujitsu_ts.o
 obj-$(CONFIG_TOUCHSCREEN_INEXIO)	+= inexio.o
 obj-$(CONFIG_TOUCHSCREEN_INTEL_MID)	+= intel-mid-touch.o
 obj-$(CONFIG_TOUCHSCREEN_LPC32XX)	+= lpc32xx_ts.o
+obj-$(CONFIG_TOUCHSCREEN_MAX11801)	+= max11801_ts.o
 obj-$(CONFIG_TOUCHSCREEN_MC13783)	+= mc13783_ts.o
 obj-$(CONFIG_TOUCHSCREEN_MCS5000)	+= mcs5000_ts.o
 obj-$(CONFIG_TOUCHSCREEN_MIGOR)		+= migor_ts.o
diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c
index 1de1c19dad30..5196861b86ef 100644
--- a/drivers/input/touchscreen/ads7846.c
+++ b/drivers/input/touchscreen/ads7846.c
@@ -109,6 +109,7 @@ struct ads7846 {
 	u16 pressure_max;
 
 	bool swap_xy;
+	bool use_internal;
 
 	struct ads7846_packet *packet;
 
@@ -307,7 +308,6 @@ static int ads7846_read12_ser(struct device *dev, unsigned command)
 	struct ads7846 *ts = dev_get_drvdata(dev);
 	struct ser_req *req;
 	int status;
-	int use_internal;
 
 	req = kzalloc(sizeof *req, GFP_KERNEL);
 	if (!req)
@@ -315,11 +315,8 @@ static int ads7846_read12_ser(struct device *dev, unsigned command)
 
 	spi_message_init(&req->msg);
 
-	/* FIXME boards with ads7846 might use external vref instead ... */
-	use_internal = (ts->model == 7846);
-
 	/* maybe turn on internal vREF, and let it settle */
-	if (use_internal) {
+	if (ts->use_internal) {
 		req->ref_on = REF_ON;
 		req->xfer[0].tx_buf = &req->ref_on;
 		req->xfer[0].len = 1;
@@ -331,8 +328,14 @@ static int ads7846_read12_ser(struct device *dev, unsigned command)
 		/* for 1uF, settle for 800 usec; no cap, 100 usec. */
 		req->xfer[1].delay_usecs = ts->vref_delay_usecs;
 		spi_message_add_tail(&req->xfer[1], &req->msg);
+
+		/* Enable reference voltage */
+		command |= ADS_PD10_REF_ON;
 	}
 
+	/* Enable ADC in every case */
+	command |= ADS_PD10_ADC_ON;
+
 	/* take sample */
 	req->command = (u8) command;
 	req->xfer[2].tx_buf = &req->command;
@@ -416,7 +419,7 @@ name ## _show(struct device *dev, struct device_attribute *attr, char *buf) \
 {									\
 	struct ads7846 *ts = dev_get_drvdata(dev);			\
 	ssize_t v = ads7846_read12_ser(dev,				\
-			READ_12BIT_SER(var) | ADS_PD10_ALL_ON);		\
+			READ_12BIT_SER(var));				\
 	if (v < 0)							\
 		return v;						\
 	return sprintf(buf, "%u\n", adjust(ts, v));			\
@@ -509,6 +512,7 @@ static int ads784x_hwmon_register(struct spi_device *spi, struct ads7846 *ts)
 		if (!ts->vref_mv) {
 			dev_dbg(&spi->dev, "assuming 2.5V internal vREF\n");
 			ts->vref_mv = 2500;
+			ts->use_internal = true;
 		}
 		break;
 	case 7845:
@@ -969,6 +973,13 @@ static int __devinit ads7846_setup_pendown(struct spi_device *spi, struct ads784
 			pdata->gpio_pendown);
 		return err;
 	}
+	err = gpio_direction_input(pdata->gpio_pendown);
+	if (err) {
+		dev_err(&spi->dev, "failed to setup pendown GPIO%d\n",
+			pdata->gpio_pendown);
+		gpio_free(pdata->gpio_pendown);
+		return err;
+	}
 
 	ts->gpio_pendown = pdata->gpio_pendown;
 
@@ -1340,8 +1351,7 @@ static int __devinit ads7846_probe(struct spi_device *spi)
 	if (ts->model == 7845)
 		ads7845_read12_ser(&spi->dev, PWRDOWN);
 	else
-		(void) ads7846_read12_ser(&spi->dev,
-				READ_12BIT_SER(vaux) | ADS_PD10_ALL_ON);
+		(void) ads7846_read12_ser(&spi->dev, READ_12BIT_SER(vaux));
 
 	err = sysfs_create_group(&spi->dev.kobj, &ads784x_attr_group);
 	if (err)
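
Note: these changes stop OR-ing ADS_PD10_ALL_ON into every serial conversion. The PD1:PD0 power-down bits of the command byte now come from the board configuration: the internal reference is powered only when the board actually uses it, while the ADC is always enabled for the sample. Condensed as a helper; example_command() is a hypothetical name wrapping the driver's existing ADS_PD10_REF_ON/ADS_PD10_ADC_ON bits:

    static unsigned int example_command(unsigned int command, bool use_internal)
    {
            if (use_internal)
                    command |= ADS_PD10_REF_ON;     /* keep internal vREF powered */

            command |= ADS_PD10_ADC_ON;             /* ADC on in every case */
            return command;
    }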
diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c
index 4012436633b1..1e61387c73ca 100644
--- a/drivers/input/touchscreen/atmel_mxt_ts.c
+++ b/drivers/input/touchscreen/atmel_mxt_ts.c
@@ -17,7 +17,7 @@
17#include <linux/firmware.h> 17#include <linux/firmware.h>
18#include <linux/i2c.h> 18#include <linux/i2c.h>
19#include <linux/i2c/atmel_mxt_ts.h> 19#include <linux/i2c/atmel_mxt_ts.h>
20#include <linux/input.h> 20#include <linux/input/mt.h>
21#include <linux/interrupt.h> 21#include <linux/interrupt.h>
22#include <linux/slab.h> 22#include <linux/slab.h>
23 23
@@ -196,9 +196,12 @@
196#define MXT_PRESS (1 << 6) 196#define MXT_PRESS (1 << 6)
197#define MXT_DETECT (1 << 7) 197#define MXT_DETECT (1 << 7)
198 198
199/* Touch orient bits */
200#define MXT_XY_SWITCH (1 << 0)
201#define MXT_X_INVERT (1 << 1)
202#define MXT_Y_INVERT (1 << 2)
203
199/* Touchscreen absolute values */ 204/* Touchscreen absolute values */
200#define MXT_MAX_XC 0x3ff
201#define MXT_MAX_YC 0x3ff
202#define MXT_MAX_AREA 0xff 205#define MXT_MAX_AREA 0xff
203 206
204#define MXT_MAX_FINGER 10 207#define MXT_MAX_FINGER 10
@@ -246,6 +249,8 @@ struct mxt_data {
246 struct mxt_info info; 249 struct mxt_info info;
247 struct mxt_finger finger[MXT_MAX_FINGER]; 250 struct mxt_finger finger[MXT_MAX_FINGER];
248 unsigned int irq; 251 unsigned int irq;
252 unsigned int max_x;
253 unsigned int max_y;
249}; 254};
250 255
251static bool mxt_object_readable(unsigned int type) 256static bool mxt_object_readable(unsigned int type)
@@ -499,19 +504,21 @@ static void mxt_input_report(struct mxt_data *data, int single_id)
499 if (!finger[id].status) 504 if (!finger[id].status)
500 continue; 505 continue;
501 506
502 input_report_abs(input_dev, ABS_MT_TOUCH_MAJOR, 507 input_mt_slot(input_dev, id);
503 finger[id].status != MXT_RELEASE ? 508 input_mt_report_slot_state(input_dev, MT_TOOL_FINGER,
504 finger[id].area : 0); 509 finger[id].status != MXT_RELEASE);
505 input_report_abs(input_dev, ABS_MT_POSITION_X,
506 finger[id].x);
507 input_report_abs(input_dev, ABS_MT_POSITION_Y,
508 finger[id].y);
509 input_mt_sync(input_dev);
510 510
511 if (finger[id].status == MXT_RELEASE) 511 if (finger[id].status != MXT_RELEASE) {
512 finger[id].status = 0;
513 else
514 finger_num++; 512 finger_num++;
513 input_report_abs(input_dev, ABS_MT_TOUCH_MAJOR,
514 finger[id].area);
515 input_report_abs(input_dev, ABS_MT_POSITION_X,
516 finger[id].x);
517 input_report_abs(input_dev, ABS_MT_POSITION_Y,
518 finger[id].y);
519 } else {
520 finger[id].status = 0;
521 }
515 } 522 }
516 523
517 input_report_key(input_dev, BTN_TOUCH, finger_num > 0); 524 input_report_key(input_dev, BTN_TOUCH, finger_num > 0);
@@ -549,8 +556,13 @@ static void mxt_input_touchevent(struct mxt_data *data,
549 if (!(status & (MXT_PRESS | MXT_MOVE))) 556 if (!(status & (MXT_PRESS | MXT_MOVE)))
550 return; 557 return;
551 558
552 x = (message->message[1] << 2) | ((message->message[3] & ~0x3f) >> 6); 559 x = (message->message[1] << 4) | ((message->message[3] >> 4) & 0xf);
553 y = (message->message[2] << 2) | ((message->message[3] & ~0xf3) >> 2); 560 y = (message->message[2] << 4) | ((message->message[3] & 0xf));
561 if (data->max_x < 1024)
562 x = x >> 2;
563 if (data->max_y < 1024)
564 y = y >> 2;
565
554 area = message->message[4]; 566 area = message->message[4];
555 567
556 dev_dbg(dev, "[%d] %s x: %d, y: %d, area: %d\n", id, 568 dev_dbg(dev, "[%d] %s x: %d, y: %d, area: %d\n", id,
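
The new decode reads the T9 position as 12-bit values: message[1] and message[2] hold the high eight bits of X and Y, and the two nibbles of message[3] hold their low four bits; when the configured range is below 1024 the controller reports only 10 bits, so the value is shifted right by two. A self-contained sketch of the same unpacking, with made-up message bytes and assumed axis ranges:

#include <stdio.h>

int main(void)
{
	/* Hypothetical T9 message bytes: status, x-high, y-high, xy-low, area */
	unsigned char msg[5] = { 0x90, 0x12, 0x34, 0x5a, 0x08 };
	unsigned max_x = 1023, max_y = 4095;	/* assumed ranges */

	unsigned x = (msg[1] << 4) | ((msg[3] >> 4) & 0xf);
	unsigned y = (msg[2] << 4) | (msg[3] & 0xf);

	if (max_x < 1024)	/* controller reported a 10-bit value */
		x >>= 2;
	if (max_y < 1024)
		y >>= 2;

	printf("x=%u y=%u\n", x, y);	/* prints: x=73 y=842 */
	return 0;
}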
@@ -804,10 +816,6 @@ static int mxt_initialize(struct mxt_data *data)
804 if (error) 816 if (error)
805 return error; 817 return error;
806 818
807 error = mxt_make_highchg(data);
808 if (error)
809 return error;
810
811 mxt_handle_pdata(data); 819 mxt_handle_pdata(data);
812 820
813 /* Backup to memory */ 821 /* Backup to memory */
@@ -845,6 +853,20 @@ static int mxt_initialize(struct mxt_data *data)
845 return 0; 853 return 0;
846} 854}
847 855
856static void mxt_calc_resolution(struct mxt_data *data)
857{
858 unsigned int max_x = data->pdata->x_size - 1;
859 unsigned int max_y = data->pdata->y_size - 1;
860
861 if (data->pdata->orient & MXT_XY_SWITCH) {
862 data->max_x = max_y;
863 data->max_y = max_x;
864 } else {
865 data->max_x = max_x;
866 data->max_y = max_y;
867 }
868}
869
848static ssize_t mxt_object_show(struct device *dev, 870static ssize_t mxt_object_show(struct device *dev,
849 struct device_attribute *attr, char *buf) 871 struct device_attribute *attr, char *buf)
850{ 872{
@@ -981,6 +1003,10 @@ static ssize_t mxt_update_fw_store(struct device *dev,
981 1003
982 enable_irq(data->irq); 1004 enable_irq(data->irq);
983 1005
1006 error = mxt_make_highchg(data);
1007 if (error)
1008 return error;
1009
984 return count; 1010 return count;
985} 1011}
986 1012
@@ -1052,31 +1078,33 @@ static int __devinit mxt_probe(struct i2c_client *client,
1052 input_dev->open = mxt_input_open; 1078 input_dev->open = mxt_input_open;
1053 input_dev->close = mxt_input_close; 1079 input_dev->close = mxt_input_close;
1054 1080
1081 data->client = client;
1082 data->input_dev = input_dev;
1083 data->pdata = pdata;
1084 data->irq = client->irq;
1085
1086 mxt_calc_resolution(data);
1087
1055 __set_bit(EV_ABS, input_dev->evbit); 1088 __set_bit(EV_ABS, input_dev->evbit);
1056 __set_bit(EV_KEY, input_dev->evbit); 1089 __set_bit(EV_KEY, input_dev->evbit);
1057 __set_bit(BTN_TOUCH, input_dev->keybit); 1090 __set_bit(BTN_TOUCH, input_dev->keybit);
1058 1091
1059 /* For single touch */ 1092 /* For single touch */
1060 input_set_abs_params(input_dev, ABS_X, 1093 input_set_abs_params(input_dev, ABS_X,
1061 0, MXT_MAX_XC, 0, 0); 1094 0, data->max_x, 0, 0);
1062 input_set_abs_params(input_dev, ABS_Y, 1095 input_set_abs_params(input_dev, ABS_Y,
1063 0, MXT_MAX_YC, 0, 0); 1096 0, data->max_y, 0, 0);
1064 1097
1065 /* For multi touch */ 1098 /* For multi touch */
1099 input_mt_init_slots(input_dev, MXT_MAX_FINGER);
1066 input_set_abs_params(input_dev, ABS_MT_TOUCH_MAJOR, 1100 input_set_abs_params(input_dev, ABS_MT_TOUCH_MAJOR,
1067 0, MXT_MAX_AREA, 0, 0); 1101 0, MXT_MAX_AREA, 0, 0);
1068 input_set_abs_params(input_dev, ABS_MT_POSITION_X, 1102 input_set_abs_params(input_dev, ABS_MT_POSITION_X,
1069 0, MXT_MAX_XC, 0, 0); 1103 0, data->max_x, 0, 0);
1070 input_set_abs_params(input_dev, ABS_MT_POSITION_Y, 1104 input_set_abs_params(input_dev, ABS_MT_POSITION_Y,
1071 0, MXT_MAX_YC, 0, 0); 1105 0, data->max_y, 0, 0);
1072 1106
1073 input_set_drvdata(input_dev, data); 1107 input_set_drvdata(input_dev, data);
1074
1075 data->client = client;
1076 data->input_dev = input_dev;
1077 data->pdata = pdata;
1078 data->irq = client->irq;
1079
1080 i2c_set_clientdata(client, data); 1108 i2c_set_clientdata(client, data);
1081 1109
1082 error = mxt_initialize(data); 1110 error = mxt_initialize(data);
@@ -1090,6 +1118,10 @@ static int __devinit mxt_probe(struct i2c_client *client,
1090 goto err_free_object; 1118 goto err_free_object;
1091 } 1119 }
1092 1120
1121 error = mxt_make_highchg(data);
1122 if (error)
1123 goto err_free_irq;
1124
1093 error = input_register_device(input_dev); 1125 error = input_register_device(input_dev);
1094 if (error) 1126 if (error)
1095 goto err_free_irq; 1127 goto err_free_irq;
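
The atmel_mxt_ts hunks above move the driver from the anonymous type-A multitouch protocol (input_mt_sync() after each contact) to the slotted type-B protocol, where every contact is addressed by a stable slot id and reported only while it is active. A minimal sketch of the reporting sequence; the contact array stands in for the driver's finger state, and input_mt_init_slots() is assumed to have been called at probe time, as in the hunk above:

#include <linux/input/mt.h>

#define N_SLOTS 10

struct contact { bool active; int x, y, area; };

static void report_contacts(struct input_dev *dev, const struct contact *c)
{
	int id;

	for (id = 0; id < N_SLOTS; id++) {
		input_mt_slot(dev, id);		/* address the slot */
		input_mt_report_slot_state(dev, MT_TOOL_FINGER, c[id].active);
		if (c[id].active) {
			input_report_abs(dev, ABS_MT_TOUCH_MAJOR, c[id].area);
			input_report_abs(dev, ABS_MT_POSITION_X, c[id].x);
			input_report_abs(dev, ABS_MT_POSITION_Y, c[id].y);
		}
	}
	input_sync(dev);
}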
diff --git a/drivers/input/touchscreen/atmel_tsadcc.c b/drivers/input/touchscreen/atmel_tsadcc.c
index 3d9b5166ebe9..432c69be6ac6 100644
--- a/drivers/input/touchscreen/atmel_tsadcc.c
+++ b/drivers/input/touchscreen/atmel_tsadcc.c
@@ -317,7 +317,7 @@ err_unmap_regs:
317err_release_mem: 317err_release_mem:
318 release_mem_region(res->start, resource_size(res)); 318 release_mem_region(res->start, resource_size(res));
319err_free_dev: 319err_free_dev:
320 input_free_device(ts_dev->input); 320 input_free_device(input_dev);
321err_free_mem: 321err_free_mem:
322 kfree(ts_dev); 322 kfree(ts_dev);
323 return err; 323 return err;
diff --git a/drivers/input/touchscreen/h3600_ts_input.c b/drivers/input/touchscreen/h3600_ts_input.c
index 45f93d0f5592..211811ae5525 100644
--- a/drivers/input/touchscreen/h3600_ts_input.c
+++ b/drivers/input/touchscreen/h3600_ts_input.c
@@ -396,14 +396,14 @@ static int h3600ts_connect(struct serio *serio, struct serio_driver *drv)
396 set_GPIO_IRQ_edge(GPIO_BITSY_NPOWER_BUTTON, GPIO_RISING_EDGE); 396 set_GPIO_IRQ_edge(GPIO_BITSY_NPOWER_BUTTON, GPIO_RISING_EDGE);
397 397
398 if (request_irq(IRQ_GPIO_BITSY_ACTION_BUTTON, action_button_handler, 398 if (request_irq(IRQ_GPIO_BITSY_ACTION_BUTTON, action_button_handler,
399 IRQF_SHARED | IRQF_DISABLED, "h3600_action", &ts->dev)) { 399 IRQF_SHARED | IRQF_DISABLED, "h3600_action", ts->dev)) {
400 printk(KERN_ERR "h3600ts.c: Could not allocate Action Button IRQ!\n"); 400 printk(KERN_ERR "h3600ts.c: Could not allocate Action Button IRQ!\n");
401 err = -EBUSY; 401 err = -EBUSY;
402 goto fail1; 402 goto fail1;
403 } 403 }
404 404
405 if (request_irq(IRQ_GPIO_BITSY_NPOWER_BUTTON, npower_button_handler, 405 if (request_irq(IRQ_GPIO_BITSY_NPOWER_BUTTON, npower_button_handler,
406 IRQF_SHARED | IRQF_DISABLED, "h3600_suspend", &ts->dev)) { 406 IRQF_SHARED | IRQF_DISABLED, "h3600_suspend", ts->dev)) {
407 printk(KERN_ERR "h3600ts.c: Could not allocate Power Button IRQ!\n"); 407 printk(KERN_ERR "h3600ts.c: Could not allocate Power Button IRQ!\n");
408 err = -EBUSY; 408 err = -EBUSY;
409 goto fail2; 409 goto fail2;
@@ -439,8 +439,8 @@ static void h3600ts_disconnect(struct serio *serio)
439{ 439{
440 struct h3600_dev *ts = serio_get_drvdata(serio); 440 struct h3600_dev *ts = serio_get_drvdata(serio);
441 441
442 free_irq(IRQ_GPIO_BITSY_ACTION_BUTTON, &ts->dev); 442 free_irq(IRQ_GPIO_BITSY_ACTION_BUTTON, ts->dev);
443 free_irq(IRQ_GPIO_BITSY_NPOWER_BUTTON, &ts->dev); 443 free_irq(IRQ_GPIO_BITSY_NPOWER_BUTTON, ts->dev);
444 input_get_device(ts->dev); 444 input_get_device(ts->dev);
445 input_unregister_device(ts->dev); 445 input_unregister_device(ts->dev);
446 serio_close(serio); 446 serio_close(serio);
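
The h3600 fix works because the dev_id cookie identifies a handler on a shared interrupt line: free_irq() must receive exactly the pointer that was given to request_irq(), and with IRQF_SHARED the cookie must also be non-NULL and unique. A short sketch of the invariant (the function names are illustrative):

#include <linux/interrupt.h>

static int hook_button_irq(unsigned int irq, irq_handler_t handler,
			   void *cookie)
{
	/* For shared lines the cookie must be non-NULL and unique */
	return request_irq(irq, handler, IRQF_SHARED, "h3600_button", cookie);
}

static void unhook_button_irq(unsigned int irq, void *cookie)
{
	free_irq(irq, cookie);	/* must match the request_irq() cookie */
}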
diff --git a/drivers/input/touchscreen/max11801_ts.c b/drivers/input/touchscreen/max11801_ts.c
new file mode 100644
index 000000000000..4f2713d92791
--- /dev/null
+++ b/drivers/input/touchscreen/max11801_ts.c
@@ -0,0 +1,272 @@
1/*
2 * Driver for the Maxim MAX11801 - a resistive touch screen controller
3 * with an I2C interface
4 *
5 * Copyright (C) 2011 Freescale Semiconductor, Inc.
6 * Author: Zhang Jiejing <jiejing.zhang@freescale.com>
7 *
8 * Based on mcs5000_ts.c
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License.
13 */
14
15/*
16 * This driver aims to support the series of Maxim touch chips max11800
17 * through max11803. The main differences between these 4 chips are
18 * summarized in the table below:
19 * -----------------------------------------------------
20 * | CHIP | AUTO MODE SUPPORT(FIFO) | INTERFACE |
21 * |----------------------------------------------------|
22 * | max11800 | YES | SPI |
23 * | max11801 | YES | I2C |
24 * | max11802 | NO | SPI |
25 * | max11803 | NO | I2C |
26 * ------------------------------------------------------
27 *
28 * Currently, this driver only supports max11801.
29 *
30 * Data Sheet:
31 * http://www.maxim-ic.com/datasheet/index.mvp/id/5943
32 */
33
34#include <linux/module.h>
35#include <linux/init.h>
36#include <linux/i2c.h>
37#include <linux/interrupt.h>
38#include <linux/input.h>
39#include <linux/slab.h>
40#include <linux/bitops.h>
41
42/* Register Address define */
43#define GENERNAL_STATUS_REG 0x00
44#define GENERNAL_CONF_REG 0x01
45#define MESURE_RES_CONF_REG 0x02
46#define MESURE_AVER_CONF_REG 0x03
47#define ADC_SAMPLE_TIME_CONF_REG 0x04
48#define PANEL_SETUPTIME_CONF_REG 0x05
49#define DELAY_CONVERSION_CONF_REG 0x06
50#define TOUCH_DETECT_PULLUP_CONF_REG 0x07
51#define AUTO_MODE_TIME_CONF_REG 0x08 /* only for max11800/max11801 */
52#define APERTURE_CONF_REG 0x09 /* only for max11800/max11801 */
53#define AUX_MESURE_CONF_REG 0x0a
54#define OP_MODE_CONF_REG 0x0b
55
56/* FIFO is found only in max11800 and max11801 */
57#define FIFO_RD_CMD (0x50 << 1)
58#define MAX11801_FIFO_INT (1 << 2)
59#define MAX11801_FIFO_OVERFLOW (1 << 3)
60
61#define XY_BUFSIZE 4
62#define XY_BUF_OFFSET 4
63
64#define MAX11801_MAX_X 0xfff
65#define MAX11801_MAX_Y 0xfff
66
67#define MEASURE_TAG_OFFSET 2
68#define MEASURE_TAG_MASK (3 << MEASURE_TAG_OFFSET)
69#define EVENT_TAG_OFFSET 0
70#define EVENT_TAG_MASK (3 << EVENT_TAG_OFFSET)
71#define MEASURE_X_TAG (0 << MEASURE_TAG_OFFSET)
72#define MEASURE_Y_TAG (1 << MEASURE_TAG_OFFSET)
73
74/* These are the states of the touch event state machine */
75enum {
76 EVENT_INIT,
77 EVENT_MIDDLE,
78 EVENT_RELEASE,
79 EVENT_FIFO_END
80};
81
82struct max11801_data {
83 struct i2c_client *client;
84 struct input_dev *input_dev;
85};
86
87static u8 read_register(struct i2c_client *client, int addr)
88{
89 /* XXX: The chip ignores LSB of register address */
90 return i2c_smbus_read_byte_data(client, addr << 1);
91}
92
93static int max11801_write_reg(struct i2c_client *client, int addr, int data)
94{
95 /* XXX: The chip ignores LSB of register address */
96 return i2c_smbus_write_byte_data(client, addr << 1, data);
97}
98
99static irqreturn_t max11801_ts_interrupt(int irq, void *dev_id)
100{
101 struct max11801_data *data = dev_id;
102 struct i2c_client *client = data->client;
103 int status, i, ret;
104 u8 buf[XY_BUFSIZE];
105 int x = -1;
106 int y = -1;
107
108 status = read_register(data->client, GENERNAL_STATUS_REG);
109
110 if (status & (MAX11801_FIFO_INT | MAX11801_FIFO_OVERFLOW)) {
111 status = read_register(data->client, GENERNAL_STATUS_REG);
112
113 ret = i2c_smbus_read_i2c_block_data(client, FIFO_RD_CMD,
114 XY_BUFSIZE, buf);
115
116 /*
117 * We should get a 4-byte buffer that contains the X and Y
118 * measurements and the event tag
119 */
120 if (ret < XY_BUFSIZE)
121 goto out;
122
123 for (i = 0; i < XY_BUFSIZE; i += XY_BUFSIZE / 2) {
124 if ((buf[i + 1] & MEASURE_TAG_MASK) == MEASURE_X_TAG)
125 x = (buf[i] << XY_BUF_OFFSET) +
126 (buf[i + 1] >> XY_BUF_OFFSET);
127 else if ((buf[i + 1] & MEASURE_TAG_MASK) == MEASURE_Y_TAG)
128 y = (buf[i] << XY_BUF_OFFSET) +
129 (buf[i + 1] >> XY_BUF_OFFSET);
130 }
131
132 if ((buf[1] & EVENT_TAG_MASK) != (buf[3] & EVENT_TAG_MASK))
133 goto out;
134
135 switch (buf[1] & EVENT_TAG_MASK) {
136 case EVENT_INIT:
137 /* fall through */
138 case EVENT_MIDDLE:
139 input_report_abs(data->input_dev, ABS_X, x);
140 input_report_abs(data->input_dev, ABS_Y, y);
141 input_event(data->input_dev, EV_KEY, BTN_TOUCH, 1);
142 input_sync(data->input_dev);
143 break;
144
145 case EVENT_RELEASE:
146 input_event(data->input_dev, EV_KEY, BTN_TOUCH, 0);
147 input_sync(data->input_dev);
148 break;
149
150 case EVENT_FIFO_END:
151 break;
152 }
153 }
154out:
155 return IRQ_HANDLED;
156}
157
158static void __devinit max11801_ts_phy_init(struct max11801_data *data)
159{
160 struct i2c_client *client = data->client;
161
162 /* Average X,Y, take 16 samples, average of eight median samples */
163 max11801_write_reg(client, MESURE_AVER_CONF_REG, 0xff);
164 /* X,Y panel setup time set to 20us */
165 max11801_write_reg(client, PANEL_SETUPTIME_CONF_REG, 0x11);
166 /* Rough pullup time (2us), fine pullup time (10us) */
167 max11801_write_reg(client, TOUCH_DETECT_PULLUP_CONF_REG, 0x10);
168 /* Auto mode init period = 5ms, scan period = 5ms */
169 max11801_write_reg(client, AUTO_MODE_TIME_CONF_REG, 0xaa);
170 /* Aperture X,Y set to +- 4LSB */
171 max11801_write_reg(client, APERTURE_CONF_REG, 0x33);
172 /* Enable Power, enable Automode, enable Aperture, enable Average X,Y */
173 max11801_write_reg(client, OP_MODE_CONF_REG, 0x36);
174}
175
176static int __devinit max11801_ts_probe(struct i2c_client *client,
177 const struct i2c_device_id *id)
178{
179 struct max11801_data *data;
180 struct input_dev *input_dev;
181 int error;
182
183 data = kzalloc(sizeof(struct max11801_data), GFP_KERNEL);
184 input_dev = input_allocate_device();
185 if (!data || !input_dev) {
186 dev_err(&client->dev, "Failed to allocate memory\n");
187 error = -ENOMEM;
188 goto err_free_mem;
189 }
190
191 data->client = client;
192 data->input_dev = input_dev;
193
194 input_dev->name = "max11801_ts";
195 input_dev->id.bustype = BUS_I2C;
196 input_dev->dev.parent = &client->dev;
197
198 __set_bit(EV_ABS, input_dev->evbit);
199 __set_bit(EV_KEY, input_dev->evbit);
200 __set_bit(BTN_TOUCH, input_dev->keybit);
201 input_set_abs_params(input_dev, ABS_X, 0, MAX11801_MAX_X, 0, 0);
202 input_set_abs_params(input_dev, ABS_Y, 0, MAX11801_MAX_Y, 0, 0);
203 input_set_drvdata(input_dev, data);
204
205 max11801_ts_phy_init(data);
206
207 error = request_threaded_irq(client->irq, NULL, max11801_ts_interrupt,
208 IRQF_TRIGGER_LOW | IRQF_ONESHOT,
209 "max11801_ts", data);
210 if (error) {
211 dev_err(&client->dev, "Failed to register interrupt\n");
212 goto err_free_mem;
213 }
214
215 error = input_register_device(data->input_dev);
216 if (error)
217 goto err_free_irq;
218
219 i2c_set_clientdata(client, data);
220 return 0;
221
222err_free_irq:
223 free_irq(client->irq, data);
224err_free_mem:
225 input_free_device(input_dev);
226 kfree(data);
227 return error;
228}
229
230static __devexit int max11801_ts_remove(struct i2c_client *client)
231{
232 struct max11801_data *data = i2c_get_clientdata(client);
233
234 free_irq(client->irq, data);
235 input_unregister_device(data->input_dev);
236 kfree(data);
237
238 return 0;
239}
240
241static const struct i2c_device_id max11801_ts_id[] = {
242 {"max11801", 0},
243 { }
244};
245MODULE_DEVICE_TABLE(i2c, max11801_ts_id);
246
247static struct i2c_driver max11801_ts_driver = {
248 .driver = {
249 .name = "max11801_ts",
250 .owner = THIS_MODULE,
251 },
252 .id_table = max11801_ts_id,
253 .probe = max11801_ts_probe,
254 .remove = __devexit_p(max11801_ts_remove),
255};
256
257static int __init max11801_ts_init(void)
258{
259 return i2c_add_driver(&max11801_ts_driver);
260}
261
262static void __exit max11801_ts_exit(void)
263{
264 i2c_del_driver(&max11801_ts_driver);
265}
266
267module_init(max11801_ts_init);
268module_exit(max11801_ts_exit);
269
270MODULE_AUTHOR("Zhang Jiejing <jiejing.zhang@freescale.com>");
271MODULE_DESCRIPTION("Touchscreen driver for Maxim MAX11801 controller");
272MODULE_LICENSE("GPL");
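
max11801_ts binds through the generic I2C device model, so the bus, the slave address and the interrupt line come from board code rather than from the driver. A hedged sketch of how a platform of this era might declare the device; the bus number, the 0x48 address and the GPIO line are illustrative assumptions, not datasheet values:

#include <linux/i2c.h>
#include <linux/gpio.h>

/* Hypothetical board glue; "max11801" must match max11801_ts_id[] */
static struct i2c_board_info board_ts_info __initdata = {
	I2C_BOARD_INFO("max11801", 0x48),
};

static void __init board_register_touch(void)
{
	board_ts_info.irq = gpio_to_irq(100);	/* pen interrupt line */
	i2c_register_board_info(1, &board_ts_info, 1);	/* I2C bus 1 */
}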
diff --git a/drivers/input/touchscreen/tsc2007.c b/drivers/input/touchscreen/tsc2007.c
index 80467f262331..fadc11545b1e 100644
--- a/drivers/input/touchscreen/tsc2007.c
+++ b/drivers/input/touchscreen/tsc2007.c
@@ -27,9 +27,6 @@
27#include <linux/i2c.h> 27#include <linux/i2c.h>
28#include <linux/i2c/tsc2007.h> 28#include <linux/i2c/tsc2007.h>
29 29
30#define TS_POLL_DELAY 1 /* ms delay between samples */
31#define TS_POLL_PERIOD 1 /* ms delay between samples */
32
33#define TSC2007_MEASURE_TEMP0 (0x0 << 4) 30#define TSC2007_MEASURE_TEMP0 (0x0 << 4)
34#define TSC2007_MEASURE_AUX (0x2 << 4) 31#define TSC2007_MEASURE_AUX (0x2 << 4)
35#define TSC2007_MEASURE_TEMP1 (0x4 << 4) 32#define TSC2007_MEASURE_TEMP1 (0x4 << 4)
@@ -75,6 +72,9 @@ struct tsc2007 {
75 72
76 u16 model; 73 u16 model;
77 u16 x_plate_ohms; 74 u16 x_plate_ohms;
75 u16 max_rt;
76 unsigned long poll_delay;
77 unsigned long poll_period;
78 78
79 bool pendown; 79 bool pendown;
80 int irq; 80 int irq;
@@ -156,6 +156,7 @@ static void tsc2007_work(struct work_struct *work)
156{ 156{
157 struct tsc2007 *ts = 157 struct tsc2007 *ts =
158 container_of(to_delayed_work(work), struct tsc2007, work); 158 container_of(to_delayed_work(work), struct tsc2007, work);
159 bool debounced = false;
159 struct ts_event tc; 160 struct ts_event tc;
160 u32 rt; 161 u32 rt;
161 162
@@ -184,13 +185,14 @@ static void tsc2007_work(struct work_struct *work)
184 tsc2007_read_values(ts, &tc); 185 tsc2007_read_values(ts, &tc);
185 186
186 rt = tsc2007_calculate_pressure(ts, &tc); 187 rt = tsc2007_calculate_pressure(ts, &tc);
187 if (rt > MAX_12BIT) { 188 if (rt > ts->max_rt) {
188 /* 189 /*
189 * Sample found inconsistent by debouncing or pressure is 190 * Sample found inconsistent by debouncing or pressure is
190 * beyond the maximum. Don't report it to user space, 191 * beyond the maximum. Don't report it to user space,
191 * repeat the measurement at least once more. 192 * repeat the measurement at least once more.
192 */ 193 */
193 dev_dbg(&ts->client->dev, "ignored pressure %d\n", rt); 194 dev_dbg(&ts->client->dev, "ignored pressure %d\n", rt);
195 debounced = true;
194 goto out; 196 goto out;
195 197
196 } 198 }
@@ -225,9 +227,9 @@ static void tsc2007_work(struct work_struct *work)
225 } 227 }
226 228
227 out: 229 out:
228 if (ts->pendown) 230 if (ts->pendown || debounced)
229 schedule_delayed_work(&ts->work, 231 schedule_delayed_work(&ts->work,
230 msecs_to_jiffies(TS_POLL_PERIOD)); 232 msecs_to_jiffies(ts->poll_period));
231 else 233 else
232 enable_irq(ts->irq); 234 enable_irq(ts->irq);
233} 235}
@@ -239,7 +241,7 @@ static irqreturn_t tsc2007_irq(int irq, void *handle)
239 if (!ts->get_pendown_state || likely(ts->get_pendown_state())) { 241 if (!ts->get_pendown_state || likely(ts->get_pendown_state())) {
240 disable_irq_nosync(ts->irq); 242 disable_irq_nosync(ts->irq);
241 schedule_delayed_work(&ts->work, 243 schedule_delayed_work(&ts->work,
242 msecs_to_jiffies(TS_POLL_DELAY)); 244 msecs_to_jiffies(ts->poll_delay));
243 } 245 }
244 246
245 if (ts->clear_penirq) 247 if (ts->clear_penirq)
@@ -292,6 +294,9 @@ static int __devinit tsc2007_probe(struct i2c_client *client,
292 294
293 ts->model = pdata->model; 295 ts->model = pdata->model;
294 ts->x_plate_ohms = pdata->x_plate_ohms; 296 ts->x_plate_ohms = pdata->x_plate_ohms;
297 ts->max_rt = pdata->max_rt ? : MAX_12BIT;
298 ts->poll_delay = pdata->poll_delay ? : 1;
299 ts->poll_period = pdata->poll_period ? : 1;
295 ts->get_pendown_state = pdata->get_pendown_state; 300 ts->get_pendown_state = pdata->get_pendown_state;
296 ts->clear_penirq = pdata->clear_penirq; 301 ts->clear_penirq = pdata->clear_penirq;
297 302
@@ -305,9 +310,10 @@ static int __devinit tsc2007_probe(struct i2c_client *client,
305 input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS); 310 input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
306 input_dev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH); 311 input_dev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);
307 312
308 input_set_abs_params(input_dev, ABS_X, 0, MAX_12BIT, 0, 0); 313 input_set_abs_params(input_dev, ABS_X, 0, MAX_12BIT, pdata->fuzzx, 0);
309 input_set_abs_params(input_dev, ABS_Y, 0, MAX_12BIT, 0, 0); 314 input_set_abs_params(input_dev, ABS_Y, 0, MAX_12BIT, pdata->fuzzy, 0);
310 input_set_abs_params(input_dev, ABS_PRESSURE, 0, MAX_12BIT, 0, 0); 315 input_set_abs_params(input_dev, ABS_PRESSURE, 0, MAX_12BIT,
316 pdata->fuzzz, 0);
311 317
312 if (pdata->init_platform_hw) 318 if (pdata->init_platform_hw)
313 pdata->init_platform_hw(); 319 pdata->init_platform_hw();
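
The probe hunk uses GCC's binary conditional extension (a ? : b), which reuses the first operand as the result when it is non-zero and evaluates it only once; here it lets unset platform-data fields fall back to the previous hard-coded defaults. A plain-C equivalent of one assignment, runnable standalone:

#include <stdio.h>

#define MAX_12BIT ((1 << 12) - 1)

/* Equivalent of the GNU form  v ? : MAX_12BIT
 * (which additionally evaluates v only once) */
static unsigned pick_max_rt(unsigned v)
{
	return v ? v : MAX_12BIT;
}

int main(void)
{
	printf("%u %u\n", pick_max_rt(0), pick_max_rt(800));	/* 4095 800 */
	return 0;
}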
diff --git a/drivers/isdn/hardware/avm/avm_cs.c b/drivers/isdn/hardware/avm/avm_cs.c
index 91f06a3ef002..61f516f376dc 100644
--- a/drivers/isdn/hardware/avm/avm_cs.c
+++ b/drivers/isdn/hardware/avm/avm_cs.c
@@ -149,7 +149,7 @@ static void avmcs_release(struct pcmcia_device *link)
149} /* avmcs_release */ 149} /* avmcs_release */
150 150
151 151
152static struct pcmcia_device_id avmcs_ids[] = { 152static const struct pcmcia_device_id avmcs_ids[] = {
153 PCMCIA_DEVICE_PROD_ID12("AVM", "ISDN-Controller B1", 0x95d42008, 0x845dc335), 153 PCMCIA_DEVICE_PROD_ID12("AVM", "ISDN-Controller B1", 0x95d42008, 0x845dc335),
154 PCMCIA_DEVICE_PROD_ID12("AVM", "Mobile ISDN-Controller M1", 0x95d42008, 0x81e10430), 154 PCMCIA_DEVICE_PROD_ID12("AVM", "Mobile ISDN-Controller M1", 0x95d42008, 0x81e10430),
155 PCMCIA_DEVICE_PROD_ID12("AVM", "Mobile ISDN-Controller M2", 0x95d42008, 0x18e8558a), 155 PCMCIA_DEVICE_PROD_ID12("AVM", "Mobile ISDN-Controller M2", 0x95d42008, 0x18e8558a),
diff --git a/drivers/isdn/hisax/avma1_cs.c b/drivers/isdn/hisax/avma1_cs.c
index ac4dd7857cbd..8f0ad2a52e87 100644
--- a/drivers/isdn/hisax/avma1_cs.c
+++ b/drivers/isdn/hisax/avma1_cs.c
@@ -146,7 +146,7 @@ static void avma1cs_release(struct pcmcia_device *link)
146 pcmcia_disable_device(link); 146 pcmcia_disable_device(link);
147} /* avma1cs_release */ 147} /* avma1cs_release */
148 148
149static struct pcmcia_device_id avma1cs_ids[] = { 149static const struct pcmcia_device_id avma1cs_ids[] = {
150 PCMCIA_DEVICE_PROD_ID12("AVM", "ISDN A", 0x95d42008, 0xadc9d4bb), 150 PCMCIA_DEVICE_PROD_ID12("AVM", "ISDN A", 0x95d42008, 0xadc9d4bb),
151 PCMCIA_DEVICE_PROD_ID12("ISDN", "CARD", 0x8d9761c8, 0x01c5aa7b), 151 PCMCIA_DEVICE_PROD_ID12("ISDN", "CARD", 0x8d9761c8, 0x01c5aa7b),
152 PCMCIA_DEVICE_NULL 152 PCMCIA_DEVICE_NULL
diff --git a/drivers/isdn/hisax/elsa_cs.c b/drivers/isdn/hisax/elsa_cs.c
index 9e5e87be756b..f0b6c0ef99bb 100644
--- a/drivers/isdn/hisax/elsa_cs.c
+++ b/drivers/isdn/hisax/elsa_cs.c
@@ -200,7 +200,7 @@ static int elsa_resume(struct pcmcia_device *link)
200 return 0; 200 return 0;
201} 201}
202 202
203static struct pcmcia_device_id elsa_ids[] = { 203static const struct pcmcia_device_id elsa_ids[] = {
204 PCMCIA_DEVICE_PROD_ID12("ELSA AG (Aachen, Germany)", "MicroLink ISDN/MC ", 0x983de2c4, 0x333ba257), 204 PCMCIA_DEVICE_PROD_ID12("ELSA AG (Aachen, Germany)", "MicroLink ISDN/MC ", 0x983de2c4, 0x333ba257),
205 PCMCIA_DEVICE_PROD_ID12("ELSA GmbH, Aachen", "MicroLink ISDN/MC ", 0x639e5718, 0x333ba257), 205 PCMCIA_DEVICE_PROD_ID12("ELSA GmbH, Aachen", "MicroLink ISDN/MC ", 0x639e5718, 0x333ba257),
206 PCMCIA_DEVICE_NULL 206 PCMCIA_DEVICE_NULL
diff --git a/drivers/isdn/hisax/sedlbauer_cs.c b/drivers/isdn/hisax/sedlbauer_cs.c
index 360204bc2777..06473f81f039 100644
--- a/drivers/isdn/hisax/sedlbauer_cs.c
+++ b/drivers/isdn/hisax/sedlbauer_cs.c
@@ -186,7 +186,7 @@ static int sedlbauer_resume(struct pcmcia_device *link)
186} 186}
187 187
188 188
189static struct pcmcia_device_id sedlbauer_ids[] = { 189static const struct pcmcia_device_id sedlbauer_ids[] = {
190 PCMCIA_DEVICE_PROD_ID123("SEDLBAUER", "speed star II", "V 3.1", 0x81fb79f5, 0xf3612e1d, 0x6b95c78a), 190 PCMCIA_DEVICE_PROD_ID123("SEDLBAUER", "speed star II", "V 3.1", 0x81fb79f5, 0xf3612e1d, 0x6b95c78a),
191 PCMCIA_DEVICE_PROD_ID123("SEDLBAUER", "ISDN-Adapter", "4D67", 0x81fb79f5, 0xe4e9bc12, 0x397b7e90), 191 PCMCIA_DEVICE_PROD_ID123("SEDLBAUER", "ISDN-Adapter", "4D67", 0x81fb79f5, 0xe4e9bc12, 0x397b7e90),
192 PCMCIA_DEVICE_PROD_ID123("SEDLBAUER", "ISDN-Adapter", "4D98", 0x81fb79f5, 0xe4e9bc12, 0x2e5c7fce), 192 PCMCIA_DEVICE_PROD_ID123("SEDLBAUER", "ISDN-Adapter", "4D98", 0x81fb79f5, 0xe4e9bc12, 0x2e5c7fce),
diff --git a/drivers/isdn/hisax/teles_cs.c b/drivers/isdn/hisax/teles_cs.c
index 360f9ec7c802..161a1938552e 100644
--- a/drivers/isdn/hisax/teles_cs.c
+++ b/drivers/isdn/hisax/teles_cs.c
@@ -183,7 +183,7 @@ static int teles_resume(struct pcmcia_device *link)
183} 183}
184 184
185 185
186static struct pcmcia_device_id teles_ids[] = { 186static const struct pcmcia_device_id teles_ids[] = {
187 PCMCIA_DEVICE_PROD_ID12("TELES", "S0/PC", 0x67b50eae, 0xe9e70119), 187 PCMCIA_DEVICE_PROD_ID12("TELES", "S0/PC", 0x67b50eae, 0xe9e70119),
188 PCMCIA_DEVICE_NULL, 188 PCMCIA_DEVICE_NULL,
189}; 189};
diff --git a/drivers/mmc/host/sdricoh_cs.c b/drivers/mmc/host/sdricoh_cs.c
index bbc298fd2a15..496b7efbc6b0 100644
--- a/drivers/mmc/host/sdricoh_cs.c
+++ b/drivers/mmc/host/sdricoh_cs.c
@@ -76,7 +76,7 @@ static unsigned int switchlocked;
76#define BUSY_TIMEOUT 32767 76#define BUSY_TIMEOUT 32767
77 77
78/* list of supported pcmcia devices */ 78/* list of supported pcmcia devices */
79static struct pcmcia_device_id pcmcia_ids[] = { 79static const struct pcmcia_device_id pcmcia_ids[] = {
80 /* vendor and device strings followed by their crc32 hashes */ 80 /* vendor and device strings followed by their crc32 hashes */
81 PCMCIA_DEVICE_PROD_ID12("RICOH", "Bay1Controller", 0xd9f522ed, 81 PCMCIA_DEVICE_PROD_ID12("RICOH", "Bay1Controller", 0xd9f522ed,
82 0xc3901202), 82 0xc3901202),
diff --git a/drivers/mtd/maps/pcmciamtd.c b/drivers/mtd/maps/pcmciamtd.c
index 6799e75d74e0..33dc2829b01b 100644
--- a/drivers/mtd/maps/pcmciamtd.c
+++ b/drivers/mtd/maps/pcmciamtd.c
@@ -694,7 +694,7 @@ static int pcmciamtd_probe(struct pcmcia_device *link)
694 return pcmciamtd_config(link); 694 return pcmciamtd_config(link);
695} 695}
696 696
697static struct pcmcia_device_id pcmciamtd_ids[] = { 697static const struct pcmcia_device_id pcmciamtd_ids[] = {
698 PCMCIA_DEVICE_FUNC_ID(1), 698 PCMCIA_DEVICE_FUNC_ID(1),
699 PCMCIA_DEVICE_PROD_ID123("IO DATA", "PCS-2M", "2MB SRAM", 0x547e66dc, 0x1fed36cd, 0x36eadd21), 699 PCMCIA_DEVICE_PROD_ID123("IO DATA", "PCS-2M", "2MB SRAM", 0x547e66dc, 0x1fed36cd, 0x36eadd21),
700 PCMCIA_DEVICE_PROD_ID12("IBM", "2MB SRAM", 0xb569a6e5, 0x36eadd21), 700 PCMCIA_DEVICE_PROD_ID12("IBM", "2MB SRAM", 0xb569a6e5, 0x36eadd21),
diff --git a/drivers/mtd/ubi/cdev.c b/drivers/mtd/ubi/cdev.c
index af9fb0ff8210..191f3bb3c41a 100644
--- a/drivers/mtd/ubi/cdev.c
+++ b/drivers/mtd/ubi/cdev.c
@@ -115,7 +115,7 @@ static int vol_cdev_open(struct inode *inode, struct file *file)
115 mode = UBI_READONLY; 115 mode = UBI_READONLY;
116 116
117 dbg_gen("open device %d, volume %d, mode %d", 117 dbg_gen("open device %d, volume %d, mode %d",
118 ubi_num, vol_id, mode); 118 ubi_num, vol_id, mode);
119 119
120 desc = ubi_open_volume(ubi_num, vol_id, mode); 120 desc = ubi_open_volume(ubi_num, vol_id, mode);
121 if (IS_ERR(desc)) 121 if (IS_ERR(desc))
@@ -158,7 +158,7 @@ static loff_t vol_cdev_llseek(struct file *file, loff_t offset, int origin)
158 loff_t new_offset; 158 loff_t new_offset;
159 159
160 if (vol->updating) { 160 if (vol->updating) {
161 /* Update is in progress, seeking is prohibited */ 161 /* Update is in progress, seeking is prohibited */
162 dbg_err("updating"); 162 dbg_err("updating");
163 return -EBUSY; 163 return -EBUSY;
164 } 164 }
@@ -561,18 +561,18 @@ static long vol_cdev_ioctl(struct file *file, unsigned int cmd,
561 } 561 }
562 562
563 /* Set volume property command */ 563 /* Set volume property command */
564 case UBI_IOCSETPROP: 564 case UBI_IOCSETVOLPROP:
565 { 565 {
566 struct ubi_set_prop_req req; 566 struct ubi_set_vol_prop_req req;
567 567
568 err = copy_from_user(&req, argp, 568 err = copy_from_user(&req, argp,
569 sizeof(struct ubi_set_prop_req)); 569 sizeof(struct ubi_set_vol_prop_req));
570 if (err) { 570 if (err) {
571 err = -EFAULT; 571 err = -EFAULT;
572 break; 572 break;
573 } 573 }
574 switch (req.property) { 574 switch (req.property) {
575 case UBI_PROP_DIRECT_WRITE: 575 case UBI_VOL_PROP_DIRECT_WRITE:
576 mutex_lock(&ubi->device_mutex); 576 mutex_lock(&ubi->device_mutex);
577 desc->vol->direct_writes = !!req.value; 577 desc->vol->direct_writes = !!req.value;
578 mutex_unlock(&ubi->device_mutex); 578 mutex_unlock(&ubi->device_mutex);
@@ -1100,5 +1100,5 @@ const struct file_operations ubi_ctrl_cdev_operations = {
1100 .owner = THIS_MODULE, 1100 .owner = THIS_MODULE,
1101 .unlocked_ioctl = ctrl_cdev_ioctl, 1101 .unlocked_ioctl = ctrl_cdev_ioctl,
1102 .compat_ioctl = ctrl_cdev_compat_ioctl, 1102 .compat_ioctl = ctrl_cdev_compat_ioctl,
1103 .llseek = noop_llseek, 1103 .llseek = no_llseek,
1104}; 1104};
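
The llseek change at the end of this hunk is behavioral, not cosmetic: noop_llseek() lets a seek "succeed" while leaving the file position untouched, whereas no_llseek() rejects seeking outright. The two VFS helpers amount to the following (a simplified rendering of their semantics, with hypothetical like_* names):

#include <linux/fs.h>

/* noop_llseek(): seeking succeeds but changes nothing */
static loff_t like_noop_llseek(struct file *file, loff_t offset, int origin)
{
	return file->f_pos;
}

/* no_llseek(): seeking is not supported at all */
static loff_t like_no_llseek(struct file *file, loff_t offset, int origin)
{
	return -ESPIPE;
}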
diff --git a/drivers/mtd/ubi/debug.c b/drivers/mtd/ubi/debug.c
index d4d07e5f138f..2224cbe41ddf 100644
--- a/drivers/mtd/ubi/debug.c
+++ b/drivers/mtd/ubi/debug.c
@@ -30,15 +30,12 @@
30#include <linux/module.h> 30#include <linux/module.h>
31#include <linux/moduleparam.h> 31#include <linux/moduleparam.h>
32 32
33unsigned int ubi_msg_flags;
34unsigned int ubi_chk_flags; 33unsigned int ubi_chk_flags;
35unsigned int ubi_tst_flags; 34unsigned int ubi_tst_flags;
36 35
37module_param_named(debug_msgs, ubi_msg_flags, uint, S_IRUGO | S_IWUSR);
38module_param_named(debug_chks, ubi_chk_flags, uint, S_IRUGO | S_IWUSR); 36module_param_named(debug_chks, ubi_chk_flags, uint, S_IRUGO | S_IWUSR);
39module_param_named(debug_tsts, ubi_tst_flags, uint, S_IRUGO | S_IWUSR); 37module_param_named(debug_tsts, ubi_tst_flags, uint, S_IRUGO | S_IWUSR);
40 38
41MODULE_PARM_DESC(debug_msgs, "Debug message type flags");
42MODULE_PARM_DESC(debug_chks, "Debug check flags"); 39MODULE_PARM_DESC(debug_chks, "Debug check flags");
43MODULE_PARM_DESC(debug_tsts, "Debug special test flags"); 40MODULE_PARM_DESC(debug_tsts, "Debug special test flags");
44 41
@@ -75,15 +72,15 @@ void ubi_dbg_dump_vid_hdr(const struct ubi_vid_hdr *vid_hdr)
75{ 72{
76 printk(KERN_DEBUG "Volume identifier header dump:\n"); 73 printk(KERN_DEBUG "Volume identifier header dump:\n");
77 printk(KERN_DEBUG "\tmagic %08x\n", be32_to_cpu(vid_hdr->magic)); 74 printk(KERN_DEBUG "\tmagic %08x\n", be32_to_cpu(vid_hdr->magic));
78 printk(KERN_DEBUG "\tversion %d\n", (int)vid_hdr->version); 75 printk(KERN_DEBUG "\tversion %d\n", (int)vid_hdr->version);
79 printk(KERN_DEBUG "\tvol_type %d\n", (int)vid_hdr->vol_type); 76 printk(KERN_DEBUG "\tvol_type %d\n", (int)vid_hdr->vol_type);
80 printk(KERN_DEBUG "\tcopy_flag %d\n", (int)vid_hdr->copy_flag); 77 printk(KERN_DEBUG "\tcopy_flag %d\n", (int)vid_hdr->copy_flag);
81 printk(KERN_DEBUG "\tcompat %d\n", (int)vid_hdr->compat); 78 printk(KERN_DEBUG "\tcompat %d\n", (int)vid_hdr->compat);
82 printk(KERN_DEBUG "\tvol_id %d\n", be32_to_cpu(vid_hdr->vol_id)); 79 printk(KERN_DEBUG "\tvol_id %d\n", be32_to_cpu(vid_hdr->vol_id));
83 printk(KERN_DEBUG "\tlnum %d\n", be32_to_cpu(vid_hdr->lnum)); 80 printk(KERN_DEBUG "\tlnum %d\n", be32_to_cpu(vid_hdr->lnum));
84 printk(KERN_DEBUG "\tdata_size %d\n", be32_to_cpu(vid_hdr->data_size)); 81 printk(KERN_DEBUG "\tdata_size %d\n", be32_to_cpu(vid_hdr->data_size));
85 printk(KERN_DEBUG "\tused_ebs %d\n", be32_to_cpu(vid_hdr->used_ebs)); 82 printk(KERN_DEBUG "\tused_ebs %d\n", be32_to_cpu(vid_hdr->used_ebs));
86 printk(KERN_DEBUG "\tdata_pad %d\n", be32_to_cpu(vid_hdr->data_pad)); 83 printk(KERN_DEBUG "\tdata_pad %d\n", be32_to_cpu(vid_hdr->data_pad));
87 printk(KERN_DEBUG "\tsqnum %llu\n", 84 printk(KERN_DEBUG "\tsqnum %llu\n",
88 (unsigned long long)be64_to_cpu(vid_hdr->sqnum)); 85 (unsigned long long)be64_to_cpu(vid_hdr->sqnum));
89 printk(KERN_DEBUG "\thdr_crc %08x\n", be32_to_cpu(vid_hdr->hdr_crc)); 86 printk(KERN_DEBUG "\thdr_crc %08x\n", be32_to_cpu(vid_hdr->hdr_crc));
diff --git a/drivers/mtd/ubi/debug.h b/drivers/mtd/ubi/debug.h
index 0b0c2888c656..3f1a09c5c438 100644
--- a/drivers/mtd/ubi/debug.h
+++ b/drivers/mtd/ubi/debug.h
@@ -21,11 +21,17 @@
21#ifndef __UBI_DEBUG_H__ 21#ifndef __UBI_DEBUG_H__
22#define __UBI_DEBUG_H__ 22#define __UBI_DEBUG_H__
23 23
24struct ubi_ec_hdr;
25struct ubi_vid_hdr;
26struct ubi_volume;
27struct ubi_vtbl_record;
28struct ubi_scan_volume;
29struct ubi_scan_leb;
30struct ubi_mkvol_req;
31
24#ifdef CONFIG_MTD_UBI_DEBUG 32#ifdef CONFIG_MTD_UBI_DEBUG
25#include <linux/random.h> 33#include <linux/random.h>
26 34
27#define dbg_err(fmt, ...) ubi_err(fmt, ##__VA_ARGS__)
28
29#define ubi_assert(expr) do { \ 35#define ubi_assert(expr) do { \
30 if (unlikely(!(expr))) { \ 36 if (unlikely(!(expr))) { \
31 printk(KERN_CRIT "UBI assert failed in %s at %u (pid %d)\n", \ 37 printk(KERN_CRIT "UBI assert failed in %s at %u (pid %d)\n", \
@@ -34,24 +40,28 @@
34 } \ 40 } \
35} while (0) 41} while (0)
36 42
37#define dbg_msg(fmt, ...) \ 43#define dbg_err(fmt, ...) ubi_err(fmt, ##__VA_ARGS__)
38 printk(KERN_DEBUG "UBI DBG (pid %d): %s: " fmt "\n", \
39 current->pid, __func__, ##__VA_ARGS__)
40
41#define dbg_do_msg(typ, fmt, ...) do { \
42 if (ubi_msg_flags & typ) \
43 dbg_msg(fmt, ##__VA_ARGS__); \
44} while (0)
45 44
46#define ubi_dbg_dump_stack() dump_stack() 45#define ubi_dbg_dump_stack() dump_stack()
47 46
48struct ubi_ec_hdr; 47#define ubi_dbg_print_hex_dump(l, ps, pt, r, g, b, len, a) \
49struct ubi_vid_hdr; 48 print_hex_dump(l, ps, pt, r, g, b, len, a)
50struct ubi_volume; 49
51struct ubi_vtbl_record; 50#define ubi_dbg_msg(type, fmt, ...) \
52struct ubi_scan_volume; 51 pr_debug("UBI DBG " type ": " fmt "\n", ##__VA_ARGS__)
53struct ubi_scan_leb; 52
54struct ubi_mkvol_req; 53/* Just debugging messages not related to any specific UBI subsystem */
54#define dbg_msg(fmt, ...) ubi_dbg_msg("msg", fmt, ##__VA_ARGS__)
55/* General debugging messages */
56#define dbg_gen(fmt, ...) ubi_dbg_msg("gen", fmt, ##__VA_ARGS__)
57/* Messages from the eraseblock association sub-system */
58#define dbg_eba(fmt, ...) ubi_dbg_msg("eba", fmt, ##__VA_ARGS__)
59/* Messages from the wear-leveling sub-system */
60#define dbg_wl(fmt, ...) ubi_dbg_msg("wl", fmt, ##__VA_ARGS__)
61/* Messages from the input/output sub-system */
62#define dbg_io(fmt, ...) ubi_dbg_msg("io", fmt, ##__VA_ARGS__)
63/* Initialization and build messages */
64#define dbg_bld(fmt, ...) ubi_dbg_msg("bld", fmt, ##__VA_ARGS__)
55 65
56void ubi_dbg_dump_ec_hdr(const struct ubi_ec_hdr *ec_hdr); 66void ubi_dbg_dump_ec_hdr(const struct ubi_ec_hdr *ec_hdr);
57void ubi_dbg_dump_vid_hdr(const struct ubi_vid_hdr *vid_hdr); 67void ubi_dbg_dump_vid_hdr(const struct ubi_vid_hdr *vid_hdr);
@@ -62,43 +72,6 @@ void ubi_dbg_dump_seb(const struct ubi_scan_leb *seb, int type);
62void ubi_dbg_dump_mkvol_req(const struct ubi_mkvol_req *req); 72void ubi_dbg_dump_mkvol_req(const struct ubi_mkvol_req *req);
63void ubi_dbg_dump_flash(struct ubi_device *ubi, int pnum, int offset, int len); 73void ubi_dbg_dump_flash(struct ubi_device *ubi, int pnum, int offset, int len);
64 74
65extern unsigned int ubi_msg_flags;
66
67/*
68 * Debugging message type flags (must match msg_type_names in debug.c).
69 *
70 * UBI_MSG_GEN: general messages
71 * UBI_MSG_EBA: journal messages
72 * UBI_MSG_WL: mount messages
73 * UBI_MSG_IO: commit messages
74 * UBI_MSG_BLD: LEB find messages
75 */
76enum {
77 UBI_MSG_GEN = 0x1,
78 UBI_MSG_EBA = 0x2,
79 UBI_MSG_WL = 0x4,
80 UBI_MSG_IO = 0x8,
81 UBI_MSG_BLD = 0x10,
82};
83
84#define ubi_dbg_print_hex_dump(l, ps, pt, r, g, b, len, a) \
85 print_hex_dump(l, ps, pt, r, g, b, len, a)
86
87/* General debugging messages */
88#define dbg_gen(fmt, ...) dbg_do_msg(UBI_MSG_GEN, fmt, ##__VA_ARGS__)
89
90/* Messages from the eraseblock association sub-system */
91#define dbg_eba(fmt, ...) dbg_do_msg(UBI_MSG_EBA, fmt, ##__VA_ARGS__)
92
93/* Messages from the wear-leveling sub-system */
94#define dbg_wl(fmt, ...) dbg_do_msg(UBI_MSG_WL, fmt, ##__VA_ARGS__)
95
96/* Messages from the input/output sub-system */
97#define dbg_io(fmt, ...) dbg_do_msg(UBI_MSG_IO, fmt, ##__VA_ARGS__)
98
99/* Initialization and build messages */
100#define dbg_bld(fmt, ...) dbg_do_msg(UBI_MSG_BLD, fmt, ##__VA_ARGS__)
101
102extern unsigned int ubi_chk_flags; 75extern unsigned int ubi_chk_flags;
103 76
104/* 77/*
@@ -184,31 +157,61 @@ static inline int ubi_dbg_is_erase_failure(void)
184 157
185#else 158#else
186 159
187#define ubi_assert(expr) ({}) 160/* Use "if (0)" to make compiler check arguments even if debugging is off */
188#define dbg_err(fmt, ...) ({}) 161#define ubi_assert(expr) do { \
189#define dbg_msg(fmt, ...) ({}) 162 if (0) { \
190#define dbg_gen(fmt, ...) ({}) 163 printk(KERN_CRIT "UBI assert failed in %s at %u (pid %d)\n", \
191#define dbg_eba(fmt, ...) ({}) 164 __func__, __LINE__, current->pid); \
192#define dbg_wl(fmt, ...) ({}) 165 } \
193#define dbg_io(fmt, ...) ({}) 166} while (0)
194#define dbg_bld(fmt, ...) ({}) 167
195#define ubi_dbg_dump_stack() ({}) 168#define dbg_err(fmt, ...) do { \
196#define ubi_dbg_dump_ec_hdr(ec_hdr) ({}) 169 if (0) \
197#define ubi_dbg_dump_vid_hdr(vid_hdr) ({}) 170 ubi_err(fmt, ##__VA_ARGS__); \
198#define ubi_dbg_dump_vol_info(vol) ({}) 171} while (0)
199#define ubi_dbg_dump_vtbl_record(r, idx) ({}) 172
200#define ubi_dbg_dump_sv(sv) ({}) 173#define ubi_dbg_msg(fmt, ...) do { \
201#define ubi_dbg_dump_seb(seb, type) ({}) 174 if (0) \
202#define ubi_dbg_dump_mkvol_req(req) ({}) 175 pr_debug(fmt "\n", ##__VA_ARGS__); \
203#define ubi_dbg_dump_flash(ubi, pnum, offset, len) ({}) 176} while (0)
204#define ubi_dbg_print_hex_dump(l, ps, pt, r, g, b, len, a) ({}) 177
205 178#define dbg_msg(fmt, ...) ubi_dbg_msg(fmt, ##__VA_ARGS__)
206#define ubi_dbg_is_bgt_disabled() 0 179#define dbg_gen(fmt, ...) ubi_dbg_msg(fmt, ##__VA_ARGS__)
207#define ubi_dbg_is_bitflip() 0 180#define dbg_eba(fmt, ...) ubi_dbg_msg(fmt, ##__VA_ARGS__)
208#define ubi_dbg_is_write_failure() 0 181#define dbg_wl(fmt, ...) ubi_dbg_msg(fmt, ##__VA_ARGS__)
209#define ubi_dbg_is_erase_failure() 0 182#define dbg_io(fmt, ...) ubi_dbg_msg(fmt, ##__VA_ARGS__)
210#define ubi_dbg_check_all_ff(ubi, pnum, offset, len) 0 183#define dbg_bld(fmt, ...) ubi_dbg_msg(fmt, ##__VA_ARGS__)
211#define ubi_dbg_check_write(ubi, buf, pnum, offset, len) 0 184
185static inline void ubi_dbg_dump_stack(void) { return; }
186static inline void
187ubi_dbg_dump_ec_hdr(const struct ubi_ec_hdr *ec_hdr) { return; }
188static inline void
189ubi_dbg_dump_vid_hdr(const struct ubi_vid_hdr *vid_hdr) { return; }
190static inline void
191ubi_dbg_dump_vol_info(const struct ubi_volume *vol) { return; }
192static inline void
193ubi_dbg_dump_vtbl_record(const struct ubi_vtbl_record *r, int idx) { return; }
194static inline void ubi_dbg_dump_sv(const struct ubi_scan_volume *sv) { return; }
195static inline void ubi_dbg_dump_seb(const struct ubi_scan_leb *seb,
196 int type) { return; }
197static inline void
198ubi_dbg_dump_mkvol_req(const struct ubi_mkvol_req *req) { return; }
199static inline void ubi_dbg_dump_flash(struct ubi_device *ubi,
200 int pnum, int offset, int len) { return; }
201static inline void
202ubi_dbg_print_hex_dump(const char *l, const char *ps, int pt, int r,
203 int g, const void *b, size_t len, bool a) { return; }
204
205static inline int ubi_dbg_is_bgt_disabled(void) { return 0; }
206static inline int ubi_dbg_is_bitflip(void) { return 0; }
207static inline int ubi_dbg_is_write_failure(void) { return 0; }
208static inline int ubi_dbg_is_erase_failure(void) { return 0; }
209static inline int ubi_dbg_check_all_ff(struct ubi_device *ubi,
210 int pnum, int offset,
211 int len) { return 0; }
212static inline int ubi_dbg_check_write(struct ubi_device *ubi,
213 const void *buf, int pnum,
214 int offset, int len) { return 0; }
212 215
213#endif /* !CONFIG_MTD_UBI_DEBUG */ 216#endif /* !CONFIG_MTD_UBI_DEBUG */
214#endif /* !__UBI_DEBUG_H__ */ 217#endif /* !__UBI_DEBUG_H__ */
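
The rewritten !CONFIG_MTD_UBI_DEBUG stubs replace the old empty ({}) macros with "if (0)" bodies so that the compiler still parses and type-checks every argument of a disabled call and then discards the provably dead branch. A standalone illustration of the pattern:

#include <stdio.h>

#define DEBUG 0

/* Arguments stay type-checked when DEBUG is 0; the dead branch
 * generates no code.  (##__VA_ARGS__ is a GNU extension.) */
#define dbg(fmt, ...) do { \
	if (DEBUG) \
		printf(fmt "\n", ##__VA_ARGS__); \
} while (0)

int main(void)
{
	dbg("value: %d", 42);	/* a bad format/argument pair still warns */
	return 0;
}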
diff --git a/drivers/mtd/ubi/io.c b/drivers/mtd/ubi/io.c
index e347cc4388ed..8c1b1c7bc4a7 100644
--- a/drivers/mtd/ubi/io.c
+++ b/drivers/mtd/ubi/io.c
@@ -189,8 +189,8 @@ retry:
189 } 189 }
190 190
191 if (retries++ < UBI_IO_RETRIES) { 191 if (retries++ < UBI_IO_RETRIES) {
192 dbg_io("error %d%s while reading %d bytes from PEB %d:%d," 192 dbg_io("error %d%s while reading %d bytes from PEB "
193 " read only %zd bytes, retry", 193 "%d:%d, read only %zd bytes, retry",
194 err, errstr, len, pnum, offset, read); 194 err, errstr, len, pnum, offset, read);
195 yield(); 195 yield();
196 goto retry; 196 goto retry;
@@ -465,7 +465,7 @@ static int torture_peb(struct ubi_device *ubi, int pnum)
465 } 465 }
466 466
467 err = patt_count; 467 err = patt_count;
468 ubi_msg("PEB %d passed torture test, do not mark it a bad", pnum); 468 ubi_msg("PEB %d passed torture test, do not mark it as bad", pnum);
469 469
470out: 470out:
471 mutex_unlock(&ubi->buf_mutex); 471 mutex_unlock(&ubi->buf_mutex);
diff --git a/drivers/mtd/ubi/scan.c b/drivers/mtd/ubi/scan.c
index d2d12ab7def4..2135a53732ff 100644
--- a/drivers/mtd/ubi/scan.c
+++ b/drivers/mtd/ubi/scan.c
@@ -1103,7 +1103,7 @@ static int check_what_we_have(struct ubi_device *ubi, struct ubi_scan_info *si)
1103 * otherwise, only print a warning. 1103 * otherwise, only print a warning.
1104 */ 1104 */
1105 if (si->corr_peb_count >= max_corr) { 1105 if (si->corr_peb_count >= max_corr) {
1106 ubi_err("too many corrupted PEBs, refusing this device"); 1106 ubi_err("too many corrupted PEBs, refusing");
1107 return -EINVAL; 1107 return -EINVAL;
1108 } 1108 }
1109 } 1109 }
diff --git a/drivers/mtd/ubi/ubi-media.h b/drivers/mtd/ubi/ubi-media.h
index 503ea9b27309..6fb8ec2174a5 100644
--- a/drivers/mtd/ubi/ubi-media.h
+++ b/drivers/mtd/ubi/ubi-media.h
@@ -164,7 +164,7 @@ struct ubi_ec_hdr {
164 __be32 image_seq; 164 __be32 image_seq;
165 __u8 padding2[32]; 165 __u8 padding2[32];
166 __be32 hdr_crc; 166 __be32 hdr_crc;
167} __attribute__ ((packed)); 167} __packed;
168 168
169/** 169/**
170 * struct ubi_vid_hdr - on-flash UBI volume identifier header. 170 * struct ubi_vid_hdr - on-flash UBI volume identifier header.
@@ -292,7 +292,7 @@ struct ubi_vid_hdr {
292 __be64 sqnum; 292 __be64 sqnum;
293 __u8 padding3[12]; 293 __u8 padding3[12];
294 __be32 hdr_crc; 294 __be32 hdr_crc;
295} __attribute__ ((packed)); 295} __packed;
296 296
297/* Internal UBI volumes count */ 297/* Internal UBI volumes count */
298#define UBI_INT_VOL_COUNT 1 298#define UBI_INT_VOL_COUNT 1
@@ -373,6 +373,6 @@ struct ubi_vtbl_record {
373 __u8 flags; 373 __u8 flags;
374 __u8 padding[23]; 374 __u8 padding[23];
375 __be32 crc; 375 __be32 crc;
376} __attribute__ ((packed)); 376} __packed;
377 377
378#endif /* !__UBI_MEDIA_H__ */ 378#endif /* !__UBI_MEDIA_H__ */
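
The ubi-media.h hunks are purely mechanical: __packed is the kernel's shorthand for __attribute__((packed)) from the compiler headers, so the on-flash layout is unchanged. What packing does, in a standalone example (the sizes assume a common ABI with 4-byte int alignment):

#include <stdio.h>

#define __packed __attribute__((packed))	/* as in the kernel headers */

struct padded { char c; int v; };		/* typically sizeof == 8 */
struct tight  { char c; int v; } __packed;	/* no padding: sizeof == 5 */

int main(void)
{
	printf("%zu %zu\n", sizeof(struct padded), sizeof(struct tight));
	return 0;
}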
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
index f1be8b79663c..c6c22295898e 100644
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -341,8 +341,8 @@ struct ubi_wl_entry;
341 * protected from the wear-leveling worker) 341 * protected from the wear-leveling worker)
342 * @pq_head: protection queue head 342 * @pq_head: protection queue head
343 * @wl_lock: protects the @used, @free, @pq, @pq_head, @lookuptbl, @move_from, 343 * @wl_lock: protects the @used, @free, @pq, @pq_head, @lookuptbl, @move_from,
344 * @move_to, @move_to_put @erase_pending, @wl_scheduled, @works, 344 * @move_to, @move_to_put @erase_pending, @wl_scheduled, @works,
345 * @erroneous, and @erroneous_peb_count fields 345 * @erroneous, and @erroneous_peb_count fields
346 * @move_mutex: serializes eraseblock moves 346 * @move_mutex: serializes eraseblock moves
347 * @work_sem: synchronizes the WL worker with use tasks 347 * @work_sem: synchronizes the WL worker with use tasks
348 * @wl_scheduled: non-zero if the wear-leveling was scheduled 348 * @wl_scheduled: non-zero if the wear-leveling was scheduled
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index b4cf57db2556..ff2c4956eeff 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -1570,7 +1570,8 @@ void ubi_wl_close(struct ubi_device *ubi)
1570 * @ec: the erase counter to check 1570 * @ec: the erase counter to check
1571 * 1571 *
1572 * This function returns zero if the erase counter of physical eraseblock @pnum 1572 * This function returns zero if the erase counter of physical eraseblock @pnum
1573 * is equivalent to @ec, and a negative error code if not or if an error occurred. 1573 * is equivalent to @ec, and a negative error code if not or if an error
1574 * occurred.
1574 */ 1575 */
1575static int paranoid_check_ec(struct ubi_device *ubi, int pnum, int ec) 1576static int paranoid_check_ec(struct ubi_device *ubi, int pnum, int ec)
1576{ 1577{
diff --git a/drivers/net/can/softing/softing_cs.c b/drivers/net/can/softing/softing_cs.c
index c11bb4de8630..c0e1b1eb87a9 100644
--- a/drivers/net/can/softing/softing_cs.c
+++ b/drivers/net/can/softing/softing_cs.c
@@ -315,7 +315,7 @@ pcmcia_failed:
315 return ret ?: -ENODEV; 315 return ret ?: -ENODEV;
316} 316}
317 317
318static /*const*/ struct pcmcia_device_id softingcs_ids[] = { 318static const struct pcmcia_device_id softingcs_ids[] = {
319 /* softing */ 319 /* softing */
320 PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0001), 320 PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0001),
321 PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0002), 321 PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0002),
diff --git a/drivers/net/pcmcia/3c574_cs.c b/drivers/net/pcmcia/3c574_cs.c
index 81ac330f931d..34c5e1cbf65d 100644
--- a/drivers/net/pcmcia/3c574_cs.c
+++ b/drivers/net/pcmcia/3c574_cs.c
@@ -1150,7 +1150,7 @@ static int el3_close(struct net_device *dev)
1150 return 0; 1150 return 0;
1151} 1151}
1152 1152
1153static struct pcmcia_device_id tc574_ids[] = { 1153static const struct pcmcia_device_id tc574_ids[] = {
1154 PCMCIA_DEVICE_MANF_CARD(0x0101, 0x0574), 1154 PCMCIA_DEVICE_MANF_CARD(0x0101, 0x0574),
1155 PCMCIA_MFC_DEVICE_CIS_MANF_CARD(0, 0x0101, 0x0556, "cis/3CCFEM556.cis"), 1155 PCMCIA_MFC_DEVICE_CIS_MANF_CARD(0, 0x0101, 0x0556, "cis/3CCFEM556.cis"),
1156 PCMCIA_DEVICE_NULL, 1156 PCMCIA_DEVICE_NULL,
diff --git a/drivers/net/pcmcia/3c589_cs.c b/drivers/net/pcmcia/3c589_cs.c
index 79b9ca0dbdb4..4a1a35809807 100644
--- a/drivers/net/pcmcia/3c589_cs.c
+++ b/drivers/net/pcmcia/3c589_cs.c
@@ -908,7 +908,7 @@ static int el3_close(struct net_device *dev)
908 return 0; 908 return 0;
909} 909}
910 910
911static struct pcmcia_device_id tc589_ids[] = { 911static const struct pcmcia_device_id tc589_ids[] = {
912 PCMCIA_MFC_DEVICE_MANF_CARD(0, 0x0101, 0x0562), 912 PCMCIA_MFC_DEVICE_MANF_CARD(0, 0x0101, 0x0562),
913 PCMCIA_MFC_DEVICE_PROD_ID1(0, "Motorola MARQUIS", 0xf03e4e77), 913 PCMCIA_MFC_DEVICE_PROD_ID1(0, "Motorola MARQUIS", 0xf03e4e77),
914 PCMCIA_DEVICE_MANF_CARD(0x0101, 0x0589), 914 PCMCIA_DEVICE_MANF_CARD(0x0101, 0x0589),
diff --git a/drivers/net/pcmcia/axnet_cs.c b/drivers/net/pcmcia/axnet_cs.c
index 3077d72e8222..9953db711969 100644
--- a/drivers/net/pcmcia/axnet_cs.c
+++ b/drivers/net/pcmcia/axnet_cs.c
@@ -687,7 +687,7 @@ static void block_output(struct net_device *dev, int count,
687 outsw(nic_base + AXNET_DATAPORT, buf, count>>1); 687 outsw(nic_base + AXNET_DATAPORT, buf, count>>1);
688} 688}
689 689
690static struct pcmcia_device_id axnet_ids[] = { 690static const struct pcmcia_device_id axnet_ids[] = {
691 PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x016c, 0x0081), 691 PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x016c, 0x0081),
692 PCMCIA_DEVICE_MANF_CARD(0x018a, 0x0301), 692 PCMCIA_DEVICE_MANF_CARD(0x018a, 0x0301),
693 PCMCIA_DEVICE_MANF_CARD(0x01bf, 0x2328), 693 PCMCIA_DEVICE_MANF_CARD(0x01bf, 0x2328),
diff --git a/drivers/net/pcmcia/com20020_cs.c b/drivers/net/pcmcia/com20020_cs.c
index 27bfad76fc40..980e65c14936 100644
--- a/drivers/net/pcmcia/com20020_cs.c
+++ b/drivers/net/pcmcia/com20020_cs.c
@@ -316,7 +316,7 @@ static int com20020_resume(struct pcmcia_device *link)
316 return 0; 316 return 0;
317} 317}
318 318
319static struct pcmcia_device_id com20020_ids[] = { 319static const struct pcmcia_device_id com20020_ids[] = {
320 PCMCIA_DEVICE_PROD_ID12("Contemporary Control Systems, Inc.", 320 PCMCIA_DEVICE_PROD_ID12("Contemporary Control Systems, Inc.",
321 "PCM20 Arcnet Adapter", 0x59991666, 0x95dfffaf), 321 "PCM20 Arcnet Adapter", 0x59991666, 0x95dfffaf),
322 PCMCIA_DEVICE_PROD_ID12("SoHard AG", 322 PCMCIA_DEVICE_PROD_ID12("SoHard AG",
diff --git a/drivers/net/pcmcia/fmvj18x_cs.c b/drivers/net/pcmcia/fmvj18x_cs.c
index 530ab5a10bd3..723815e7a997 100644
--- a/drivers/net/pcmcia/fmvj18x_cs.c
+++ b/drivers/net/pcmcia/fmvj18x_cs.c
@@ -667,7 +667,7 @@ static int fmvj18x_resume(struct pcmcia_device *link)
667 667
668/*====================================================================*/ 668/*====================================================================*/
669 669
670static struct pcmcia_device_id fmvj18x_ids[] = { 670static const struct pcmcia_device_id fmvj18x_ids[] = {
671 PCMCIA_DEVICE_MANF_CARD(0x0004, 0x0004), 671 PCMCIA_DEVICE_MANF_CARD(0x0004, 0x0004),
672 PCMCIA_DEVICE_PROD_ID12("EAGLE Technology", "NE200 ETHERNET LAN MBH10302 04", 0x528c88c4, 0x74f91e59), 672 PCMCIA_DEVICE_PROD_ID12("EAGLE Technology", "NE200 ETHERNET LAN MBH10302 04", 0x528c88c4, 0x74f91e59),
673 PCMCIA_DEVICE_PROD_ID12("Eiger Labs,Inc", "EPX-10BT PC Card Ethernet 10BT", 0x53af556e, 0x877f9922), 673 PCMCIA_DEVICE_PROD_ID12("Eiger Labs,Inc", "EPX-10BT PC Card Ethernet 10BT", 0x53af556e, 0x877f9922),
diff --git a/drivers/net/pcmcia/ibmtr_cs.c b/drivers/net/pcmcia/ibmtr_cs.c
index 15d57f5b6f29..6006d5488fbe 100644
--- a/drivers/net/pcmcia/ibmtr_cs.c
+++ b/drivers/net/pcmcia/ibmtr_cs.c
@@ -340,7 +340,7 @@ static void ibmtr_hw_setup(struct net_device *dev, u_int mmiobase)
340 outb(0x40, dev->base_addr); 340 outb(0x40, dev->base_addr);
341} 341}
342 342
343static struct pcmcia_device_id ibmtr_ids[] = { 343static const struct pcmcia_device_id ibmtr_ids[] = {
344 PCMCIA_DEVICE_PROD_ID12("3Com", "TokenLink Velocity PC Card", 0x41240e5b, 0x82c3734e), 344 PCMCIA_DEVICE_PROD_ID12("3Com", "TokenLink Velocity PC Card", 0x41240e5b, 0x82c3734e),
345 PCMCIA_DEVICE_PROD_ID12("IBM", "TOKEN RING", 0xb569a6e5, 0xbf8eed47), 345 PCMCIA_DEVICE_PROD_ID12("IBM", "TOKEN RING", 0xb569a6e5, 0xbf8eed47),
346 PCMCIA_DEVICE_NULL, 346 PCMCIA_DEVICE_NULL,
diff --git a/drivers/net/pcmcia/nmclan_cs.c b/drivers/net/pcmcia/nmclan_cs.c
index 76683d97d83b..9d70b6595220 100644
--- a/drivers/net/pcmcia/nmclan_cs.c
+++ b/drivers/net/pcmcia/nmclan_cs.c
@@ -1494,7 +1494,7 @@ static void set_multicast_list(struct net_device *dev)
1494 1494
1495} /* set_multicast_list */ 1495} /* set_multicast_list */
1496 1496
1497static struct pcmcia_device_id nmclan_ids[] = { 1497static const struct pcmcia_device_id nmclan_ids[] = {
1498 PCMCIA_DEVICE_PROD_ID12("New Media Corporation", "Ethernet", 0x085a850b, 0x00b2e941), 1498 PCMCIA_DEVICE_PROD_ID12("New Media Corporation", "Ethernet", 0x085a850b, 0x00b2e941),
1499 PCMCIA_DEVICE_PROD_ID12("Portable Add-ons", "Ethernet+", 0xebf1d60, 0xad673aaf), 1499 PCMCIA_DEVICE_PROD_ID12("Portable Add-ons", "Ethernet+", 0xebf1d60, 0xad673aaf),
1500 PCMCIA_DEVICE_NULL, 1500 PCMCIA_DEVICE_NULL,
diff --git a/drivers/net/pcmcia/pcnet_cs.c b/drivers/net/pcmcia/pcnet_cs.c
index e953793a33ff..b4fd7c3ed077 100644
--- a/drivers/net/pcmcia/pcnet_cs.c
+++ b/drivers/net/pcmcia/pcnet_cs.c
@@ -1463,7 +1463,7 @@ failed:
1463 1463
1464/*====================================================================*/ 1464/*====================================================================*/
1465 1465
1466static struct pcmcia_device_id pcnet_ids[] = { 1466static const struct pcmcia_device_id pcnet_ids[] = {
1467 PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0057, 0x0021), 1467 PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0057, 0x0021),
1468 PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0104, 0x000a), 1468 PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0104, 0x000a),
1469 PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0105, 0xea15), 1469 PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0105, 0xea15),
diff --git a/drivers/net/pcmcia/smc91c92_cs.c b/drivers/net/pcmcia/smc91c92_cs.c
index 288e4f1317ee..1cd9394c3359 100644
--- a/drivers/net/pcmcia/smc91c92_cs.c
+++ b/drivers/net/pcmcia/smc91c92_cs.c
@@ -2014,7 +2014,7 @@ static int smc_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
2014 return rc; 2014 return rc;
2015} 2015}
2016 2016
2017static struct pcmcia_device_id smc91c92_ids[] = { 2017static const struct pcmcia_device_id smc91c92_ids[] = {
2018 PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0109, 0x0501), 2018 PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0109, 0x0501),
2019 PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0140, 0x000a), 2019 PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0140, 0x000a),
2020 PCMCIA_PFC_DEVICE_PROD_ID123(0, "MEGAHERTZ", "CC/XJEM3288", "DATA/FAX/CELL ETHERNET MODEM", 0xf510db04, 0x04cd2988, 0x46a52d63), 2020 PCMCIA_PFC_DEVICE_PROD_ID123(0, "MEGAHERTZ", "CC/XJEM3288", "DATA/FAX/CELL ETHERNET MODEM", 0xf510db04, 0x04cd2988, 0x46a52d63),
diff --git a/drivers/net/pcmcia/xirc2ps_cs.c b/drivers/net/pcmcia/xirc2ps_cs.c
index a46b7fd6c0f5..e33b190d716f 100644
--- a/drivers/net/pcmcia/xirc2ps_cs.c
+++ b/drivers/net/pcmcia/xirc2ps_cs.c
@@ -1738,7 +1738,7 @@ do_stop(struct net_device *dev)
1738 return 0; 1738 return 0;
1739} 1739}
1740 1740
1741static struct pcmcia_device_id xirc2ps_ids[] = { 1741static const struct pcmcia_device_id xirc2ps_ids[] = {
1742 PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0089, 0x110a), 1742 PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0089, 0x110a),
1743 PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0138, 0x110a), 1743 PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0138, 0x110a),
1744 PCMCIA_PFC_DEVICE_PROD_ID13(0, "Xircom", "CEM28", 0x2e3ee845, 0x0ea978ea), 1744 PCMCIA_PFC_DEVICE_PROD_ID13(0, "Xircom", "CEM28", 0x2e3ee845, 0x0ea978ea),
diff --git a/drivers/net/wireless/airo_cs.c b/drivers/net/wireless/airo_cs.c
index df2484d45474..c983c10e0f6a 100644
--- a/drivers/net/wireless/airo_cs.c
+++ b/drivers/net/wireless/airo_cs.c
@@ -164,7 +164,7 @@ static int airo_resume(struct pcmcia_device *link)
164 return 0; 164 return 0;
165} 165}
166 166
167static struct pcmcia_device_id airo_ids[] = { 167static const struct pcmcia_device_id airo_ids[] = {
168 PCMCIA_DEVICE_MANF_CARD(0x015f, 0x000a), 168 PCMCIA_DEVICE_MANF_CARD(0x015f, 0x000a),
169 PCMCIA_DEVICE_MANF_CARD(0x015f, 0x0005), 169 PCMCIA_DEVICE_MANF_CARD(0x015f, 0x0005),
170 PCMCIA_DEVICE_MANF_CARD(0x015f, 0x0007), 170 PCMCIA_DEVICE_MANF_CARD(0x015f, 0x0007),
diff --git a/drivers/net/wireless/atmel_cs.c b/drivers/net/wireless/atmel_cs.c
index 05263516c113..ec295c4f677d 100644
--- a/drivers/net/wireless/atmel_cs.c
+++ b/drivers/net/wireless/atmel_cs.c
@@ -122,7 +122,7 @@ static int atmel_config(struct pcmcia_device *link)
122{ 122{
123 local_info_t *dev; 123 local_info_t *dev;
124 int ret; 124 int ret;
125 struct pcmcia_device_id *did; 125 const struct pcmcia_device_id *did;
126 126
127 dev = link->priv; 127 dev = link->priv;
128 did = dev_get_drvdata(&link->dev); 128 did = dev_get_drvdata(&link->dev);
@@ -211,7 +211,7 @@ static int atmel_resume(struct pcmcia_device *link)
211 .prod_id_hash = { (vh1), (vh2), 0, 0 }, \ 211 .prod_id_hash = { (vh1), (vh2), 0, 0 }, \
212 .driver_info = (kernel_ulong_t)(info), } 212 .driver_info = (kernel_ulong_t)(info), }
213 213
214static struct pcmcia_device_id atmel_ids[] = { 214static const struct pcmcia_device_id atmel_ids[] = {
215 PCMCIA_DEVICE_MANF_CARD_INFO(0x0101, 0x0620, ATMEL_FW_TYPE_502_3COM), 215 PCMCIA_DEVICE_MANF_CARD_INFO(0x0101, 0x0620, ATMEL_FW_TYPE_502_3COM),
216 PCMCIA_DEVICE_MANF_CARD_INFO(0x0101, 0x0696, ATMEL_FW_TYPE_502_3COM), 216 PCMCIA_DEVICE_MANF_CARD_INFO(0x0101, 0x0696, ATMEL_FW_TYPE_502_3COM),
217 PCMCIA_DEVICE_MANF_CARD_INFO(0x01bf, 0x3302, ATMEL_FW_TYPE_502E), 217 PCMCIA_DEVICE_MANF_CARD_INFO(0x01bf, 0x3302, ATMEL_FW_TYPE_502E),
diff --git a/drivers/net/wireless/b43/pcmcia.c b/drivers/net/wireless/b43/pcmcia.c
index 7dcba5fafdc7..2c8461dcf1b0 100644
--- a/drivers/net/wireless/b43/pcmcia.c
+++ b/drivers/net/wireless/b43/pcmcia.c
@@ -32,7 +32,7 @@
32#include <pcmcia/cisreg.h> 32#include <pcmcia/cisreg.h>
33 33
34 34
35static /*const */ struct pcmcia_device_id b43_pcmcia_tbl[] = { 35static const struct pcmcia_device_id b43_pcmcia_tbl[] = {
36 PCMCIA_DEVICE_MANF_CARD(0x2D0, 0x448), 36 PCMCIA_DEVICE_MANF_CARD(0x2D0, 0x448),
37 PCMCIA_DEVICE_MANF_CARD(0x2D0, 0x476), 37 PCMCIA_DEVICE_MANF_CARD(0x2D0, 0x476),
38 PCMCIA_DEVICE_NULL, 38 PCMCIA_DEVICE_NULL,
diff --git a/drivers/net/wireless/hostap/hostap_cs.c b/drivers/net/wireless/hostap/hostap_cs.c
index 2176edede39b..c052a0d5cbdd 100644
--- a/drivers/net/wireless/hostap/hostap_cs.c
+++ b/drivers/net/wireless/hostap/hostap_cs.c
@@ -620,7 +620,7 @@ static int hostap_cs_resume(struct pcmcia_device *link)
620 return 0; 620 return 0;
621} 621}
622 622
623static struct pcmcia_device_id hostap_cs_ids[] = { 623static const struct pcmcia_device_id hostap_cs_ids[] = {
624 PCMCIA_DEVICE_MANF_CARD(0x000b, 0x7100), 624 PCMCIA_DEVICE_MANF_CARD(0x000b, 0x7100),
625 PCMCIA_DEVICE_MANF_CARD(0x000b, 0x7300), 625 PCMCIA_DEVICE_MANF_CARD(0x000b, 0x7300),
626 PCMCIA_DEVICE_MANF_CARD(0x0101, 0x0777), 626 PCMCIA_DEVICE_MANF_CARD(0x0101, 0x0777),
diff --git a/drivers/net/wireless/libertas/if_cs.c b/drivers/net/wireless/libertas/if_cs.c
index 63ed5798365c..e26935179861 100644
--- a/drivers/net/wireless/libertas/if_cs.c
+++ b/drivers/net/wireless/libertas/if_cs.c
@@ -983,7 +983,7 @@ static void if_cs_detach(struct pcmcia_device *p_dev)
983/* Module initialization */ 983/* Module initialization */
984/********************************************************************/ 984/********************************************************************/
985 985
986static struct pcmcia_device_id if_cs_ids[] = { 986static const struct pcmcia_device_id if_cs_ids[] = {
987 PCMCIA_DEVICE_MANF_CARD(CF8305_MANFID, CF8305_CARDID), 987 PCMCIA_DEVICE_MANF_CARD(CF8305_MANFID, CF8305_CARDID),
988 PCMCIA_DEVICE_MANF_CARD(CF8381_MANFID, CF8381_CARDID), 988 PCMCIA_DEVICE_MANF_CARD(CF8381_MANFID, CF8381_CARDID),
989 PCMCIA_DEVICE_MANF_CARD(CF8385_MANFID, CF8385_CARDID), 989 PCMCIA_DEVICE_MANF_CARD(CF8385_MANFID, CF8385_CARDID),
diff --git a/drivers/net/wireless/orinoco/orinoco_cs.c b/drivers/net/wireless/orinoco/orinoco_cs.c
index 32954c4b243a..88e3c0ebcaad 100644
--- a/drivers/net/wireless/orinoco/orinoco_cs.c
+++ b/drivers/net/wireless/orinoco/orinoco_cs.c
@@ -237,7 +237,7 @@ static int orinoco_cs_resume(struct pcmcia_device *link)
237/* Module initialization */ 237/* Module initialization */
238/********************************************************************/ 238/********************************************************************/
239 239
240static struct pcmcia_device_id orinoco_cs_ids[] = { 240static const struct pcmcia_device_id orinoco_cs_ids[] = {
241 PCMCIA_DEVICE_MANF_CARD(0x0101, 0x0777), /* 3Com AirConnect PCI 777A */ 241 PCMCIA_DEVICE_MANF_CARD(0x0101, 0x0777), /* 3Com AirConnect PCI 777A */
242 PCMCIA_DEVICE_MANF_CARD(0x0156, 0x0002), /* Lucent Orinoco and old Intersil */ 242 PCMCIA_DEVICE_MANF_CARD(0x0156, 0x0002), /* Lucent Orinoco and old Intersil */
243 PCMCIA_DEVICE_MANF_CARD(0x016b, 0x0001), /* Ericsson WLAN Card C11 */ 243 PCMCIA_DEVICE_MANF_CARD(0x016b, 0x0001), /* Ericsson WLAN Card C11 */
diff --git a/drivers/net/wireless/orinoco/spectrum_cs.c b/drivers/net/wireless/orinoco/spectrum_cs.c
index db34c282e59b..81f3673d31d4 100644
--- a/drivers/net/wireless/orinoco/spectrum_cs.c
+++ b/drivers/net/wireless/orinoco/spectrum_cs.c
@@ -301,7 +301,7 @@ spectrum_cs_resume(struct pcmcia_device *link)
301/* Module initialization */ 301/* Module initialization */
302/********************************************************************/ 302/********************************************************************/
303 303
304static struct pcmcia_device_id spectrum_cs_ids[] = { 304static const struct pcmcia_device_id spectrum_cs_ids[] = {
305 PCMCIA_DEVICE_MANF_CARD(0x026c, 0x0001), /* Symbol Spectrum24 LA4137 */ 305 PCMCIA_DEVICE_MANF_CARD(0x026c, 0x0001), /* Symbol Spectrum24 LA4137 */
306 PCMCIA_DEVICE_MANF_CARD(0x0104, 0x0001), /* Socket Communications CF */ 306 PCMCIA_DEVICE_MANF_CARD(0x0104, 0x0001), /* Socket Communications CF */
307 PCMCIA_DEVICE_PROD_ID12("Intel", "PRO/Wireless LAN PC Card", 0x816cc815, 0x6fbf459a), /* 2011B, not 2011 */ 307 PCMCIA_DEVICE_PROD_ID12("Intel", "PRO/Wireless LAN PC Card", 0x816cc815, 0x6fbf459a), /* 2011B, not 2011 */
diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c
index 0764d1a30d13..2a06ebcd67c5 100644
--- a/drivers/net/wireless/ray_cs.c
+++ b/drivers/net/wireless/ray_cs.c
@@ -2781,7 +2781,7 @@ static const struct file_operations int_proc_fops = {
2781}; 2781};
2782#endif 2782#endif
2783 2783
2784static struct pcmcia_device_id ray_ids[] = { 2784static const struct pcmcia_device_id ray_ids[] = {
2785 PCMCIA_DEVICE_MANF_CARD(0x01a6, 0x0000), 2785 PCMCIA_DEVICE_MANF_CARD(0x01a6, 0x0000),
2786 PCMCIA_DEVICE_NULL, 2786 PCMCIA_DEVICE_NULL,
2787}; 2787};
diff --git a/drivers/net/wireless/wl3501_cs.c b/drivers/net/wireless/wl3501_cs.c
index fc08f36fe1f5..6bc7c92fbff7 100644
--- a/drivers/net/wireless/wl3501_cs.c
+++ b/drivers/net/wireless/wl3501_cs.c
@@ -2000,7 +2000,7 @@ static int wl3501_resume(struct pcmcia_device *link)
2000} 2000}
2001 2001
2002 2002
2003static struct pcmcia_device_id wl3501_ids[] = { 2003static const struct pcmcia_device_id wl3501_ids[] = {
2004 PCMCIA_DEVICE_MANF_CARD(0xd601, 0x0001), 2004 PCMCIA_DEVICE_MANF_CARD(0xd601, 0x0001),
2005 PCMCIA_DEVICE_NULL 2005 PCMCIA_DEVICE_NULL
2006}; 2006};
diff --git a/drivers/parport/parport_cs.c b/drivers/parport/parport_cs.c
index 787ebdeae310..067ad517c1f5 100644
--- a/drivers/parport/parport_cs.c
+++ b/drivers/parport/parport_cs.c
@@ -178,7 +178,7 @@ static void parport_cs_release(struct pcmcia_device *link)
178} /* parport_cs_release */ 178} /* parport_cs_release */
179 179
180 180
181static struct pcmcia_device_id parport_ids[] = { 181static const struct pcmcia_device_id parport_ids[] = {
182 PCMCIA_DEVICE_FUNC_ID(3), 182 PCMCIA_DEVICE_FUNC_ID(3),
183 PCMCIA_MFC_DEVICE_PROD_ID12(1,"Elan","Serial+Parallel Port: SP230",0x3beb8cf2,0xdb9e58bc), 183 PCMCIA_MFC_DEVICE_PROD_ID12(1,"Elan","Serial+Parallel Port: SP230",0x3beb8cf2,0xdb9e58bc),
184 PCMCIA_DEVICE_MANF_CARD(0x0137, 0x0003), 184 PCMCIA_DEVICE_MANF_CARD(0x0137, 0x0003),
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 22c9b27fdd8d..56098b3e17c0 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -3284,31 +3284,34 @@ static int pci_set_vga_state_arch(struct pci_dev *dev, bool decode,
3284 * @dev: the PCI device 3284 * @dev: the PCI device
3285 * @decode: true = enable decoding, false = disable decoding 3285 * @decode: true = enable decoding, false = disable decoding
3286 * @command_bits: PCI_COMMAND_IO and/or PCI_COMMAND_MEMORY 3286 * @command_bits: PCI_COMMAND_IO and/or PCI_COMMAND_MEMORY
 3287 * @change_bridge: traverse ancestors and change bridges 3287 * @flags: PCI_VGA_STATE_CHANGE_DECODES and/or
 3288 * PCI_VGA_STATE_CHANGE_BRIDGE
3288 */ 3289 */
3289int pci_set_vga_state(struct pci_dev *dev, bool decode, 3290int pci_set_vga_state(struct pci_dev *dev, bool decode,
3290 unsigned int command_bits, bool change_bridge) 3291 unsigned int command_bits, u32 flags)
3291{ 3292{
3292 struct pci_bus *bus; 3293 struct pci_bus *bus;
3293 struct pci_dev *bridge; 3294 struct pci_dev *bridge;
3294 u16 cmd; 3295 u16 cmd;
3295 int rc; 3296 int rc;
3296 3297
3297 WARN_ON(command_bits & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY)); 3298 WARN_ON((flags & PCI_VGA_STATE_CHANGE_DECODES) & (command_bits & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY)));
3298 3299
3299 /* ARCH specific VGA enables */ 3300 /* ARCH specific VGA enables */
3300 rc = pci_set_vga_state_arch(dev, decode, command_bits, change_bridge); 3301 rc = pci_set_vga_state_arch(dev, decode, command_bits, flags);
3301 if (rc) 3302 if (rc)
3302 return rc; 3303 return rc;
3303 3304
3304 pci_read_config_word(dev, PCI_COMMAND, &cmd); 3305 if (flags & PCI_VGA_STATE_CHANGE_DECODES) {
3305 if (decode == true) 3306 pci_read_config_word(dev, PCI_COMMAND, &cmd);
3306 cmd |= command_bits; 3307 if (decode == true)
3307 else 3308 cmd |= command_bits;
3308 cmd &= ~command_bits; 3309 else
3309 pci_write_config_word(dev, PCI_COMMAND, cmd); 3310 cmd &= ~command_bits;
3311 pci_write_config_word(dev, PCI_COMMAND, cmd);
3312 }
3310 3313
3311 if (change_bridge == false) 3314 if (!(flags & PCI_VGA_STATE_CHANGE_BRIDGE))
3312 return 0; 3315 return 0;
3313 3316
3314 bus = dev->bus; 3317 bus = dev->bus;
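The signature change above turns the old change_bridge bool into a flags word, so callers now choose independently whether the device's own PCI_COMMAND register is touched and whether ancestor bridges are walked. A hedged sketch of a caller migrating to the new interface:

    /* old call: pci_set_vga_state(dev, true, bits, true); */
    pci_set_vga_state(dev, true, PCI_COMMAND_IO | PCI_COMMAND_MEMORY,
    		  PCI_VGA_STATE_CHANGE_DECODES | PCI_VGA_STATE_CHANGE_BRIDGE);

Omitting PCI_VGA_STATE_CHANGE_DECODES skips the command-register update on the device itself; omitting PCI_VGA_STATE_CHANGE_BRIDGE makes the function return before the bridge walk.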
diff --git a/drivers/pcmcia/ds.c b/drivers/pcmcia/ds.c
index 100c4412457d..749c2a16012c 100644
--- a/drivers/pcmcia/ds.c
+++ b/drivers/pcmcia/ds.c
@@ -45,7 +45,7 @@ MODULE_LICENSE("GPL");
45 45
46static void pcmcia_check_driver(struct pcmcia_driver *p_drv) 46static void pcmcia_check_driver(struct pcmcia_driver *p_drv)
47{ 47{
48 struct pcmcia_device_id *did = p_drv->id_table; 48 const struct pcmcia_device_id *did = p_drv->id_table;
49 unsigned int i; 49 unsigned int i;
50 u32 hash; 50 u32 hash;
51 51
@@ -784,7 +784,7 @@ static inline int pcmcia_load_firmware(struct pcmcia_device *dev, char * filenam
784 784
785 785
786static inline int pcmcia_devmatch(struct pcmcia_device *dev, 786static inline int pcmcia_devmatch(struct pcmcia_device *dev,
787 struct pcmcia_device_id *did) 787 const struct pcmcia_device_id *did)
788{ 788{
789 if (did->match_flags & PCMCIA_DEV_ID_MATCH_MANF_ID) { 789 if (did->match_flags & PCMCIA_DEV_ID_MATCH_MANF_ID) {
790 if ((!dev->has_manf_id) || (dev->manf_id != did->manf_id)) 790 if ((!dev->has_manf_id) || (dev->manf_id != did->manf_id))
@@ -890,7 +890,7 @@ static int pcmcia_bus_match(struct device *dev, struct device_driver *drv)
890{ 890{
891 struct pcmcia_device *p_dev = to_pcmcia_dev(dev); 891 struct pcmcia_device *p_dev = to_pcmcia_dev(dev);
892 struct pcmcia_driver *p_drv = to_pcmcia_drv(drv); 892 struct pcmcia_driver *p_drv = to_pcmcia_drv(drv);
893 struct pcmcia_device_id *did = p_drv->id_table; 893 const struct pcmcia_device_id *did = p_drv->id_table;
894 struct pcmcia_dynid *dynid; 894 struct pcmcia_dynid *dynid;
895 895
896 /* match dynamic devices first */ 896 /* match dynamic devices first */
diff --git a/drivers/pcmcia/sa1100_generic.c b/drivers/pcmcia/sa1100_generic.c
index fb9740d3e9a7..2eea664bc079 100644
--- a/drivers/pcmcia/sa1100_generic.c
+++ b/drivers/pcmcia/sa1100_generic.c
@@ -43,7 +43,7 @@
43 43
44int __init pcmcia_collie_init(struct device *dev); 44int __init pcmcia_collie_init(struct device *dev);
45 45
46static int (*sa11x0_pcmcia_hw_init[])(struct device *dev) = { 46static int (*sa11x0_pcmcia_hw_init[])(struct device *dev) __devinitdata = {
47#ifdef CONFIG_SA1100_ASSABET 47#ifdef CONFIG_SA1100_ASSABET
48 pcmcia_assabet_init, 48 pcmcia_assabet_init,
49#endif 49#endif
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 485c09eef424..5cb999b50f95 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -753,4 +753,11 @@ config SAMSUNG_LAPTOP
753 To compile this driver as a module, choose M here: the module 753 To compile this driver as a module, choose M here: the module
754 will be called samsung-laptop. 754 will be called samsung-laptop.
755 755
756config MXM_WMI
757 tristate "WMI support for MXM Laptop Graphics"
758 depends on ACPI_WMI
759 ---help---
 760 MXM is a standard for laptop graphics cards; the WMI interface
 761 is required on machines with switchable NVIDIA graphics.
762
756endif # X86_PLATFORM_DEVICES 763endif # X86_PLATFORM_DEVICES
diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile
index 029e8861d086..a7ab3bc7b3a1 100644
--- a/drivers/platform/x86/Makefile
+++ b/drivers/platform/x86/Makefile
@@ -42,3 +42,4 @@ obj-$(CONFIG_XO15_EBOOK) += xo15-ebook.o
42obj-$(CONFIG_IBM_RTL) += ibm_rtl.o 42obj-$(CONFIG_IBM_RTL) += ibm_rtl.o
43obj-$(CONFIG_SAMSUNG_LAPTOP) += samsung-laptop.o 43obj-$(CONFIG_SAMSUNG_LAPTOP) += samsung-laptop.o
44obj-$(CONFIG_INTEL_MFLD_THERMAL) += intel_mid_thermal.o 44obj-$(CONFIG_INTEL_MFLD_THERMAL) += intel_mid_thermal.o
45obj-$(CONFIG_MXM_WMI) += mxm-wmi.o
diff --git a/drivers/platform/x86/mxm-wmi.c b/drivers/platform/x86/mxm-wmi.c
new file mode 100644
index 000000000000..0aea63b3729a
--- /dev/null
+++ b/drivers/platform/x86/mxm-wmi.c
@@ -0,0 +1,111 @@
1/*
2 * MXM WMI driver
3 *
4 * Copyright(C) 2010 Red Hat.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20#include <linux/kernel.h>
21#include <linux/module.h>
22#include <linux/init.h>
23#include <acpi/acpi_bus.h>
24#include <acpi/acpi_drivers.h>
25
26MODULE_AUTHOR("Dave Airlie");
27MODULE_DESCRIPTION("MXM WMI Driver");
28MODULE_LICENSE("GPL");
29
30#define MXM_WMMX_GUID "F6CB5C3C-9CAE-4EBD-B577-931EA32A2CC0"
31
32MODULE_ALIAS("wmi:"MXM_WMMX_GUID);
33
34#define MXM_WMMX_FUNC_MXDS 0x5344584D /* "MXDS" */
35#define MXM_WMMX_FUNC_MXMX 0x53445344 /* "MXMX" */
36
37struct mxds_args {
38 u32 func;
39 u32 args;
40 u32 xarg;
41};
42
43int mxm_wmi_call_mxds(int adapter)
44{
45 struct mxds_args args = {
46 .func = MXM_WMMX_FUNC_MXDS,
47 .args = 0,
48 .xarg = 1,
49 };
50 struct acpi_buffer input = { (acpi_size)sizeof(args), &args };
51 struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
52 acpi_status status;
53
54 printk("calling mux switch %d\n", adapter);
55
56 status = wmi_evaluate_method(MXM_WMMX_GUID, 0x1, adapter, &input,
57 &output);
58
59 if (ACPI_FAILURE(status))
60 return status;
61
62 printk("mux switched %d\n", status);
63 return 0;
64
65}
66EXPORT_SYMBOL_GPL(mxm_wmi_call_mxds);
67
68int mxm_wmi_call_mxmx(int adapter)
69{
70 struct mxds_args args = {
71 .func = MXM_WMMX_FUNC_MXMX,
72 .args = 0,
73 .xarg = 1,
74 };
75 struct acpi_buffer input = { (acpi_size)sizeof(args), &args };
76 struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
77 acpi_status status;
78
79 printk("calling mux switch %d\n", adapter);
80
81 status = wmi_evaluate_method(MXM_WMMX_GUID, 0x1, adapter, &input,
82 &output);
83
84 if (ACPI_FAILURE(status))
85 return status;
86
87 printk("mux mutex set switched %d\n", status);
88 return 0;
89
90}
91EXPORT_SYMBOL_GPL(mxm_wmi_call_mxmx);
92
93bool mxm_wmi_supported(void)
94{
95 bool guid_valid;
96 guid_valid = wmi_has_guid(MXM_WMMX_GUID);
97 return guid_valid;
98}
99EXPORT_SYMBOL_GPL(mxm_wmi_supported);
100
101static int __init mxm_wmi_init(void)
102{
103 return 0;
104}
105
106static void __exit mxm_wmi_exit(void)
107{
108}
109
110module_init(mxm_wmi_init);
111module_exit(mxm_wmi_exit);
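The two exported calls are meant for consumers such as GPU drivers that need to flip the MXM display mux. A minimal usage sketch, assuming the declarations are exposed through a header (none is added in this diff); the call order shown is illustrative:

    static int example_flip_mux(int adapter)
    {
    	if (!mxm_wmi_supported())
    		return -ENODEV;			/* WMMX GUID not present */
    	mxm_wmi_call_mxmx(adapter);		/* take the mux mutex */
    	return mxm_wmi_call_mxds(adapter);	/* perform the switch */
    }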
diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
index 2b771f18d1ad..c388eda1e2b1 100644
--- a/drivers/s390/block/dasd_alias.c
+++ b/drivers/s390/block/dasd_alias.c
@@ -253,13 +253,11 @@ int dasd_alias_make_device_known_to_lcu(struct dasd_device *device)
253 */ 253 */
254void dasd_alias_lcu_setup_complete(struct dasd_device *device) 254void dasd_alias_lcu_setup_complete(struct dasd_device *device)
255{ 255{
256 struct dasd_eckd_private *private;
257 unsigned long flags; 256 unsigned long flags;
258 struct alias_server *server; 257 struct alias_server *server;
259 struct alias_lcu *lcu; 258 struct alias_lcu *lcu;
260 struct dasd_uid uid; 259 struct dasd_uid uid;
261 260
262 private = (struct dasd_eckd_private *) device->private;
263 device->discipline->get_uid(device, &uid); 261 device->discipline->get_uid(device, &uid);
264 lcu = NULL; 262 lcu = NULL;
265 spin_lock_irqsave(&aliastree.lock, flags); 263 spin_lock_irqsave(&aliastree.lock, flags);
@@ -279,13 +277,11 @@ void dasd_alias_lcu_setup_complete(struct dasd_device *device)
279 277
280void dasd_alias_wait_for_lcu_setup(struct dasd_device *device) 278void dasd_alias_wait_for_lcu_setup(struct dasd_device *device)
281{ 279{
282 struct dasd_eckd_private *private;
283 unsigned long flags; 280 unsigned long flags;
284 struct alias_server *server; 281 struct alias_server *server;
285 struct alias_lcu *lcu; 282 struct alias_lcu *lcu;
286 struct dasd_uid uid; 283 struct dasd_uid uid;
287 284
288 private = (struct dasd_eckd_private *) device->private;
289 device->discipline->get_uid(device, &uid); 285 device->discipline->get_uid(device, &uid);
290 lcu = NULL; 286 lcu = NULL;
291 spin_lock_irqsave(&aliastree.lock, flags); 287 spin_lock_irqsave(&aliastree.lock, flags);
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 3ebdf5f92f8f..30fb979d684d 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -1611,10 +1611,8 @@ static void dasd_eckd_analysis_callback(struct dasd_ccw_req *init_cqr,
1611 1611
1612static int dasd_eckd_start_analysis(struct dasd_block *block) 1612static int dasd_eckd_start_analysis(struct dasd_block *block)
1613{ 1613{
1614 struct dasd_eckd_private *private;
1615 struct dasd_ccw_req *init_cqr; 1614 struct dasd_ccw_req *init_cqr;
1616 1615
1617 private = (struct dasd_eckd_private *) block->base->private;
1618 init_cqr = dasd_eckd_analysis_ccw(block->base); 1616 init_cqr = dasd_eckd_analysis_ccw(block->base);
1619 if (IS_ERR(init_cqr)) 1617 if (IS_ERR(init_cqr))
1620 return PTR_ERR(init_cqr); 1618 return PTR_ERR(init_cqr);
@@ -2264,7 +2262,6 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track(
2264 unsigned int blk_per_trk, 2262 unsigned int blk_per_trk,
2265 unsigned int blksize) 2263 unsigned int blksize)
2266{ 2264{
2267 struct dasd_eckd_private *private;
2268 unsigned long *idaws; 2265 unsigned long *idaws;
2269 struct dasd_ccw_req *cqr; 2266 struct dasd_ccw_req *cqr;
2270 struct ccw1 *ccw; 2267 struct ccw1 *ccw;
@@ -2283,7 +2280,6 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track(
2283 unsigned int recoffs; 2280 unsigned int recoffs;
2284 2281
2285 basedev = block->base; 2282 basedev = block->base;
2286 private = (struct dasd_eckd_private *) basedev->private;
2287 if (rq_data_dir(req) == READ) 2283 if (rq_data_dir(req) == READ)
2288 cmd = DASD_ECKD_CCW_READ_TRACK_DATA; 2284 cmd = DASD_ECKD_CCW_READ_TRACK_DATA;
2289 else if (rq_data_dir(req) == WRITE) 2285 else if (rq_data_dir(req) == WRITE)
@@ -2556,8 +2552,7 @@ static int prepare_itcw(struct itcw *itcw,
2556 2552
2557 dcw = itcw_add_dcw(itcw, pfx_cmd, 0, 2553 dcw = itcw_add_dcw(itcw, pfx_cmd, 0,
2558 &pfxdata, sizeof(pfxdata), total_data_size); 2554 &pfxdata, sizeof(pfxdata), total_data_size);
2559 2555 return IS_ERR(dcw) ? PTR_ERR(dcw) : 0;
2560 return rc;
2561} 2556}
2562 2557
2563static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track( 2558static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track(
@@ -2573,7 +2568,6 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track(
2573 unsigned int blk_per_trk, 2568 unsigned int blk_per_trk,
2574 unsigned int blksize) 2569 unsigned int blksize)
2575{ 2570{
2576 struct dasd_eckd_private *private;
2577 struct dasd_ccw_req *cqr; 2571 struct dasd_ccw_req *cqr;
2578 struct req_iterator iter; 2572 struct req_iterator iter;
2579 struct bio_vec *bv; 2573 struct bio_vec *bv;
@@ -2594,7 +2588,6 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track(
2594 unsigned int count, count_to_trk_end; 2588 unsigned int count, count_to_trk_end;
2595 2589
2596 basedev = block->base; 2590 basedev = block->base;
2597 private = (struct dasd_eckd_private *) basedev->private;
2598 if (rq_data_dir(req) == READ) { 2591 if (rq_data_dir(req) == READ) {
2599 cmd = DASD_ECKD_CCW_READ_TRACK_DATA; 2592 cmd = DASD_ECKD_CCW_READ_TRACK_DATA;
2600 itcw_op = ITCW_OP_READ; 2593 itcw_op = ITCW_OP_READ;
@@ -2801,7 +2794,6 @@ static struct dasd_ccw_req *dasd_raw_build_cp(struct dasd_device *startdev,
2801 struct dasd_block *block, 2794 struct dasd_block *block,
2802 struct request *req) 2795 struct request *req)
2803{ 2796{
2804 struct dasd_eckd_private *private;
2805 unsigned long *idaws; 2797 unsigned long *idaws;
2806 struct dasd_device *basedev; 2798 struct dasd_device *basedev;
2807 struct dasd_ccw_req *cqr; 2799 struct dasd_ccw_req *cqr;
@@ -2836,7 +2828,6 @@ static struct dasd_ccw_req *dasd_raw_build_cp(struct dasd_device *startdev,
2836 trkcount = last_trk - first_trk + 1; 2828 trkcount = last_trk - first_trk + 1;
2837 first_offs = 0; 2829 first_offs = 0;
2838 basedev = block->base; 2830 basedev = block->base;
2839 private = (struct dasd_eckd_private *) basedev->private;
2840 2831
2841 if (rq_data_dir(req) == READ) 2832 if (rq_data_dir(req) == READ)
2842 cmd = DASD_ECKD_CCW_READ_TRACK; 2833 cmd = DASD_ECKD_CCW_READ_TRACK;
diff --git a/drivers/s390/char/Kconfig b/drivers/s390/char/Kconfig
index dcee3c5c8954..a4f117d9fdc6 100644
--- a/drivers/s390/char/Kconfig
+++ b/drivers/s390/char/Kconfig
@@ -119,18 +119,6 @@ config S390_TAPE
119comment "S/390 tape interface support" 119comment "S/390 tape interface support"
120 depends on S390_TAPE 120 depends on S390_TAPE
121 121
122config S390_TAPE_BLOCK
123 def_bool y
124 prompt "Support for tape block devices"
125 depends on S390_TAPE && BLOCK
126 help
127 Select this option if you want to access your channel-attached tape
128 devices using the block device interface. This interface is similar
129 to CD-ROM devices on other platforms. The tapes can only be
130 accessed read-only when using this interface. Have a look at
131 <file:Documentation/s390/TAPE> for further information about creating
132 volumes for and using this interface. It is safe to say "Y" here.
133
134comment "S/390 tape hardware support" 122comment "S/390 tape hardware support"
135 depends on S390_TAPE 123 depends on S390_TAPE
136 124
diff --git a/drivers/s390/char/Makefile b/drivers/s390/char/Makefile
index efb500ab66c0..f3c325207445 100644
--- a/drivers/s390/char/Makefile
+++ b/drivers/s390/char/Makefile
@@ -3,7 +3,7 @@
3# 3#
4 4
5obj-y += ctrlchar.o keyboard.o defkeymap.o sclp.o sclp_rw.o sclp_quiesce.o \ 5obj-y += ctrlchar.o keyboard.o defkeymap.o sclp.o sclp_rw.o sclp_quiesce.o \
6 sclp_cmd.o sclp_config.o sclp_cpi_sys.o 6 sclp_cmd.o sclp_config.o sclp_cpi_sys.o sclp_ocf.o
7 7
8obj-$(CONFIG_TN3270) += raw3270.o 8obj-$(CONFIG_TN3270) += raw3270.o
9obj-$(CONFIG_TN3270_CONSOLE) += con3270.o 9obj-$(CONFIG_TN3270_CONSOLE) += con3270.o
@@ -22,7 +22,6 @@ obj-$(CONFIG_ZVM_WATCHDOG) += vmwatchdog.o
22obj-$(CONFIG_VMLOGRDR) += vmlogrdr.o 22obj-$(CONFIG_VMLOGRDR) += vmlogrdr.o
23obj-$(CONFIG_VMCP) += vmcp.o 23obj-$(CONFIG_VMCP) += vmcp.o
24 24
25tape-$(CONFIG_S390_TAPE_BLOCK) += tape_block.o
26tape-$(CONFIG_PROC_FS) += tape_proc.o 25tape-$(CONFIG_PROC_FS) += tape_proc.o
27tape-objs := tape_core.o tape_std.o tape_char.o $(tape-y) 26tape-objs := tape_core.o tape_std.o tape_char.o $(tape-y)
28obj-$(CONFIG_S390_TAPE) += tape.o tape_class.o 27obj-$(CONFIG_S390_TAPE) += tape.o tape_class.o
diff --git a/drivers/s390/char/monwriter.c b/drivers/s390/char/monwriter.c
index e0702d3ea33b..4600aa10a1c6 100644
--- a/drivers/s390/char/monwriter.c
+++ b/drivers/s390/char/monwriter.c
@@ -97,7 +97,7 @@ static int monwrite_new_hdr(struct mon_private *monpriv)
97{ 97{
98 struct monwrite_hdr *monhdr = &monpriv->hdr; 98 struct monwrite_hdr *monhdr = &monpriv->hdr;
99 struct mon_buf *monbuf; 99 struct mon_buf *monbuf;
100 int rc; 100 int rc = 0;
101 101
102 if (monhdr->datalen > MONWRITE_MAX_DATALEN || 102 if (monhdr->datalen > MONWRITE_MAX_DATALEN ||
103 monhdr->mon_function > MONWRITE_START_CONFIG || 103 monhdr->mon_function > MONWRITE_START_CONFIG ||
@@ -135,7 +135,7 @@ static int monwrite_new_hdr(struct mon_private *monpriv)
135 mon_buf_count++; 135 mon_buf_count++;
136 } 136 }
137 monpriv->current_buf = monbuf; 137 monpriv->current_buf = monbuf;
138 return 0; 138 return rc;
139} 139}
140 140
141static int monwrite_new_data(struct mon_private *monpriv) 141static int monwrite_new_data(struct mon_private *monpriv)
diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c
index e21a5c39ef20..810ac38631c3 100644
--- a/drivers/s390/char/raw3270.c
+++ b/drivers/s390/char/raw3270.c
@@ -598,7 +598,6 @@ __raw3270_size_device(struct raw3270 *rp)
598 static const unsigned char wbuf[] = 598 static const unsigned char wbuf[] =
599 { 0x00, 0x07, 0x01, 0xff, 0x03, 0x00, 0x81 }; 599 { 0x00, 0x07, 0x01, 0xff, 0x03, 0x00, 0x81 };
600 struct raw3270_ua *uap; 600 struct raw3270_ua *uap;
601 unsigned short count;
602 int rc; 601 int rc;
603 602
604 /* 603 /*
@@ -653,7 +652,6 @@ __raw3270_size_device(struct raw3270 *rp)
653 if (rc) 652 if (rc)
654 return rc; 653 return rc;
655 /* Got a Query Reply */ 654 /* Got a Query Reply */
656 count = sizeof(rp->init_data) - rp->init_request.rescnt;
657 uap = (struct raw3270_ua *) (rp->init_data + 1); 655 uap = (struct raw3270_ua *) (rp->init_data + 1);
658 /* Paranoia check. */ 656 /* Paranoia check. */
659 if (rp->init_data[0] != 0x88 || uap->uab.qcode != 0x81) 657 if (rp->init_data[0] != 0x88 || uap->uab.qcode != 0x81)
diff --git a/drivers/s390/char/sclp.h b/drivers/s390/char/sclp.h
index 6bb5a6bdfab5..49a1bb52bc87 100644
--- a/drivers/s390/char/sclp.h
+++ b/drivers/s390/char/sclp.h
@@ -28,6 +28,7 @@
28#define EVTYP_CONFMGMDATA 0x04 28#define EVTYP_CONFMGMDATA 0x04
29#define EVTYP_SDIAS 0x1C 29#define EVTYP_SDIAS 0x1C
30#define EVTYP_ASYNC 0x0A 30#define EVTYP_ASYNC 0x0A
31#define EVTYP_OCF 0x1E
31 32
32#define EVTYP_OPCMD_MASK 0x80000000 33#define EVTYP_OPCMD_MASK 0x80000000
33#define EVTYP_MSG_MASK 0x40000000 34#define EVTYP_MSG_MASK 0x40000000
@@ -40,6 +41,7 @@
40#define EVTYP_CONFMGMDATA_MASK 0x10000000 41#define EVTYP_CONFMGMDATA_MASK 0x10000000
41#define EVTYP_SDIAS_MASK 0x00000010 42#define EVTYP_SDIAS_MASK 0x00000010
42#define EVTYP_ASYNC_MASK 0x00400000 43#define EVTYP_ASYNC_MASK 0x00400000
44#define EVTYP_OCF_MASK 0x00000004
43 45
44#define GNRLMSGFLGS_DOM 0x8000 46#define GNRLMSGFLGS_DOM 0x8000
45#define GNRLMSGFLGS_SNDALRM 0x4000 47#define GNRLMSGFLGS_SNDALRM 0x4000
@@ -186,4 +188,26 @@ sclp_ascebc_str(unsigned char *str, int nr)
186 (MACHINE_IS_VM) ? ASCEBC(str, nr) : ASCEBC_500(str, nr); 188 (MACHINE_IS_VM) ? ASCEBC(str, nr) : ASCEBC_500(str, nr);
187} 189}
188 190
191static inline struct gds_vector *
192sclp_find_gds_vector(void *start, void *end, u16 id)
193{
194 struct gds_vector *v;
195
196 for (v = start; (void *) v < end; v = (void *) v + v->length)
197 if (v->gds_id == id)
198 return v;
199 return NULL;
200}
201
202static inline struct gds_subvector *
203sclp_find_gds_subvector(void *start, void *end, u8 key)
204{
205 struct gds_subvector *sv;
206
207 for (sv = start; (void *) sv < end; sv = (void *) sv + sv->length)
208 if (sv->key == key)
209 return sv;
210 return NULL;
211}
212
189#endif /* __SCLP_H__ */ 213#endif /* __SCLP_H__ */
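Both helpers rely on GDS blocks being self-describing: each block starts with a length field, so a range can be walked by advancing the cursor by v->length. A nested search simply restarts on the payload of a hit, as the OCF handler below does; a condensed sketch:

    struct gds_vector *v;

    /* find vector 0x9f00 in the event buffer, then 0x9f22 inside it */
    v = sclp_find_gds_vector(evbuf + 1, (void *) evbuf + evbuf->length, 0x9f00);
    if (v)
    	v = sclp_find_gds_vector(v + 1, (void *) v + v->length, 0x9f22);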
diff --git a/drivers/s390/char/sclp_config.c b/drivers/s390/char/sclp_config.c
index 16e232a99fb7..95b909ac2b73 100644
--- a/drivers/s390/char/sclp_config.c
+++ b/drivers/s390/char/sclp_config.c
@@ -71,21 +71,9 @@ static struct sclp_register sclp_conf_register =
71 71
72static int __init sclp_conf_init(void) 72static int __init sclp_conf_init(void)
73{ 73{
74 int rc;
75
76 INIT_WORK(&sclp_cpu_capability_work, sclp_cpu_capability_notify); 74 INIT_WORK(&sclp_cpu_capability_work, sclp_cpu_capability_notify);
77 INIT_WORK(&sclp_cpu_change_work, sclp_cpu_change_notify); 75 INIT_WORK(&sclp_cpu_change_work, sclp_cpu_change_notify);
78 76 return sclp_register(&sclp_conf_register);
79 rc = sclp_register(&sclp_conf_register);
80 if (rc)
81 return rc;
82
83 if (!(sclp_conf_register.sclp_send_mask & EVTYP_CONFMGMDATA_MASK)) {
84 pr_warning("no configuration management.\n");
85 sclp_unregister(&sclp_conf_register);
86 rc = -ENOSYS;
87 }
88 return rc;
89} 77}
90 78
91__initcall(sclp_conf_init); 79__initcall(sclp_conf_init);
diff --git a/drivers/s390/char/sclp_ocf.c b/drivers/s390/char/sclp_ocf.c
new file mode 100644
index 000000000000..ab294d5a534e
--- /dev/null
+++ b/drivers/s390/char/sclp_ocf.c
@@ -0,0 +1,145 @@
1/*
2 * drivers/s390/char/sclp_ocf.c
3 * SCLP OCF communication parameters sysfs interface
4 *
5 * Copyright IBM Corp. 2011
6 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
7 */
8
9#define KMSG_COMPONENT "sclp_ocf"
10#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
11
12#include <linux/kernel.h>
13#include <linux/init.h>
14#include <linux/stat.h>
15#include <linux/device.h>
16#include <linux/string.h>
17#include <linux/ctype.h>
18#include <linux/kmod.h>
19#include <linux/timer.h>
20#include <linux/err.h>
21#include <asm/ebcdic.h>
22#include <asm/sclp.h>
23
24#include "sclp.h"
25
26#define OCF_LENGTH_HMC_NETWORK 8UL
27#define OCF_LENGTH_CPC_NAME 8UL
28
29static char hmc_network[OCF_LENGTH_HMC_NETWORK + 1];
30static char cpc_name[OCF_LENGTH_CPC_NAME + 1];
31
32static DEFINE_SPINLOCK(sclp_ocf_lock);
33static struct work_struct sclp_ocf_change_work;
34
35static struct kset *ocf_kset;
36
37static void sclp_ocf_change_notify(struct work_struct *work)
38{
39 kobject_uevent(&ocf_kset->kobj, KOBJ_CHANGE);
40}
41
42/* Handler for OCF event. Look for the CPC image name. */
43static void sclp_ocf_handler(struct evbuf_header *evbuf)
44{
45 struct gds_vector *v;
46 struct gds_subvector *sv, *netid, *cpc;
47 size_t size;
48
49 /* Find the 0x9f00 block. */
50 v = sclp_find_gds_vector(evbuf + 1, (void *) evbuf + evbuf->length,
51 0x9f00);
52 if (!v)
53 return;
54 /* Find the 0x9f22 block inside the 0x9f00 block. */
55 v = sclp_find_gds_vector(v + 1, (void *) v + v->length, 0x9f22);
56 if (!v)
57 return;
58 /* Find the 0x81 block inside the 0x9f22 block. */
59 sv = sclp_find_gds_subvector(v + 1, (void *) v + v->length, 0x81);
60 if (!sv)
61 return;
62 /* Find the 0x01 block inside the 0x81 block. */
63 netid = sclp_find_gds_subvector(sv + 1, (void *) sv + sv->length, 1);
64 /* Find the 0x02 block inside the 0x81 block. */
65 cpc = sclp_find_gds_subvector(sv + 1, (void *) sv + sv->length, 2);
66 /* Copy network name and cpc name. */
67 spin_lock(&sclp_ocf_lock);
68 if (netid) {
69 size = min(OCF_LENGTH_HMC_NETWORK, (size_t) netid->length);
70 memcpy(hmc_network, netid + 1, size);
71 EBCASC(hmc_network, size);
72 hmc_network[size] = 0;
73 }
74 if (cpc) {
75 size = min(OCF_LENGTH_CPC_NAME, (size_t) cpc->length);
76 memcpy(cpc_name, cpc + 1, size);
77 EBCASC(cpc_name, size);
78 cpc_name[size] = 0;
79 }
80 spin_unlock(&sclp_ocf_lock);
81 schedule_work(&sclp_ocf_change_work);
82}
83
84static struct sclp_register sclp_ocf_event = {
85 .receive_mask = EVTYP_OCF_MASK,
86 .receiver_fn = sclp_ocf_handler,
87};
88
89static ssize_t cpc_name_show(struct kobject *kobj,
90 struct kobj_attribute *attr, char *page)
91{
92 int rc;
93
94 spin_lock_irq(&sclp_ocf_lock);
95 rc = snprintf(page, PAGE_SIZE, "%s\n", cpc_name);
96 spin_unlock_irq(&sclp_ocf_lock);
97 return rc;
98}
99
100static struct kobj_attribute cpc_name_attr =
101 __ATTR(cpc_name, 0444, cpc_name_show, NULL);
102
103static ssize_t hmc_network_show(struct kobject *kobj,
104 struct kobj_attribute *attr, char *page)
105{
106 int rc;
107
108 spin_lock_irq(&sclp_ocf_lock);
109 rc = snprintf(page, PAGE_SIZE, "%s\n", hmc_network);
110 spin_unlock_irq(&sclp_ocf_lock);
111 return rc;
112}
113
114static struct kobj_attribute hmc_network_attr =
115 __ATTR(hmc_network, 0444, hmc_network_show, NULL);
116
117static struct attribute *ocf_attrs[] = {
118 &cpc_name_attr.attr,
119 &hmc_network_attr.attr,
120 NULL,
121};
122
123static struct attribute_group ocf_attr_group = {
124 .attrs = ocf_attrs,
125};
126
127static int __init ocf_init(void)
128{
129 int rc;
130
131 INIT_WORK(&sclp_ocf_change_work, sclp_ocf_change_notify);
132 ocf_kset = kset_create_and_add("ocf", NULL, firmware_kobj);
133 if (!ocf_kset)
134 return -ENOMEM;
135
136 rc = sysfs_create_group(&ocf_kset->kobj, &ocf_attr_group);
137 if (rc) {
138 kset_unregister(ocf_kset);
139 return rc;
140 }
141
142 return sclp_register(&sclp_ocf_event);
143}
144
145device_initcall(ocf_init);
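Once ocf_init() has run, the two read-only attributes show up as /sys/firmware/ocf/cpc_name and /sys/firmware/ocf/hmc_network; each incoming OCF event refreshes them and emits a KOBJ_CHANGE uevent on the kset.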
diff --git a/drivers/s390/char/sclp_sdias.c b/drivers/s390/char/sclp_sdias.c
index 6a1c58dc61a7..fa733ecd3d70 100644
--- a/drivers/s390/char/sclp_sdias.c
+++ b/drivers/s390/char/sclp_sdias.c
@@ -69,9 +69,6 @@ static DEFINE_MUTEX(sdias_mutex);
69 69
70static void sdias_callback(struct sclp_req *request, void *data) 70static void sdias_callback(struct sclp_req *request, void *data)
71{ 71{
72 struct sdias_sccb *cbsccb;
73
74 cbsccb = (struct sdias_sccb *) request->sccb;
75 sclp_req_done = 1; 72 sclp_req_done = 1;
76 wake_up(&sdias_wq); /* Inform caller, that request is complete */ 73 wake_up(&sdias_wq); /* Inform caller, that request is complete */
77 TRACE("callback done\n"); 74 TRACE("callback done\n");
diff --git a/drivers/s390/char/sclp_tty.c b/drivers/s390/char/sclp_tty.c
index 8258d590505f..a879c139926a 100644
--- a/drivers/s390/char/sclp_tty.c
+++ b/drivers/s390/char/sclp_tty.c
@@ -408,118 +408,72 @@ static int sclp_switch_cases(unsigned char *buf, int count)
408 return op - buf; 408 return op - buf;
409} 409}
410 410
411static void 411static void sclp_get_input(struct gds_subvector *sv)
412sclp_get_input(unsigned char *start, unsigned char *end)
413{ 412{
413 unsigned char *str;
414 int count; 414 int count;
415 415
416 count = end - start; 416 str = (unsigned char *) (sv + 1);
417 count = sv->length - sizeof(*sv);
417 if (sclp_tty_tolower) 418 if (sclp_tty_tolower)
418 EBC_TOLOWER(start, count); 419 EBC_TOLOWER(str, count);
419 count = sclp_switch_cases(start, count); 420 count = sclp_switch_cases(str, count);
420 /* convert EBCDIC to ASCII (modify original input in SCCB) */ 421 /* convert EBCDIC to ASCII (modify original input in SCCB) */
421 sclp_ebcasc_str(start, count); 422 sclp_ebcasc_str(str, count);
422 423
423 /* transfer input to high level driver */ 424 /* transfer input to high level driver */
424 sclp_tty_input(start, count); 425 sclp_tty_input(str, count);
425}
426
427static inline struct gds_vector *
428find_gds_vector(struct gds_vector *start, struct gds_vector *end, u16 id)
429{
430 struct gds_vector *vec;
431
432 for (vec = start; vec < end; vec = (void *) vec + vec->length)
433 if (vec->gds_id == id)
434 return vec;
435 return NULL;
436} 426}
437 427
438static inline struct gds_subvector * 428static inline void sclp_eval_selfdeftextmsg(struct gds_subvector *sv)
439find_gds_subvector(struct gds_subvector *start,
440 struct gds_subvector *end, u8 key)
441{ 429{
442 struct gds_subvector *subvec; 430 void *end;
443 431
444 for (subvec = start; subvec < end; 432 end = (void *) sv + sv->length;
445 subvec = (void *) subvec + subvec->length) 433 for (sv = sv + 1; (void *) sv < end; sv = (void *) sv + sv->length)
446 if (subvec->key == key) 434 if (sv->key == 0x30)
447 return subvec; 435 sclp_get_input(sv);
448 return NULL;
449} 436}
450 437
451static inline void 438static inline void sclp_eval_textcmd(struct gds_vector *v)
452sclp_eval_selfdeftextmsg(struct gds_subvector *start,
453 struct gds_subvector *end)
454{ 439{
455 struct gds_subvector *subvec; 440 struct gds_subvector *sv;
456 441 void *end;
457 subvec = start;
458 while (subvec < end) {
459 subvec = find_gds_subvector(subvec, end, 0x30);
460 if (!subvec)
461 break;
462 sclp_get_input((unsigned char *)(subvec + 1),
463 (unsigned char *) subvec + subvec->length);
464 subvec = (void *) subvec + subvec->length;
465 }
466}
467 442
468static inline void 443 end = (void *) v + v->length;
469sclp_eval_textcmd(struct gds_subvector *start, 444 for (sv = (struct gds_subvector *) (v + 1);
470 struct gds_subvector *end) 445 (void *) sv < end; sv = (void *) sv + sv->length)
471{ 446 if (sv->key == GDS_KEY_SELFDEFTEXTMSG)
472 struct gds_subvector *subvec; 447 sclp_eval_selfdeftextmsg(sv);
473 448
474 subvec = start;
475 while (subvec < end) {
476 subvec = find_gds_subvector(subvec, end,
477 GDS_KEY_SELFDEFTEXTMSG);
478 if (!subvec)
479 break;
480 sclp_eval_selfdeftextmsg((struct gds_subvector *)(subvec + 1),
481 (void *)subvec + subvec->length);
482 subvec = (void *) subvec + subvec->length;
483 }
484} 449}
485 450
486static inline void 451static inline void sclp_eval_cpmsu(struct gds_vector *v)
487sclp_eval_cpmsu(struct gds_vector *start, struct gds_vector *end)
488{ 452{
489 struct gds_vector *vec; 453 void *end;
490 454
491 vec = start; 455 end = (void *) v + v->length;
492 while (vec < end) { 456 for (v = v + 1; (void *) v < end; v = (void *) v + v->length)
493 vec = find_gds_vector(vec, end, GDS_ID_TEXTCMD); 457 if (v->gds_id == GDS_ID_TEXTCMD)
494 if (!vec) 458 sclp_eval_textcmd(v);
495 break;
496 sclp_eval_textcmd((struct gds_subvector *)(vec + 1),
497 (void *) vec + vec->length);
498 vec = (void *) vec + vec->length;
499 }
500} 459}
501 460
502 461
503static inline void 462static inline void sclp_eval_mdsmu(struct gds_vector *v)
504sclp_eval_mdsmu(struct gds_vector *start, void *end)
505{ 463{
506 struct gds_vector *vec; 464 v = sclp_find_gds_vector(v + 1, (void *) v + v->length, GDS_ID_CPMSU);
507 465 if (v)
508 vec = find_gds_vector(start, end, GDS_ID_CPMSU); 466 sclp_eval_cpmsu(v);
509 if (vec)
510 sclp_eval_cpmsu(vec + 1, (void *) vec + vec->length);
511} 467}
512 468
513static void 469static void sclp_tty_receiver(struct evbuf_header *evbuf)
514sclp_tty_receiver(struct evbuf_header *evbuf)
515{ 470{
516 struct gds_vector *start, *end, *vec; 471 struct gds_vector *v;
517 472
518 start = (struct gds_vector *)(evbuf + 1); 473 v = sclp_find_gds_vector(evbuf + 1, (void *) evbuf + evbuf->length,
519 end = (void *) evbuf + evbuf->length; 474 GDS_ID_MDSMU);
520 vec = find_gds_vector(start, end, GDS_ID_MDSMU); 475 if (v)
521 if (vec) 476 sclp_eval_mdsmu(v);
522 sclp_eval_mdsmu(vec + 1, (void *) vec + vec->length);
523} 477}
524 478
525static void 479static void
diff --git a/drivers/s390/char/tape_3590.c b/drivers/s390/char/tape_3590.c
index b98dcbd16711..a7d570728882 100644
--- a/drivers/s390/char/tape_3590.c
+++ b/drivers/s390/char/tape_3590.c
@@ -796,10 +796,8 @@ static void tape_3590_med_state_set(struct tape_device *device,
796static int 796static int
797tape_3590_done(struct tape_device *device, struct tape_request *request) 797tape_3590_done(struct tape_device *device, struct tape_request *request)
798{ 798{
799 struct tape_3590_disc_data *disc_data;
800 799
801 DBF_EVENT(6, "%s done\n", tape_op_verbose[request->op]); 800 DBF_EVENT(6, "%s done\n", tape_op_verbose[request->op]);
802 disc_data = device->discdata;
803 801
804 switch (request->op) { 802 switch (request->op) {
805 case TO_BSB: 803 case TO_BSB:
@@ -1394,17 +1392,12 @@ tape_3590_print_era_msg(struct tape_device *device, struct irb *irb)
1394static int tape_3590_crypt_error(struct tape_device *device, 1392static int tape_3590_crypt_error(struct tape_device *device,
1395 struct tape_request *request, struct irb *irb) 1393 struct tape_request *request, struct irb *irb)
1396{ 1394{
1397 u8 cu_rc, ekm_rc1; 1395 u8 cu_rc;
1398 u16 ekm_rc2; 1396 u16 ekm_rc2;
1399 u32 drv_rc;
1400 const char *bus_id;
1401 char *sense; 1397 char *sense;
1402 1398
1403 sense = ((struct tape_3590_sense *) irb->ecw)->fmt.data; 1399 sense = ((struct tape_3590_sense *) irb->ecw)->fmt.data;
1404 bus_id = dev_name(&device->cdev->dev);
1405 cu_rc = sense[0]; 1400 cu_rc = sense[0];
1406 drv_rc = *((u32*) &sense[5]) & 0xffffff;
1407 ekm_rc1 = sense[9];
1408 ekm_rc2 = *((u16*) &sense[10]); 1401 ekm_rc2 = *((u16*) &sense[10]);
1409 if ((cu_rc == 0) && (ekm_rc2 == 0xee31)) 1402 if ((cu_rc == 0) && (ekm_rc2 == 0xee31))
1410 /* key not defined on EKM */ 1403 /* key not defined on EKM */
@@ -1429,7 +1422,6 @@ tape_3590_unit_check(struct tape_device *device, struct tape_request *request,
1429 struct irb *irb) 1422 struct irb *irb)
1430{ 1423{
1431 struct tape_3590_sense *sense; 1424 struct tape_3590_sense *sense;
1432 int rc;
1433 1425
1434#ifdef CONFIG_S390_TAPE_BLOCK 1426#ifdef CONFIG_S390_TAPE_BLOCK
1435 if (request->op == TO_BLOCK) { 1427 if (request->op == TO_BLOCK) {
@@ -1454,7 +1446,6 @@ tape_3590_unit_check(struct tape_device *device, struct tape_request *request,
1454 * - "break": basic error recovery is done 1446 * - "break": basic error recovery is done
1455 * - "goto out:": just print error message if available 1447 * - "goto out:": just print error message if available
1456 */ 1448 */
1457 rc = -EIO;
1458 switch (sense->rc_rqc) { 1449 switch (sense->rc_rqc) {
1459 1450
1460 case 0x1110: 1451 case 0x1110:
diff --git a/drivers/s390/char/tape_block.c b/drivers/s390/char/tape_block.c
deleted file mode 100644
index 1b3924c2fffd..000000000000
--- a/drivers/s390/char/tape_block.c
+++ /dev/null
@@ -1,444 +0,0 @@
1/*
2 * drivers/s390/char/tape_block.c
3 * block device frontend for tape device driver
4 *
5 * S390 and zSeries version
6 * Copyright (C) 2001,2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
7 * Author(s): Carsten Otte <cotte@de.ibm.com>
8 * Tuan Ngo-Anh <ngoanh@de.ibm.com>
9 * Martin Schwidefsky <schwidefsky@de.ibm.com>
10 * Stefan Bader <shbader@de.ibm.com>
11 */
12
13#define KMSG_COMPONENT "tape"
14#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
15
16#include <linux/fs.h>
17#include <linux/module.h>
18#include <linux/blkdev.h>
19#include <linux/mutex.h>
20#include <linux/interrupt.h>
21#include <linux/buffer_head.h>
22#include <linux/kernel.h>
23
24#include <asm/debug.h>
25
26#define TAPE_DBF_AREA tape_core_dbf
27
28#include "tape.h"
29
30#define TAPEBLOCK_MAX_SEC 100
31#define TAPEBLOCK_MIN_REQUEUE 3
32
33/*
34 * 2003/11/25 Stefan Bader <shbader@de.ibm.com>
35 *
36 * In 2.5/2.6 the block device request function is very likely to be called
37 * with disabled interrupts (e.g. generic_unplug_device). So the driver can't
38 * just call any function that tries to allocate CCW requests from that con-
39 * text since it might sleep. There are two choices to work around this:
40 * a) do not allocate with kmalloc but use its own memory pool
41 * b) take requests from the queue outside that context, knowing that
42 * allocation might sleep
43 */
44
45/*
46 * file operation structure for tape block frontend
47 */
48static DEFINE_MUTEX(tape_block_mutex);
49static int tapeblock_open(struct block_device *, fmode_t);
50static int tapeblock_release(struct gendisk *, fmode_t);
51static unsigned int tapeblock_check_events(struct gendisk *, unsigned int);
52static int tapeblock_revalidate_disk(struct gendisk *);
53
54static const struct block_device_operations tapeblock_fops = {
55 .owner = THIS_MODULE,
56 .open = tapeblock_open,
57 .release = tapeblock_release,
58 .check_events = tapeblock_check_events,
59 .revalidate_disk = tapeblock_revalidate_disk,
60};
61
62static int tapeblock_major = 0;
63
64static void
65tapeblock_trigger_requeue(struct tape_device *device)
66{
67 /* Protect against rescheduling. */
68 if (atomic_cmpxchg(&device->blk_data.requeue_scheduled, 0, 1) != 0)
69 return;
70 schedule_work(&device->blk_data.requeue_task);
71}
72
73/*
74 * Post finished request.
75 */
76static void
77__tapeblock_end_request(struct tape_request *ccw_req, void *data)
78{
79 struct tape_device *device;
80 struct request *req;
81
82 DBF_LH(6, "__tapeblock_end_request()\n");
83
84 device = ccw_req->device;
85 req = (struct request *) data;
86 blk_end_request_all(req, (ccw_req->rc == 0) ? 0 : -EIO);
87 if (ccw_req->rc == 0)
88 /* Update position. */
89 device->blk_data.block_position =
90 (blk_rq_pos(req) + blk_rq_sectors(req)) >> TAPEBLOCK_HSEC_S2B;
91 else
92 /* We lost the position information due to an error. */
93 device->blk_data.block_position = -1;
94 device->discipline->free_bread(ccw_req);
95 if (!list_empty(&device->req_queue) ||
96 blk_peek_request(device->blk_data.request_queue))
97 tapeblock_trigger_requeue(device);
98}
99
100/*
101 * Feed the tape device CCW queue with requests supplied in a list.
102 */
103static int
104tapeblock_start_request(struct tape_device *device, struct request *req)
105{
106 struct tape_request * ccw_req;
107 int rc;
108
109 DBF_LH(6, "tapeblock_start_request(%p, %p)\n", device, req);
110
111 ccw_req = device->discipline->bread(device, req);
112 if (IS_ERR(ccw_req)) {
113 DBF_EVENT(1, "TBLOCK: bread failed\n");
114 blk_end_request_all(req, -EIO);
115 return PTR_ERR(ccw_req);
116 }
117 ccw_req->callback = __tapeblock_end_request;
118 ccw_req->callback_data = (void *) req;
119 ccw_req->retries = TAPEBLOCK_RETRIES;
120
121 rc = tape_do_io_async(device, ccw_req);
122 if (rc) {
123 /*
124 * Start/enqueueing failed. No retries in
125 * this case.
126 */
127 blk_end_request_all(req, -EIO);
128 device->discipline->free_bread(ccw_req);
129 }
130
131 return rc;
132}
133
134/*
135 * Move requests from the block device request queue to the tape device ccw
136 * queue.
137 */
138static void
139tapeblock_requeue(struct work_struct *work) {
140 struct tape_blk_data * blkdat;
141 struct tape_device * device;
142 struct request_queue * queue;
143 int nr_queued;
144 struct request * req;
145 struct list_head * l;
146 int rc;
147
148 blkdat = container_of(work, struct tape_blk_data, requeue_task);
149 device = blkdat->device;
150 if (!device)
151 return;
152
153 spin_lock_irq(get_ccwdev_lock(device->cdev));
154 queue = device->blk_data.request_queue;
155
156 /* Count number of requests on ccw queue. */
157 nr_queued = 0;
158 list_for_each(l, &device->req_queue)
159 nr_queued++;
160 spin_unlock(get_ccwdev_lock(device->cdev));
161
162 spin_lock_irq(&device->blk_data.request_queue_lock);
163 while (
164 blk_peek_request(queue) &&
165 nr_queued < TAPEBLOCK_MIN_REQUEUE
166 ) {
167 req = blk_fetch_request(queue);
168 if (rq_data_dir(req) == WRITE) {
169 DBF_EVENT(1, "TBLOCK: Rejecting write request\n");
170 spin_unlock_irq(&device->blk_data.request_queue_lock);
171 blk_end_request_all(req, -EIO);
172 spin_lock_irq(&device->blk_data.request_queue_lock);
173 continue;
174 }
175 nr_queued++;
176 spin_unlock_irq(&device->blk_data.request_queue_lock);
177 rc = tapeblock_start_request(device, req);
178 spin_lock_irq(&device->blk_data.request_queue_lock);
179 }
180 spin_unlock_irq(&device->blk_data.request_queue_lock);
181 atomic_set(&device->blk_data.requeue_scheduled, 0);
182}
183
184/*
185 * Tape request queue function. Called from ll_rw_blk.c
186 */
187static void
188tapeblock_request_fn(struct request_queue *queue)
189{
190 struct tape_device *device;
191
192 device = (struct tape_device *) queue->queuedata;
193 DBF_LH(6, "tapeblock_request_fn(device=%p)\n", device);
194 BUG_ON(device == NULL);
195 tapeblock_trigger_requeue(device);
196}
197
198/*
 199 * This function is called for every new tape device
200 */
201int
202tapeblock_setup_device(struct tape_device * device)
203{
204 struct tape_blk_data * blkdat;
205 struct gendisk * disk;
206 int rc;
207
208 blkdat = &device->blk_data;
209 blkdat->device = device;
210 spin_lock_init(&blkdat->request_queue_lock);
211 atomic_set(&blkdat->requeue_scheduled, 0);
212
213 blkdat->request_queue = blk_init_queue(
214 tapeblock_request_fn,
215 &blkdat->request_queue_lock
216 );
217 if (!blkdat->request_queue)
218 return -ENOMEM;
219
220 rc = elevator_change(blkdat->request_queue, "noop");
221 if (rc)
222 goto cleanup_queue;
223
224 blk_queue_logical_block_size(blkdat->request_queue, TAPEBLOCK_HSEC_SIZE);
225 blk_queue_max_hw_sectors(blkdat->request_queue, TAPEBLOCK_MAX_SEC);
226 blk_queue_max_segments(blkdat->request_queue, -1L);
227 blk_queue_max_segment_size(blkdat->request_queue, -1L);
228 blk_queue_segment_boundary(blkdat->request_queue, -1L);
229
230 disk = alloc_disk(1);
231 if (!disk) {
232 rc = -ENOMEM;
233 goto cleanup_queue;
234 }
235
236 disk->major = tapeblock_major;
237 disk->first_minor = device->first_minor;
238 disk->fops = &tapeblock_fops;
239 disk->private_data = tape_get_device(device);
240 disk->queue = blkdat->request_queue;
241 set_capacity(disk, 0);
242 sprintf(disk->disk_name, "btibm%d",
243 device->first_minor / TAPE_MINORS_PER_DEV);
244
245 blkdat->disk = disk;
246 blkdat->medium_changed = 1;
247 blkdat->request_queue->queuedata = tape_get_device(device);
248
249 add_disk(disk);
250
251 tape_get_device(device);
252 INIT_WORK(&blkdat->requeue_task, tapeblock_requeue);
253
254 return 0;
255
256cleanup_queue:
257 blk_cleanup_queue(blkdat->request_queue);
258 blkdat->request_queue = NULL;
259
260 return rc;
261}
262
263void
264tapeblock_cleanup_device(struct tape_device *device)
265{
266 flush_work_sync(&device->blk_data.requeue_task);
267 tape_put_device(device);
268
269 if (!device->blk_data.disk) {
270 goto cleanup_queue;
271 }
272
273 del_gendisk(device->blk_data.disk);
274 device->blk_data.disk->private_data = NULL;
275 tape_put_device(device);
276 put_disk(device->blk_data.disk);
277
278 device->blk_data.disk = NULL;
279cleanup_queue:
280 device->blk_data.request_queue->queuedata = NULL;
281 tape_put_device(device);
282
283 blk_cleanup_queue(device->blk_data.request_queue);
284 device->blk_data.request_queue = NULL;
285}
286
287/*
288 * Detect number of blocks of the tape.
 289 * FIXME: can we extend this to detect the block size as well?
290 */
291static int
292tapeblock_revalidate_disk(struct gendisk *disk)
293{
294 struct tape_device * device;
295 unsigned int nr_of_blks;
296 int rc;
297
298 device = (struct tape_device *) disk->private_data;
299 BUG_ON(!device);
300
301 if (!device->blk_data.medium_changed)
302 return 0;
303
304 rc = tape_mtop(device, MTFSFM, 1);
305 if (rc)
306 return rc;
307
308 rc = tape_mtop(device, MTTELL, 1);
309 if (rc < 0)
310 return rc;
311
312 pr_info("%s: Determining the size of the recorded area...\n",
313 dev_name(&device->cdev->dev));
314 DBF_LH(3, "Image file ends at %d\n", rc);
315 nr_of_blks = rc;
316
317 /* This will fail for the first file. Catch the error by checking the
318 * position. */
319 tape_mtop(device, MTBSF, 1);
320
321 rc = tape_mtop(device, MTTELL, 1);
322 if (rc < 0)
323 return rc;
324
325 if (rc > nr_of_blks)
326 return -EINVAL;
327
328 DBF_LH(3, "Image file starts at %d\n", rc);
329 device->bof = rc;
330 nr_of_blks -= rc;
331
332 pr_info("%s: The size of the recorded area is %i blocks\n",
333 dev_name(&device->cdev->dev), nr_of_blks);
334 set_capacity(device->blk_data.disk,
335 nr_of_blks*(TAPEBLOCK_HSEC_SIZE/512));
336
337 device->blk_data.block_position = 0;
338 device->blk_data.medium_changed = 0;
339 return 0;
340}
341
342static unsigned int
343tapeblock_check_events(struct gendisk *disk, unsigned int clearing)
344{
345 struct tape_device *device;
346
347 device = (struct tape_device *) disk->private_data;
348 DBF_LH(6, "tapeblock_medium_changed(%p) = %d\n",
349 device, device->blk_data.medium_changed);
350
351 return device->blk_data.medium_changed ? DISK_EVENT_MEDIA_CHANGE : 0;
352}
353
354/*
355 * Block frontend tape device open function.
356 */
357static int
358tapeblock_open(struct block_device *bdev, fmode_t mode)
359{
360 struct gendisk * disk = bdev->bd_disk;
361 struct tape_device * device;
362 int rc;
363
364 mutex_lock(&tape_block_mutex);
365 device = tape_get_device(disk->private_data);
366
367 if (device->required_tapemarks) {
368 DBF_EVENT(2, "TBLOCK: missing tapemarks\n");
369 pr_warning("%s: Opening the tape failed because of missing "
370 "end-of-file marks\n", dev_name(&device->cdev->dev));
371 rc = -EPERM;
372 goto put_device;
373 }
374
375 rc = tape_open(device);
376 if (rc)
377 goto put_device;
378
379 rc = tapeblock_revalidate_disk(disk);
380 if (rc)
381 goto release;
382
383 /*
 384 * Note: The reference to <device> is held until the release function
385 * is called.
386 */
387 tape_state_set(device, TS_BLKUSE);
388 mutex_unlock(&tape_block_mutex);
389 return 0;
390
391release:
392 tape_release(device);
393 put_device:
394 tape_put_device(device);
395 mutex_unlock(&tape_block_mutex);
396 return rc;
397}
398
399/*
400 * Block frontend tape device release function.
401 *
402 * Note: One reference to the tape device was made by the open function. So
403 * we just get the pointer here and release the reference.
404 */
405static int
406tapeblock_release(struct gendisk *disk, fmode_t mode)
407{
408 struct tape_device *device = disk->private_data;
409
410 mutex_lock(&tape_block_mutex);
411 tape_state_set(device, TS_IN_USE);
412 tape_release(device);
413 tape_put_device(device);
414 mutex_unlock(&tape_block_mutex);
415
416 return 0;
417}
418
419/*
420 * Initialize block device frontend.
421 */
422int
423tapeblock_init(void)
424{
425 int rc;
426
427 /* Register the tape major number to the kernel */
428 rc = register_blkdev(tapeblock_major, "tBLK");
429 if (rc < 0)
430 return rc;
431
432 if (tapeblock_major == 0)
433 tapeblock_major = rc;
434 return 0;
435}
436
437/*
438 * Deregister major for block device frontend
439 */
440void
441tapeblock_exit(void)
442{
443 unregister_blkdev(tapeblock_major, "tBLK");
444}
diff --git a/drivers/s390/char/tape_std.c b/drivers/s390/char/tape_std.c
index 3c3f342149ec..e7650170274a 100644
--- a/drivers/s390/char/tape_std.c
+++ b/drivers/s390/char/tape_std.c
@@ -564,7 +564,6 @@ int
564tape_std_mtreten(struct tape_device *device, int mt_count) 564tape_std_mtreten(struct tape_device *device, int mt_count)
565{ 565{
566 struct tape_request *request; 566 struct tape_request *request;
567 int rc;
568 567
569 request = tape_alloc_request(4, 0); 568 request = tape_alloc_request(4, 0);
570 if (IS_ERR(request)) 569 if (IS_ERR(request))
@@ -576,7 +575,7 @@ tape_std_mtreten(struct tape_device *device, int mt_count)
576 tape_ccw_cc(request->cpaddr + 2, NOP, 0, NULL); 575 tape_ccw_cc(request->cpaddr + 2, NOP, 0, NULL);
577 tape_ccw_end(request->cpaddr + 3, CCW_CMD_TIC, 0, request->cpaddr); 576 tape_ccw_end(request->cpaddr + 3, CCW_CMD_TIC, 0, request->cpaddr);
578 /* execute it, MTRETEN rc gets ignored */ 577 /* execute it, MTRETEN rc gets ignored */
579 rc = tape_do_io_interruptible(device, request); 578 tape_do_io_interruptible(device, request);
580 tape_free_request(request); 579 tape_free_request(request);
581 return tape_mtop(device, MTREW, 1); 580 return tape_mtop(device, MTREW, 1);
582} 581}
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index 0689fcf23a11..75c3f1f8fd43 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -326,6 +326,36 @@ static void chsc_process_sei_res_acc(struct chsc_sei_area *sei_area)
326 s390_process_res_acc(&link); 326 s390_process_res_acc(&link);
327} 327}
328 328
329static void chsc_process_sei_chp_avail(struct chsc_sei_area *sei_area)
330{
331 struct channel_path *chp;
332 struct chp_id chpid;
333 u8 *data;
334 int num;
335
336 CIO_CRW_EVENT(4, "chsc: channel path availability information\n");
337 if (sei_area->rs != 0)
338 return;
339 data = sei_area->ccdf;
340 chp_id_init(&chpid);
341 for (num = 0; num <= __MAX_CHPID; num++) {
342 if (!chp_test_bit(data, num))
343 continue;
344 chpid.id = num;
345
346 CIO_CRW_EVENT(4, "Update information for channel path "
347 "%x.%02x\n", chpid.cssid, chpid.id);
348 chp = chpid_to_chp(chpid);
349 if (!chp) {
350 chp_new(chpid);
351 continue;
352 }
353 mutex_lock(&chp->lock);
354 chsc_determine_base_channel_path_desc(chpid, &chp->desc);
355 mutex_unlock(&chp->lock);
356 }
357}
358
329struct chp_config_data { 359struct chp_config_data {
330 u8 map[32]; 360 u8 map[32];
331 u8 op; 361 u8 op;
@@ -376,9 +406,12 @@ static void chsc_process_sei(struct chsc_sei_area *sei_area)
376 case 1: /* link incident*/ 406 case 1: /* link incident*/
377 chsc_process_sei_link_incident(sei_area); 407 chsc_process_sei_link_incident(sei_area);
378 break; 408 break;
379 case 2: /* i/o resource accessibiliy */ 409 case 2: /* i/o resource accessibility */
380 chsc_process_sei_res_acc(sei_area); 410 chsc_process_sei_res_acc(sei_area);
381 break; 411 break;
412 case 7: /* channel-path-availability information */
413 chsc_process_sei_chp_avail(sei_area);
414 break;
382 case 8: /* channel-path-configuration notification */ 415 case 8: /* channel-path-configuration notification */
383 chsc_process_sei_chp_config(sei_area); 416 chsc_process_sei_chp_config(sei_area);
384 break; 417 break;
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index 6084103672b5..52c233fa2b12 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -408,9 +408,10 @@ ccw_device_done(struct ccw_device *cdev, int state)
408 CIO_MSG_EVENT(0, "Disconnected device %04x on subchannel " 408 CIO_MSG_EVENT(0, "Disconnected device %04x on subchannel "
409 "%04x\n", cdev->private->dev_id.devno, 409 "%04x\n", cdev->private->dev_id.devno,
410 sch->schid.sch_no); 410 sch->schid.sch_no);
411 if (ccw_device_notify(cdev, CIO_NO_PATH) != NOTIFY_OK) 411 if (ccw_device_notify(cdev, CIO_NO_PATH) != NOTIFY_OK) {
412 cdev->private->state = DEV_STATE_NOT_OPER;
412 ccw_device_sched_todo(cdev, CDEV_TODO_UNREG); 413 ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
413 else 414 } else
414 ccw_device_set_disconnected(cdev); 415 ccw_device_set_disconnected(cdev);
415 cdev->private->flags.donotify = 0; 416 cdev->private->flags.donotify = 0;
416 break; 417 break;
@@ -840,9 +841,6 @@ call_handler:
840static void 841static void
841ccw_device_killing_irq(struct ccw_device *cdev, enum dev_event dev_event) 842ccw_device_killing_irq(struct ccw_device *cdev, enum dev_event dev_event)
842{ 843{
843 struct subchannel *sch;
844
845 sch = to_subchannel(cdev->dev.parent);
846 ccw_device_set_timeout(cdev, 0); 844 ccw_device_set_timeout(cdev, 0);
847 /* Start delayed path verification. */ 845 /* Start delayed path verification. */
848 ccw_device_online_verify(cdev, 0); 846 ccw_device_online_verify(cdev, 0);
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
index 651976b54af8..f98698d5735e 100644
--- a/drivers/s390/cio/device_ops.c
+++ b/drivers/s390/cio/device_ops.c
@@ -418,12 +418,9 @@ int ccw_device_resume(struct ccw_device *cdev)
418int 418int
419ccw_device_call_handler(struct ccw_device *cdev) 419ccw_device_call_handler(struct ccw_device *cdev)
420{ 420{
421 struct subchannel *sch;
422 unsigned int stctl; 421 unsigned int stctl;
423 int ending_status; 422 int ending_status;
424 423
425 sch = to_subchannel(cdev->dev.parent);
426
427 /* 424 /*
428 * we allow for the device action handler if . 425 * we allow for the device action handler if .
429 * - we received ending status 426 * - we received ending status
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index e8f267eb8887..55e8f721e38a 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -1446,7 +1446,7 @@ set:
1446static int handle_outbound(struct qdio_q *q, unsigned int callflags, 1446static int handle_outbound(struct qdio_q *q, unsigned int callflags,
1447 int bufnr, int count) 1447 int bufnr, int count)
1448{ 1448{
1449 unsigned char state; 1449 unsigned char state = 0;
1450 int used, rc = 0; 1450 int used, rc = 0;
1451 1451
1452 qperf_inc(q, outbound_call); 1452 qperf_inc(q, outbound_call);
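
Initializing state is the whole fix here: the helper that fills it only writes on some paths, so with nothing queued the later checks could branch on stack garbage, and gcc rightly warns about possible uninitialized use. The pattern in miniature, with hypothetical names:

#include <stdio.h>

/* Hypothetical helper that fills an out-parameter only on some paths. */
static void get_state(int count, unsigned char *state)
{
        if (count > 0)
                *state = 1;     /* no write when there is nothing queued */
}

int main(void)
{
        unsigned char state = 0;        /* deterministic default */

        get_state(0, &state);
        printf("state=%u\n", state);    /* defined even on the no-op path */
        return 0;
}
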
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 67302b944ab3..16e4a25596e7 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -1183,8 +1183,12 @@ static void ap_scan_bus(struct work_struct *unused)
1183 INIT_LIST_HEAD(&ap_dev->list); 1183 INIT_LIST_HEAD(&ap_dev->list);
1184 setup_timer(&ap_dev->timeout, ap_request_timeout, 1184 setup_timer(&ap_dev->timeout, ap_request_timeout,
1185 (unsigned long) ap_dev); 1185 (unsigned long) ap_dev);
1186 if (device_type == 0) 1186 if (device_type == 0) {
1187 ap_probe_device_type(ap_dev); 1187 if (ap_probe_device_type(ap_dev)) {
1188 kfree(ap_dev);
1189 continue;
1190 }
1191 }
1188 else 1192 else
1189 ap_dev->device_type = device_type; 1193 ap_dev->device_type = device_type;
1190 1194
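
With the added braces, a failed device-type probe now frees the half-initialized ap_device and skips to the next queue instead of registering it anyway. A hedged sketch of that scan-loop error handling; the names are made up for illustration:

#include <stdio.h>
#include <stdlib.h>

struct ap_device_sketch { int qid; int device_type; };

/* Hypothetical probe: returns nonzero when the queue doesn't answer. */
static int probe_device_type(struct ap_device_sketch *dev)
{
        return dev->qid == 3 ? -1 : 0;
}

int main(void)
{
        int qid;

        for (qid = 0; qid < 4; qid++) {
                struct ap_device_sketch *dev = calloc(1, sizeof(*dev));

                if (!dev)
                        break;
                dev->qid = qid;
                if (probe_device_type(dev)) {
                        free(dev);      /* don't leak or register it */
                        continue;
                }
                printf("registered ap device %d\n", qid);
                free(dev);
        }
        return 0;
}
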
diff --git a/drivers/scsi/pcmcia/aha152x_stub.c b/drivers/scsi/pcmcia/aha152x_stub.c
index e77dd02eccdd..7d1609fa233c 100644
--- a/drivers/scsi/pcmcia/aha152x_stub.c
+++ b/drivers/scsi/pcmcia/aha152x_stub.c
@@ -202,7 +202,7 @@ static int aha152x_resume(struct pcmcia_device *link)
202 return 0; 202 return 0;
203} 203}
204 204
205static struct pcmcia_device_id aha152x_ids[] = { 205static const struct pcmcia_device_id aha152x_ids[] = {
206 PCMCIA_DEVICE_PROD_ID123("New Media", "SCSI", "Bus Toaster", 0xcdf7e4cc, 0x35f26476, 0xa8851d6e), 206 PCMCIA_DEVICE_PROD_ID123("New Media", "SCSI", "Bus Toaster", 0xcdf7e4cc, 0x35f26476, 0xa8851d6e),
207 PCMCIA_DEVICE_PROD_ID123("NOTEWORTHY", "SCSI", "Bus Toaster", 0xad89c6e8, 0x35f26476, 0xa8851d6e), 207 PCMCIA_DEVICE_PROD_ID123("NOTEWORTHY", "SCSI", "Bus Toaster", 0xad89c6e8, 0x35f26476, 0xa8851d6e),
208 PCMCIA_DEVICE_PROD_ID12("Adaptec, Inc.", "APA-1460 SCSI Host Adapter", 0x24ba9738, 0x3a3c3d20), 208 PCMCIA_DEVICE_PROD_ID12("Adaptec, Inc.", "APA-1460 SCSI Host Adapter", 0x24ba9738, 0x3a3c3d20),
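
This driver and the ones below all make the same one-line change: the PCMCIA core only ever reads the match table, so declaring it const moves it into .rodata and documents the intent. An illustrative stand-in (the real struct pcmcia_device_id carries more fields and match flags):

#include <stdio.h>

struct pcmcia_device_id_sketch {        /* simplified stand-in */
        unsigned short manf_id, card_id;
};

/* const: read-only match data, placed in .rodata by the toolchain. */
static const struct pcmcia_device_id_sketch example_ids[] = {
        { 0x01c5, 0x0039 },
        { 0, 0 }                /* terminator, like PCMCIA_DEVICE_NULL */
};

int main(void)
{
        printf("first id: %04x/%04x\n",
               example_ids[0].manf_id, example_ids[0].card_id);
        return 0;
}
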
diff --git a/drivers/scsi/pcmcia/fdomain_stub.c b/drivers/scsi/pcmcia/fdomain_stub.c
index cd69c2670f81..714b248f5d5e 100644
--- a/drivers/scsi/pcmcia/fdomain_stub.c
+++ b/drivers/scsi/pcmcia/fdomain_stub.c
@@ -178,7 +178,7 @@ static int fdomain_resume(struct pcmcia_device *link)
178 return 0; 178 return 0;
179} 179}
180 180
181static struct pcmcia_device_id fdomain_ids[] = { 181static const struct pcmcia_device_id fdomain_ids[] = {
182 PCMCIA_DEVICE_PROD_ID12("IBM Corp.", "SCSI PCMCIA Card", 0xe3736c88, 0x859cad20), 182 PCMCIA_DEVICE_PROD_ID12("IBM Corp.", "SCSI PCMCIA Card", 0xe3736c88, 0x859cad20),
183 PCMCIA_DEVICE_PROD_ID1("SCSI PCMCIA Adapter Card", 0x8dacb57e), 183 PCMCIA_DEVICE_PROD_ID1("SCSI PCMCIA Adapter Card", 0x8dacb57e),
184 PCMCIA_DEVICE_PROD_ID12(" SIMPLE TECHNOLOGY Corporation", "SCSI PCMCIA Credit Card Controller", 0x182bdafe, 0xc80d106f), 184 PCMCIA_DEVICE_PROD_ID12(" SIMPLE TECHNOLOGY Corporation", "SCSI PCMCIA Credit Card Controller", 0x182bdafe, 0xc80d106f),
diff --git a/drivers/scsi/pcmcia/nsp_cs.c b/drivers/scsi/pcmcia/nsp_cs.c
index 54bdf6d85c6d..ca86721a71b9 100644
--- a/drivers/scsi/pcmcia/nsp_cs.c
+++ b/drivers/scsi/pcmcia/nsp_cs.c
@@ -1752,7 +1752,7 @@ static int nsp_cs_resume(struct pcmcia_device *link)
1752/*======================================================================* 1752/*======================================================================*
1753 * module entry point 1753 * module entry point
1754 *====================================================================*/ 1754 *====================================================================*/
1755static struct pcmcia_device_id nsp_cs_ids[] = { 1755static const struct pcmcia_device_id nsp_cs_ids[] = {
1756 PCMCIA_DEVICE_PROD_ID123("IO DATA", "CBSC16 ", "1", 0x547e66dc, 0x0d63a3fd, 0x51de003a), 1756 PCMCIA_DEVICE_PROD_ID123("IO DATA", "CBSC16 ", "1", 0x547e66dc, 0x0d63a3fd, 0x51de003a),
1757 PCMCIA_DEVICE_PROD_ID123("KME ", "SCSI-CARD-001", "1", 0x534c02bc, 0x52008408, 0x51de003a), 1757 PCMCIA_DEVICE_PROD_ID123("KME ", "SCSI-CARD-001", "1", 0x534c02bc, 0x52008408, 0x51de003a),
1758 PCMCIA_DEVICE_PROD_ID123("KME ", "SCSI-CARD-002", "1", 0x534c02bc, 0xcb09d5b2, 0x51de003a), 1758 PCMCIA_DEVICE_PROD_ID123("KME ", "SCSI-CARD-002", "1", 0x534c02bc, 0xcb09d5b2, 0x51de003a),
diff --git a/drivers/scsi/pcmcia/qlogic_stub.c b/drivers/scsi/pcmcia/qlogic_stub.c
index 9c96ca889ec9..bcaf89fe0c9e 100644
--- a/drivers/scsi/pcmcia/qlogic_stub.c
+++ b/drivers/scsi/pcmcia/qlogic_stub.c
@@ -270,7 +270,7 @@ static int qlogic_resume(struct pcmcia_device *link)
270 return 0; 270 return 0;
271} 271}
272 272
273static struct pcmcia_device_id qlogic_ids[] = { 273static const struct pcmcia_device_id qlogic_ids[] = {
274 PCMCIA_DEVICE_PROD_ID12("EIger Labs", "PCMCIA-to-SCSI Adapter", 0x88395fa7, 0x33b7a5e6), 274 PCMCIA_DEVICE_PROD_ID12("EIger Labs", "PCMCIA-to-SCSI Adapter", 0x88395fa7, 0x33b7a5e6),
275 PCMCIA_DEVICE_PROD_ID12("EPSON", "SCSI-2 PC Card SC200", 0xd361772f, 0x299d1751), 275 PCMCIA_DEVICE_PROD_ID12("EPSON", "SCSI-2 PC Card SC200", 0xd361772f, 0x299d1751),
276 PCMCIA_DEVICE_PROD_ID12("MACNICA", "MIRACLE SCSI-II mPS110", 0x20841b68, 0xab3c3b6d), 276 PCMCIA_DEVICE_PROD_ID12("MACNICA", "MIRACLE SCSI-II mPS110", 0x20841b68, 0xab3c3b6d),
diff --git a/drivers/scsi/pcmcia/sym53c500_cs.c b/drivers/scsi/pcmcia/sym53c500_cs.c
index 8552296edaa1..f5b52731abd9 100644
--- a/drivers/scsi/pcmcia/sym53c500_cs.c
+++ b/drivers/scsi/pcmcia/sym53c500_cs.c
@@ -865,7 +865,7 @@ MODULE_AUTHOR("Bob Tracy <rct@frus.com>");
865MODULE_DESCRIPTION("SYM53C500 PCMCIA SCSI driver"); 865MODULE_DESCRIPTION("SYM53C500 PCMCIA SCSI driver");
866MODULE_LICENSE("GPL"); 866MODULE_LICENSE("GPL");
867 867
868static struct pcmcia_device_id sym53c500_ids[] = { 868static const struct pcmcia_device_id sym53c500_ids[] = {
869 PCMCIA_DEVICE_PROD_ID12("BASICS by New Media Corporation", "SCSI Sym53C500", 0x23c78a9d, 0x0099e7f7), 869 PCMCIA_DEVICE_PROD_ID12("BASICS by New Media Corporation", "SCSI Sym53C500", 0x23c78a9d, 0x0099e7f7),
870 PCMCIA_DEVICE_PROD_ID12("New Media Corporation", "SCSI Bus Toaster Sym53C500", 0x085a850b, 0x45432eb8), 870 PCMCIA_DEVICE_PROD_ID12("New Media Corporation", "SCSI Bus Toaster Sym53C500", 0x085a850b, 0x45432eb8),
871 PCMCIA_DEVICE_PROD_ID2("SCSI9000", 0x21648f44), 871 PCMCIA_DEVICE_PROD_ID2("SCSI9000", 0x21648f44),
diff --git a/drivers/spi/coldfire_qspi.c b/drivers/spi/coldfire_qspi.c
index 8856bcca9d29..ae2cd1c1fda8 100644
--- a/drivers/spi/coldfire_qspi.c
+++ b/drivers/spi/coldfire_qspi.c
@@ -33,6 +33,7 @@
33#include <linux/spi/spi.h> 33#include <linux/spi/spi.h>
34 34
35#include <asm/coldfire.h> 35#include <asm/coldfire.h>
36#include <asm/mcfsim.h>
36#include <asm/mcfqspi.h> 37#include <asm/mcfqspi.h>
37 38
38#define DRIVER_NAME "mcfqspi" 39#define DRIVER_NAME "mcfqspi"
diff --git a/drivers/staging/comedi/drivers/cb_das16_cs.c b/drivers/staging/comedi/drivers/cb_das16_cs.c
index bb93685d8b93..8a1b8a7fa15f 100644
--- a/drivers/staging/comedi/drivers/cb_das16_cs.c
+++ b/drivers/staging/comedi/drivers/cb_das16_cs.c
@@ -772,7 +772,7 @@ static int das16cs_pcmcia_resume(struct pcmcia_device *link)
772 772
773/*====================================================================*/ 773/*====================================================================*/
774 774
775static struct pcmcia_device_id das16cs_id_table[] = { 775static const struct pcmcia_device_id das16cs_id_table[] = {
776 PCMCIA_DEVICE_MANF_CARD(0x01c5, 0x0039), 776 PCMCIA_DEVICE_MANF_CARD(0x01c5, 0x0039),
777 PCMCIA_DEVICE_MANF_CARD(0x01c5, 0x4009), 777 PCMCIA_DEVICE_MANF_CARD(0x01c5, 0x4009),
778 PCMCIA_DEVICE_NULL 778 PCMCIA_DEVICE_NULL
diff --git a/drivers/staging/comedi/drivers/das08_cs.c b/drivers/staging/comedi/drivers/das08_cs.c
index 0b32a2df7768..6d91d3028178 100644
--- a/drivers/staging/comedi/drivers/das08_cs.c
+++ b/drivers/staging/comedi/drivers/das08_cs.c
@@ -219,7 +219,7 @@ static int das08_pcmcia_resume(struct pcmcia_device *link)
219 219
220/*====================================================================*/ 220/*====================================================================*/
221 221
222static struct pcmcia_device_id das08_cs_id_table[] = { 222static const struct pcmcia_device_id das08_cs_id_table[] = {
223 PCMCIA_DEVICE_MANF_CARD(0x01c5, 0x4001), 223 PCMCIA_DEVICE_MANF_CARD(0x01c5, 0x4001),
224 PCMCIA_DEVICE_NULL 224 PCMCIA_DEVICE_NULL
225}; 225};
diff --git a/drivers/staging/comedi/drivers/ni_daq_700.c b/drivers/staging/comedi/drivers/ni_daq_700.c
index 6b7372eed90d..2672629e9ff9 100644
--- a/drivers/staging/comedi/drivers/ni_daq_700.c
+++ b/drivers/staging/comedi/drivers/ni_daq_700.c
@@ -552,7 +552,7 @@ static int dio700_cs_resume(struct pcmcia_device *link)
552 552
553/*====================================================================*/ 553/*====================================================================*/
554 554
555static struct pcmcia_device_id dio700_cs_ids[] = { 555static const struct pcmcia_device_id dio700_cs_ids[] = {
556 /* N.B. These IDs should match those in dio700_boards */ 556 /* N.B. These IDs should match those in dio700_boards */
557 PCMCIA_DEVICE_MANF_CARD(0x010b, 0x4743), /* daqcard-700 */ 557 PCMCIA_DEVICE_MANF_CARD(0x010b, 0x4743), /* daqcard-700 */
558 PCMCIA_DEVICE_NULL 558 PCMCIA_DEVICE_NULL
diff --git a/drivers/staging/comedi/drivers/ni_daq_dio24.c b/drivers/staging/comedi/drivers/ni_daq_dio24.c
index c9c28584db67..49b824c7bd2e 100644
--- a/drivers/staging/comedi/drivers/ni_daq_dio24.c
+++ b/drivers/staging/comedi/drivers/ni_daq_dio24.c
@@ -304,7 +304,7 @@ static int dio24_cs_resume(struct pcmcia_device *link)
304 304
305/*====================================================================*/ 305/*====================================================================*/
306 306
307static struct pcmcia_device_id dio24_cs_ids[] = { 307static const struct pcmcia_device_id dio24_cs_ids[] = {
308 /* N.B. These IDs should match those in dio24_boards */ 308 /* N.B. These IDs should match those in dio24_boards */
309 PCMCIA_DEVICE_MANF_CARD(0x010b, 0x475c), /* daqcard-dio24 */ 309 PCMCIA_DEVICE_MANF_CARD(0x010b, 0x475c), /* daqcard-dio24 */
310 PCMCIA_DEVICE_NULL 310 PCMCIA_DEVICE_NULL
diff --git a/drivers/staging/comedi/drivers/ni_labpc_cs.c b/drivers/staging/comedi/drivers/ni_labpc_cs.c
index 6facbc8bf776..832a5178b638 100644
--- a/drivers/staging/comedi/drivers/ni_labpc_cs.c
+++ b/drivers/staging/comedi/drivers/ni_labpc_cs.c
@@ -267,7 +267,7 @@ static int labpc_cs_resume(struct pcmcia_device *link)
267 return 0; 267 return 0;
268} /* labpc_cs_resume */ 268} /* labpc_cs_resume */
269 269
270static struct pcmcia_device_id labpc_cs_ids[] = { 270static const struct pcmcia_device_id labpc_cs_ids[] = {
271 /* N.B. These IDs should match those in labpc_cs_boards (ni_labpc.c) */ 271 /* N.B. These IDs should match those in labpc_cs_boards (ni_labpc.c) */
272 PCMCIA_DEVICE_MANF_CARD(0x010b, 0x0103), /* daqcard-1200 */ 272 PCMCIA_DEVICE_MANF_CARD(0x010b, 0x0103), /* daqcard-1200 */
273 PCMCIA_DEVICE_NULL 273 PCMCIA_DEVICE_NULL
diff --git a/drivers/staging/comedi/drivers/ni_mio_cs.c b/drivers/staging/comedi/drivers/ni_mio_cs.c
index 49563273f605..53ec24bb6dce 100644
--- a/drivers/staging/comedi/drivers/ni_mio_cs.c
+++ b/drivers/staging/comedi/drivers/ni_mio_cs.c
@@ -416,7 +416,7 @@ static int ni_getboardtype(struct comedi_device *dev,
416 416
417#ifdef MODULE 417#ifdef MODULE
418 418
419static struct pcmcia_device_id ni_mio_cs_ids[] = { 419static const struct pcmcia_device_id ni_mio_cs_ids[] = {
420 PCMCIA_DEVICE_MANF_CARD(0x010b, 0x010d), /* DAQCard-ai-16xe-50 */ 420 PCMCIA_DEVICE_MANF_CARD(0x010b, 0x010d), /* DAQCard-ai-16xe-50 */
421 PCMCIA_DEVICE_MANF_CARD(0x010b, 0x010c), /* DAQCard-ai-16e-4 */ 421 PCMCIA_DEVICE_MANF_CARD(0x010b, 0x010c), /* DAQCard-ai-16e-4 */
422 PCMCIA_DEVICE_MANF_CARD(0x010b, 0x02c4), /* DAQCard-6062E */ 422 PCMCIA_DEVICE_MANF_CARD(0x010b, 0x02c4), /* DAQCard-6062E */
diff --git a/drivers/staging/comedi/drivers/quatech_daqp_cs.c b/drivers/staging/comedi/drivers/quatech_daqp_cs.c
index 82942e5728a5..e0bb73445dd8 100644
--- a/drivers/staging/comedi/drivers/quatech_daqp_cs.c
+++ b/drivers/staging/comedi/drivers/quatech_daqp_cs.c
@@ -1087,7 +1087,7 @@ static int daqp_cs_resume(struct pcmcia_device *link)
1087 1087
1088#ifdef MODULE 1088#ifdef MODULE
1089 1089
1090static struct pcmcia_device_id daqp_cs_id_table[] = { 1090static const struct pcmcia_device_id daqp_cs_id_table[] = {
1091 PCMCIA_DEVICE_MANF_CARD(0x0137, 0x0027), 1091 PCMCIA_DEVICE_MANF_CARD(0x0137, 0x0027),
1092 PCMCIA_DEVICE_NULL 1092 PCMCIA_DEVICE_NULL
1093}; 1093};
diff --git a/drivers/staging/ft1000/ft1000-pcmcia/ft1000_cs.c b/drivers/staging/ft1000/ft1000-pcmcia/ft1000_cs.c
index 10af47700efb..68ea035635f4 100644
--- a/drivers/staging/ft1000/ft1000-pcmcia/ft1000_cs.c
+++ b/drivers/staging/ft1000/ft1000-pcmcia/ft1000_cs.c
@@ -284,7 +284,7 @@ static int ft1000_resume(struct pcmcia_device *link)
284 284
285/*====================================================================*/ 285/*====================================================================*/
286 286
287static struct pcmcia_device_id ft1000_ids[] = { 287static const struct pcmcia_device_id ft1000_ids[] = {
288 PCMCIA_DEVICE_MANF_CARD(0x02cc, 0x0100), 288 PCMCIA_DEVICE_MANF_CARD(0x02cc, 0x0100),
289 PCMCIA_DEVICE_MANF_CARD(0x02cc, 0x1000), 289 PCMCIA_DEVICE_MANF_CARD(0x02cc, 0x1000),
290 PCMCIA_DEVICE_MANF_CARD(0x02cc, 0x1300), 290 PCMCIA_DEVICE_MANF_CARD(0x02cc, 0x1300),
diff --git a/drivers/staging/wlags49_h2/wl_cs.c b/drivers/staging/wlags49_h2/wl_cs.c
index 6555891e149c..a3a727c3b40f 100644
--- a/drivers/staging/wlags49_h2/wl_cs.c
+++ b/drivers/staging/wlags49_h2/wl_cs.c
@@ -378,7 +378,7 @@ int wl_adapter_close(struct net_device *dev)
378} /* wl_adapter_close */ 378} /* wl_adapter_close */
379/*============================================================================*/ 379/*============================================================================*/
380 380
381static struct pcmcia_device_id wl_adapter_ids[] = { 381static const struct pcmcia_device_id wl_adapter_ids[] = {
382#if !((HCF_TYPE) & HCF_TYPE_HII5) 382#if !((HCF_TYPE) & HCF_TYPE_HII5)
383 PCMCIA_DEVICE_MANF_CARD(0x0156, 0x0003), 383 PCMCIA_DEVICE_MANF_CARD(0x0156, 0x0003),
384 PCMCIA_DEVICE_PROD_ID12("Agere Systems", "Wireless PC Card Model 0110", 384 PCMCIA_DEVICE_PROD_ID12("Agere Systems", "Wireless PC Card Model 0110",
diff --git a/drivers/telephony/ixj_pcmcia.c b/drivers/telephony/ixj_pcmcia.c
index d005b9eeebbc..05032e2cc954 100644
--- a/drivers/telephony/ixj_pcmcia.c
+++ b/drivers/telephony/ixj_pcmcia.c
@@ -157,7 +157,7 @@ static void ixj_cs_release(struct pcmcia_device *link)
157 pcmcia_disable_device(link); 157 pcmcia_disable_device(link);
158} 158}
159 159
160static struct pcmcia_device_id ixj_ids[] = { 160static const struct pcmcia_device_id ixj_ids[] = {
161 PCMCIA_DEVICE_MANF_CARD(0x0257, 0x0600), 161 PCMCIA_DEVICE_MANF_CARD(0x0257, 0x0600),
162 PCMCIA_DEVICE_NULL 162 PCMCIA_DEVICE_NULL
163}; 163};
diff --git a/drivers/tty/ipwireless/main.c b/drivers/tty/ipwireless/main.c
index 444155a305ae..655c7948261c 100644
--- a/drivers/tty/ipwireless/main.c
+++ b/drivers/tty/ipwireless/main.c
@@ -33,7 +33,7 @@
33#include <pcmcia/ss.h> 33#include <pcmcia/ss.h>
34#include <pcmcia/ds.h> 34#include <pcmcia/ds.h>
35 35
36static struct pcmcia_device_id ipw_ids[] = { 36static const struct pcmcia_device_id ipw_ids[] = {
37 PCMCIA_DEVICE_MANF_CARD(0x02f2, 0x0100), 37 PCMCIA_DEVICE_MANF_CARD(0x02f2, 0x0100),
38 PCMCIA_DEVICE_MANF_CARD(0x02f2, 0x0200), 38 PCMCIA_DEVICE_MANF_CARD(0x02f2, 0x0200),
39 PCMCIA_DEVICE_NULL 39 PCMCIA_DEVICE_NULL
diff --git a/drivers/tty/serial/serial_cs.c b/drivers/tty/serial/serial_cs.c
index 1ef4df9bf7e4..eef736ff810a 100644
--- a/drivers/tty/serial/serial_cs.c
+++ b/drivers/tty/serial/serial_cs.c
@@ -670,7 +670,7 @@ failed:
670 return -ENODEV; 670 return -ENODEV;
671} 671}
672 672
673static struct pcmcia_device_id serial_ids[] = { 673static const struct pcmcia_device_id serial_ids[] = {
674 PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0057, 0x0021), 674 PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0057, 0x0021),
675 PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0089, 0x110a), 675 PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0089, 0x110a),
676 PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0104, 0x000a), 676 PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0104, 0x000a),
diff --git a/drivers/usb/host/sl811_cs.c b/drivers/usb/host/sl811_cs.c
index 3775c035a6c5..3b6f50eaec91 100644
--- a/drivers/usb/host/sl811_cs.c
+++ b/drivers/usb/host/sl811_cs.c
@@ -187,7 +187,7 @@ static int sl811_cs_probe(struct pcmcia_device *link)
187 return sl811_cs_config(link); 187 return sl811_cs_config(link);
188} 188}
189 189
190static struct pcmcia_device_id sl811_ids[] = { 190static const struct pcmcia_device_id sl811_ids[] = {
191 PCMCIA_DEVICE_MANF_CARD(0xc015, 0x0001), /* RATOC USB HOST CF+ Card */ 191 PCMCIA_DEVICE_MANF_CARD(0xc015, 0x0001), /* RATOC USB HOST CF+ Card */
192 PCMCIA_DEVICE_NULL, 192 PCMCIA_DEVICE_NULL,
193}; 193};
diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
index 397d3057d336..1bffbe0ed778 100644
--- a/fs/binfmt_flat.c
+++ b/fs/binfmt_flat.c
@@ -820,6 +820,8 @@ static int load_flat_shared_library(int id, struct lib_info *libs)
820 int res; 820 int res;
821 char buf[16]; 821 char buf[16];
822 822
823 memset(&bprm, 0, sizeof(bprm));
824
823 /* Create the file name */ 825 /* Create the file name */
824 sprintf(buf, "/lib/lib%d.so", id); 826 sprintf(buf, "/lib/lib%d.so", id);
825 827
@@ -835,6 +837,12 @@ static int load_flat_shared_library(int id, struct lib_info *libs)
835 if (!bprm.cred) 837 if (!bprm.cred)
836 goto out; 838 goto out;
837 839
840 /* We don't really care about recalculating credentials at this point
841 * as we're past the point of no return and are dealing with shared
842 * libraries.
843 */
844 bprm.cred_prepared = 1;
845
838 res = prepare_binprm(&bprm); 846 res = prepare_binprm(&bprm);
839 847
840 if (!IS_ERR_VALUE(res)) 848 if (!IS_ERR_VALUE(res))
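
load_flat_shared_library() builds a struct linux_binprm on the stack and fills in only a few fields, so zeroing it first keeps every later read (such as the new cred_prepared check) deterministic. A small demonstration of why the memset matters, with a simplified stand-in type:

#include <stdio.h>
#include <string.h>

struct binprm_sketch {          /* stand-in for struct linux_binprm */
        const char *filename;
        int cred_prepared;
        /* ... many more fields the loader never sets ... */
};

int main(void)
{
        struct binprm_sketch bprm;

        /* Without this, every field the code doesn't set explicitly is
         * stack garbage, and a check like 'if (bprm.cred_prepared)'
         * reads an indeterminate value. */
        memset(&bprm, 0, sizeof(bprm));

        bprm.filename = "/lib/lib1.so";
        bprm.cred_prepared = 1;
        printf("%s cred_prepared=%d\n", bprm.filename, bprm.cred_prepared);
        return 0;
}
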
diff --git a/fs/dlm/config.c b/fs/dlm/config.c
index 0d329ff8ed4c..9b026ea8baa9 100644
--- a/fs/dlm/config.c
+++ b/fs/dlm/config.c
@@ -100,6 +100,7 @@ struct dlm_cluster {
100 unsigned int cl_log_debug; 100 unsigned int cl_log_debug;
101 unsigned int cl_protocol; 101 unsigned int cl_protocol;
102 unsigned int cl_timewarn_cs; 102 unsigned int cl_timewarn_cs;
103 unsigned int cl_waitwarn_us;
103}; 104};
104 105
105enum { 106enum {
@@ -114,6 +115,7 @@ enum {
114 CLUSTER_ATTR_LOG_DEBUG, 115 CLUSTER_ATTR_LOG_DEBUG,
115 CLUSTER_ATTR_PROTOCOL, 116 CLUSTER_ATTR_PROTOCOL,
116 CLUSTER_ATTR_TIMEWARN_CS, 117 CLUSTER_ATTR_TIMEWARN_CS,
118 CLUSTER_ATTR_WAITWARN_US,
117}; 119};
118 120
119struct cluster_attribute { 121struct cluster_attribute {
@@ -166,6 +168,7 @@ CLUSTER_ATTR(scan_secs, 1);
166CLUSTER_ATTR(log_debug, 0); 168CLUSTER_ATTR(log_debug, 0);
167CLUSTER_ATTR(protocol, 0); 169CLUSTER_ATTR(protocol, 0);
168CLUSTER_ATTR(timewarn_cs, 1); 170CLUSTER_ATTR(timewarn_cs, 1);
171CLUSTER_ATTR(waitwarn_us, 0);
169 172
170static struct configfs_attribute *cluster_attrs[] = { 173static struct configfs_attribute *cluster_attrs[] = {
171 [CLUSTER_ATTR_TCP_PORT] = &cluster_attr_tcp_port.attr, 174 [CLUSTER_ATTR_TCP_PORT] = &cluster_attr_tcp_port.attr,
@@ -179,6 +182,7 @@ static struct configfs_attribute *cluster_attrs[] = {
179 [CLUSTER_ATTR_LOG_DEBUG] = &cluster_attr_log_debug.attr, 182 [CLUSTER_ATTR_LOG_DEBUG] = &cluster_attr_log_debug.attr,
180 [CLUSTER_ATTR_PROTOCOL] = &cluster_attr_protocol.attr, 183 [CLUSTER_ATTR_PROTOCOL] = &cluster_attr_protocol.attr,
181 [CLUSTER_ATTR_TIMEWARN_CS] = &cluster_attr_timewarn_cs.attr, 184 [CLUSTER_ATTR_TIMEWARN_CS] = &cluster_attr_timewarn_cs.attr,
185 [CLUSTER_ATTR_WAITWARN_US] = &cluster_attr_waitwarn_us.attr,
182 NULL, 186 NULL,
183}; 187};
184 188
@@ -439,6 +443,7 @@ static struct config_group *make_cluster(struct config_group *g,
439 cl->cl_log_debug = dlm_config.ci_log_debug; 443 cl->cl_log_debug = dlm_config.ci_log_debug;
440 cl->cl_protocol = dlm_config.ci_protocol; 444 cl->cl_protocol = dlm_config.ci_protocol;
441 cl->cl_timewarn_cs = dlm_config.ci_timewarn_cs; 445 cl->cl_timewarn_cs = dlm_config.ci_timewarn_cs;
446 cl->cl_waitwarn_us = dlm_config.ci_waitwarn_us;
442 447
443 space_list = &sps->ss_group; 448 space_list = &sps->ss_group;
444 comm_list = &cms->cs_group; 449 comm_list = &cms->cs_group;
@@ -986,6 +991,7 @@ int dlm_our_addr(struct sockaddr_storage *addr, int num)
986#define DEFAULT_LOG_DEBUG 0 991#define DEFAULT_LOG_DEBUG 0
987#define DEFAULT_PROTOCOL 0 992#define DEFAULT_PROTOCOL 0
988#define DEFAULT_TIMEWARN_CS 500 /* 5 sec = 500 centiseconds */ 993#define DEFAULT_TIMEWARN_CS 500 /* 5 sec = 500 centiseconds */
994#define DEFAULT_WAITWARN_US 0
989 995
990struct dlm_config_info dlm_config = { 996struct dlm_config_info dlm_config = {
991 .ci_tcp_port = DEFAULT_TCP_PORT, 997 .ci_tcp_port = DEFAULT_TCP_PORT,
@@ -998,6 +1004,7 @@ struct dlm_config_info dlm_config = {
998 .ci_scan_secs = DEFAULT_SCAN_SECS, 1004 .ci_scan_secs = DEFAULT_SCAN_SECS,
999 .ci_log_debug = DEFAULT_LOG_DEBUG, 1005 .ci_log_debug = DEFAULT_LOG_DEBUG,
1000 .ci_protocol = DEFAULT_PROTOCOL, 1006 .ci_protocol = DEFAULT_PROTOCOL,
1001 .ci_timewarn_cs = DEFAULT_TIMEWARN_CS 1007 .ci_timewarn_cs = DEFAULT_TIMEWARN_CS,
1008 .ci_waitwarn_us = DEFAULT_WAITWARN_US
1002}; 1009};
1003 1010
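
Wiring up waitwarn_us means touching four parallel places: the dlm_cluster struct, the attribute enum, the enum-indexed attribute array, and the compiled-in default. A compact sketch of that parallel-table discipline; the names are illustrative, not the real dlm symbols:

#include <stdio.h>

struct cluster_cfg {
        unsigned int timewarn_cs;
        unsigned int waitwarn_us;       /* 1: new field */
};

enum {
        ATTR_TIMEWARN_CS,
        ATTR_WAITWARN_US,               /* 2: new enum entry */
        ATTR_COUNT
};

static const char *attr_names[ATTR_COUNT] = {
        [ATTR_TIMEWARN_CS] = "timewarn_cs",
        [ATTR_WAITWARN_US] = "waitwarn_us",     /* 3: new table slot */
};

#define DEFAULT_WAITWARN_US 0           /* 4: new default */

int main(void)
{
        struct cluster_cfg cfg = { 500, DEFAULT_WAITWARN_US };

        printf("%s=%u %s=%u\n",
               attr_names[ATTR_TIMEWARN_CS], cfg.timewarn_cs,
               attr_names[ATTR_WAITWARN_US], cfg.waitwarn_us);
        return 0;
}
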
diff --git a/fs/dlm/config.h b/fs/dlm/config.h
index 4f1d6fce58c5..dd0ce24d5a80 100644
--- a/fs/dlm/config.h
+++ b/fs/dlm/config.h
@@ -28,6 +28,7 @@ struct dlm_config_info {
28 int ci_log_debug; 28 int ci_log_debug;
29 int ci_protocol; 29 int ci_protocol;
30 int ci_timewarn_cs; 30 int ci_timewarn_cs;
31 int ci_waitwarn_us;
31}; 32};
32 33
33extern struct dlm_config_info dlm_config; 34extern struct dlm_config_info dlm_config;
diff --git a/fs/dlm/dlm_internal.h b/fs/dlm/dlm_internal.h
index b94204913011..0262451eb9c6 100644
--- a/fs/dlm/dlm_internal.h
+++ b/fs/dlm/dlm_internal.h
@@ -209,6 +209,7 @@ struct dlm_args {
209#define DLM_IFL_WATCH_TIMEWARN 0x00400000 209#define DLM_IFL_WATCH_TIMEWARN 0x00400000
210#define DLM_IFL_TIMEOUT_CANCEL 0x00800000 210#define DLM_IFL_TIMEOUT_CANCEL 0x00800000
211#define DLM_IFL_DEADLOCK_CANCEL 0x01000000 211#define DLM_IFL_DEADLOCK_CANCEL 0x01000000
212#define DLM_IFL_STUB_MS 0x02000000 /* magic number for m_flags */
212#define DLM_IFL_USER 0x00000001 213#define DLM_IFL_USER 0x00000001
213#define DLM_IFL_ORPHAN 0x00000002 214#define DLM_IFL_ORPHAN 0x00000002
214 215
@@ -245,6 +246,7 @@ struct dlm_lkb {
245 246
246 int8_t lkb_wait_type; /* type of reply waiting for */ 247 int8_t lkb_wait_type; /* type of reply waiting for */
247 int8_t lkb_wait_count; 248 int8_t lkb_wait_count;
249 int lkb_wait_nodeid; /* for debugging */
248 250
249 struct list_head lkb_idtbl_list; /* lockspace lkbtbl */ 251 struct list_head lkb_idtbl_list; /* lockspace lkbtbl */
250 struct list_head lkb_statequeue; /* rsb g/c/w list */ 252 struct list_head lkb_statequeue; /* rsb g/c/w list */
@@ -254,6 +256,7 @@ struct dlm_lkb {
254 struct list_head lkb_ownqueue; /* list of locks for a process */ 256 struct list_head lkb_ownqueue; /* list of locks for a process */
255 struct list_head lkb_time_list; 257 struct list_head lkb_time_list;
256 ktime_t lkb_timestamp; 258 ktime_t lkb_timestamp;
259 ktime_t lkb_wait_time;
257 unsigned long lkb_timeout_cs; 260 unsigned long lkb_timeout_cs;
258 261
259 struct dlm_callback lkb_callbacks[DLM_CALLBACKS_SIZE]; 262 struct dlm_callback lkb_callbacks[DLM_CALLBACKS_SIZE];
diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c
index 56d6bfcc1e48..f71d0b5abd95 100644
--- a/fs/dlm/lock.c
+++ b/fs/dlm/lock.c
@@ -799,10 +799,84 @@ static int msg_reply_type(int mstype)
799 return -1; 799 return -1;
800} 800}
801 801
802static int nodeid_warned(int nodeid, int num_nodes, int *warned)
803{
804 int i;
805
806 for (i = 0; i < num_nodes; i++) {
807 if (!warned[i]) {
808 warned[i] = nodeid;
809 return 0;
810 }
811 if (warned[i] == nodeid)
812 return 1;
813 }
814 return 0;
815}
816
817void dlm_scan_waiters(struct dlm_ls *ls)
818{
819 struct dlm_lkb *lkb;
820 ktime_t zero = ktime_set(0, 0);
821 s64 us;
822 s64 debug_maxus = 0;
823 u32 debug_scanned = 0;
824 u32 debug_expired = 0;
825 int num_nodes = 0;
826 int *warned = NULL;
827
828 if (!dlm_config.ci_waitwarn_us)
829 return;
830
831 mutex_lock(&ls->ls_waiters_mutex);
832
833 list_for_each_entry(lkb, &ls->ls_waiters, lkb_wait_reply) {
834 if (ktime_equal(lkb->lkb_wait_time, zero))
835 continue;
836
837 debug_scanned++;
838
839 us = ktime_to_us(ktime_sub(ktime_get(), lkb->lkb_wait_time));
840
841 if (us < dlm_config.ci_waitwarn_us)
842 continue;
843
844 lkb->lkb_wait_time = zero;
845
846 debug_expired++;
847 if (us > debug_maxus)
848 debug_maxus = us;
849
850 if (!num_nodes) {
851 num_nodes = ls->ls_num_nodes;
852 warned = kmalloc(num_nodes * sizeof(int), GFP_KERNEL);
853 if (warned)
854 memset(warned, 0, num_nodes * sizeof(int));
855 }
856 if (!warned)
857 continue;
858 if (nodeid_warned(lkb->lkb_wait_nodeid, num_nodes, warned))
859 continue;
860
861 log_error(ls, "waitwarn %x %lld %d us check connection to "
862 "node %d", lkb->lkb_id, (long long)us,
863 dlm_config.ci_waitwarn_us, lkb->lkb_wait_nodeid);
864 }
865 mutex_unlock(&ls->ls_waiters_mutex);
866
867 if (warned)
868 kfree(warned);
869
870 if (debug_expired)
871 log_debug(ls, "scan_waiters %u warn %u over %d us max %lld us",
872 debug_scanned, debug_expired,
873 dlm_config.ci_waitwarn_us, (long long)debug_maxus);
874}
875
802/* add/remove lkb from global waiters list of lkb's waiting for 876/* add/remove lkb from global waiters list of lkb's waiting for
803 a reply from a remote node */ 877 a reply from a remote node */
804 878
805static int add_to_waiters(struct dlm_lkb *lkb, int mstype) 879static int add_to_waiters(struct dlm_lkb *lkb, int mstype, int to_nodeid)
806{ 880{
807 struct dlm_ls *ls = lkb->lkb_resource->res_ls; 881 struct dlm_ls *ls = lkb->lkb_resource->res_ls;
808 int error = 0; 882 int error = 0;
@@ -842,6 +916,8 @@ static int add_to_waiters(struct dlm_lkb *lkb, int mstype)
842 916
843 lkb->lkb_wait_count++; 917 lkb->lkb_wait_count++;
844 lkb->lkb_wait_type = mstype; 918 lkb->lkb_wait_type = mstype;
919 lkb->lkb_wait_time = ktime_get();
920 lkb->lkb_wait_nodeid = to_nodeid; /* for debugging */
845 hold_lkb(lkb); 921 hold_lkb(lkb);
846 list_add(&lkb->lkb_wait_reply, &ls->ls_waiters); 922 list_add(&lkb->lkb_wait_reply, &ls->ls_waiters);
847 out: 923 out:
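
add_to_waiters() now stamps the send time, and dlm_scan_waiters() warns once per node when a reply has been outstanding longer than ci_waitwarn_us. The same measurement as a runnable userspace sketch, with clock_gettime() standing in for ktime_get():

#include <stdio.h>
#include <stdint.h>
#include <time.h>

static int64_t now_us(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (int64_t)ts.tv_sec * 1000000 + ts.tv_nsec / 1000;
}

int main(void)
{
        const int64_t waitwarn_us = 100;        /* like ci_waitwarn_us */
        int64_t wait_time = now_us();           /* stamped at send time */
        struct timespec d = { 0, 2000000 };     /* simulate a 2 ms stall */
        int64_t us;

        nanosleep(&d, NULL);
        us = now_us() - wait_time;
        if (us >= waitwarn_us)
                printf("waitwarn: reply outstanding for %lld us\n",
                       (long long)us);
        return 0;
}
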
@@ -961,10 +1037,10 @@ static int remove_from_waiters_ms(struct dlm_lkb *lkb, struct dlm_message *ms)
961 struct dlm_ls *ls = lkb->lkb_resource->res_ls; 1037 struct dlm_ls *ls = lkb->lkb_resource->res_ls;
962 int error; 1038 int error;
963 1039
964 if (ms != &ls->ls_stub_ms) 1040 if (ms->m_flags != DLM_IFL_STUB_MS)
965 mutex_lock(&ls->ls_waiters_mutex); 1041 mutex_lock(&ls->ls_waiters_mutex);
966 error = _remove_from_waiters(lkb, ms->m_type, ms); 1042 error = _remove_from_waiters(lkb, ms->m_type, ms);
967 if (ms != &ls->ls_stub_ms) 1043 if (ms->m_flags != DLM_IFL_STUB_MS)
968 mutex_unlock(&ls->ls_waiters_mutex); 1044 mutex_unlock(&ls->ls_waiters_mutex);
969 return error; 1045 return error;
970} 1046}
@@ -1157,6 +1233,16 @@ void dlm_adjust_timeouts(struct dlm_ls *ls)
1157 list_for_each_entry(lkb, &ls->ls_timeout, lkb_time_list) 1233 list_for_each_entry(lkb, &ls->ls_timeout, lkb_time_list)
1158 lkb->lkb_timestamp = ktime_add_us(lkb->lkb_timestamp, adj_us); 1234 lkb->lkb_timestamp = ktime_add_us(lkb->lkb_timestamp, adj_us);
1159 mutex_unlock(&ls->ls_timeout_mutex); 1235 mutex_unlock(&ls->ls_timeout_mutex);
1236
1237 if (!dlm_config.ci_waitwarn_us)
1238 return;
1239
1240 mutex_lock(&ls->ls_waiters_mutex);
1241 list_for_each_entry(lkb, &ls->ls_waiters, lkb_wait_reply) {
1242 if (ktime_to_us(lkb->lkb_wait_time))
1243 lkb->lkb_wait_time = ktime_get();
1244 }
1245 mutex_unlock(&ls->ls_waiters_mutex);
1160} 1246}
1161 1247
1162/* lkb is master or local copy */ 1248/* lkb is master or local copy */
@@ -1376,14 +1462,8 @@ static void grant_lock_pending(struct dlm_rsb *r, struct dlm_lkb *lkb)
1376 ALTPR/ALTCW: our rqmode may have been changed to PR or CW to become 1462 ALTPR/ALTCW: our rqmode may have been changed to PR or CW to become
1377 compatible with other granted locks */ 1463 compatible with other granted locks */
1378 1464
1379static void munge_demoted(struct dlm_lkb *lkb, struct dlm_message *ms) 1465static void munge_demoted(struct dlm_lkb *lkb)
1380{ 1466{
1381 if (ms->m_type != DLM_MSG_CONVERT_REPLY) {
1382 log_print("munge_demoted %x invalid reply type %d",
1383 lkb->lkb_id, ms->m_type);
1384 return;
1385 }
1386
1387 if (lkb->lkb_rqmode == DLM_LOCK_IV || lkb->lkb_grmode == DLM_LOCK_IV) { 1467 if (lkb->lkb_rqmode == DLM_LOCK_IV || lkb->lkb_grmode == DLM_LOCK_IV) {
1388 log_print("munge_demoted %x invalid modes gr %d rq %d", 1468 log_print("munge_demoted %x invalid modes gr %d rq %d",
1389 lkb->lkb_id, lkb->lkb_grmode, lkb->lkb_rqmode); 1469 lkb->lkb_id, lkb->lkb_grmode, lkb->lkb_rqmode);
@@ -2844,12 +2924,12 @@ static int send_common(struct dlm_rsb *r, struct dlm_lkb *lkb, int mstype)
2844 struct dlm_mhandle *mh; 2924 struct dlm_mhandle *mh;
2845 int to_nodeid, error; 2925 int to_nodeid, error;
2846 2926
2847 error = add_to_waiters(lkb, mstype); 2927 to_nodeid = r->res_nodeid;
2928
2929 error = add_to_waiters(lkb, mstype, to_nodeid);
2848 if (error) 2930 if (error)
2849 return error; 2931 return error;
2850 2932
2851 to_nodeid = r->res_nodeid;
2852
2853 error = create_message(r, lkb, to_nodeid, mstype, &ms, &mh); 2933 error = create_message(r, lkb, to_nodeid, mstype, &ms, &mh);
2854 if (error) 2934 if (error)
2855 goto fail; 2935 goto fail;
@@ -2880,9 +2960,9 @@ static int send_convert(struct dlm_rsb *r, struct dlm_lkb *lkb)
2880 /* down conversions go without a reply from the master */ 2960 /* down conversions go without a reply from the master */
2881 if (!error && down_conversion(lkb)) { 2961 if (!error && down_conversion(lkb)) {
2882 remove_from_waiters(lkb, DLM_MSG_CONVERT_REPLY); 2962 remove_from_waiters(lkb, DLM_MSG_CONVERT_REPLY);
2963 r->res_ls->ls_stub_ms.m_flags = DLM_IFL_STUB_MS;
2883 r->res_ls->ls_stub_ms.m_type = DLM_MSG_CONVERT_REPLY; 2964 r->res_ls->ls_stub_ms.m_type = DLM_MSG_CONVERT_REPLY;
2884 r->res_ls->ls_stub_ms.m_result = 0; 2965 r->res_ls->ls_stub_ms.m_result = 0;
2885 r->res_ls->ls_stub_ms.m_flags = lkb->lkb_flags;
2886 __receive_convert_reply(r, lkb, &r->res_ls->ls_stub_ms); 2966 __receive_convert_reply(r, lkb, &r->res_ls->ls_stub_ms);
2887 } 2967 }
2888 2968
@@ -2951,12 +3031,12 @@ static int send_lookup(struct dlm_rsb *r, struct dlm_lkb *lkb)
2951 struct dlm_mhandle *mh; 3031 struct dlm_mhandle *mh;
2952 int to_nodeid, error; 3032 int to_nodeid, error;
2953 3033
2954 error = add_to_waiters(lkb, DLM_MSG_LOOKUP); 3034 to_nodeid = dlm_dir_nodeid(r);
3035
3036 error = add_to_waiters(lkb, DLM_MSG_LOOKUP, to_nodeid);
2955 if (error) 3037 if (error)
2956 return error; 3038 return error;
2957 3039
2958 to_nodeid = dlm_dir_nodeid(r);
2959
2960 error = create_message(r, NULL, to_nodeid, DLM_MSG_LOOKUP, &ms, &mh); 3040 error = create_message(r, NULL, to_nodeid, DLM_MSG_LOOKUP, &ms, &mh);
2961 if (error) 3041 if (error)
2962 goto fail; 3042 goto fail;
@@ -3070,6 +3150,9 @@ static void receive_flags(struct dlm_lkb *lkb, struct dlm_message *ms)
3070 3150
3071static void receive_flags_reply(struct dlm_lkb *lkb, struct dlm_message *ms) 3151static void receive_flags_reply(struct dlm_lkb *lkb, struct dlm_message *ms)
3072{ 3152{
3153 if (ms->m_flags == DLM_IFL_STUB_MS)
3154 return;
3155
3073 lkb->lkb_sbflags = ms->m_sbflags; 3156 lkb->lkb_sbflags = ms->m_sbflags;
3074 lkb->lkb_flags = (lkb->lkb_flags & 0xFFFF0000) | 3157 lkb->lkb_flags = (lkb->lkb_flags & 0xFFFF0000) |
3075 (ms->m_flags & 0x0000FFFF); 3158 (ms->m_flags & 0x0000FFFF);
@@ -3612,7 +3695,7 @@ static void __receive_convert_reply(struct dlm_rsb *r, struct dlm_lkb *lkb,
3612 /* convert was queued on remote master */ 3695 /* convert was queued on remote master */
3613 receive_flags_reply(lkb, ms); 3696 receive_flags_reply(lkb, ms);
3614 if (is_demoted(lkb)) 3697 if (is_demoted(lkb))
3615 munge_demoted(lkb, ms); 3698 munge_demoted(lkb);
3616 del_lkb(r, lkb); 3699 del_lkb(r, lkb);
3617 add_lkb(r, lkb, DLM_LKSTS_CONVERT); 3700 add_lkb(r, lkb, DLM_LKSTS_CONVERT);
3618 add_timeout(lkb); 3701 add_timeout(lkb);
@@ -3622,7 +3705,7 @@ static void __receive_convert_reply(struct dlm_rsb *r, struct dlm_lkb *lkb,
3622 /* convert was granted on remote master */ 3705 /* convert was granted on remote master */
3623 receive_flags_reply(lkb, ms); 3706 receive_flags_reply(lkb, ms);
3624 if (is_demoted(lkb)) 3707 if (is_demoted(lkb))
3625 munge_demoted(lkb, ms); 3708 munge_demoted(lkb);
3626 grant_lock_pc(r, lkb, ms); 3709 grant_lock_pc(r, lkb, ms);
3627 queue_cast(r, lkb, 0); 3710 queue_cast(r, lkb, 0);
3628 break; 3711 break;
@@ -3996,15 +4079,17 @@ void dlm_receive_buffer(union dlm_packet *p, int nodeid)
3996 dlm_put_lockspace(ls); 4079 dlm_put_lockspace(ls);
3997} 4080}
3998 4081
3999static void recover_convert_waiter(struct dlm_ls *ls, struct dlm_lkb *lkb) 4082static void recover_convert_waiter(struct dlm_ls *ls, struct dlm_lkb *lkb,
4083 struct dlm_message *ms_stub)
4000{ 4084{
4001 if (middle_conversion(lkb)) { 4085 if (middle_conversion(lkb)) {
4002 hold_lkb(lkb); 4086 hold_lkb(lkb);
4003 ls->ls_stub_ms.m_type = DLM_MSG_CONVERT_REPLY; 4087 memset(ms_stub, 0, sizeof(struct dlm_message));
4004 ls->ls_stub_ms.m_result = -EINPROGRESS; 4088 ms_stub->m_flags = DLM_IFL_STUB_MS;
4005 ls->ls_stub_ms.m_flags = lkb->lkb_flags; 4089 ms_stub->m_type = DLM_MSG_CONVERT_REPLY;
4006 ls->ls_stub_ms.m_header.h_nodeid = lkb->lkb_nodeid; 4090 ms_stub->m_result = -EINPROGRESS;
4007 _receive_convert_reply(lkb, &ls->ls_stub_ms); 4091 ms_stub->m_header.h_nodeid = lkb->lkb_nodeid;
4092 _receive_convert_reply(lkb, ms_stub);
4008 4093
4009 /* Same special case as in receive_rcom_lock_args() */ 4094 /* Same special case as in receive_rcom_lock_args() */
4010 lkb->lkb_grmode = DLM_LOCK_IV; 4095 lkb->lkb_grmode = DLM_LOCK_IV;
@@ -4045,13 +4130,27 @@ static int waiter_needs_recovery(struct dlm_ls *ls, struct dlm_lkb *lkb)
4045void dlm_recover_waiters_pre(struct dlm_ls *ls) 4130void dlm_recover_waiters_pre(struct dlm_ls *ls)
4046{ 4131{
4047 struct dlm_lkb *lkb, *safe; 4132 struct dlm_lkb *lkb, *safe;
4133 struct dlm_message *ms_stub;
4048 int wait_type, stub_unlock_result, stub_cancel_result; 4134 int wait_type, stub_unlock_result, stub_cancel_result;
4049 4135
4136 ms_stub = kmalloc(sizeof(struct dlm_message), GFP_KERNEL);
4137 if (!ms_stub) {
4138 log_error(ls, "dlm_recover_waiters_pre no mem");
4139 return;
4140 }
4141
4050 mutex_lock(&ls->ls_waiters_mutex); 4142 mutex_lock(&ls->ls_waiters_mutex);
4051 4143
4052 list_for_each_entry_safe(lkb, safe, &ls->ls_waiters, lkb_wait_reply) { 4144 list_for_each_entry_safe(lkb, safe, &ls->ls_waiters, lkb_wait_reply) {
4053 log_debug(ls, "pre recover waiter lkid %x type %d flags %x", 4145
4054 lkb->lkb_id, lkb->lkb_wait_type, lkb->lkb_flags); 4146 /* exclude debug messages about unlocks because there can be so
4147 many and they aren't very interesting */
4148
4149 if (lkb->lkb_wait_type != DLM_MSG_UNLOCK) {
4150 log_debug(ls, "recover_waiter %x nodeid %d "
4151 "msg %d to %d", lkb->lkb_id, lkb->lkb_nodeid,
4152 lkb->lkb_wait_type, lkb->lkb_wait_nodeid);
4153 }
4055 4154
4056 /* all outstanding lookups, regardless of destination will be 4155 /* all outstanding lookups, regardless of destination will be
4057 resent after recovery is done */ 4156 resent after recovery is done */
@@ -4097,26 +4196,28 @@ void dlm_recover_waiters_pre(struct dlm_ls *ls)
4097 break; 4196 break;
4098 4197
4099 case DLM_MSG_CONVERT: 4198 case DLM_MSG_CONVERT:
4100 recover_convert_waiter(ls, lkb); 4199 recover_convert_waiter(ls, lkb, ms_stub);
4101 break; 4200 break;
4102 4201
4103 case DLM_MSG_UNLOCK: 4202 case DLM_MSG_UNLOCK:
4104 hold_lkb(lkb); 4203 hold_lkb(lkb);
4105 ls->ls_stub_ms.m_type = DLM_MSG_UNLOCK_REPLY; 4204 memset(ms_stub, 0, sizeof(struct dlm_message));
4106 ls->ls_stub_ms.m_result = stub_unlock_result; 4205 ms_stub->m_flags = DLM_IFL_STUB_MS;
4107 ls->ls_stub_ms.m_flags = lkb->lkb_flags; 4206 ms_stub->m_type = DLM_MSG_UNLOCK_REPLY;
4108 ls->ls_stub_ms.m_header.h_nodeid = lkb->lkb_nodeid; 4207 ms_stub->m_result = stub_unlock_result;
4109 _receive_unlock_reply(lkb, &ls->ls_stub_ms); 4208 ms_stub->m_header.h_nodeid = lkb->lkb_nodeid;
4209 _receive_unlock_reply(lkb, ms_stub);
4110 dlm_put_lkb(lkb); 4210 dlm_put_lkb(lkb);
4111 break; 4211 break;
4112 4212
4113 case DLM_MSG_CANCEL: 4213 case DLM_MSG_CANCEL:
4114 hold_lkb(lkb); 4214 hold_lkb(lkb);
4115 ls->ls_stub_ms.m_type = DLM_MSG_CANCEL_REPLY; 4215 memset(ms_stub, 0, sizeof(struct dlm_message));
4116 ls->ls_stub_ms.m_result = stub_cancel_result; 4216 ms_stub->m_flags = DLM_IFL_STUB_MS;
4117 ls->ls_stub_ms.m_flags = lkb->lkb_flags; 4217 ms_stub->m_type = DLM_MSG_CANCEL_REPLY;
4118 ls->ls_stub_ms.m_header.h_nodeid = lkb->lkb_nodeid; 4218 ms_stub->m_result = stub_cancel_result;
4119 _receive_cancel_reply(lkb, &ls->ls_stub_ms); 4219 ms_stub->m_header.h_nodeid = lkb->lkb_nodeid;
4220 _receive_cancel_reply(lkb, ms_stub);
4120 dlm_put_lkb(lkb); 4221 dlm_put_lkb(lkb);
4121 break; 4222 break;
4122 4223
@@ -4127,6 +4228,7 @@ void dlm_recover_waiters_pre(struct dlm_ls *ls)
4127 schedule(); 4228 schedule();
4128 } 4229 }
4129 mutex_unlock(&ls->ls_waiters_mutex); 4230 mutex_unlock(&ls->ls_waiters_mutex);
4231 kfree(ms_stub);
4130} 4232}
4131 4233
4132static struct dlm_lkb *find_resend_waiter(struct dlm_ls *ls) 4234static struct dlm_lkb *find_resend_waiter(struct dlm_ls *ls)
@@ -4191,8 +4293,8 @@ int dlm_recover_waiters_post(struct dlm_ls *ls)
4191 ou = is_overlap_unlock(lkb); 4293 ou = is_overlap_unlock(lkb);
4192 err = 0; 4294 err = 0;
4193 4295
4194 log_debug(ls, "recover_waiters_post %x type %d flags %x %s", 4296 log_debug(ls, "recover_waiter %x nodeid %d msg %d r_nodeid %d",
4195 lkb->lkb_id, mstype, lkb->lkb_flags, r->res_name); 4297 lkb->lkb_id, lkb->lkb_nodeid, mstype, r->res_nodeid);
4196 4298
4197 /* At this point we assume that we won't get a reply to any 4299 /* At this point we assume that we won't get a reply to any
4198 previous op or overlap op on this lock. First, do a big 4300 previous op or overlap op on this lock. First, do a big
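
Recovery used to fabricate replies in the shared ls_stub_ms and detect them by address (ms != &ls->ls_stub_ms); tagging the stub with DLM_IFL_STUB_MS makes it self-describing, so recover_waiters_pre can now build it in a per-call allocation. A sketch of the sentinel-flag idea with stand-in types:

#include <stdio.h>
#include <stdlib.h>

#define STUB_MS 0x02000000      /* sentinel, like DLM_IFL_STUB_MS */

struct message_sketch { unsigned int flags; int result; };

/* Detect a locally fabricated reply by a flag in the message itself,
 * not by comparing against the address of one shared static buffer. */
static void receive_reply(const struct message_sketch *ms)
{
        if (ms->flags == STUB_MS)
                printf("stub reply (result %d): skip waiter locking\n",
                       ms->result);
        else
                printf("real reply from the wire\n");
}

int main(void)
{
        struct message_sketch *stub = calloc(1, sizeof(*stub));

        if (!stub)
                return 1;
        stub->flags = STUB_MS;
        stub->result = -115;    /* -EINPROGRESS */
        receive_reply(stub);
        free(stub);
        return 0;
}
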
diff --git a/fs/dlm/lock.h b/fs/dlm/lock.h
index 88e93c80cc22..265017a7c3e7 100644
--- a/fs/dlm/lock.h
+++ b/fs/dlm/lock.h
@@ -24,6 +24,7 @@ int dlm_put_lkb(struct dlm_lkb *lkb);
24void dlm_scan_rsbs(struct dlm_ls *ls); 24void dlm_scan_rsbs(struct dlm_ls *ls);
25int dlm_lock_recovery_try(struct dlm_ls *ls); 25int dlm_lock_recovery_try(struct dlm_ls *ls);
26void dlm_unlock_recovery(struct dlm_ls *ls); 26void dlm_unlock_recovery(struct dlm_ls *ls);
27void dlm_scan_waiters(struct dlm_ls *ls);
27void dlm_scan_timeout(struct dlm_ls *ls); 28void dlm_scan_timeout(struct dlm_ls *ls);
28void dlm_adjust_timeouts(struct dlm_ls *ls); 29void dlm_adjust_timeouts(struct dlm_ls *ls);
29 30
diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c
index f994a7dfda85..14cbf4099753 100644
--- a/fs/dlm/lockspace.c
+++ b/fs/dlm/lockspace.c
@@ -243,7 +243,6 @@ static struct dlm_ls *find_ls_to_scan(void)
243static int dlm_scand(void *data) 243static int dlm_scand(void *data)
244{ 244{
245 struct dlm_ls *ls; 245 struct dlm_ls *ls;
246 int timeout_jiffies = dlm_config.ci_scan_secs * HZ;
247 246
248 while (!kthread_should_stop()) { 247 while (!kthread_should_stop()) {
249 ls = find_ls_to_scan(); 248 ls = find_ls_to_scan();
@@ -252,13 +251,14 @@ static int dlm_scand(void *data)
252 ls->ls_scan_time = jiffies; 251 ls->ls_scan_time = jiffies;
253 dlm_scan_rsbs(ls); 252 dlm_scan_rsbs(ls);
254 dlm_scan_timeout(ls); 253 dlm_scan_timeout(ls);
254 dlm_scan_waiters(ls);
255 dlm_unlock_recovery(ls); 255 dlm_unlock_recovery(ls);
256 } else { 256 } else {
257 ls->ls_scan_time += HZ; 257 ls->ls_scan_time += HZ;
258 } 258 }
259 } else { 259 continue;
260 schedule_timeout_interruptible(timeout_jiffies);
261 } 260 }
261 schedule_timeout_interruptible(dlm_config.ci_scan_secs * HZ);
262 } 262 }
263 return 0; 263 return 0;
264} 264}
diff --git a/fs/dlm/plock.c b/fs/dlm/plock.c
index 30d8b85febbf..e2b878004364 100644
--- a/fs/dlm/plock.c
+++ b/fs/dlm/plock.c
@@ -71,6 +71,36 @@ static void send_op(struct plock_op *op)
71 wake_up(&send_wq); 71 wake_up(&send_wq);
72} 72}
73 73
74/* If a process was killed while waiting for the only plock on a file,
75 locks_remove_posix will not see any lock on the file so it won't
76 send an unlock-close to us to pass on to userspace to clean up the
77 abandoned waiter. So, we have to insert the unlock-close when the
78 lock call is interrupted. */
79
80static void do_unlock_close(struct dlm_ls *ls, u64 number,
81 struct file *file, struct file_lock *fl)
82{
83 struct plock_op *op;
84
85 op = kzalloc(sizeof(*op), GFP_NOFS);
86 if (!op)
87 return;
88
89 op->info.optype = DLM_PLOCK_OP_UNLOCK;
90 op->info.pid = fl->fl_pid;
91 op->info.fsid = ls->ls_global_id;
92 op->info.number = number;
93 op->info.start = 0;
94 op->info.end = OFFSET_MAX;
95 if (fl->fl_lmops && fl->fl_lmops->fl_grant)
96 op->info.owner = (__u64) fl->fl_pid;
97 else
98 op->info.owner = (__u64)(long) fl->fl_owner;
99
100 op->info.flags |= DLM_PLOCK_FL_CLOSE;
101 send_op(op);
102}
103
74int dlm_posix_lock(dlm_lockspace_t *lockspace, u64 number, struct file *file, 104int dlm_posix_lock(dlm_lockspace_t *lockspace, u64 number, struct file *file,
75 int cmd, struct file_lock *fl) 105 int cmd, struct file_lock *fl)
76{ 106{
@@ -114,9 +144,19 @@ int dlm_posix_lock(dlm_lockspace_t *lockspace, u64 number, struct file *file,
114 144
115 send_op(op); 145 send_op(op);
116 146
117 if (xop->callback == NULL) 147 if (xop->callback == NULL) {
118 wait_event(recv_wq, (op->done != 0)); 148 rv = wait_event_killable(recv_wq, (op->done != 0));
119 else { 149 if (rv == -ERESTARTSYS) {
150 log_debug(ls, "dlm_posix_lock: wait killed %llx",
151 (unsigned long long)number);
152 spin_lock(&ops_lock);
153 list_del(&op->list);
154 spin_unlock(&ops_lock);
155 kfree(xop);
156 do_unlock_close(ls, number, file, fl);
157 goto out;
158 }
159 } else {
120 rv = FILE_LOCK_DEFERRED; 160 rv = FILE_LOCK_DEFERRED;
121 goto out; 161 goto out;
122 } 162 }
@@ -233,6 +273,13 @@ int dlm_posix_unlock(dlm_lockspace_t *lockspace, u64 number, struct file *file,
233 else 273 else
234 op->info.owner = (__u64)(long) fl->fl_owner; 274 op->info.owner = (__u64)(long) fl->fl_owner;
235 275
276 if (fl->fl_flags & FL_CLOSE) {
277 op->info.flags |= DLM_PLOCK_FL_CLOSE;
278 send_op(op);
279 rv = 0;
280 goto out;
281 }
282
236 send_op(op); 283 send_op(op);
237 wait_event(recv_wq, (op->done != 0)); 284 wait_event(recv_wq, (op->done != 0));
238 285
@@ -334,7 +381,10 @@ static ssize_t dev_read(struct file *file, char __user *u, size_t count,
334 spin_lock(&ops_lock); 381 spin_lock(&ops_lock);
335 if (!list_empty(&send_list)) { 382 if (!list_empty(&send_list)) {
336 op = list_entry(send_list.next, struct plock_op, list); 383 op = list_entry(send_list.next, struct plock_op, list);
337 list_move(&op->list, &recv_list); 384 if (op->info.flags & DLM_PLOCK_FL_CLOSE)
385 list_del(&op->list);
386 else
387 list_move(&op->list, &recv_list);
338 memcpy(&info, &op->info, sizeof(info)); 388 memcpy(&info, &op->info, sizeof(info));
339 } 389 }
340 spin_unlock(&ops_lock); 390 spin_unlock(&ops_lock);
@@ -342,6 +392,13 @@ static ssize_t dev_read(struct file *file, char __user *u, size_t count,
342 if (!op) 392 if (!op)
343 return -EAGAIN; 393 return -EAGAIN;
344 394
395 /* there is no need to get a reply from userspace for unlocks
396 that were generated by the vfs cleaning up for a close
397 (the process did not make an unlock call). */
398
399 if (op->info.flags & DLM_PLOCK_FL_CLOSE)
400 kfree(op);
401
345 if (copy_to_user(u, &info, sizeof(info))) 402 if (copy_to_user(u, &info, sizeof(info)))
346 return -EFAULT; 403 return -EFAULT;
347 return sizeof(info); 404 return sizeof(info);
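
Two cleanup paths are added here: a killed lock waiter takes its op back off the list and queues a compensating unlock-close, and unlocks generated by the VFS at close time carry DLM_PLOCK_FL_CLOSE so dev_read() consumes them instead of parking them on recv_list, since no reply will ever come back. A sketch of that fire-and-forget routing with simplified types:

#include <stdio.h>

#define FL_CLOSE 0x1    /* like DLM_PLOCK_FL_CLOSE */

struct op_sketch { unsigned int flags; const char *name; };

/* What dev_read() does with the next op on send_list. */
static void daemon_read(const struct op_sketch *op)
{
        if (op->flags & FL_CLOSE)
                printf("%s: consumed, no reply expected\n", op->name);
        else
                printf("%s: moved to recv_list to wait for a reply\n",
                       op->name);
}

int main(void)
{
        struct op_sketch lock_op  = { 0, "lock" };
        struct op_sketch close_op = { FL_CLOSE, "unlock-on-close" };

        daemon_read(&lock_op);
        daemon_read(&close_op);
        return 0;
}
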
diff --git a/fs/dlm/user.c b/fs/dlm/user.c
index d5ab3fe7c198..e96bf3e9be88 100644
--- a/fs/dlm/user.c
+++ b/fs/dlm/user.c
@@ -611,7 +611,6 @@ static ssize_t device_write(struct file *file, const char __user *buf,
611 611
612 out_sig: 612 out_sig:
613 sigprocmask(SIG_SETMASK, &tmpsig, NULL); 613 sigprocmask(SIG_SETMASK, &tmpsig, NULL);
614 recalc_sigpending();
615 out_free: 614 out_free:
616 kfree(kbuf); 615 kfree(kbuf);
617 return error; 616 return error;
diff --git a/fs/ext2/super.c b/fs/ext2/super.c
index 0a78dae7e2cb..1dd62ed35b85 100644
--- a/fs/ext2/super.c
+++ b/fs/ext2/super.c
@@ -898,7 +898,8 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
898 brelse(bh); 898 brelse(bh);
899 899
900 if (!sb_set_blocksize(sb, blocksize)) { 900 if (!sb_set_blocksize(sb, blocksize)) {
901 ext2_msg(sb, KERN_ERR, "error: blocksize is too small"); 901 ext2_msg(sb, KERN_ERR,
902 "error: bad blocksize %d", blocksize);
902 goto failed_sbi; 903 goto failed_sbi;
903 } 904 }
904 905
diff --git a/fs/ext3/namei.c b/fs/ext3/namei.c
index 32f3b8695859..34b6d9bfc48a 100644
--- a/fs/ext3/namei.c
+++ b/fs/ext3/namei.c
@@ -1416,10 +1416,19 @@ static int make_indexed_dir(handle_t *handle, struct dentry *dentry,
1416 frame->at = entries; 1416 frame->at = entries;
1417 frame->bh = bh; 1417 frame->bh = bh;
1418 bh = bh2; 1418 bh = bh2;
1419 /*
1420 * Mark buffers dirty here so that if do_split() fails we write a
1421 * consistent set of buffers to disk.
1422 */
1423 ext3_journal_dirty_metadata(handle, frame->bh);
1424 ext3_journal_dirty_metadata(handle, bh);
1419 de = do_split(handle,dir, &bh, frame, &hinfo, &retval); 1425 de = do_split(handle,dir, &bh, frame, &hinfo, &retval);
1420 dx_release (frames); 1426 if (!de) {
1421 if (!(de)) 1427 ext3_mark_inode_dirty(handle, dir);
1428 dx_release(frames);
1422 return retval; 1429 return retval;
1430 }
1431 dx_release(frames);
1423 1432
1424 return add_dirent_to_buf(handle, dentry, inode, de, bh); 1433 return add_dirent_to_buf(handle, dentry, inode, de, bh);
1425} 1434}
@@ -2189,6 +2198,7 @@ static int ext3_symlink (struct inode * dir,
2189 handle_t *handle; 2198 handle_t *handle;
2190 struct inode * inode; 2199 struct inode * inode;
2191 int l, err, retries = 0; 2200 int l, err, retries = 0;
2201 int credits;
2192 2202
2193 l = strlen(symname)+1; 2203 l = strlen(symname)+1;
2194 if (l > dir->i_sb->s_blocksize) 2204 if (l > dir->i_sb->s_blocksize)
@@ -2196,10 +2206,26 @@ static int ext3_symlink (struct inode * dir,
2196 2206
2197 dquot_initialize(dir); 2207 dquot_initialize(dir);
2198 2208
2209 if (l > EXT3_N_BLOCKS * 4) {
2210 /*
2211 * For non-fast symlinks, we just allocate inode and put it on
2212 * orphan list in the first transaction => we need bitmap,
2213 * group descriptor, sb, inode block, quota blocks.
2214 */
2215 credits = 4 + EXT3_MAXQUOTAS_INIT_BLOCKS(dir->i_sb);
2216 } else {
2217 /*
2218 * Fast symlink. We have to add entry to directory
2219 * (EXT3_DATA_TRANS_BLOCKS + EXT3_INDEX_EXTRA_TRANS_BLOCKS),
2220 * allocate new inode (bitmap, group descriptor, inode block,
2221 * quota blocks, sb is already counted in previous macros).
2222 */
2223 credits = EXT3_DATA_TRANS_BLOCKS(dir->i_sb) +
2224 EXT3_INDEX_EXTRA_TRANS_BLOCKS + 3 +
2225 EXT3_MAXQUOTAS_INIT_BLOCKS(dir->i_sb);
2226 }
2199retry: 2227retry:
2200 handle = ext3_journal_start(dir, EXT3_DATA_TRANS_BLOCKS(dir->i_sb) + 2228 handle = ext3_journal_start(dir, credits);
2201 EXT3_INDEX_EXTRA_TRANS_BLOCKS + 5 +
2202 EXT3_MAXQUOTAS_INIT_BLOCKS(dir->i_sb));
2203 if (IS_ERR(handle)) 2229 if (IS_ERR(handle))
2204 return PTR_ERR(handle); 2230 return PTR_ERR(handle);
2205 2231
@@ -2211,21 +2237,45 @@ retry:
2211 if (IS_ERR(inode)) 2237 if (IS_ERR(inode))
2212 goto out_stop; 2238 goto out_stop;
2213 2239
2214 if (l > sizeof (EXT3_I(inode)->i_data)) { 2240 if (l > EXT3_N_BLOCKS * 4) {
2215 inode->i_op = &ext3_symlink_inode_operations; 2241 inode->i_op = &ext3_symlink_inode_operations;
2216 ext3_set_aops(inode); 2242 ext3_set_aops(inode);
2217 /* 2243 /*
2218 * page_symlink() calls into ext3_prepare/commit_write. 2244 * We cannot call page_symlink() with transaction started
2219 * We have a transaction open. All is sweetness. It also sets 2245 * because it calls into ext3_write_begin() which acquires page
2220 * i_size in generic_commit_write(). 2246 * lock which ranks below transaction start (and it can also
2247 * wait for journal commit if we are running out of space). So
2248 * we have to stop the transaction now and restart it when the
2249 * symlink contents are written.
2250 *
2251 * To keep the fs consistent in case of a crash, we have to put
2252 * the inode on the orphan list in the meantime.
2221 */ 2253 */
2254 drop_nlink(inode);
2255 err = ext3_orphan_add(handle, inode);
2256 ext3_journal_stop(handle);
2257 if (err)
2258 goto err_drop_inode;
2222 err = __page_symlink(inode, symname, l, 1); 2259 err = __page_symlink(inode, symname, l, 1);
2260 if (err)
2261 goto err_drop_inode;
2262 /*
2263 * Now inode is being linked into dir (EXT3_DATA_TRANS_BLOCKS
2264 * + EXT3_INDEX_EXTRA_TRANS_BLOCKS), inode is also modified
2265 */
2266 handle = ext3_journal_start(dir,
2267 EXT3_DATA_TRANS_BLOCKS(dir->i_sb) +
2268 EXT3_INDEX_EXTRA_TRANS_BLOCKS + 1);
2269 if (IS_ERR(handle)) {
2270 err = PTR_ERR(handle);
2271 goto err_drop_inode;
2272 }
2273 inc_nlink(inode);
2274 err = ext3_orphan_del(handle, inode);
2223 if (err) { 2275 if (err) {
2276 ext3_journal_stop(handle);
2224 drop_nlink(inode); 2277 drop_nlink(inode);
2225 unlock_new_inode(inode); 2278 goto err_drop_inode;
2226 ext3_mark_inode_dirty(handle, inode);
2227 iput (inode);
2228 goto out_stop;
2229 } 2279 }
2230 } else { 2280 } else {
2231 inode->i_op = &ext3_fast_symlink_inode_operations; 2281 inode->i_op = &ext3_fast_symlink_inode_operations;
@@ -2239,6 +2289,10 @@ out_stop:
2239 if (err == -ENOSPC && ext3_should_retry_alloc(dir->i_sb, &retries)) 2289 if (err == -ENOSPC && ext3_should_retry_alloc(dir->i_sb, &retries))
2240 goto retry; 2290 goto retry;
2241 return err; 2291 return err;
2292err_drop_inode:
2293 unlock_new_inode(inode);
2294 iput(inode);
2295 return err;
2242} 2296}
2243 2297
2244static int ext3_link (struct dentry * old_dentry, 2298static int ext3_link (struct dentry * old_dentry,
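
The reordered long-symlink flow is: a small transaction to allocate the inode and put it on the orphan list, stop the transaction, write the symlink body (which takes the page lock and may block on journal space), then a second transaction to take it off the orphan list and link it into the directory. A stubbed sketch of that ordering; every helper below is a stand-in for the real ext3/jbd call:

#include <stdio.h>

static void journal_start(const char *why) { printf("journal_start: %s\n", why); }
static void journal_stop(void)             { puts("journal_stop"); }
static void orphan_add(void)               { puts("orphan_add (crash-safe)"); }
static void orphan_del(void)               { puts("orphan_del (fully linked)"); }
static int  page_symlink(void)             { puts("page_symlink (page lock)"); return 0; }

int main(void)
{
        journal_start("allocate inode");
        orphan_add();           /* reachable for cleanup after a crash */
        journal_stop();         /* drop the handle before the page lock */

        if (page_symlink())     /* may block; no transaction held here */
                return 1;

        journal_start("add directory entry");
        orphan_del();
        journal_stop();
        return 0;
}
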
diff --git a/fs/jbd/commit.c b/fs/jbd/commit.c
index 69b180459463..72ffa974b0b8 100644
--- a/fs/jbd/commit.c
+++ b/fs/jbd/commit.c
@@ -302,12 +302,6 @@ void journal_commit_transaction(journal_t *journal)
302 * all outstanding updates to complete. 302 * all outstanding updates to complete.
303 */ 303 */
304 304
305#ifdef COMMIT_STATS
306 spin_lock(&journal->j_list_lock);
307 summarise_journal_usage(journal);
308 spin_unlock(&journal->j_list_lock);
309#endif
310
311 /* Do we need to erase the effects of a prior journal_flush? */ 305 /* Do we need to erase the effects of a prior journal_flush? */
312 if (journal->j_flags & JFS_FLUSHED) { 306 if (journal->j_flags & JFS_FLUSHED) {
313 jbd_debug(3, "super block updated\n"); 307 jbd_debug(3, "super block updated\n");
@@ -722,8 +716,13 @@ wait_for_iobuf:
722 required. */ 716 required. */
723 JBUFFER_TRACE(jh, "file as BJ_Forget"); 717 JBUFFER_TRACE(jh, "file as BJ_Forget");
724 journal_file_buffer(jh, commit_transaction, BJ_Forget); 718 journal_file_buffer(jh, commit_transaction, BJ_Forget);
725 /* Wake up any transactions which were waiting for this 719 /*
726 IO to complete */ 720 * Wake up any transactions which were waiting for this
721 * IO to complete. The barrier must be here so that changes
722 * by journal_file_buffer() take effect before wake_up_bit()
723 * does the waitqueue check.
724 */
725 smp_mb();
727 wake_up_bit(&bh->b_state, BH_Unshadow); 726 wake_up_bit(&bh->b_state, BH_Unshadow);
728 JBUFFER_TRACE(jh, "brelse shadowed buffer"); 727 JBUFFER_TRACE(jh, "brelse shadowed buffer");
729 __brelse(bh); 728 __brelse(bh);
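
wake_up_bit() peeks at the waitqueue without taking its lock, so the waker must make the BJ_Forget state change globally visible before that peek; otherwise a task that sampled the old state and went to sleep misses its wakeup. The waker-side ordering in a C11-atomics sketch, with the fence playing the role of smp_mb():

#include <stdatomic.h>
#include <stdio.h>

static atomic_int state;        /* the condition waiters test */
static atomic_int waiters;      /* nonzero if anyone may be sleeping */

static void waker(void)
{
        atomic_store_explicit(&state, 1, memory_order_relaxed);
        atomic_thread_fence(memory_order_seq_cst);      /* the smp_mb() */
        if (atomic_load_explicit(&waiters, memory_order_relaxed))
                printf("wake sleepers: new state is visible to them\n");
}

int main(void)
{
        atomic_store(&waiters, 1);
        waker();
        return 0;
}
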
diff --git a/fs/jbd/journal.c b/fs/jbd/journal.c
index b3713afaaa9e..e2d4285fbe90 100644
--- a/fs/jbd/journal.c
+++ b/fs/jbd/journal.c
@@ -437,9 +437,12 @@ int __log_space_left(journal_t *journal)
437int __log_start_commit(journal_t *journal, tid_t target) 437int __log_start_commit(journal_t *journal, tid_t target)
438{ 438{
439 /* 439 /*
440 * Are we already doing a recent enough commit? 440 * The only transaction we can possibly wait upon is the
441 * currently running transaction (if it exists). Otherwise,
442 * the target tid must be an old one.
441 */ 443 */
442 if (!tid_geq(journal->j_commit_request, target)) { 444 if (journal->j_running_transaction &&
445 journal->j_running_transaction->t_tid == target) {
443 /* 446 /*
444 * We want a new commit: OK, mark the request and wakeup the 447 * We want a new commit: OK, mark the request and wakeup the
445 * commit thread. We do _not_ do the commit ourselves. 448 * commit thread. We do _not_ do the commit ourselves.
@@ -451,7 +454,14 @@ int __log_start_commit(journal_t *journal, tid_t target)
451 journal->j_commit_sequence); 454 journal->j_commit_sequence);
452 wake_up(&journal->j_wait_commit); 455 wake_up(&journal->j_wait_commit);
453 return 1; 456 return 1;
454 } 457 } else if (!tid_geq(journal->j_commit_request, target))
458 /* This should never happen, but if it does, preserve
459 the evidence before kjournald goes into a loop and
460 increments j_commit_sequence beyond all recognition. */
461 WARN_ONCE(1, "jbd: bad log_start_commit: %u %u %u %u\n",
462 journal->j_commit_request, journal->j_commit_sequence,
463 target, journal->j_running_transaction ?
464 journal->j_running_transaction->t_tid : 0);
455 return 0; 465 return 0;
456} 466}
457 467
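
The rewritten check keys off the running transaction's tid, and the WARN_ONCE preserves evidence if a caller ever passes a tid that is neither running nor already committed. Transaction IDs wrap, which is why jbd orders them with signed subtraction; a runnable sketch of tid_geq():

#include <stdio.h>

typedef unsigned int tid_t;

/* Wrap-safe ordering, as in jbd's tid_geq(). */
static int tid_geq(tid_t x, tid_t y)
{
        int difference = (int)(x - y);

        return difference >= 0;
}

int main(void)
{
        printf("%d\n", tid_geq(5, 3));            /* 1 */
        printf("%d\n", tid_geq(3, 5));            /* 0 */
        printf("%d\n", tid_geq(2, 0xfffffffeu));  /* 1: wrapped past 0 */
        return 0;
}
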
diff --git a/fs/jbd/transaction.c b/fs/jbd/transaction.c
index 60d2319651b2..f7ee81a065da 100644
--- a/fs/jbd/transaction.c
+++ b/fs/jbd/transaction.c
@@ -266,7 +266,8 @@ static handle_t *new_handle(int nblocks)
266 * This function is visible to journal users (like ext3fs), so is not 266 * This function is visible to journal users (like ext3fs), so is not
267 * called with the journal already locked. 267 * called with the journal already locked.
268 * 268 *
269 * Return a pointer to a newly allocated handle, or NULL on failure 269 * Return a pointer to a newly allocated handle, or an ERR_PTR() value
270 * on failure.
270 */ 271 */
271handle_t *journal_start(journal_t *journal, int nblocks) 272handle_t *journal_start(journal_t *journal, int nblocks)
272{ 273{
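
The docstring fix is worth a note for callers: journal_start() reports failure through ERR_PTR(), never NULL, so an 'if (!handle)' check silently passes on error. A userspace re-creation of the ERR_PTR/IS_ERR convention:

#include <stdio.h>

#define MAX_ERRNO 4095

static void *ERR_PTR(long error)      { return (void *)error; }
static long  PTR_ERR(const void *ptr) { return (long)ptr; }
static int   IS_ERR(const void *ptr)
{
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
        void *handle = ERR_PTR(-12);  /* as if journal_start() saw -ENOMEM */

        if (IS_ERR(handle))
                printf("journal_start failed: %ld\n", PTR_ERR(handle));
        return 0;
}
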
diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
index 6e28000a4b21..29148a81c783 100644
--- a/fs/jbd2/commit.c
+++ b/fs/jbd2/commit.c
@@ -338,12 +338,6 @@ void jbd2_journal_commit_transaction(journal_t *journal)
338 * all outstanding updates to complete. 338 * all outstanding updates to complete.
339 */ 339 */
340 340
341#ifdef COMMIT_STATS
342 spin_lock(&journal->j_list_lock);
343 summarise_journal_usage(journal);
344 spin_unlock(&journal->j_list_lock);
345#endif
346
347 /* Do we need to erase the effects of a prior jbd2_journal_flush? */ 341 /* Do we need to erase the effects of a prior jbd2_journal_flush? */
348 if (journal->j_flags & JBD2_FLUSHED) { 342 if (journal->j_flags & JBD2_FLUSHED) {
349 jbd_debug(3, "super block updated\n"); 343 jbd_debug(3, "super block updated\n");
diff --git a/fs/ocfs2/Makefile b/fs/ocfs2/Makefile
index d8a0313e99e6..f17e58b32989 100644
--- a/fs/ocfs2/Makefile
+++ b/fs/ocfs2/Makefile
@@ -30,6 +30,7 @@ ocfs2-objs := \
30 namei.o \ 30 namei.o \
31 refcounttree.o \ 31 refcounttree.o \
32 reservations.o \ 32 reservations.o \
33 move_extents.o \
33 resize.o \ 34 resize.o \
34 slot_map.o \ 35 slot_map.o \
35 suballoc.o \ 36 suballoc.o \
diff --git a/fs/ocfs2/ioctl.c b/fs/ocfs2/ioctl.c
index 312a28f433a4..bc91072b7219 100644
--- a/fs/ocfs2/ioctl.c
+++ b/fs/ocfs2/ioctl.c
@@ -22,6 +22,11 @@
22#include "ioctl.h" 22#include "ioctl.h"
23#include "resize.h" 23#include "resize.h"
24#include "refcounttree.h" 24#include "refcounttree.h"
25#include "sysfile.h"
26#include "dir.h"
27#include "buffer_head_io.h"
28#include "suballoc.h"
29#include "move_extents.h"
25 30
26#include <linux/ext2_fs.h> 31#include <linux/ext2_fs.h>
27 32
@@ -35,31 +40,27 @@
35 * be -EFAULT. The error will be returned from the ioctl(2) call. It's 40 * be -EFAULT. The error will be returned from the ioctl(2) call. It's
36 * just a best-effort to tell userspace that this request caused the error. 41 * just a best-effort to tell userspace that this request caused the error.
37 */ 42 */
38static inline void __o2info_set_request_error(struct ocfs2_info_request *kreq, 43static inline void o2info_set_request_error(struct ocfs2_info_request *kreq,
39 struct ocfs2_info_request __user *req) 44 struct ocfs2_info_request __user *req)
40{ 45{
41 kreq->ir_flags |= OCFS2_INFO_FL_ERROR; 46 kreq->ir_flags |= OCFS2_INFO_FL_ERROR;
42 (void)put_user(kreq->ir_flags, (__u32 __user *)&(req->ir_flags)); 47 (void)put_user(kreq->ir_flags, (__u32 __user *)&(req->ir_flags));
43} 48}
44 49
45#define o2info_set_request_error(a, b) \ 50static inline void o2info_set_request_filled(struct ocfs2_info_request *req)
46 __o2info_set_request_error((struct ocfs2_info_request *)&(a), b)
47
48static inline void __o2info_set_request_filled(struct ocfs2_info_request *req)
49{ 51{
50 req->ir_flags |= OCFS2_INFO_FL_FILLED; 52 req->ir_flags |= OCFS2_INFO_FL_FILLED;
51} 53}
52 54
53#define o2info_set_request_filled(a) \ 55static inline void o2info_clear_request_filled(struct ocfs2_info_request *req)
54 __o2info_set_request_filled((struct ocfs2_info_request *)&(a))
55
56static inline void __o2info_clear_request_filled(struct ocfs2_info_request *req)
57{ 56{
58 req->ir_flags &= ~OCFS2_INFO_FL_FILLED; 57 req->ir_flags &= ~OCFS2_INFO_FL_FILLED;
59} 58}
60 59
61#define o2info_clear_request_filled(a) \ 60static inline int o2info_coherent(struct ocfs2_info_request *req)
62 __o2info_clear_request_filled((struct ocfs2_info_request *)&(a)) 61{
62 return (!(req->ir_flags & OCFS2_INFO_FL_NON_COHERENT));
63}
63 64
64static int ocfs2_get_inode_attr(struct inode *inode, unsigned *flags) 65static int ocfs2_get_inode_attr(struct inode *inode, unsigned *flags)
65{ 66{
@@ -153,7 +154,7 @@ int ocfs2_info_handle_blocksize(struct inode *inode,
153 154
154 oib.ib_blocksize = inode->i_sb->s_blocksize; 155 oib.ib_blocksize = inode->i_sb->s_blocksize;
155 156
156 o2info_set_request_filled(oib); 157 o2info_set_request_filled(&oib.ib_req);
157 158
158 if (o2info_to_user(oib, req)) 159 if (o2info_to_user(oib, req))
159 goto bail; 160 goto bail;
@@ -161,7 +162,7 @@ int ocfs2_info_handle_blocksize(struct inode *inode,
161 status = 0; 162 status = 0;
162bail: 163bail:
163 if (status) 164 if (status)
164 o2info_set_request_error(oib, req); 165 o2info_set_request_error(&oib.ib_req, req);
165 166
166 return status; 167 return status;
167} 168}
@@ -178,7 +179,7 @@ int ocfs2_info_handle_clustersize(struct inode *inode,
178 179
179 oic.ic_clustersize = osb->s_clustersize; 180 oic.ic_clustersize = osb->s_clustersize;
180 181
181 o2info_set_request_filled(oic); 182 o2info_set_request_filled(&oic.ic_req);
182 183
183 if (o2info_to_user(oic, req)) 184 if (o2info_to_user(oic, req))
184 goto bail; 185 goto bail;
@@ -186,7 +187,7 @@ int ocfs2_info_handle_clustersize(struct inode *inode,
186 status = 0; 187 status = 0;
187bail: 188bail:
188 if (status) 189 if (status)
189 o2info_set_request_error(oic, req); 190 o2info_set_request_error(&oic.ic_req, req);
190 191
191 return status; 192 return status;
192} 193}
@@ -203,7 +204,7 @@ int ocfs2_info_handle_maxslots(struct inode *inode,
203 204
204 oim.im_max_slots = osb->max_slots; 205 oim.im_max_slots = osb->max_slots;
205 206
206 o2info_set_request_filled(oim); 207 o2info_set_request_filled(&oim.im_req);
207 208
208 if (o2info_to_user(oim, req)) 209 if (o2info_to_user(oim, req))
209 goto bail; 210 goto bail;
@@ -211,7 +212,7 @@ int ocfs2_info_handle_maxslots(struct inode *inode,
211 status = 0; 212 status = 0;
212bail: 213bail:
213 if (status) 214 if (status)
214 o2info_set_request_error(oim, req); 215 o2info_set_request_error(&oim.im_req, req);
215 216
216 return status; 217 return status;
217} 218}
@@ -228,7 +229,7 @@ int ocfs2_info_handle_label(struct inode *inode,
228 229
229 memcpy(oil.il_label, osb->vol_label, OCFS2_MAX_VOL_LABEL_LEN); 230 memcpy(oil.il_label, osb->vol_label, OCFS2_MAX_VOL_LABEL_LEN);
230 231
231 o2info_set_request_filled(oil); 232 o2info_set_request_filled(&oil.il_req);
232 233
233 if (o2info_to_user(oil, req)) 234 if (o2info_to_user(oil, req))
234 goto bail; 235 goto bail;
@@ -236,7 +237,7 @@ int ocfs2_info_handle_label(struct inode *inode,
236 status = 0; 237 status = 0;
237bail: 238bail:
238 if (status) 239 if (status)
239 o2info_set_request_error(oil, req); 240 o2info_set_request_error(&oil.il_req, req);
240 241
241 return status; 242 return status;
242} 243}
@@ -253,7 +254,7 @@ int ocfs2_info_handle_uuid(struct inode *inode,
253 254
254 memcpy(oiu.iu_uuid_str, osb->uuid_str, OCFS2_TEXT_UUID_LEN + 1); 255 memcpy(oiu.iu_uuid_str, osb->uuid_str, OCFS2_TEXT_UUID_LEN + 1);
255 256
256 o2info_set_request_filled(oiu); 257 o2info_set_request_filled(&oiu.iu_req);
257 258
258 if (o2info_to_user(oiu, req)) 259 if (o2info_to_user(oiu, req))
259 goto bail; 260 goto bail;
@@ -261,7 +262,7 @@ int ocfs2_info_handle_uuid(struct inode *inode,
261 status = 0; 262 status = 0;
262bail: 263bail:
263 if (status) 264 if (status)
264 o2info_set_request_error(oiu, req); 265 o2info_set_request_error(&oiu.iu_req, req);
265 266
266 return status; 267 return status;
267} 268}
@@ -280,7 +281,7 @@ int ocfs2_info_handle_fs_features(struct inode *inode,
280 oif.if_incompat_features = osb->s_feature_incompat; 281 oif.if_incompat_features = osb->s_feature_incompat;
281 oif.if_ro_compat_features = osb->s_feature_ro_compat; 282 oif.if_ro_compat_features = osb->s_feature_ro_compat;
282 283
283 o2info_set_request_filled(oif); 284 o2info_set_request_filled(&oif.if_req);
284 285
285 if (o2info_to_user(oif, req)) 286 if (o2info_to_user(oif, req))
286 goto bail; 287 goto bail;
@@ -288,7 +289,7 @@ int ocfs2_info_handle_fs_features(struct inode *inode,
288 status = 0; 289 status = 0;
289bail: 290bail:
290 if (status) 291 if (status)
291 o2info_set_request_error(oif, req); 292 o2info_set_request_error(&oif.if_req, req);
292 293
293 return status; 294 return status;
294} 295}
@@ -305,7 +306,7 @@ int ocfs2_info_handle_journal_size(struct inode *inode,
305 306
306 oij.ij_journal_size = osb->journal->j_inode->i_size; 307 oij.ij_journal_size = osb->journal->j_inode->i_size;
307 308
308 o2info_set_request_filled(oij); 309 o2info_set_request_filled(&oij.ij_req);
309 310
310 if (o2info_to_user(oij, req)) 311 if (o2info_to_user(oij, req))
311 goto bail; 312 goto bail;
@@ -313,7 +314,408 @@ int ocfs2_info_handle_journal_size(struct inode *inode,
313 status = 0; 314 status = 0;
314bail: 315bail:
315 if (status) 316 if (status)
316 o2info_set_request_error(oij, req); 317 o2info_set_request_error(&oij.ij_req, req);
318
319 return status;
320}
321
322int ocfs2_info_scan_inode_alloc(struct ocfs2_super *osb,
323 struct inode *inode_alloc, u64 blkno,
324 struct ocfs2_info_freeinode *fi, u32 slot)
325{
326 int status = 0, unlock = 0;
327
328 struct buffer_head *bh = NULL;
329 struct ocfs2_dinode *dinode_alloc = NULL;
330
331 if (inode_alloc)
332 mutex_lock(&inode_alloc->i_mutex);
333
334 if (o2info_coherent(&fi->ifi_req)) {
335 status = ocfs2_inode_lock(inode_alloc, &bh, 0);
336 if (status < 0) {
337 mlog_errno(status);
338 goto bail;
339 }
340 unlock = 1;
341 } else {
342 status = ocfs2_read_blocks_sync(osb, blkno, 1, &bh);
343 if (status < 0) {
344 mlog_errno(status);
345 goto bail;
346 }
347 }
348
349 dinode_alloc = (struct ocfs2_dinode *)bh->b_data;
350
351 fi->ifi_stat[slot].lfi_total =
352 le32_to_cpu(dinode_alloc->id1.bitmap1.i_total);
353 fi->ifi_stat[slot].lfi_free =
354 le32_to_cpu(dinode_alloc->id1.bitmap1.i_total) -
355 le32_to_cpu(dinode_alloc->id1.bitmap1.i_used);
356
357bail:
358 if (unlock)
359 ocfs2_inode_unlock(inode_alloc, 0);
360
361 if (inode_alloc)
362 mutex_unlock(&inode_alloc->i_mutex);
363
364 brelse(bh);
365
366 return status;
367}
368
369int ocfs2_info_handle_freeinode(struct inode *inode,
370 struct ocfs2_info_request __user *req)
371{
372 u32 i;
373 u64 blkno = -1;
374 char namebuf[40];
375 int status = -EFAULT, type = INODE_ALLOC_SYSTEM_INODE;
376 struct ocfs2_info_freeinode *oifi = NULL;
377 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
378 struct inode *inode_alloc = NULL;
379
380 oifi = kzalloc(sizeof(struct ocfs2_info_freeinode), GFP_KERNEL);
381 if (!oifi) {
382 status = -ENOMEM;
383 mlog_errno(status);
384 goto bail;
385 }
386
387 if (o2info_from_user(*oifi, req))
388 goto bail;
389
390 oifi->ifi_slotnum = osb->max_slots;
391
392 for (i = 0; i < oifi->ifi_slotnum; i++) {
393 if (o2info_coherent(&oifi->ifi_req)) {
394 inode_alloc = ocfs2_get_system_file_inode(osb, type, i);
395 if (!inode_alloc) {
396 mlog(ML_ERROR, "unable to get alloc inode in "
397 "slot %u\n", i);
398 status = -EIO;
399 goto bail;
400 }
401 } else {
402 ocfs2_sprintf_system_inode_name(namebuf,
403 sizeof(namebuf),
404 type, i);
405 status = ocfs2_lookup_ino_from_name(osb->sys_root_inode,
406 namebuf,
407 strlen(namebuf),
408 &blkno);
409 if (status < 0) {
410 status = -ENOENT;
411 goto bail;
412 }
413 }
414
415 status = ocfs2_info_scan_inode_alloc(osb, inode_alloc, blkno, oifi, i);
416 if (status < 0)
417 goto bail;
418
419 iput(inode_alloc);
420 inode_alloc = NULL;
421 }
422
423 o2info_set_request_filled(&oifi->ifi_req);
424
425 if (o2info_to_user(*oifi, req))
426 goto bail;
427
428 status = 0;
429bail:
430 if (status)
431 o2info_set_request_error(&oifi->ifi_req, req);
432
433 kfree(oifi);
434
435 return status;
436}
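/* An illustrative userspace sketch of driving the handler above through
 * OCFS2_IOC_INFO; field names follow ocfs2_ioctl.h, 'fd' is an open
 * descriptor on the filesystem, and error handling is trimmed, so treat
 * this as an assumption-laden example rather than ABI documentation: */
struct ocfs2_info_freeinode ifi = {0};
__u64 reqs[1];
struct ocfs2_info info = {0};

ifi.ifi_req.ir_magic = OCFS2_INFO_MAGIC;
ifi.ifi_req.ir_code  = OCFS2_INFO_FREEINODE;
ifi.ifi_req.ir_size  = sizeof(ifi);

reqs[0] = (unsigned long)&ifi;
info.oi_requests = (unsigned long)reqs;
info.oi_count    = 1;

if (!ioctl(fd, OCFS2_IOC_INFO, &info))
	printf("slot 0 free inodes: %llu\n",
	       (unsigned long long)ifi.ifi_stat[0].lfi_free);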
437
438static void o2ffg_update_histogram(struct ocfs2_info_free_chunk_list *hist,
439 unsigned int chunksize)
440{
441 int index;
442
443 index = __ilog2_u32(chunksize);
444 if (index >= OCFS2_INFO_MAX_HIST)
445 index = OCFS2_INFO_MAX_HIST - 1;
446
447 hist->fc_chunks[index]++;
448 hist->fc_clusters[index] += chunksize;
449}
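/* A worked sketch of the bucketing above: bucket i collects chunks whose
 * size in clusters lies in [2^i, 2^(i+1)), and anything past the last
 * bin saturates into bucket OCFS2_INFO_MAX_HIST - 1.  For example: */
int index;

index = __ilog2_u32(1);		/* ->  0, bin [1, 2)       */
index = __ilog2_u32(24);	/* ->  4, bin [16, 32)     */
index = __ilog2_u32(5000);	/* -> 12, bin [4096, 8192) */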
450
451static void o2ffg_update_stats(struct ocfs2_info_freefrag_stats *stats,
452 unsigned int chunksize)
453{
454 if (chunksize > stats->ffs_max)
455 stats->ffs_max = chunksize;
456
457 if (chunksize < stats->ffs_min)
458 stats->ffs_min = chunksize;
459
460 stats->ffs_avg += chunksize;
461 stats->ffs_free_chunks_real++;
462}
463
464void ocfs2_info_update_ffg(struct ocfs2_info_freefrag *ffg,
465 unsigned int chunksize)
466{
467 o2ffg_update_histogram(&(ffg->iff_ffs.ffs_fc_hist), chunksize);
468 o2ffg_update_stats(&(ffg->iff_ffs), chunksize);
469}
470
471int ocfs2_info_freefrag_scan_chain(struct ocfs2_super *osb,
472 struct inode *gb_inode,
473 struct ocfs2_dinode *gb_dinode,
474 struct ocfs2_chain_rec *rec,
475 struct ocfs2_info_freefrag *ffg,
476 u32 chunks_in_group)
477{
478 int status = 0, used;
479 u64 blkno;
480
481 struct buffer_head *bh = NULL;
482 struct ocfs2_group_desc *bg = NULL;
483
484 unsigned int max_bits, num_clusters;
485 unsigned int offset = 0, cluster, chunk;
486 unsigned int chunk_free, last_chunksize = 0;
487
488 if (!le32_to_cpu(rec->c_free))
489 goto bail;
490
491 do {
492 if (!bg)
493 blkno = le64_to_cpu(rec->c_blkno);
494 else
495 blkno = le64_to_cpu(bg->bg_next_group);
496
497 if (bh) {
498 brelse(bh);
499 bh = NULL;
500 }
501
502 if (o2info_coherent(&ffg->iff_req))
503 status = ocfs2_read_group_descriptor(gb_inode,
504 gb_dinode,
505 blkno, &bh);
506 else
507 status = ocfs2_read_blocks_sync(osb, blkno, 1, &bh);
508
509 if (status < 0) {
510 mlog(ML_ERROR, "Can't read the group descriptor # "
511 "%llu from device.", (unsigned long long)blkno);
512 status = -EIO;
513 goto bail;
514 }
515
516 bg = (struct ocfs2_group_desc *)bh->b_data;
517
518 if (!le16_to_cpu(bg->bg_free_bits_count))
519 continue;
520
521 max_bits = le16_to_cpu(bg->bg_bits);
522 offset = 0;
523
524 for (chunk = 0; chunk < chunks_in_group; chunk++) {
525 /*
526 * the last chunk may not be an entire one.
527 */
528 if ((offset + ffg->iff_chunksize) > max_bits)
529 num_clusters = max_bits - offset;
530 else
531 num_clusters = ffg->iff_chunksize;
532
533 chunk_free = 0;
534 for (cluster = 0; cluster < num_clusters; cluster++) {
535 used = ocfs2_test_bit(offset,
536 (unsigned long *)bg->bg_bitmap);
537 /*
538 * - chunk_free counts the free clusters in chunk #N.
539 * - last_chunksize records the size (in clusters) of
540 * the last real free chunk being counted.
541 */
542 if (!used) {
543 last_chunksize++;
544 chunk_free++;
545 }
546
547 if (used && last_chunksize) {
548 ocfs2_info_update_ffg(ffg,
549 last_chunksize);
550 last_chunksize = 0;
551 }
552
553 offset++;
554 }
555
556 if (chunk_free == ffg->iff_chunksize)
557 ffg->iff_ffs.ffs_free_chunks++;
558 }
559
560 /*
561 * need to update the info for the last free chunk.
562 */
563 if (last_chunksize)
564 ocfs2_info_update_ffg(ffg, last_chunksize);
565
566 } while (le64_to_cpu(bg->bg_next_group));
567
568bail:
569 brelse(bh);
570
571 return status;
572}
573
574int ocfs2_info_freefrag_scan_bitmap(struct ocfs2_super *osb,
575 struct inode *gb_inode, u64 blkno,
576 struct ocfs2_info_freefrag *ffg)
577{
578 u32 chunks_in_group;
579 int status = 0, unlock = 0, i;
580
581 struct buffer_head *bh = NULL;
582 struct ocfs2_chain_list *cl = NULL;
583 struct ocfs2_chain_rec *rec = NULL;
584 struct ocfs2_dinode *gb_dinode = NULL;
585
586 if (gb_inode)
587 mutex_lock(&gb_inode->i_mutex);
588
589 if (o2info_coherent(&ffg->iff_req)) {
590 status = ocfs2_inode_lock(gb_inode, &bh, 0);
591 if (status < 0) {
592 mlog_errno(status);
593 goto bail;
594 }
595 unlock = 1;
596 } else {
597 status = ocfs2_read_blocks_sync(osb, blkno, 1, &bh);
598 if (status < 0) {
599 mlog_errno(status);
600 goto bail;
601 }
602 }
603
604 gb_dinode = (struct ocfs2_dinode *)bh->b_data;
605 cl = &(gb_dinode->id2.i_chain);
606
607 /*
608 * The chunksize (in clusters) from userspace must not
609 * exceed the clusters in a group.
610 */
611 if (ffg->iff_chunksize > le16_to_cpu(cl->cl_cpg)) {
612 status = -EINVAL;
613 goto bail;
614 }
615
616 memset(&ffg->iff_ffs, 0, sizeof(struct ocfs2_info_freefrag_stats));
617
618 ffg->iff_ffs.ffs_min = ~0U;
619 ffg->iff_ffs.ffs_clusters =
620 le32_to_cpu(gb_dinode->id1.bitmap1.i_total);
621 ffg->iff_ffs.ffs_free_clusters = ffg->iff_ffs.ffs_clusters -
622 le32_to_cpu(gb_dinode->id1.bitmap1.i_used);
623
624 chunks_in_group = le16_to_cpu(cl->cl_cpg) / ffg->iff_chunksize + 1;
625
626 for (i = 0; i < le16_to_cpu(cl->cl_next_free_rec); i++) {
627 rec = &(cl->cl_recs[i]);
628 status = ocfs2_info_freefrag_scan_chain(osb, gb_inode,
629 gb_dinode,
630 rec, ffg,
631 chunks_in_group);
632 if (status)
633 goto bail;
634 }
635
636 if (ffg->iff_ffs.ffs_free_chunks_real)
637 ffg->iff_ffs.ffs_avg = (ffg->iff_ffs.ffs_avg /
638 ffg->iff_ffs.ffs_free_chunks_real);
639bail:
640 if (unlock)
641 ocfs2_inode_unlock(gb_inode, 0);
642
643 if (gb_inode)
644 mutex_unlock(&gb_inode->i_mutex);
645
646 if (gb_inode)
647 iput(gb_inode);
648
649 brelse(bh);
650
651 return status;
652}
653
654int ocfs2_info_handle_freefrag(struct inode *inode,
655 struct ocfs2_info_request __user *req)
656{
657 u64 blkno = -1;
658 char namebuf[40];
659 int status = -EFAULT, type = GLOBAL_BITMAP_SYSTEM_INODE;
660
661 struct ocfs2_info_freefrag *oiff;
662 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
663 struct inode *gb_inode = NULL;
664
665 oiff = kzalloc(sizeof(struct ocfs2_info_freefrag), GFP_KERNEL);
666 if (!oiff) {
667 status = -ENOMEM;
668 mlog_errno(status);
669 goto bail;
670 }
671
672 if (o2info_from_user(*oiff, req))
673 goto bail;
674 /*
675 * chunksize from userspace must be a power of 2.
676 */
677 if ((oiff->iff_chunksize & (oiff->iff_chunksize - 1)) ||
678 (!oiff->iff_chunksize)) {
679 status = -EINVAL;
680 goto bail;
681 }
682
683 if (o2info_coherent(&oiff->iff_req)) {
684 gb_inode = ocfs2_get_system_file_inode(osb, type,
685 OCFS2_INVALID_SLOT);
686 if (!gb_inode) {
687 mlog(ML_ERROR, "unable to get global_bitmap inode\n");
688 status = -EIO;
689 goto bail;
690 }
691 } else {
692 ocfs2_sprintf_system_inode_name(namebuf, sizeof(namebuf), type,
693 OCFS2_INVALID_SLOT);
694 status = ocfs2_lookup_ino_from_name(osb->sys_root_inode,
695 namebuf,
696 strlen(namebuf),
697 &blkno);
698 if (status < 0) {
699 status = -ENOENT;
700 goto bail;
701 }
702 }
703
704 status = ocfs2_info_freefrag_scan_bitmap(osb, gb_inode, blkno, oiff);
705 if (status < 0)
706 goto bail;
707
708 o2info_set_request_filled(&oiff->iff_req);
709
710 if (o2info_to_user(*oiff, req))
711 goto bail;
712
713 status = 0;
714bail:
715 if (status)
716 o2info_set_request_error(&oiff->iff_req, req);
717
718 kfree(oiff);
317 719
318 return status; 720 return status;
319} 721}
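As with the free-inode request, userspace drives this handler through
OCFS2_IOC_INFO. A hedged sketch of the request-specific part, with the
chunk size given in clusters and required to be a power of two, as
validated above:

	struct ocfs2_info_freefrag iff = {0};

	iff.iff_req.ir_magic = OCFS2_INFO_MAGIC;
	iff.iff_req.ir_code  = OCFS2_INFO_FREEFRAG;
	iff.iff_req.ir_size  = sizeof(iff);
	iff.iff_chunksize    = 8;	/* histogram granularity */

	/* submit via OCFS2_IOC_INFO as in the free-inode sketch, then
	 * read iff.iff_ffs.ffs_free_chunks, ffs_min/max/avg and the
	 * power-of-two histogram in iff.iff_ffs.ffs_fc_hist */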
@@ -327,7 +729,7 @@ int ocfs2_info_handle_unknown(struct inode *inode,
327 if (o2info_from_user(oir, req)) 729 if (o2info_from_user(oir, req))
328 goto bail; 730 goto bail;
329 731
330 o2info_clear_request_filled(oir); 732 o2info_clear_request_filled(&oir);
331 733
332 if (o2info_to_user(oir, req)) 734 if (o2info_to_user(oir, req))
333 goto bail; 735 goto bail;
@@ -335,7 +737,7 @@ int ocfs2_info_handle_unknown(struct inode *inode,
335 status = 0; 737 status = 0;
336bail: 738bail:
337 if (status) 739 if (status)
338 o2info_set_request_error(oir, req); 740 o2info_set_request_error(&oir, req);
339 741
340 return status; 742 return status;
341} 743}
@@ -389,6 +791,14 @@ int ocfs2_info_handle_request(struct inode *inode,
389 if (oir.ir_size == sizeof(struct ocfs2_info_journal_size)) 791 if (oir.ir_size == sizeof(struct ocfs2_info_journal_size))
390 status = ocfs2_info_handle_journal_size(inode, req); 792 status = ocfs2_info_handle_journal_size(inode, req);
391 break; 793 break;
794 case OCFS2_INFO_FREEINODE:
795 if (oir.ir_size == sizeof(struct ocfs2_info_freeinode))
796 status = ocfs2_info_handle_freeinode(inode, req);
797 break;
798 case OCFS2_INFO_FREEFRAG:
799 if (oir.ir_size == sizeof(struct ocfs2_info_freefrag))
800 status = ocfs2_info_handle_freefrag(inode, req);
801 break;
392 default: 802 default:
393 status = ocfs2_info_handle_unknown(inode, req); 803 status = ocfs2_info_handle_unknown(inode, req);
394 break; 804 break;
@@ -565,6 +975,8 @@ long ocfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
565 975
566 return 0; 976 return 0;
567 } 977 }
978 case OCFS2_IOC_MOVE_EXT:
979 return ocfs2_ioctl_move_extents(filp, (void __user *)arg);
568 default: 980 default:
569 return -ENOTTY; 981 return -ENOTTY;
570 } 982 }
@@ -608,6 +1020,8 @@ long ocfs2_compat_ioctl(struct file *file, unsigned cmd, unsigned long arg)
608 return -EFAULT; 1020 return -EFAULT;
609 1021
610 return ocfs2_info_handle(inode, &info, 1); 1022 return ocfs2_info_handle(inode, &info, 1);
1023 case OCFS2_IOC_MOVE_EXT:
1024 break;
611 default: 1025 default:
612 return -ENOIOCTLCMD; 1026 return -ENOIOCTLCMD;
613 } 1027 }
diff --git a/fs/ocfs2/move_extents.c b/fs/ocfs2/move_extents.c
new file mode 100644
index 000000000000..4c5488468c14
--- /dev/null
+++ b/fs/ocfs2/move_extents.c
@@ -0,0 +1,1153 @@
1/* -*- mode: c; c-basic-offset: 8; -*-
2 * vim: noexpandtab sw=8 ts=8 sts=0:
3 *
4 * move_extents.c
5 *
6 * Copyright (C) 2011 Oracle. All rights reserved.
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public
10 * License version 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17#include <linux/fs.h>
18#include <linux/types.h>
19#include <linux/mount.h>
20#include <linux/swap.h>
21
22#include <cluster/masklog.h>
23
24#include "ocfs2.h"
25#include "ocfs2_ioctl.h"
26
27#include "alloc.h"
28#include "aops.h"
29#include "dlmglue.h"
30#include "extent_map.h"
31#include "inode.h"
32#include "journal.h"
33#include "suballoc.h"
34#include "uptodate.h"
35#include "super.h"
36#include "dir.h"
37#include "buffer_head_io.h"
38#include "sysfile.h"
39#include "suballoc.h"
40#include "refcounttree.h"
41#include "move_extents.h"
42
43struct ocfs2_move_extents_context {
44 struct inode *inode;
45 struct file *file;
46 int auto_defrag;
47 int partial;
48 int credits;
49 u32 new_phys_cpos;
50 u32 clusters_moved;
51 u64 refcount_loc;
52 struct ocfs2_move_extents *range;
53 struct ocfs2_extent_tree et;
54 struct ocfs2_alloc_context *meta_ac;
55 struct ocfs2_alloc_context *data_ac;
56 struct ocfs2_cached_dealloc_ctxt dealloc;
57};
58
59static int __ocfs2_move_extent(handle_t *handle,
60 struct ocfs2_move_extents_context *context,
61 u32 cpos, u32 len, u32 p_cpos, u32 new_p_cpos,
62 int ext_flags)
63{
64 int ret = 0, index;
65 struct inode *inode = context->inode;
66 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
67 struct ocfs2_extent_rec *rec, replace_rec;
68 struct ocfs2_path *path = NULL;
69 struct ocfs2_extent_list *el;
70 u64 ino = ocfs2_metadata_cache_owner(context->et.et_ci);
71 u64 old_blkno = ocfs2_clusters_to_blocks(inode->i_sb, p_cpos);
72
73 ret = ocfs2_duplicate_clusters_by_page(handle, context->file, cpos,
74 p_cpos, new_p_cpos, len);
75 if (ret) {
76 mlog_errno(ret);
77 goto out;
78 }
79
80 memset(&replace_rec, 0, sizeof(replace_rec));
81 replace_rec.e_cpos = cpu_to_le32(cpos);
82 replace_rec.e_leaf_clusters = cpu_to_le16(len);
83 replace_rec.e_blkno = cpu_to_le64(ocfs2_clusters_to_blocks(inode->i_sb,
84 new_p_cpos));
85
86 path = ocfs2_new_path_from_et(&context->et);
87 if (!path) {
88 ret = -ENOMEM;
89 mlog_errno(ret);
90 goto out;
91 }
92
93 ret = ocfs2_find_path(INODE_CACHE(inode), path, cpos);
94 if (ret) {
95 mlog_errno(ret);
96 goto out;
97 }
98
99 el = path_leaf_el(path);
100
101 index = ocfs2_search_extent_list(el, cpos);
102 if (index == -1 || index >= le16_to_cpu(el->l_next_free_rec)) {
103 ocfs2_error(inode->i_sb,
104 "Inode %llu has an extent at cpos %u which can no "
105 "longer be found.\n",
106 (unsigned long long)ino, cpos);
107 ret = -EROFS;
108 goto out;
109 }
110
111 rec = &el->l_recs[index];
112
113 BUG_ON(ext_flags != rec->e_flags);
114 /*
115 * after moving/defragging to the new location, the extent is
116 * no longer refcounted.
117 */
118 replace_rec.e_flags = ext_flags & ~OCFS2_EXT_REFCOUNTED;
119
120 ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode),
121 context->et.et_root_bh,
122 OCFS2_JOURNAL_ACCESS_WRITE);
123 if (ret) {
124 mlog_errno(ret);
125 goto out;
126 }
127
128 ret = ocfs2_split_extent(handle, &context->et, path, index,
129 &replace_rec, context->meta_ac,
130 &context->dealloc);
131 if (ret) {
132 mlog_errno(ret);
133 goto out;
134 }
135
136 ocfs2_journal_dirty(handle, context->et.et_root_bh);
137
138 context->new_phys_cpos = new_p_cpos;
139
140 /*
141 * do we need to append a truncate log record for the old clusters?
142 */
143 if (old_blkno) {
144 if (ext_flags & OCFS2_EXT_REFCOUNTED)
145 ret = ocfs2_decrease_refcount(inode, handle,
146 ocfs2_blocks_to_clusters(osb->sb,
147 old_blkno),
148 len, context->meta_ac,
149 &context->dealloc, 1);
150 else
151 ret = ocfs2_truncate_log_append(osb, handle,
152 old_blkno, len);
153 }
154
155out:
156 return ret;
157}
158
159/*
160 * lock the allocators and reserve the appropriate number of bits for
161 * meta blocks and data clusters.
162 *
163 * in some cases we don't need to reserve clusters; just pass data_ac
164 * as NULL.
165 */
166static int ocfs2_lock_allocators_move_extents(struct inode *inode,
167 struct ocfs2_extent_tree *et,
168 u32 clusters_to_move,
169 u32 extents_to_split,
170 struct ocfs2_alloc_context **meta_ac,
171 struct ocfs2_alloc_context **data_ac,
172 int extra_blocks,
173 int *credits)
174{
175 int ret, num_free_extents;
176 unsigned int max_recs_needed = 2 * extents_to_split + clusters_to_move;
177 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
178
179 num_free_extents = ocfs2_num_free_extents(osb, et);
180 if (num_free_extents < 0) {
181 ret = num_free_extents;
182 mlog_errno(ret);
183 goto out;
184 }
185
186 if (!num_free_extents ||
187 (ocfs2_sparse_alloc(osb) && num_free_extents < max_recs_needed))
188 extra_blocks += ocfs2_extend_meta_needed(et->et_root_el);
189
190 ret = ocfs2_reserve_new_metadata_blocks(osb, extra_blocks, meta_ac);
191 if (ret) {
192 mlog_errno(ret);
193 goto out;
194 }
195
196 if (data_ac) {
197 ret = ocfs2_reserve_clusters(osb, clusters_to_move, data_ac);
198 if (ret) {
199 mlog_errno(ret);
200 goto out;
201 }
202 }
203
204 *credits += ocfs2_calc_extend_credits(osb->sb, et->et_root_el,
205 clusters_to_move + 2);
206
207 mlog(0, "reserve metadata_blocks: %d, data_clusters: %u, credits: %d\n",
208 extra_blocks, clusters_to_move, *credits);
209out:
210 if (ret) {
211 if (*meta_ac) {
212 ocfs2_free_alloc_context(*meta_ac);
213 *meta_ac = NULL;
214 }
215 }
216
217 return ret;
218}
219
220/*
221 * Use one journal handle to guarantee data consistency in case a
222 * crash happens anywhere.
223 *
224 * XXX: defrag can end up finishing only part of the requested extent,
225 * when not enough contiguous clusters can be found in the allocator.
226 */
227static int ocfs2_defrag_extent(struct ocfs2_move_extents_context *context,
228 u32 cpos, u32 phys_cpos, u32 *len, int ext_flags)
229{
230 int ret, credits = 0, extra_blocks = 0, partial = context->partial;
231 handle_t *handle;
232 struct inode *inode = context->inode;
233 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
234 struct inode *tl_inode = osb->osb_tl_inode;
235 struct ocfs2_refcount_tree *ref_tree = NULL;
236 u32 new_phys_cpos, new_len;
237 u64 phys_blkno = ocfs2_clusters_to_blocks(inode->i_sb, phys_cpos);
238
239 if ((ext_flags & OCFS2_EXT_REFCOUNTED) && *len) {
240
241 BUG_ON(!(OCFS2_I(inode)->ip_dyn_features &
242 OCFS2_HAS_REFCOUNT_FL));
243
244 BUG_ON(!context->refcount_loc);
245
246 ret = ocfs2_lock_refcount_tree(osb, context->refcount_loc, 1,
247 &ref_tree, NULL);
248 if (ret) {
249 mlog_errno(ret);
250 return ret;
251 }
252
253 ret = ocfs2_prepare_refcount_change_for_del(inode,
254 context->refcount_loc,
255 phys_blkno,
256 *len,
257 &credits,
258 &extra_blocks);
259 if (ret) {
260 mlog_errno(ret);
261 goto out;
262 }
263 }
264
265 ret = ocfs2_lock_allocators_move_extents(inode, &context->et, *len, 1,
266 &context->meta_ac,
267 &context->data_ac,
268 extra_blocks, &credits);
269 if (ret) {
270 mlog_errno(ret);
271 goto out;
272 }
273
274 /*
275 * should we be using the allocation reservation strategy here?
276 *
277 * if (context->data_ac)
278 * context->data_ac->ac_resv = &OCFS2_I(inode)->ip_la_data_resv;
279 */
280
281 mutex_lock(&tl_inode->i_mutex);
282
283 if (ocfs2_truncate_log_needs_flush(osb)) {
284 ret = __ocfs2_flush_truncate_log(osb);
285 if (ret < 0) {
286 mlog_errno(ret);
287 goto out_unlock_mutex;
288 }
289 }
290
291 handle = ocfs2_start_trans(osb, credits);
292 if (IS_ERR(handle)) {
293 ret = PTR_ERR(handle);
294 mlog_errno(ret);
295 goto out_unlock_mutex;
296 }
297
298 ret = __ocfs2_claim_clusters(handle, context->data_ac, 1, *len,
299 &new_phys_cpos, &new_len);
300 if (ret) {
301 mlog_errno(ret);
302 goto out_commit;
303 }
304
305 /*
306 * allowing partial extent moving cuts both ways: it makes the whole
307 * defragmentation less likely to fail, but on the other hand it may
308 * leave the fs even more fragmented after moving, so let userspace
309 * make a good decision here.
310 */
311 if (new_len != *len) {
312 mlog(0, "len_claimed: %u, len: %u\n", new_len, *len);
313 if (!partial) {
314 context->range->me_flags &= ~OCFS2_MOVE_EXT_FL_COMPLETE;
315 ret = -ENOSPC;
316 goto out_commit;
317 }
318 }
319
320 mlog(0, "cpos: %u, phys_cpos: %u, new_phys_cpos: %u\n", cpos,
321 phys_cpos, new_phys_cpos);
322
323 ret = __ocfs2_move_extent(handle, context, cpos, new_len, phys_cpos,
324 new_phys_cpos, ext_flags);
325 if (ret)
326 mlog_errno(ret);
327
328 if (partial && (new_len != *len))
329 *len = new_len;
330
331 /*
332 * Here we should write the new page out first if we are
333 * in write-back mode.
334 */
335 ret = ocfs2_cow_sync_writeback(inode->i_sb, context->inode, cpos, *len);
336 if (ret)
337 mlog_errno(ret);
338
339out_commit:
340 ocfs2_commit_trans(osb, handle);
341
342out_unlock_mutex:
343 mutex_unlock(&tl_inode->i_mutex);
344
345 if (context->data_ac) {
346 ocfs2_free_alloc_context(context->data_ac);
347 context->data_ac = NULL;
348 }
349
350 if (context->meta_ac) {
351 ocfs2_free_alloc_context(context->meta_ac);
352 context->meta_ac = NULL;
353 }
354
355out:
356 if (ref_tree)
357 ocfs2_unlock_refcount_tree(osb, ref_tree, 1);
358
359 return ret;
360}
361
362/*
363 * find the victim alloc group, i.e. the one that #blkno falls into.
364 */
365static int ocfs2_find_victim_alloc_group(struct inode *inode,
366 u64 vict_blkno,
367 int type, int slot,
368 int *vict_bit,
369 struct buffer_head **ret_bh)
370{
371 int ret, i, blocks_per_unit = 1;
372 u64 blkno;
373 char namebuf[40];
374
375 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
376 struct buffer_head *ac_bh = NULL, *gd_bh = NULL;
377 struct ocfs2_chain_list *cl;
378 struct ocfs2_chain_rec *rec;
379 struct ocfs2_dinode *ac_dinode;
380 struct ocfs2_group_desc *bg;
381
382 ocfs2_sprintf_system_inode_name(namebuf, sizeof(namebuf), type, slot);
383 ret = ocfs2_lookup_ino_from_name(osb->sys_root_inode, namebuf,
384 strlen(namebuf), &blkno);
385 if (ret) {
386 ret = -ENOENT;
387 goto out;
388 }
389
390 ret = ocfs2_read_blocks_sync(osb, blkno, 1, &ac_bh);
391 if (ret) {
392 mlog_errno(ret);
393 goto out;
394 }
395
396 ac_dinode = (struct ocfs2_dinode *)ac_bh->b_data;
397 cl = &(ac_dinode->id2.i_chain);
398 rec = &(cl->cl_recs[0]);
399
400 if (type == GLOBAL_BITMAP_SYSTEM_INODE)
401 blocks_per_unit <<= (osb->s_clustersize_bits -
402 inode->i_sb->s_blocksize_bits);
403 /*
404 * bail out if 'vict_blkno' is out of the valid range.
405 */
406 if ((vict_blkno < le64_to_cpu(rec->c_blkno)) ||
407 (vict_blkno >= (le32_to_cpu(ac_dinode->id1.bitmap1.i_total) *
408 blocks_per_unit))) {
409 ret = -EINVAL;
410 goto out;
411 }
412
413 for (i = 0; i < le16_to_cpu(cl->cl_next_free_rec); i++) {
414
415 rec = &(cl->cl_recs[i]);
416 if (!rec)
417 continue;
418
419 bg = NULL;
420
421 do {
422 if (!bg)
423 blkno = le64_to_cpu(rec->c_blkno);
424 else
425 blkno = le64_to_cpu(bg->bg_next_group);
426
427 if (gd_bh) {
428 brelse(gd_bh);
429 gd_bh = NULL;
430 }
431
432 ret = ocfs2_read_blocks_sync(osb, blkno, 1, &gd_bh);
433 if (ret) {
434 mlog_errno(ret);
435 goto out;
436 }
437
438 bg = (struct ocfs2_group_desc *)gd_bh->b_data;
439
440 if (vict_blkno < (le64_to_cpu(bg->bg_blkno) +
441 le16_to_cpu(bg->bg_bits))) {
442
443 *ret_bh = gd_bh;
444 *vict_bit = (vict_blkno - blkno) /
445 blocks_per_unit;
446 mlog(0, "find the victim group: #%llu, "
447 "total_bits: %u, vict_bit: %u\n",
448 blkno, le16_to_cpu(bg->bg_bits),
449 *vict_bit);
450 goto out;
451 }
452
453 } while (le64_to_cpu(bg->bg_next_group));
454 }
455
456 ret = -EINVAL;
457out:
458 brelse(ac_bh);
459
460 /*
461 * caller has to release the gd_bh properly.
462 */
463 return ret;
464}
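/* A worked example of the unit conversion above, assuming 4 KB blocks
 * (s_blocksize_bits = 12) and 1 MB clusters (s_clustersize_bits = 20),
 * so that one global-bitmap bit covers one cluster:
 *
 *	blocks_per_unit = 1 << (20 - 12) = 256 blocks per bit
 *	vict_bit = (vict_blkno - blkno) / 256
 *
 * i.e. a victim block 1280 blocks past the group start maps to bit 5. */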
465
466/*
467 * XXX: helper to validate and adjust moving goal.
468 */
469static int ocfs2_validate_and_adjust_move_goal(struct inode *inode,
470 struct ocfs2_move_extents *range)
471{
472 int ret, goal_bit = 0;
473
474 struct buffer_head *gd_bh = NULL;
475 struct ocfs2_group_desc *bg;
476 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
477 int c_to_b = 1 << (osb->s_clustersize_bits -
478 inode->i_sb->s_blocksize_bits);
479
480 /*
481 * validate that the goal sits within the global_bitmap, and return
482 * the victim group desc
483 */
484 ret = ocfs2_find_victim_alloc_group(inode, range->me_goal,
485 GLOBAL_BITMAP_SYSTEM_INODE,
486 OCFS2_INVALID_SLOT,
487 &goal_bit, &gd_bh);
488 if (ret)
489 goto out;
490
491 bg = (struct ocfs2_group_desc *)gd_bh->b_data;
492
493 /*
494 * make the goal cluster-aligned.
495 */
496 if (range->me_goal % c_to_b)
497 range->me_goal = range->me_goal / c_to_b * c_to_b;
498
499 /*
500 * the moving goal is not allowed to start at a group desc block (blk #0),
501 * so compromise to the next cluster instead.
502 */
503 if (range->me_goal == le64_to_cpu(bg->bg_blkno))
504 range->me_goal += c_to_b;
505
506 /*
507 * the movement must not cross group boundaries.
508 */
509 if ((le16_to_cpu(bg->bg_bits) - goal_bit) * osb->s_clustersize <
510 range->me_len) {
511 ret = -EINVAL;
512 goto out;
513 }
514 /*
515 * more exact validations/adjustments will be performed later, during
516 * the moving operation for each extent range.
517 */
518 mlog(0, "extents get ready to be moved to #%llu block\n",
519 range->me_goal);
520
521out:
522 brelse(gd_bh);
523
524 return ret;
525}
526
527static void ocfs2_probe_alloc_group(struct inode *inode, struct buffer_head *bh,
528 int *goal_bit, u32 move_len, u32 max_hop,
529 u32 *phys_cpos)
530{
531 int i, used, last_free_bits = 0, base_bit = *goal_bit;
532 struct ocfs2_group_desc *gd = (struct ocfs2_group_desc *)bh->b_data;
533 u32 base_cpos = ocfs2_blocks_to_clusters(inode->i_sb,
534 le64_to_cpu(gd->bg_blkno));
535
536 for (i = base_bit; i < le16_to_cpu(gd->bg_bits); i++) {
537
538 used = ocfs2_test_bit(i, (unsigned long *)gd->bg_bitmap);
539 if (used) {
540 /*
541 * we even tried searching for a free chunk by jumping
542 * a 'max_hop' distance, but still failed.
543 */
544 if ((i - base_bit) > max_hop) {
545 *phys_cpos = 0;
546 break;
547 }
548
549 if (last_free_bits)
550 last_free_bits = 0;
551
552 continue;
553 } else
554 last_free_bits++;
555
556 if (last_free_bits == move_len) {
557 *goal_bit = i;
558 *phys_cpos = base_cpos + i;
559 break;
560 }
561 }
562
563 mlog(0, "found phys_cpos: %u to fit the wanted moving.\n", *phys_cpos);
564}
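/* A toy standalone sketch of the probe above (simplified: it returns the
 * first bit of a qualifying free run, whereas the kernel version records
 * the position relative to the group's base cluster): */
static int probe_sketch(const unsigned long *bitmap, int total_bits,
			int goal, int move_len, int max_hop)
{
	int i, run = 0;

	for (i = goal; i < total_bits; i++) {
		if (test_bit(i, bitmap)) {	/* bit in use */
			if (i - goal > max_hop)
				return -1;	/* hopped too far past goal */
			run = 0;
			continue;
		}
		if (++run == move_len)
			return i - move_len + 1;	/* run start */
	}
	return -1;				/* no fit found */
}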
565
566static int ocfs2_alloc_dinode_update_counts(struct inode *inode,
567 handle_t *handle,
568 struct buffer_head *di_bh,
569 u32 num_bits,
570 u16 chain)
571{
572 int ret;
573 u32 tmp_used;
574 struct ocfs2_dinode *di = (struct ocfs2_dinode *) di_bh->b_data;
575 struct ocfs2_chain_list *cl =
576 (struct ocfs2_chain_list *) &di->id2.i_chain;
577
578 ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
579 OCFS2_JOURNAL_ACCESS_WRITE);
580 if (ret < 0) {
581 mlog_errno(ret);
582 goto out;
583 }
584
585 tmp_used = le32_to_cpu(di->id1.bitmap1.i_used);
586 di->id1.bitmap1.i_used = cpu_to_le32(num_bits + tmp_used);
587 le32_add_cpu(&cl->cl_recs[chain].c_free, -num_bits);
588 ocfs2_journal_dirty(handle, di_bh);
589
590out:
591 return ret;
592}
593
594static inline int ocfs2_block_group_set_bits(handle_t *handle,
595 struct inode *alloc_inode,
596 struct ocfs2_group_desc *bg,
597 struct buffer_head *group_bh,
598 unsigned int bit_off,
599 unsigned int num_bits)
600{
601 int status;
602 void *bitmap = bg->bg_bitmap;
603 int journal_type = OCFS2_JOURNAL_ACCESS_WRITE;
604
605 /* All callers get the descriptor via
606 * ocfs2_read_group_descriptor(). Any corruption is a code bug. */
607 BUG_ON(!OCFS2_IS_VALID_GROUP_DESC(bg));
608 BUG_ON(le16_to_cpu(bg->bg_free_bits_count) < num_bits);
609
610 mlog(0, "block_group_set_bits: off = %u, num = %u\n", bit_off,
611 num_bits);
612
613 if (ocfs2_is_cluster_bitmap(alloc_inode))
614 journal_type = OCFS2_JOURNAL_ACCESS_UNDO;
615
616 status = ocfs2_journal_access_gd(handle,
617 INODE_CACHE(alloc_inode),
618 group_bh,
619 journal_type);
620 if (status < 0) {
621 mlog_errno(status);
622 goto bail;
623 }
624
625 le16_add_cpu(&bg->bg_free_bits_count, -num_bits);
626 if (le16_to_cpu(bg->bg_free_bits_count) > le16_to_cpu(bg->bg_bits)) {
627 ocfs2_error(alloc_inode->i_sb, "Group descriptor # %llu has bit"
628 " count %u but claims %u are freed. num_bits %d",
629 (unsigned long long)le64_to_cpu(bg->bg_blkno),
630 le16_to_cpu(bg->bg_bits),
631 le16_to_cpu(bg->bg_free_bits_count), num_bits);
632 return -EROFS;
633 }
634 while (num_bits--)
635 ocfs2_set_bit(bit_off++, bitmap);
636
637 ocfs2_journal_dirty(handle, group_bh);
638
639bail:
640 return status;
641}
642
643static int ocfs2_move_extent(struct ocfs2_move_extents_context *context,
644 u32 cpos, u32 phys_cpos, u32 *new_phys_cpos,
645 u32 len, int ext_flags)
646{
647 int ret, credits = 0, extra_blocks = 0, goal_bit = 0;
648 handle_t *handle;
649 struct inode *inode = context->inode;
650 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
651 struct inode *tl_inode = osb->osb_tl_inode;
652 struct inode *gb_inode = NULL;
653 struct buffer_head *gb_bh = NULL;
654 struct buffer_head *gd_bh = NULL;
655 struct ocfs2_group_desc *gd;
656 struct ocfs2_refcount_tree *ref_tree = NULL;
657 u32 move_max_hop = ocfs2_blocks_to_clusters(inode->i_sb,
658 context->range->me_threshold);
659 u64 phys_blkno, new_phys_blkno;
660
661 phys_blkno = ocfs2_clusters_to_blocks(inode->i_sb, phys_cpos);
662
663 if ((ext_flags & OCFS2_EXT_REFCOUNTED) && len) {
664
665 BUG_ON(!(OCFS2_I(inode)->ip_dyn_features &
666 OCFS2_HAS_REFCOUNT_FL));
667
668 BUG_ON(!context->refcount_loc);
669
670 ret = ocfs2_lock_refcount_tree(osb, context->refcount_loc, 1,
671 &ref_tree, NULL);
672 if (ret) {
673 mlog_errno(ret);
674 return ret;
675 }
676
677 ret = ocfs2_prepare_refcount_change_for_del(inode,
678 context->refcount_loc,
679 phys_blkno,
680 len,
681 &credits,
682 &extra_blocks);
683 if (ret) {
684 mlog_errno(ret);
685 goto out;
686 }
687 }
688
689 ret = ocfs2_lock_allocators_move_extents(inode, &context->et, len, 1,
690 &context->meta_ac,
691 NULL, extra_blocks, &credits);
692 if (ret) {
693 mlog_errno(ret);
694 goto out;
695 }
696
697 /*
698 * need to count 2 extra credits for global_bitmap inode and
699 * group descriptor.
700 */
701 credits += OCFS2_INODE_UPDATE_CREDITS + 1;
702
703 /*
704 * ocfs2_move_extent() didn't reserve any clusters in the lock_allocators()
705 * logic, but we still need to lock the global_bitmap.
706 */
707 gb_inode = ocfs2_get_system_file_inode(osb, GLOBAL_BITMAP_SYSTEM_INODE,
708 OCFS2_INVALID_SLOT);
709 if (!gb_inode) {
710 mlog(ML_ERROR, "unable to get global_bitmap inode\n");
711 ret = -EIO;
712 goto out;
713 }
714
715 mutex_lock(&gb_inode->i_mutex);
716
717 ret = ocfs2_inode_lock(gb_inode, &gb_bh, 1);
718 if (ret) {
719 mlog_errno(ret);
720 goto out_unlock_gb_mutex;
721 }
722
723 mutex_lock(&tl_inode->i_mutex);
724
725 handle = ocfs2_start_trans(osb, credits);
726 if (IS_ERR(handle)) {
727 ret = PTR_ERR(handle);
728 mlog_errno(ret);
729 goto out_unlock_tl_inode;
730 }
731
732 new_phys_blkno = ocfs2_clusters_to_blocks(inode->i_sb, *new_phys_cpos);
733 ret = ocfs2_find_victim_alloc_group(inode, new_phys_blkno,
734 GLOBAL_BITMAP_SYSTEM_INODE,
735 OCFS2_INVALID_SLOT,
736 &goal_bit, &gd_bh);
737 if (ret) {
738 mlog_errno(ret);
739 goto out_commit;
740 }
741
742 /*
743 * probe the victim cluster group to find a proper
744 * region to fit the wanted movement; it will even perform
745 * a best-effort attempt by compromising to a threshold
746 * around the goal.
747 */
748 ocfs2_probe_alloc_group(inode, gd_bh, &goal_bit, len, move_max_hop,
749 new_phys_cpos);
750 if (!new_phys_cpos) {
751 ret = -ENOSPC;
752 goto out_commit;
753 }
754
755 ret = __ocfs2_move_extent(handle, context, cpos, len, phys_cpos,
756 *new_phys_cpos, ext_flags);
757 if (ret) {
758 mlog_errno(ret);
759 goto out_commit;
760 }
761
762 gd = (struct ocfs2_group_desc *)gd_bh->b_data;
763 ret = ocfs2_alloc_dinode_update_counts(gb_inode, handle, gb_bh, len,
764 le16_to_cpu(gd->bg_chain));
765 if (ret) {
766 mlog_errno(ret);
767 goto out_commit;
768 }
769
770 ret = ocfs2_block_group_set_bits(handle, gb_inode, gd, gd_bh,
771 goal_bit, len);
772 if (ret)
773 mlog_errno(ret);
774
775 /*
776 * Here we should write the new page out first if we are
777 * in write-back mode.
778 */
779 ret = ocfs2_cow_sync_writeback(inode->i_sb, context->inode, cpos, len);
780 if (ret)
781 mlog_errno(ret);
782
783out_commit:
784 ocfs2_commit_trans(osb, handle);
785 brelse(gd_bh);
786
787out_unlock_tl_inode:
788 mutex_unlock(&tl_inode->i_mutex);
789
790 ocfs2_inode_unlock(gb_inode, 1);
791out_unlock_gb_mutex:
792 mutex_unlock(&gb_inode->i_mutex);
793 brelse(gb_bh);
794 iput(gb_inode);
795
796out:
797 if (context->meta_ac) {
798 ocfs2_free_alloc_context(context->meta_ac);
799 context->meta_ac = NULL;
800 }
801
802 if (ref_tree)
803 ocfs2_unlock_refcount_tree(osb, ref_tree, 1);
804
805 return ret;
806}
807
808/*
809 * Helper to calculate the defragging length in one run according to the threshold.
810 */
811static void ocfs2_calc_extent_defrag_len(u32 *alloc_size, u32 *len_defraged,
812 u32 threshold, int *skip)
813{
814 if ((*alloc_size + *len_defraged) < threshold) {
815 /*
816 * proceed with defragmentation until we meet the thresh
817 */
818 *len_defraged += *alloc_size;
819 } else if (*len_defraged == 0) {
820 /*
821 * XXX: skip a large extent.
822 */
823 *skip = 1;
824 } else {
825 /*
826 * split this extent so that, together with the former
827 * pieces, it reaches the threshold.
828 *
829 * we're done here with one cycle of defragmentation
830 * of size 'thresh'; resetting 'len_defraged'
831 * forces a new defragmentation cycle.
832 */
833 *alloc_size = threshold - *len_defraged;
834 *len_defraged = 0;
835 }
836}
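/* A worked example of the three branches above, with threshold = 8:
 * extents of 3 then 4 clusters accumulate (len_defraged reaches 7), and
 * a following 5-cluster extent is trimmed to 8 - 7 = 1 cluster so one
 * full 8-cluster cycle completes: */
u32 alloc_size = 5, len_defraged = 7;
int skip = 0;

ocfs2_calc_extent_defrag_len(&alloc_size, &len_defraged, 8, &skip);
/* now alloc_size == 1, len_defraged == 0, skip == 0; a lone extent of
 * 8 or more clusters with nothing accumulated would set skip instead */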
837
838static int __ocfs2_move_extents_range(struct buffer_head *di_bh,
839 struct ocfs2_move_extents_context *context)
840{
841 int ret = 0, flags, do_defrag, skip = 0;
842 u32 cpos, phys_cpos, move_start, len_to_move, alloc_size;
843 u32 len_defraged = 0, defrag_thresh = 0, new_phys_cpos = 0;
844
845 struct inode *inode = context->inode;
846 struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
847 struct ocfs2_move_extents *range = context->range;
848 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
849
850 if ((inode->i_size == 0) || (range->me_len == 0))
851 return 0;
852
853 if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL)
854 return 0;
855
856 context->refcount_loc = le64_to_cpu(di->i_refcount_loc);
857
858 ocfs2_init_dinode_extent_tree(&context->et, INODE_CACHE(inode), di_bh);
859 ocfs2_init_dealloc_ctxt(&context->dealloc);
860
861 /*
862 * TO-DO XXX:
863 *
864 * - xattr extents.
865 */
866
867 do_defrag = context->auto_defrag;
868
869 /*
870 * extent moving happens in units of clusters; for the sake
871 * of simplicity, we may ignore the two clusters that 'byte_start'
872 * and 'byte_start + len' fall within.
873 */
874 move_start = ocfs2_clusters_for_bytes(osb->sb, range->me_start);
875 len_to_move = (range->me_start + range->me_len) >>
876 osb->s_clustersize_bits;
877 if (len_to_move >= move_start)
878 len_to_move -= move_start;
879 else
880 len_to_move = 0;
881
882 if (do_defrag) {
883 defrag_thresh = range->me_threshold >> osb->s_clustersize_bits;
884 if (defrag_thresh <= 1)
885 goto done;
886 } else
887 new_phys_cpos = ocfs2_blocks_to_clusters(inode->i_sb,
888 range->me_goal);
889
890 mlog(0, "Inode: %llu, start: %llu, len: %llu, cstart: %u, clen: %u, "
891 "thresh: %u\n",
892 (unsigned long long)OCFS2_I(inode)->ip_blkno,
893 (unsigned long long)range->me_start,
894 (unsigned long long)range->me_len,
895 move_start, len_to_move, defrag_thresh);
896
897 cpos = move_start;
898 while (len_to_move) {
899 ret = ocfs2_get_clusters(inode, cpos, &phys_cpos, &alloc_size,
900 &flags);
901 if (ret) {
902 mlog_errno(ret);
903 goto out;
904 }
905
906 if (alloc_size > len_to_move)
907 alloc_size = len_to_move;
908
909 /*
910 * XXX: how to deal with a hole:
911 *
912 * - skip the hole of course
913 * - force a new defragmentation
914 */
915 if (!phys_cpos) {
916 if (do_defrag)
917 len_defraged = 0;
918
919 goto next;
920 }
921
922 if (do_defrag) {
923 ocfs2_calc_extent_defrag_len(&alloc_size, &len_defraged,
924 defrag_thresh, &skip);
925 /*
926 * skip large extents
927 */
928 if (skip) {
929 skip = 0;
930 goto next;
931 }
932
933 mlog(0, "#Defrag: cpos: %u, phys_cpos: %u, "
934 "alloc_size: %u, len_defraged: %u\n",
935 cpos, phys_cpos, alloc_size, len_defraged);
936
937 ret = ocfs2_defrag_extent(context, cpos, phys_cpos,
938 &alloc_size, flags);
939 } else {
940 ret = ocfs2_move_extent(context, cpos, phys_cpos,
941 &new_phys_cpos, alloc_size,
942 flags);
943
944 new_phys_cpos += alloc_size;
945 }
946
947 if (ret < 0) {
948 mlog_errno(ret);
949 goto out;
950 }
951
952 context->clusters_moved += alloc_size;
953next:
954 cpos += alloc_size;
955 len_to_move -= alloc_size;
956 }
957
958done:
959 range->me_flags |= OCFS2_MOVE_EXT_FL_COMPLETE;
960
961out:
962 range->me_moved_len = ocfs2_clusters_to_bytes(osb->sb,
963 context->clusters_moved);
964 range->me_new_offset = ocfs2_clusters_to_bytes(osb->sb,
965 context->new_phys_cpos);
966
967 ocfs2_schedule_truncate_log_flush(osb, 1);
968 ocfs2_run_deallocs(osb, &context->dealloc);
969
970 return ret;
971}
972
973static int ocfs2_move_extents(struct ocfs2_move_extents_context *context)
974{
975 int status;
976 handle_t *handle;
977 struct inode *inode = context->inode;
978 struct ocfs2_dinode *di;
979 struct buffer_head *di_bh = NULL;
980 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
981
982 if (!inode)
983 return -ENOENT;
984
985 if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
986 return -EROFS;
987
988 mutex_lock(&inode->i_mutex);
989
990 /*
991 * This prevents concurrent writes from other nodes
992 */
993 status = ocfs2_rw_lock(inode, 1);
994 if (status) {
995 mlog_errno(status);
996 goto out;
997 }
998
999 status = ocfs2_inode_lock(inode, &di_bh, 1);
1000 if (status) {
1001 mlog_errno(status);
1002 goto out_rw_unlock;
1003 }
1004
1005 /*
1006 * remember that ip_xattr_sem also needs to be held if necessary
1007 */
1008 down_write(&OCFS2_I(inode)->ip_alloc_sem);
1009
1010 status = __ocfs2_move_extents_range(di_bh, context);
1011
1012 up_write(&OCFS2_I(inode)->ip_alloc_sem);
1013 if (status) {
1014 mlog_errno(status);
1015 goto out_inode_unlock;
1016 }
1017
1018 /*
1019 * We update ctime for these changes
1020 */
1021 handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
1022 if (IS_ERR(handle)) {
1023 status = PTR_ERR(handle);
1024 mlog_errno(status);
1025 goto out_inode_unlock;
1026 }
1027
1028 status = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
1029 OCFS2_JOURNAL_ACCESS_WRITE);
1030 if (status) {
1031 mlog_errno(status);
1032 goto out_commit;
1033 }
1034
1035 di = (struct ocfs2_dinode *)di_bh->b_data;
1036 inode->i_ctime = CURRENT_TIME;
1037 di->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
1038 di->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
1039
1040 ocfs2_journal_dirty(handle, di_bh);
1041
1042out_commit:
1043 ocfs2_commit_trans(osb, handle);
1044
1045out_inode_unlock:
1046 brelse(di_bh);
1047 ocfs2_inode_unlock(inode, 1);
1048out_rw_unlock:
1049 ocfs2_rw_unlock(inode, 1);
1050out:
1051 mutex_unlock(&inode->i_mutex);
1052
1053 return status;
1054}
1055
1056int ocfs2_ioctl_move_extents(struct file *filp, void __user *argp)
1057{
1058 int status;
1059
1060 struct inode *inode = filp->f_path.dentry->d_inode;
1061 struct ocfs2_move_extents range;
1062 struct ocfs2_move_extents_context *context = NULL;
1063
1064 status = mnt_want_write(filp->f_path.mnt);
1065 if (status)
1066 return status;
1067
1068 if ((!S_ISREG(inode->i_mode)) || !(filp->f_mode & FMODE_WRITE))
1069 goto out;
1070
1071 if (inode->i_flags & (S_IMMUTABLE|S_APPEND)) {
1072 status = -EPERM;
1073 goto out;
1074 }
1075
1076 context = kzalloc(sizeof(struct ocfs2_move_extents_context), GFP_NOFS);
1077 if (!context) {
1078 status = -ENOMEM;
1079 mlog_errno(status);
1080 goto out;
1081 }
1082
1083 context->inode = inode;
1084 context->file = filp;
1085
1086 if (argp) {
1087 if (copy_from_user(&range, (struct ocfs2_move_extents *)argp,
1088 sizeof(range))) {
1089 status = -EFAULT;
1090 goto out;
1091 }
1092 } else {
1093 status = -EINVAL;
1094 goto out;
1095 }
1096
1097 if (range.me_start > i_size_read(inode))
1098 goto out;
1099
1100 if (range.me_start + range.me_len > i_size_read(inode))
1101 range.me_len = i_size_read(inode) - range.me_start;
1102
1103 context->range = &range;
1104
1105 if (range.me_flags & OCFS2_MOVE_EXT_FL_AUTO_DEFRAG) {
1106 context->auto_defrag = 1;
1107 /*
1108 * ok, the default threshold for defragmentation
1109 * is 1M, since our maximum cluster size is also 1M.
1110 * any thoughts?
1111 */
1112 if (!range.me_threshold)
1113 range.me_threshold = 1024 * 1024;
1114
1115 if (range.me_threshold > i_size_read(inode))
1116 range.me_threshold = i_size_read(inode);
1117
1118 if (range.me_flags & OCFS2_MOVE_EXT_FL_PART_DEFRAG)
1119 context->partial = 1;
1120 } else {
1121 /*
1122 * make a first best-effort attempt to validate and adjust the goal
1123 * (a physical address in blocks); this can't guarantee the later
1124 * operation will always succeed, since the global_bitmap may
1125 * change a bit over time.
1126 */
1127
1128 status = ocfs2_validate_and_adjust_move_goal(inode, &range);
1129 if (status)
1130 goto out;
1131 }
1132
1133 status = ocfs2_move_extents(context);
1134 if (status)
1135 mlog_errno(status);
1136out:
1137 /*
1138 * movement/defragmentation may end up only partially completed;
1139 * that's why we need to return the finished length and new_offset
1140 * to userspace even if a failure happens somewhere.
1141 */
1142 if (argp) {
1143 if (copy_to_user((struct ocfs2_move_extents *)argp, &range,
1144 sizeof(range)))
1145 status = -EFAULT;
1146 }
1147
1148 kfree(context);
1149
1150 mnt_drop_write(filp->f_path.mnt);
1151
1152 return status;
1153}
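To exercise the new file end-to-end, a hedged userspace sketch (fields
and flags per ocfs2_ioctl.h; 'fd' must be a regular file opened for
writing, and error handling is trimmed):

	struct ocfs2_move_extents range;

	memset(&range, 0, sizeof(range));
	range.me_start     = 0;			/* bytes, from file start */
	range.me_len       = 64ULL << 20;	/* defrag the first 64 MB */
	range.me_threshold = 1 << 20;		/* 1 MB per defrag cycle */
	range.me_flags     = OCFS2_MOVE_EXT_FL_AUTO_DEFRAG |
			     OCFS2_MOVE_EXT_FL_PART_DEFRAG;

	if (!ioctl(fd, OCFS2_IOC_MOVE_EXT, &range))
		/* range.me_moved_len reports the bytes actually moved;
		 * OCFS2_MOVE_EXT_FL_COMPLETE appears in me_flags only
		 * when the whole request finished */;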
diff --git a/fs/ocfs2/move_extents.h b/fs/ocfs2/move_extents.h
new file mode 100644
index 000000000000..4e143e811441
--- /dev/null
+++ b/fs/ocfs2/move_extents.h
@@ -0,0 +1,22 @@
1/* -*- mode: c; c-basic-offset: 8; -*-
2 * vim: noexpandtab sw=8 ts=8 sts=0:
3 *
4 * move_extents.h
5 *
6 * Copyright (C) 2011 Oracle. All rights reserved.
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public
10 * License version 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17#ifndef OCFS2_MOVE_EXTENTS_H
18#define OCFS2_MOVE_EXTENTS_H
19
20int ocfs2_ioctl_move_extents(struct file *filp, void __user *argp);
21
22#endif /* OCFS2_MOVE_EXTENTS_H */
diff --git a/fs/ocfs2/ocfs2_ioctl.h b/fs/ocfs2/ocfs2_ioctl.h
index b46f39bf7438..5b27ff1fa577 100644
--- a/fs/ocfs2/ocfs2_ioctl.h
+++ b/fs/ocfs2/ocfs2_ioctl.h
@@ -142,6 +142,38 @@ struct ocfs2_info_journal_size {
142 __u64 ij_journal_size; 142 __u64 ij_journal_size;
143}; 143};
144 144
145struct ocfs2_info_freeinode {
146 struct ocfs2_info_request ifi_req;
147 struct ocfs2_info_local_freeinode {
148 __u64 lfi_total;
149 __u64 lfi_free;
150 } ifi_stat[OCFS2_MAX_SLOTS];
151 __u32 ifi_slotnum; /* out */
152 __u32 ifi_pad;
153};
154
155#define OCFS2_INFO_MAX_HIST (32)
156
157struct ocfs2_info_freefrag {
158 struct ocfs2_info_request iff_req;
159 struct ocfs2_info_freefrag_stats { /* (out) */
160 struct ocfs2_info_free_chunk_list {
161 __u32 fc_chunks[OCFS2_INFO_MAX_HIST];
162 __u32 fc_clusters[OCFS2_INFO_MAX_HIST];
163 } ffs_fc_hist;
164 __u32 ffs_clusters;
165 __u32 ffs_free_clusters;
166 __u32 ffs_free_chunks;
167 __u32 ffs_free_chunks_real;
168 __u32 ffs_min; /* Minimum free chunksize in clusters */
169 __u32 ffs_max;
170 __u32 ffs_avg;
171 __u32 ffs_pad;
172 } iff_ffs;
173 __u32 iff_chunksize; /* chunksize in clusters(in) */
174 __u32 iff_pad;
175};
176
145/* Codes for ocfs2_info_request */ 177/* Codes for ocfs2_info_request */
146enum ocfs2_info_type { 178enum ocfs2_info_type {
147 OCFS2_INFO_CLUSTERSIZE = 1, 179 OCFS2_INFO_CLUSTERSIZE = 1,
@@ -151,6 +183,8 @@ enum ocfs2_info_type {
151 OCFS2_INFO_UUID, 183 OCFS2_INFO_UUID,
152 OCFS2_INFO_FS_FEATURES, 184 OCFS2_INFO_FS_FEATURES,
153 OCFS2_INFO_JOURNAL_SIZE, 185 OCFS2_INFO_JOURNAL_SIZE,
186 OCFS2_INFO_FREEINODE,
187 OCFS2_INFO_FREEFRAG,
154 OCFS2_INFO_NUM_TYPES 188 OCFS2_INFO_NUM_TYPES
155}; 189};
156 190
@@ -171,4 +205,38 @@ enum ocfs2_info_type {
171 205
172#define OCFS2_IOC_INFO _IOR('o', 5, struct ocfs2_info) 206#define OCFS2_IOC_INFO _IOR('o', 5, struct ocfs2_info)
173 207
208struct ocfs2_move_extents {
209/* All values are in bytes */
210 /* in */
211 __u64 me_start; /* Virtual start in the file to move */
212 __u64 me_len; /* Length of the extents to be moved */
213 __u64 me_goal; /* Physical offset of the goal,
214 in blocks */
215 __u64 me_threshold; /* Maximum distance from goal or threshold
216 for auto defragmentation */
217 __u64 me_flags; /* Flags for the operation:
218 * - auto defragmentation.
219 * - refcount,xattr cases.
220 */
221 /* out */
222 __u64 me_moved_len; /* Moved/defragged length */
223 __u64 me_new_offset; /* Resulting physical location */
224 __u32 me_reserved[2]; /* Reserved for future use */
225};
226
227#define OCFS2_MOVE_EXT_FL_AUTO_DEFRAG (0x00000001) /* Kernel manages to
228 claim new clusters
229 as the goal place
230 for extents moving */
231#define OCFS2_MOVE_EXT_FL_PART_DEFRAG (0x00000002) /* Allow partial extent
232 moving; this makes
233 movement less likely
234 to fail, but may leave
235 the fs even more fragmented */
236#define OCFS2_MOVE_EXT_FL_COMPLETE (0x00000004) /* Move or defragmentation
237 completed entirely.
238 */
239
240#define OCFS2_IOC_MOVE_EXT _IOW('o', 6, struct ocfs2_move_extents)
241
174#endif /* OCFS2_IOCTL_H */ 242#endif /* OCFS2_IOCTL_H */
diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c
index 3c7606cff1ab..ebfd3825f12a 100644
--- a/fs/ocfs2/refcounttree.c
+++ b/fs/ocfs2/refcounttree.c
@@ -66,7 +66,7 @@ struct ocfs2_cow_context {
66 u32 *num_clusters, 66 u32 *num_clusters,
67 unsigned int *extent_flags); 67 unsigned int *extent_flags);
68 int (*cow_duplicate_clusters)(handle_t *handle, 68 int (*cow_duplicate_clusters)(handle_t *handle,
69 struct ocfs2_cow_context *context, 69 struct file *file,
70 u32 cpos, u32 old_cluster, 70 u32 cpos, u32 old_cluster,
71 u32 new_cluster, u32 new_len); 71 u32 new_cluster, u32 new_len);
72}; 72};
@@ -2921,20 +2921,21 @@ static int ocfs2_clear_cow_buffer(handle_t *handle, struct buffer_head *bh)
2921 return 0; 2921 return 0;
2922} 2922}
2923 2923
2924static int ocfs2_duplicate_clusters_by_page(handle_t *handle, 2924int ocfs2_duplicate_clusters_by_page(handle_t *handle,
2925 struct ocfs2_cow_context *context, 2925 struct file *file,
2926 u32 cpos, u32 old_cluster, 2926 u32 cpos, u32 old_cluster,
2927 u32 new_cluster, u32 new_len) 2927 u32 new_cluster, u32 new_len)
2928{ 2928{
2929 int ret = 0, partial; 2929 int ret = 0, partial;
2930 struct ocfs2_caching_info *ci = context->data_et.et_ci; 2930 struct inode *inode = file->f_path.dentry->d_inode;
2931 struct ocfs2_caching_info *ci = INODE_CACHE(inode);
2931 struct super_block *sb = ocfs2_metadata_cache_get_super(ci); 2932 struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
2932 u64 new_block = ocfs2_clusters_to_blocks(sb, new_cluster); 2933 u64 new_block = ocfs2_clusters_to_blocks(sb, new_cluster);
2933 struct page *page; 2934 struct page *page;
2934 pgoff_t page_index; 2935 pgoff_t page_index;
2935 unsigned int from, to, readahead_pages; 2936 unsigned int from, to, readahead_pages;
2936 loff_t offset, end, map_end; 2937 loff_t offset, end, map_end;
2937 struct address_space *mapping = context->inode->i_mapping; 2938 struct address_space *mapping = inode->i_mapping;
2938 2939
2939 trace_ocfs2_duplicate_clusters_by_page(cpos, old_cluster, 2940 trace_ocfs2_duplicate_clusters_by_page(cpos, old_cluster,
2940 new_cluster, new_len); 2941 new_cluster, new_len);
@@ -2948,8 +2949,8 @@ static int ocfs2_duplicate_clusters_by_page(handle_t *handle,
2948 * We only duplicate pages until we reach the page contains i_size - 1. 2949 * We only duplicate pages until we reach the page contains i_size - 1.
2949 * So trim 'end' to i_size. 2950 * So trim 'end' to i_size.
2950 */ 2951 */
2951 if (end > i_size_read(context->inode)) 2952 if (end > i_size_read(inode))
2952 end = i_size_read(context->inode); 2953 end = i_size_read(inode);
2953 2954
2954 while (offset < end) { 2955 while (offset < end) {
2955 page_index = offset >> PAGE_CACHE_SHIFT; 2956 page_index = offset >> PAGE_CACHE_SHIFT;
@@ -2972,10 +2973,9 @@ static int ocfs2_duplicate_clusters_by_page(handle_t *handle,
2972 if (PAGE_CACHE_SIZE <= OCFS2_SB(sb)->s_clustersize) 2973 if (PAGE_CACHE_SIZE <= OCFS2_SB(sb)->s_clustersize)
2973 BUG_ON(PageDirty(page)); 2974 BUG_ON(PageDirty(page));
2974 2975
2975 if (PageReadahead(page) && context->file) { 2976 if (PageReadahead(page)) {
2976 page_cache_async_readahead(mapping, 2977 page_cache_async_readahead(mapping,
2977 &context->file->f_ra, 2978 &file->f_ra, file,
2978 context->file,
2979 page, page_index, 2979 page, page_index,
2980 readahead_pages); 2980 readahead_pages);
2981 } 2981 }
@@ -2999,8 +2999,7 @@ static int ocfs2_duplicate_clusters_by_page(handle_t *handle,
2999 } 2999 }
3000 } 3000 }
3001 3001
3002 ocfs2_map_and_dirty_page(context->inode, 3002 ocfs2_map_and_dirty_page(inode, handle, from, to,
3003 handle, from, to,
3004 page, 0, &new_block); 3003 page, 0, &new_block);
3005 mark_page_accessed(page); 3004 mark_page_accessed(page);
3006unlock: 3005unlock:
@@ -3015,14 +3014,15 @@ unlock:
3015 return ret; 3014 return ret;
3016} 3015}
3017 3016
3018static int ocfs2_duplicate_clusters_by_jbd(handle_t *handle, 3017int ocfs2_duplicate_clusters_by_jbd(handle_t *handle,
3019 struct ocfs2_cow_context *context, 3018 struct file *file,
3020 u32 cpos, u32 old_cluster, 3019 u32 cpos, u32 old_cluster,
3021 u32 new_cluster, u32 new_len) 3020 u32 new_cluster, u32 new_len)
3022{ 3021{
3023 int ret = 0; 3022 int ret = 0;
3024 struct super_block *sb = context->inode->i_sb; 3023 struct inode *inode = file->f_path.dentry->d_inode;
3025 struct ocfs2_caching_info *ci = context->data_et.et_ci; 3024 struct super_block *sb = inode->i_sb;
3025 struct ocfs2_caching_info *ci = INODE_CACHE(inode);
3026 int i, blocks = ocfs2_clusters_to_blocks(sb, new_len); 3026 int i, blocks = ocfs2_clusters_to_blocks(sb, new_len);
3027 u64 old_block = ocfs2_clusters_to_blocks(sb, old_cluster); 3027 u64 old_block = ocfs2_clusters_to_blocks(sb, old_cluster);
3028 u64 new_block = ocfs2_clusters_to_blocks(sb, new_cluster); 3028 u64 new_block = ocfs2_clusters_to_blocks(sb, new_cluster);
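
ocfs2_duplicate_clusters_by_page() above converts the cluster range into page-cache terms: a byte offset and end derived from the cluster position, then one loop iteration per page with an in-page from/to window. A stand-alone model of that arithmetic (user-space C; the 8 KiB cluster and 4 KiB page sizes are invented for the example):

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1ULL << PAGE_SHIFT)

    int main(void)
    {
            unsigned long long clustersize_bits = 13;   /* 8 KiB clusters */
            unsigned long long cpos = 3, num_clusters = 1;
            unsigned long long offset = cpos << clustersize_bits;
            unsigned long long end = offset + (num_clusters << clustersize_bits);

            while (offset < end) {
                    unsigned long long page_index = offset >> PAGE_SHIFT;
                    unsigned long long page_start = page_index << PAGE_SHIFT;
                    unsigned long long map_end = page_start + PAGE_SIZE;
                    unsigned long long to = map_end < end ? map_end : end;

                    printf("page %llu: bytes %llu..%llu\n",
                           page_index, offset - page_start, to - page_start);
                    offset = to;
            }
            return 0;
    }
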
@@ -3145,8 +3145,8 @@ static int ocfs2_replace_clusters(handle_t *handle,
3145 3145
3146 /* If the old clusters are unwritten, no need to duplicate. */ 3146 /* If the old clusters are unwritten, no need to duplicate. */
3147 if (!(ext_flags & OCFS2_EXT_UNWRITTEN)) { 3147 if (!(ext_flags & OCFS2_EXT_UNWRITTEN)) {
3148 ret = context->cow_duplicate_clusters(handle, context, cpos, 3148 ret = context->cow_duplicate_clusters(handle, context->file,
3149 old, new, len); 3149 cpos, old, new, len);
3150 if (ret) { 3150 if (ret) {
3151 mlog_errno(ret); 3151 mlog_errno(ret);
3152 goto out; 3152 goto out;
@@ -3162,22 +3162,22 @@ out:
3162 return ret; 3162 return ret;
3163} 3163}
3164 3164
3165static int ocfs2_cow_sync_writeback(struct super_block *sb, 3165int ocfs2_cow_sync_writeback(struct super_block *sb,
3166 struct ocfs2_cow_context *context, 3166 struct inode *inode,
3167 u32 cpos, u32 num_clusters) 3167 u32 cpos, u32 num_clusters)
3168{ 3168{
3169 int ret = 0; 3169 int ret = 0;
3170 loff_t offset, end, map_end; 3170 loff_t offset, end, map_end;
3171 pgoff_t page_index; 3171 pgoff_t page_index;
3172 struct page *page; 3172 struct page *page;
3173 3173
3174 if (ocfs2_should_order_data(context->inode)) 3174 if (ocfs2_should_order_data(inode))
3175 return 0; 3175 return 0;
3176 3176
3177 offset = ((loff_t)cpos) << OCFS2_SB(sb)->s_clustersize_bits; 3177 offset = ((loff_t)cpos) << OCFS2_SB(sb)->s_clustersize_bits;
3178 end = offset + (num_clusters << OCFS2_SB(sb)->s_clustersize_bits); 3178 end = offset + (num_clusters << OCFS2_SB(sb)->s_clustersize_bits);
3179 3179
3180 ret = filemap_fdatawrite_range(context->inode->i_mapping, 3180 ret = filemap_fdatawrite_range(inode->i_mapping,
3181 offset, end - 1); 3181 offset, end - 1);
3182 if (ret < 0) { 3182 if (ret < 0) {
3183 mlog_errno(ret); 3183 mlog_errno(ret);
@@ -3190,7 +3190,7 @@ static int ocfs2_cow_sync_writeback(struct super_block *sb,
3190 if (map_end > end) 3190 if (map_end > end)
3191 map_end = end; 3191 map_end = end;
3192 3192
3193 page = find_or_create_page(context->inode->i_mapping, 3193 page = find_or_create_page(inode->i_mapping,
3194 page_index, GFP_NOFS); 3194 page_index, GFP_NOFS);
3195 BUG_ON(!page); 3195 BUG_ON(!page);
3196 3196
@@ -3349,7 +3349,7 @@ static int ocfs2_make_clusters_writable(struct super_block *sb,
3349 * in write-back mode. 3349 * in write-back mode.
3350 */ 3350 */
3351 if (context->get_clusters == ocfs2_di_get_clusters) { 3351 if (context->get_clusters == ocfs2_di_get_clusters) {
3352 ret = ocfs2_cow_sync_writeback(sb, context, cpos, 3352 ret = ocfs2_cow_sync_writeback(sb, context->inode, cpos,
3353 orig_num_clusters); 3353 orig_num_clusters);
3354 if (ret) 3354 if (ret)
3355 mlog_errno(ret); 3355 mlog_errno(ret);
diff --git a/fs/ocfs2/refcounttree.h b/fs/ocfs2/refcounttree.h
index c8ce46f7d8e3..7754608c83a4 100644
--- a/fs/ocfs2/refcounttree.h
+++ b/fs/ocfs2/refcounttree.h
@@ -84,6 +84,17 @@ int ocfs2_refcount_cow_xattr(struct inode *inode,
84 struct buffer_head *ref_root_bh, 84 struct buffer_head *ref_root_bh,
85 u32 cpos, u32 write_len, 85 u32 cpos, u32 write_len,
86 struct ocfs2_post_refcount *post); 86 struct ocfs2_post_refcount *post);
87int ocfs2_duplicate_clusters_by_page(handle_t *handle,
88 struct file *file,
89 u32 cpos, u32 old_cluster,
90 u32 new_cluster, u32 new_len);
91int ocfs2_duplicate_clusters_by_jbd(handle_t *handle,
92 struct file *file,
93 u32 cpos, u32 old_cluster,
94 u32 new_cluster, u32 new_len);
95int ocfs2_cow_sync_writeback(struct super_block *sb,
96 struct inode *inode,
97 u32 cpos, u32 num_clusters);
87int ocfs2_add_refcount_flag(struct inode *inode, 98int ocfs2_add_refcount_flag(struct inode *inode,
88 struct ocfs2_extent_tree *data_et, 99 struct ocfs2_extent_tree *data_et,
89 struct ocfs2_caching_info *ref_ci, 100 struct ocfs2_caching_info *ref_ci,
diff --git a/fs/ubifs/budget.c b/fs/ubifs/budget.c
index 8b3a7da531eb..315de66e52b2 100644
--- a/fs/ubifs/budget.c
+++ b/fs/ubifs/budget.c
@@ -106,7 +106,7 @@ static long long get_liability(struct ubifs_info *c)
106 long long liab; 106 long long liab;
107 107
108 spin_lock(&c->space_lock); 108 spin_lock(&c->space_lock);
109 liab = c->budg_idx_growth + c->budg_data_growth + c->budg_dd_growth; 109 liab = c->bi.idx_growth + c->bi.data_growth + c->bi.dd_growth;
110 spin_unlock(&c->space_lock); 110 spin_unlock(&c->space_lock);
111 return liab; 111 return liab;
112} 112}
@@ -180,7 +180,7 @@ int ubifs_calc_min_idx_lebs(struct ubifs_info *c)
180 int idx_lebs; 180 int idx_lebs;
181 long long idx_size; 181 long long idx_size;
182 182
183 idx_size = c->old_idx_sz + c->budg_idx_growth + c->budg_uncommitted_idx; 183 idx_size = c->bi.old_idx_sz + c->bi.idx_growth + c->bi.uncommitted_idx;
184 /* And make sure we have thrice the index size of space reserved */ 184 /* And make sure we have thrice the index size of space reserved */
185 idx_size += idx_size << 1; 185 idx_size += idx_size << 1;
186 /* 186 /*
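
The reservation above keeps three times the index size without a multiply: adding a value to itself shifted left by one is the same as tripling it. A one-line sanity check:

    #include <assert.h>

    int main(void)
    {
            long long idx_size = 12345;

            idx_size += idx_size << 1;      /* x + 2x == 3x */
            assert(idx_size == 3 * 12345LL);
            return 0;
    }
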
@@ -292,13 +292,13 @@ static int can_use_rp(struct ubifs_info *c)
292 * budgeted index space to the size of the current index, multiplies this by 3, 292 * budgeted index space to the size of the current index, multiplies this by 3,
293 * and makes sure this does not exceed the amount of free LEBs. 293 * and makes sure this does not exceed the amount of free LEBs.
294 * 294 *
295 * Notes about @c->min_idx_lebs and @c->lst.idx_lebs variables: 295 * Notes about @c->bi.min_idx_lebs and @c->lst.idx_lebs variables:
296 * o @c->lst.idx_lebs is the number of LEBs the index currently uses. It might 296 * o @c->lst.idx_lebs is the number of LEBs the index currently uses. It might
297 * be large, because UBIFS does not do any index consolidation as long as 297 * be large, because UBIFS does not do any index consolidation as long as
298 * there is free space. IOW, the index may take a lot of LEBs, but the LEBs 298 * there is free space. IOW, the index may take a lot of LEBs, but the LEBs
299 * will contain a lot of dirt. 299 * will contain a lot of dirt.
300 * o @c->min_idx_lebs is the number of LEBs the index presumably takes. IOW, 300 * o @c->bi.min_idx_lebs is the number of LEBs the index presumably takes. IOW,
301 * the index may be consolidated to take up to @c->min_idx_lebs LEBs. 301 * the index may be consolidated to take up to @c->bi.min_idx_lebs LEBs.
302 * 302 *
303 * This function returns zero in case of success, and %-ENOSPC in case of 303 * This function returns zero in case of success, and %-ENOSPC in case of
304 * failure. 304 * failure.
@@ -343,13 +343,13 @@ static int do_budget_space(struct ubifs_info *c)
343 c->lst.taken_empty_lebs; 343 c->lst.taken_empty_lebs;
344 if (unlikely(rsvd_idx_lebs > lebs)) { 344 if (unlikely(rsvd_idx_lebs > lebs)) {
345 dbg_budg("out of indexing space: min_idx_lebs %d (old %d), " 345 dbg_budg("out of indexing space: min_idx_lebs %d (old %d), "
346 "rsvd_idx_lebs %d", min_idx_lebs, c->min_idx_lebs, 346 "rsvd_idx_lebs %d", min_idx_lebs, c->bi.min_idx_lebs,
347 rsvd_idx_lebs); 347 rsvd_idx_lebs);
348 return -ENOSPC; 348 return -ENOSPC;
349 } 349 }
350 350
351 available = ubifs_calc_available(c, min_idx_lebs); 351 available = ubifs_calc_available(c, min_idx_lebs);
352 outstanding = c->budg_data_growth + c->budg_dd_growth; 352 outstanding = c->bi.data_growth + c->bi.dd_growth;
353 353
354 if (unlikely(available < outstanding)) { 354 if (unlikely(available < outstanding)) {
355 dbg_budg("out of data space: available %lld, outstanding %lld", 355 dbg_budg("out of data space: available %lld, outstanding %lld",
@@ -360,7 +360,7 @@ static int do_budget_space(struct ubifs_info *c)
360 if (available - outstanding <= c->rp_size && !can_use_rp(c)) 360 if (available - outstanding <= c->rp_size && !can_use_rp(c))
361 return -ENOSPC; 361 return -ENOSPC;
362 362
363 c->min_idx_lebs = min_idx_lebs; 363 c->bi.min_idx_lebs = min_idx_lebs;
364 return 0; 364 return 0;
365} 365}
366 366
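
Once min_idx_lebs is known, do_budget_space() reduces to two comparisons: the available space must cover every outstanding data budget, and only privileged writers may dip into the reserved pool. In outline, following the patched field names:

    available = ubifs_calc_available(c, min_idx_lebs);
    outstanding = c->bi.data_growth + c->bi.dd_growth;

    if (available < outstanding)
            return -ENOSPC;
    /* the last rp_size bytes are held back for privileged users */
    if (available - outstanding <= c->rp_size && !can_use_rp(c))
            return -ENOSPC;

    c->bi.min_idx_lebs = min_idx_lebs;
    return 0;
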
@@ -393,11 +393,11 @@ static int calc_data_growth(const struct ubifs_info *c,
393{ 393{
394 int data_growth; 394 int data_growth;
395 395
396 data_growth = req->new_ino ? c->inode_budget : 0; 396 data_growth = req->new_ino ? c->bi.inode_budget : 0;
397 if (req->new_page) 397 if (req->new_page)
398 data_growth += c->page_budget; 398 data_growth += c->bi.page_budget;
399 if (req->new_dent) 399 if (req->new_dent)
400 data_growth += c->dent_budget; 400 data_growth += c->bi.dent_budget;
401 data_growth += req->new_ino_d; 401 data_growth += req->new_ino_d;
402 return data_growth; 402 return data_growth;
403} 403}
@@ -413,12 +413,12 @@ static int calc_dd_growth(const struct ubifs_info *c,
413{ 413{
414 int dd_growth; 414 int dd_growth;
415 415
416 dd_growth = req->dirtied_page ? c->page_budget : 0; 416 dd_growth = req->dirtied_page ? c->bi.page_budget : 0;
417 417
418 if (req->dirtied_ino) 418 if (req->dirtied_ino)
419 dd_growth += c->inode_budget << (req->dirtied_ino - 1); 419 dd_growth += c->bi.inode_budget << (req->dirtied_ino - 1);
420 if (req->mod_dent) 420 if (req->mod_dent)
421 dd_growth += c->dent_budget; 421 dd_growth += c->bi.dent_budget;
422 dd_growth += req->dirtied_ino_d; 422 dd_growth += req->dirtied_ino_d;
423 return dd_growth; 423 return dd_growth;
424} 424}
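
Note the shift in calc_dd_growth() above: each additional dirtied inode doubles the inode-budget term rather than adding to it linearly, so dirtied_ino values of 1, 2 and 3 cost one, two and four inode budgets. A small model (the budget value is invented for illustration):

    #include <stdio.h>

    int main(void)
    {
            int inode_budget = 160; /* illustrative, not the real value */
            int dirtied_ino;

            for (dirtied_ino = 1; dirtied_ino <= 4; dirtied_ino++)
                    printf("dirtied_ino=%d -> +%d\n",
                           dirtied_ino, inode_budget << (dirtied_ino - 1));
            return 0;
    }
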
@@ -460,19 +460,19 @@ int ubifs_budget_space(struct ubifs_info *c, struct ubifs_budget_req *req)
460 460
461again: 461again:
462 spin_lock(&c->space_lock); 462 spin_lock(&c->space_lock);
463 ubifs_assert(c->budg_idx_growth >= 0); 463 ubifs_assert(c->bi.idx_growth >= 0);
464 ubifs_assert(c->budg_data_growth >= 0); 464 ubifs_assert(c->bi.data_growth >= 0);
465 ubifs_assert(c->budg_dd_growth >= 0); 465 ubifs_assert(c->bi.dd_growth >= 0);
466 466
467 if (unlikely(c->nospace) && (c->nospace_rp || !can_use_rp(c))) { 467 if (unlikely(c->bi.nospace) && (c->bi.nospace_rp || !can_use_rp(c))) {
468 dbg_budg("no space"); 468 dbg_budg("no space");
469 spin_unlock(&c->space_lock); 469 spin_unlock(&c->space_lock);
470 return -ENOSPC; 470 return -ENOSPC;
471 } 471 }
472 472
473 c->budg_idx_growth += idx_growth; 473 c->bi.idx_growth += idx_growth;
474 c->budg_data_growth += data_growth; 474 c->bi.data_growth += data_growth;
475 c->budg_dd_growth += dd_growth; 475 c->bi.dd_growth += dd_growth;
476 476
477 err = do_budget_space(c); 477 err = do_budget_space(c);
478 if (likely(!err)) { 478 if (likely(!err)) {
@@ -484,9 +484,9 @@ again:
484 } 484 }
485 485
486 /* Restore the old values */ 486 /* Restore the old values */
487 c->budg_idx_growth -= idx_growth; 487 c->bi.idx_growth -= idx_growth;
488 c->budg_data_growth -= data_growth; 488 c->bi.data_growth -= data_growth;
489 c->budg_dd_growth -= dd_growth; 489 c->bi.dd_growth -= dd_growth;
490 spin_unlock(&c->space_lock); 490 spin_unlock(&c->space_lock);
491 491
492 if (req->fast) { 492 if (req->fast) {
@@ -506,9 +506,9 @@ again:
506 goto again; 506 goto again;
507 } 507 }
508 dbg_budg("FS is full, -ENOSPC"); 508 dbg_budg("FS is full, -ENOSPC");
509 c->nospace = 1; 509 c->bi.nospace = 1;
510 if (can_use_rp(c) || c->rp_size == 0) 510 if (can_use_rp(c) || c->rp_size == 0)
511 c->nospace_rp = 1; 511 c->bi.nospace_rp = 1;
512 smp_wmb(); 512 smp_wmb();
513 } else 513 } else
514 ubifs_err("cannot budget space, error %d", err); 514 ubifs_err("cannot budget space, error %d", err);
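
ubifs_budget_space() above reserves optimistically: the three growth counters are bumped under space_lock, do_budget_space() is run, and on failure the increments are rolled back before the function tries to free space and retries. Stripped of the retry logic, the core shape is:

    spin_lock(&c->space_lock);
    c->bi.idx_growth  += idx_growth;
    c->bi.data_growth += data_growth;
    c->bi.dd_growth   += dd_growth;

    err = do_budget_space(c);
    if (err) {
            /* undo, so the counters reflect only granted requests */
            c->bi.idx_growth  -= idx_growth;
            c->bi.data_growth -= data_growth;
            c->bi.dd_growth   -= dd_growth;
    }
    spin_unlock(&c->space_lock);
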
@@ -523,8 +523,8 @@ again:
523 * This function releases the space budgeted by 'ubifs_budget_space()'. Note, 523 * This function releases the space budgeted by 'ubifs_budget_space()'. Note,
524 * since the index changes (which were budgeted for in @req->idx_growth) will 524 * since the index changes (which were budgeted for in @req->idx_growth) will
525 * only be written to the media on commit, this function moves the index budget 525 * only be written to the media on commit, this function moves the index budget
526 * from @c->budg_idx_growth to @c->budg_uncommitted_idx. The latter will be 526 * from @c->bi.idx_growth to @c->bi.uncommitted_idx. The latter will be zeroed
527 * zeroed by the commit operation. 527 * by the commit operation.
528 */ 528 */
529void ubifs_release_budget(struct ubifs_info *c, struct ubifs_budget_req *req) 529void ubifs_release_budget(struct ubifs_info *c, struct ubifs_budget_req *req)
530{ 530{
@@ -553,23 +553,23 @@ void ubifs_release_budget(struct ubifs_info *c, struct ubifs_budget_req *req)
553 if (!req->data_growth && !req->dd_growth) 553 if (!req->data_growth && !req->dd_growth)
554 return; 554 return;
555 555
556 c->nospace = c->nospace_rp = 0; 556 c->bi.nospace = c->bi.nospace_rp = 0;
557 smp_wmb(); 557 smp_wmb();
558 558
559 spin_lock(&c->space_lock); 559 spin_lock(&c->space_lock);
560 c->budg_idx_growth -= req->idx_growth; 560 c->bi.idx_growth -= req->idx_growth;
561 c->budg_uncommitted_idx += req->idx_growth; 561 c->bi.uncommitted_idx += req->idx_growth;
562 c->budg_data_growth -= req->data_growth; 562 c->bi.data_growth -= req->data_growth;
563 c->budg_dd_growth -= req->dd_growth; 563 c->bi.dd_growth -= req->dd_growth;
564 c->min_idx_lebs = ubifs_calc_min_idx_lebs(c); 564 c->bi.min_idx_lebs = ubifs_calc_min_idx_lebs(c);
565 565
566 ubifs_assert(c->budg_idx_growth >= 0); 566 ubifs_assert(c->bi.idx_growth >= 0);
567 ubifs_assert(c->budg_data_growth >= 0); 567 ubifs_assert(c->bi.data_growth >= 0);
568 ubifs_assert(c->budg_dd_growth >= 0); 568 ubifs_assert(c->bi.dd_growth >= 0);
569 ubifs_assert(c->min_idx_lebs < c->main_lebs); 569 ubifs_assert(c->bi.min_idx_lebs < c->main_lebs);
570 ubifs_assert(!(c->budg_idx_growth & 7)); 570 ubifs_assert(!(c->bi.idx_growth & 7));
571 ubifs_assert(!(c->budg_data_growth & 7)); 571 ubifs_assert(!(c->bi.data_growth & 7));
572 ubifs_assert(!(c->budg_dd_growth & 7)); 572 ubifs_assert(!(c->bi.dd_growth & 7));
573 spin_unlock(&c->space_lock); 573 spin_unlock(&c->space_lock);
574} 574}
575 575
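
The release path above does not hand the index budget straight back: budgeted index changes only reach the flash at commit time, so the amount migrates from bi.idx_growth to bi.uncommitted_idx and the commit zeroes the latter. The conserved quantity, as a sketch (assert() stands in for ubifs_assert()):

    /* the sum is invariant across a release, until the next commit */
    long long sum = c->bi.idx_growth + c->bi.uncommitted_idx;

    c->bi.idx_growth      -= req->idx_growth;
    c->bi.uncommitted_idx += req->idx_growth;

    assert(sum == c->bi.idx_growth + c->bi.uncommitted_idx);
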
@@ -586,13 +586,13 @@ void ubifs_convert_page_budget(struct ubifs_info *c)
586{ 586{
587 spin_lock(&c->space_lock); 587 spin_lock(&c->space_lock);
588 /* Release the index growth reservation */ 588 /* Release the index growth reservation */
589 c->budg_idx_growth -= c->max_idx_node_sz << UBIFS_BLOCKS_PER_PAGE_SHIFT; 589 c->bi.idx_growth -= c->max_idx_node_sz << UBIFS_BLOCKS_PER_PAGE_SHIFT;
590 /* Release the data growth reservation */ 590 /* Release the data growth reservation */
591 c->budg_data_growth -= c->page_budget; 591 c->bi.data_growth -= c->bi.page_budget;
592 /* Increase the dirty data growth reservation instead */ 592 /* Increase the dirty data growth reservation instead */
593 c->budg_dd_growth += c->page_budget; 593 c->bi.dd_growth += c->bi.page_budget;
594 /* And re-calculate the indexing space reservation */ 594 /* And re-calculate the indexing space reservation */
595 c->min_idx_lebs = ubifs_calc_min_idx_lebs(c); 595 c->bi.min_idx_lebs = ubifs_calc_min_idx_lebs(c);
596 spin_unlock(&c->space_lock); 596 spin_unlock(&c->space_lock);
597} 597}
598 598
@@ -612,7 +612,7 @@ void ubifs_release_dirty_inode_budget(struct ubifs_info *c,
612 612
613 memset(&req, 0, sizeof(struct ubifs_budget_req)); 613 memset(&req, 0, sizeof(struct ubifs_budget_req));
614 /* The "no space" flags will be cleared because dd_growth is > 0 */ 614 /* The "no space" flags will be cleared because dd_growth is > 0 */
615 req.dd_growth = c->inode_budget + ALIGN(ui->data_len, 8); 615 req.dd_growth = c->bi.inode_budget + ALIGN(ui->data_len, 8);
616 ubifs_release_budget(c, &req); 616 ubifs_release_budget(c, &req);
617} 617}
618 618
@@ -682,9 +682,9 @@ long long ubifs_get_free_space_nolock(struct ubifs_info *c)
682 int rsvd_idx_lebs, lebs; 682 int rsvd_idx_lebs, lebs;
683 long long available, outstanding, free; 683 long long available, outstanding, free;
684 684
685 ubifs_assert(c->min_idx_lebs == ubifs_calc_min_idx_lebs(c)); 685 ubifs_assert(c->bi.min_idx_lebs == ubifs_calc_min_idx_lebs(c));
686 outstanding = c->budg_data_growth + c->budg_dd_growth; 686 outstanding = c->bi.data_growth + c->bi.dd_growth;
687 available = ubifs_calc_available(c, c->min_idx_lebs); 687 available = ubifs_calc_available(c, c->bi.min_idx_lebs);
688 688
689 /* 689 /*
690 * When reporting free space to user-space, UBIFS guarantees that it is 690 * When reporting free space to user-space, UBIFS guarantees that it is
@@ -697,8 +697,8 @@ long long ubifs_get_free_space_nolock(struct ubifs_info *c)
697 * Note, the calculations below are similar to what we have in 697 * Note, the calculations below are similar to what we have in
698 * 'do_budget_space()', so refer there for comments. 698 * 'do_budget_space()', so refer there for comments.
699 */ 699 */
700 if (c->min_idx_lebs > c->lst.idx_lebs) 700 if (c->bi.min_idx_lebs > c->lst.idx_lebs)
701 rsvd_idx_lebs = c->min_idx_lebs - c->lst.idx_lebs; 701 rsvd_idx_lebs = c->bi.min_idx_lebs - c->lst.idx_lebs;
702 else 702 else
703 rsvd_idx_lebs = 0; 703 rsvd_idx_lebs = 0;
704 lebs = c->lst.empty_lebs + c->freeable_cnt + c->idx_gc_cnt - 704 lebs = c->lst.empty_lebs + c->freeable_cnt + c->idx_gc_cnt -
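
The rsvd_idx_lebs computation above recurs in the same form in do_budget_space() and twice in fs/ubifs/find.c: the reservation is the shortfall between the predicted index footprint and the LEBs the index already occupies, clamped at zero. As a hypothetical helper (the kernel code open-codes the clamp each time):

    static int rsvd_idx_lebs(int min_idx_lebs, int idx_lebs)
    {
            /* reserve only the part of the prediction not yet occupied */
            return min_idx_lebs > idx_lebs ? min_idx_lebs - idx_lebs : 0;
    }
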
diff --git a/fs/ubifs/commit.c b/fs/ubifs/commit.c
index 1bd01ded7123..87cd0ead8633 100644
--- a/fs/ubifs/commit.c
+++ b/fs/ubifs/commit.c
@@ -182,7 +182,7 @@ static int do_commit(struct ubifs_info *c)
182 c->mst_node->root_len = cpu_to_le32(zroot.len); 182 c->mst_node->root_len = cpu_to_le32(zroot.len);
183 c->mst_node->ihead_lnum = cpu_to_le32(c->ihead_lnum); 183 c->mst_node->ihead_lnum = cpu_to_le32(c->ihead_lnum);
184 c->mst_node->ihead_offs = cpu_to_le32(c->ihead_offs); 184 c->mst_node->ihead_offs = cpu_to_le32(c->ihead_offs);
185 c->mst_node->index_size = cpu_to_le64(c->old_idx_sz); 185 c->mst_node->index_size = cpu_to_le64(c->bi.old_idx_sz);
186 c->mst_node->lpt_lnum = cpu_to_le32(c->lpt_lnum); 186 c->mst_node->lpt_lnum = cpu_to_le32(c->lpt_lnum);
187 c->mst_node->lpt_offs = cpu_to_le32(c->lpt_offs); 187 c->mst_node->lpt_offs = cpu_to_le32(c->lpt_offs);
188 c->mst_node->nhead_lnum = cpu_to_le32(c->nhead_lnum); 188 c->mst_node->nhead_lnum = cpu_to_le32(c->nhead_lnum);
diff --git a/fs/ubifs/debug.c b/fs/ubifs/debug.c
index 004d3745dc45..0bb2bcef0de9 100644
--- a/fs/ubifs/debug.c
+++ b/fs/ubifs/debug.c
@@ -34,7 +34,6 @@
34#include <linux/moduleparam.h> 34#include <linux/moduleparam.h>
35#include <linux/debugfs.h> 35#include <linux/debugfs.h>
36#include <linux/math64.h> 36#include <linux/math64.h>
37#include <linux/slab.h>
38 37
39#ifdef CONFIG_UBIFS_FS_DEBUG 38#ifdef CONFIG_UBIFS_FS_DEBUG
40 39
@@ -43,15 +42,12 @@ DEFINE_SPINLOCK(dbg_lock);
43static char dbg_key_buf0[128]; 42static char dbg_key_buf0[128];
44static char dbg_key_buf1[128]; 43static char dbg_key_buf1[128];
45 44
46unsigned int ubifs_msg_flags;
47unsigned int ubifs_chk_flags; 45unsigned int ubifs_chk_flags;
48unsigned int ubifs_tst_flags; 46unsigned int ubifs_tst_flags;
49 47
50module_param_named(debug_msgs, ubifs_msg_flags, uint, S_IRUGO | S_IWUSR);
51module_param_named(debug_chks, ubifs_chk_flags, uint, S_IRUGO | S_IWUSR); 48module_param_named(debug_chks, ubifs_chk_flags, uint, S_IRUGO | S_IWUSR);
52module_param_named(debug_tsts, ubifs_tst_flags, uint, S_IRUGO | S_IWUSR); 49module_param_named(debug_tsts, ubifs_tst_flags, uint, S_IRUGO | S_IWUSR);
53 50
54MODULE_PARM_DESC(debug_msgs, "Debug message type flags");
55MODULE_PARM_DESC(debug_chks, "Debug check flags"); 51MODULE_PARM_DESC(debug_chks, "Debug check flags");
56MODULE_PARM_DESC(debug_tsts, "Debug special test flags"); 52MODULE_PARM_DESC(debug_tsts, "Debug special test flags");
57 53
@@ -317,6 +313,8 @@ void dbg_dump_node(const struct ubifs_info *c, const void *node)
317 printk(KERN_DEBUG "\tflags %#x\n", sup_flags); 313 printk(KERN_DEBUG "\tflags %#x\n", sup_flags);
318 printk(KERN_DEBUG "\t big_lpt %u\n", 314 printk(KERN_DEBUG "\t big_lpt %u\n",
319 !!(sup_flags & UBIFS_FLG_BIGLPT)); 315 !!(sup_flags & UBIFS_FLG_BIGLPT));
316 printk(KERN_DEBUG "\t space_fixup %u\n",
317 !!(sup_flags & UBIFS_FLG_SPACE_FIXUP));
320 printk(KERN_DEBUG "\tmin_io_size %u\n", 318 printk(KERN_DEBUG "\tmin_io_size %u\n",
321 le32_to_cpu(sup->min_io_size)); 319 le32_to_cpu(sup->min_io_size));
322 printk(KERN_DEBUG "\tleb_size %u\n", 320 printk(KERN_DEBUG "\tleb_size %u\n",
@@ -602,7 +600,7 @@ void dbg_dump_lstats(const struct ubifs_lp_stats *lst)
602 spin_unlock(&dbg_lock); 600 spin_unlock(&dbg_lock);
603} 601}
604 602
605void dbg_dump_budg(struct ubifs_info *c) 603void dbg_dump_budg(struct ubifs_info *c, const struct ubifs_budg_info *bi)
606{ 604{
607 int i; 605 int i;
608 struct rb_node *rb; 606 struct rb_node *rb;
@@ -610,26 +608,42 @@ void dbg_dump_budg(struct ubifs_info *c)
610 struct ubifs_gced_idx_leb *idx_gc; 608 struct ubifs_gced_idx_leb *idx_gc;
611 long long available, outstanding, free; 609 long long available, outstanding, free;
612 610
613 ubifs_assert(spin_is_locked(&c->space_lock)); 611 spin_lock(&c->space_lock);
614 spin_lock(&dbg_lock); 612 spin_lock(&dbg_lock);
615 printk(KERN_DEBUG "(pid %d) Budgeting info: budg_data_growth %lld, " 613 printk(KERN_DEBUG "(pid %d) Budgeting info: data budget sum %lld, "
616 "budg_dd_growth %lld, budg_idx_growth %lld\n", current->pid, 614 "total budget sum %lld\n", current->pid,
617 c->budg_data_growth, c->budg_dd_growth, c->budg_idx_growth); 615 bi->data_growth + bi->dd_growth,
618 printk(KERN_DEBUG "\tdata budget sum %lld, total budget sum %lld, " 616 bi->data_growth + bi->dd_growth + bi->idx_growth);
619 "freeable_cnt %d\n", c->budg_data_growth + c->budg_dd_growth, 617 printk(KERN_DEBUG "\tbudg_data_growth %lld, budg_dd_growth %lld, "
620 c->budg_data_growth + c->budg_dd_growth + c->budg_idx_growth, 618 "budg_idx_growth %lld\n", bi->data_growth, bi->dd_growth,
621 c->freeable_cnt); 619 bi->idx_growth);
622 printk(KERN_DEBUG "\tmin_idx_lebs %d, old_idx_sz %lld, " 620 printk(KERN_DEBUG "\tmin_idx_lebs %d, old_idx_sz %llu, "
623 "calc_idx_sz %lld, idx_gc_cnt %d\n", c->min_idx_lebs, 621 "uncommitted_idx %lld\n", bi->min_idx_lebs, bi->old_idx_sz,
624 c->old_idx_sz, c->calc_idx_sz, c->idx_gc_cnt); 622 bi->uncommitted_idx);
623 printk(KERN_DEBUG "\tpage_budget %d, inode_budget %d, dent_budget %d\n",
624 bi->page_budget, bi->inode_budget, bi->dent_budget);
625 printk(KERN_DEBUG "\tnospace %u, nospace_rp %u\n",
626 bi->nospace, bi->nospace_rp);
627 printk(KERN_DEBUG "\tdark_wm %d, dead_wm %d, max_idx_node_sz %d\n",
628 c->dark_wm, c->dead_wm, c->max_idx_node_sz);
629
630 if (bi != &c->bi)
631 /*
632 * If we are dumping saved budgeting data, do not print
633 * additional information which is about the current state, not
634 * the old one which corresponded to the saved budgeting data.
635 */
636 goto out_unlock;
637
638 printk(KERN_DEBUG "\tfreeable_cnt %d, calc_idx_sz %lld, idx_gc_cnt %d\n",
639 c->freeable_cnt, c->calc_idx_sz, c->idx_gc_cnt);
625 printk(KERN_DEBUG "\tdirty_pg_cnt %ld, dirty_zn_cnt %ld, " 640 printk(KERN_DEBUG "\tdirty_pg_cnt %ld, dirty_zn_cnt %ld, "
626 "clean_zn_cnt %ld\n", atomic_long_read(&c->dirty_pg_cnt), 641 "clean_zn_cnt %ld\n", atomic_long_read(&c->dirty_pg_cnt),
627 atomic_long_read(&c->dirty_zn_cnt), 642 atomic_long_read(&c->dirty_zn_cnt),
628 atomic_long_read(&c->clean_zn_cnt)); 643 atomic_long_read(&c->clean_zn_cnt));
629 printk(KERN_DEBUG "\tdark_wm %d, dead_wm %d, max_idx_node_sz %d\n",
630 c->dark_wm, c->dead_wm, c->max_idx_node_sz);
631 printk(KERN_DEBUG "\tgc_lnum %d, ihead_lnum %d\n", 644 printk(KERN_DEBUG "\tgc_lnum %d, ihead_lnum %d\n",
632 c->gc_lnum, c->ihead_lnum); 645 c->gc_lnum, c->ihead_lnum);
646
633 /* If we are in R/O mode, journal heads do not exist */ 647 /* If we are in R/O mode, journal heads do not exist */
634 if (c->jheads) 648 if (c->jheads)
635 for (i = 0; i < c->jhead_cnt; i++) 649 for (i = 0; i < c->jhead_cnt; i++)
@@ -648,13 +662,15 @@ void dbg_dump_budg(struct ubifs_info *c)
648 printk(KERN_DEBUG "\tcommit state %d\n", c->cmt_state); 662 printk(KERN_DEBUG "\tcommit state %d\n", c->cmt_state);
649 663
650 /* Print budgeting predictions */ 664 /* Print budgeting predictions */
651 available = ubifs_calc_available(c, c->min_idx_lebs); 665 available = ubifs_calc_available(c, c->bi.min_idx_lebs);
652 outstanding = c->budg_data_growth + c->budg_dd_growth; 666 outstanding = c->bi.data_growth + c->bi.dd_growth;
653 free = ubifs_get_free_space_nolock(c); 667 free = ubifs_get_free_space_nolock(c);
654 printk(KERN_DEBUG "Budgeting predictions:\n"); 668 printk(KERN_DEBUG "Budgeting predictions:\n");
655 printk(KERN_DEBUG "\tavailable: %lld, outstanding %lld, free %lld\n", 669 printk(KERN_DEBUG "\tavailable: %lld, outstanding %lld, free %lld\n",
656 available, outstanding, free); 670 available, outstanding, free);
671out_unlock:
657 spin_unlock(&dbg_lock); 672 spin_unlock(&dbg_lock);
673 spin_unlock(&c->space_lock);
658} 674}
659 675
660void dbg_dump_lprop(const struct ubifs_info *c, const struct ubifs_lprops *lp) 676void dbg_dump_lprop(const struct ubifs_info *c, const struct ubifs_lprops *lp)
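
Passing the budgeting info explicitly lets dbg_dump_budg() print either live state or a snapshot; when bi is not &c->bi the function stops before the fields that only describe the current state. Both call styles appear later in this patch:

    dbg_dump_budg(c, &c->bi);        /* current budgeting info */
    dbg_dump_budg(c, &d->saved_bi);  /* snapshot from dbg_save_space_info() */
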
@@ -729,7 +745,13 @@ void dbg_dump_lprop(const struct ubifs_info *c, const struct ubifs_lprops *lp)
729 if (bud->lnum == lp->lnum) { 745 if (bud->lnum == lp->lnum) {
730 int head = 0; 746 int head = 0;
731 for (i = 0; i < c->jhead_cnt; i++) { 747 for (i = 0; i < c->jhead_cnt; i++) {
732 if (lp->lnum == c->jheads[i].wbuf.lnum) { 748 /*
749 * Note, if we are in R/O mode or in the middle
750 * of mounting/re-mounting, the write-buffers do
751 * not exist.
752 */
753 if (c->jheads &&
754 lp->lnum == c->jheads[i].wbuf.lnum) {
733 printk(KERN_CONT ", jhead %s", 755 printk(KERN_CONT ", jhead %s",
734 dbg_jhead(i)); 756 dbg_jhead(i));
735 head = 1; 757 head = 1;
@@ -976,6 +998,8 @@ void dbg_save_space_info(struct ubifs_info *c)
976 998
977 spin_lock(&c->space_lock); 999 spin_lock(&c->space_lock);
978 memcpy(&d->saved_lst, &c->lst, sizeof(struct ubifs_lp_stats)); 1000 memcpy(&d->saved_lst, &c->lst, sizeof(struct ubifs_lp_stats));
1001 memcpy(&d->saved_bi, &c->bi, sizeof(struct ubifs_budg_info));
1002 d->saved_idx_gc_cnt = c->idx_gc_cnt;
979 1003
980 /* 1004 /*
981 * We use a dirty hack here and zero out @c->freeable_cnt, because it 1005 * We use a dirty hack here and zero out @c->freeable_cnt, because it
@@ -1042,14 +1066,14 @@ int dbg_check_space_info(struct ubifs_info *c)
1042out: 1066out:
1043 ubifs_msg("saved lprops statistics dump"); 1067 ubifs_msg("saved lprops statistics dump");
1044 dbg_dump_lstats(&d->saved_lst); 1068 dbg_dump_lstats(&d->saved_lst);
1045 ubifs_get_lp_stats(c, &lst); 1069 ubifs_msg("saved budgeting info dump");
1046 1070 dbg_dump_budg(c, &d->saved_bi);
1071 ubifs_msg("saved idx_gc_cnt %d", d->saved_idx_gc_cnt);
1047 ubifs_msg("current lprops statistics dump"); 1072 ubifs_msg("current lprops statistics dump");
1073 ubifs_get_lp_stats(c, &lst);
1048 dbg_dump_lstats(&lst); 1074 dbg_dump_lstats(&lst);
1049 1075 ubifs_msg("current budgeting info dump");
1050 spin_lock(&c->space_lock); 1076 dbg_dump_budg(c, &c->bi);
1051 dbg_dump_budg(c);
1052 spin_unlock(&c->space_lock);
1053 dump_stack(); 1077 dump_stack();
1054 return -EINVAL; 1078 return -EINVAL;
1055} 1079}
@@ -1793,6 +1817,8 @@ static struct fsck_inode *add_inode(struct ubifs_info *c,
1793 struct rb_node **p, *parent = NULL; 1817 struct rb_node **p, *parent = NULL;
1794 struct fsck_inode *fscki; 1818 struct fsck_inode *fscki;
1795 ino_t inum = key_inum_flash(c, &ino->key); 1819 ino_t inum = key_inum_flash(c, &ino->key);
1820 struct inode *inode;
1821 struct ubifs_inode *ui;
1796 1822
1797 p = &fsckd->inodes.rb_node; 1823 p = &fsckd->inodes.rb_node;
1798 while (*p) { 1824 while (*p) {
@@ -1816,19 +1842,46 @@ static struct fsck_inode *add_inode(struct ubifs_info *c,
1816 if (!fscki) 1842 if (!fscki)
1817 return ERR_PTR(-ENOMEM); 1843 return ERR_PTR(-ENOMEM);
1818 1844
1845 inode = ilookup(c->vfs_sb, inum);
1846
1819 fscki->inum = inum; 1847 fscki->inum = inum;
1820 fscki->nlink = le32_to_cpu(ino->nlink); 1848 /*
1821 fscki->size = le64_to_cpu(ino->size); 1849 * If the inode is present in the VFS inode cache, use it instead of
1822 fscki->xattr_cnt = le32_to_cpu(ino->xattr_cnt); 1850 * the on-flash inode which might be out-of-date. E.g., the size might
1823 fscki->xattr_sz = le32_to_cpu(ino->xattr_size); 1851 * be out-of-date. If we do not do this, the following may happen, for
1824 fscki->xattr_nms = le32_to_cpu(ino->xattr_names); 1852 * example:
1825 fscki->mode = le32_to_cpu(ino->mode); 1853 * 1. A power cut happens
1854 * 2. We mount the file-system R/O, the replay process fixes up the
1855 * inode size in the VFS cache, but not on-flash.
1856 * 3. 'check_leaf()' fails because it hits a data node beyond inode
1857 * size.
1858 */
1859 if (!inode) {
1860 fscki->nlink = le32_to_cpu(ino->nlink);
1861 fscki->size = le64_to_cpu(ino->size);
1862 fscki->xattr_cnt = le32_to_cpu(ino->xattr_cnt);
1863 fscki->xattr_sz = le32_to_cpu(ino->xattr_size);
1864 fscki->xattr_nms = le32_to_cpu(ino->xattr_names);
1865 fscki->mode = le32_to_cpu(ino->mode);
1866 } else {
1867 ui = ubifs_inode(inode);
1868 fscki->nlink = inode->i_nlink;
1869 fscki->size = inode->i_size;
1870 fscki->xattr_cnt = ui->xattr_cnt;
1871 fscki->xattr_sz = ui->xattr_size;
1872 fscki->xattr_nms = ui->xattr_names;
1873 fscki->mode = inode->i_mode;
1874 iput(inode);
1875 }
1876
1826 if (S_ISDIR(fscki->mode)) { 1877 if (S_ISDIR(fscki->mode)) {
1827 fscki->calc_sz = UBIFS_INO_NODE_SZ; 1878 fscki->calc_sz = UBIFS_INO_NODE_SZ;
1828 fscki->calc_cnt = 2; 1879 fscki->calc_cnt = 2;
1829 } 1880 }
1881
1830 rb_link_node(&fscki->rb, parent, p); 1882 rb_link_node(&fscki->rb, parent, p);
1831 rb_insert_color(&fscki->rb, &fsckd->inodes); 1883 rb_insert_color(&fscki->rb, &fsckd->inodes);
1884
1832 return fscki; 1885 return fscki;
1833} 1886}
1834 1887
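
The fix above prefers the cached VFS inode because replay may have corrected fields such as i_size in memory only. The reference counting is the part to get right: ilookup() returns a referenced inode on a cache hit, so the cached branch must end with iput(). The bare pattern:

    struct inode *inode = ilookup(c->vfs_sb, inum);

    if (inode) {
            /* use the up-to-date in-memory fields, e.g. inode->i_size */
            iput(inode);    /* drop the reference ilookup() took */
    } else {
            /* fall back to the (possibly stale) on-flash inode node */
    }
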
@@ -2421,7 +2474,8 @@ int dbg_check_nondata_nodes_order(struct ubifs_info *c, struct list_head *head)
2421 hashb = key_block(c, &sb->key); 2474 hashb = key_block(c, &sb->key);
2422 2475
2423 if (hasha > hashb) { 2476 if (hasha > hashb) {
2424 ubifs_err("larger hash %u goes before %u", hasha, hashb); 2477 ubifs_err("larger hash %u goes before %u",
2478 hasha, hashb);
2425 goto error_dump; 2479 goto error_dump;
2426 } 2480 }
2427 } 2481 }
@@ -2437,14 +2491,12 @@ error_dump:
2437 return 0; 2491 return 0;
2438} 2492}
2439 2493
2440static int invocation_cnt;
2441
2442int dbg_force_in_the_gaps(void) 2494int dbg_force_in_the_gaps(void)
2443{ 2495{
2444 if (!dbg_force_in_the_gaps_enabled) 2496 if (!(ubifs_chk_flags & UBIFS_CHK_GEN))
2445 return 0; 2497 return 0;
2446 /* Force in-the-gaps every 8th commit */ 2498
2447 return !((invocation_cnt++) & 0x7); 2499 return !(random32() & 7);
2448} 2500}
2449 2501
2450/* Failure mode for recovery testing */ 2502/* Failure mode for recovery testing */
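
The trigger above goes from deterministic to probabilistic: random32() & 7 is zero for one low-bit pattern in eight, so the in-the-gaps method is now forced on roughly every eighth call at random rather than on a fixed cadence. A quick user-space check of the rate (rand() stands in for the kernel's random32()):

    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
            int i, hits = 0, n = 1000000;

            srand(1);
            for (i = 0; i < n; i++)
                    hits += !(rand() & 7);  /* true with probability 1/8 */
            printf("forced %d of %d calls (~%.3f)\n",
                   hits, n, (double)hits / n);
            return 0;
    }
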
@@ -2632,7 +2684,7 @@ int dbg_leb_read(struct ubi_volume_desc *desc, int lnum, char *buf, int offset,
2632 int len, int check) 2684 int len, int check)
2633{ 2685{
2634 if (in_failure_mode(desc)) 2686 if (in_failure_mode(desc))
2635 return -EIO; 2687 return -EROFS;
2636 return ubi_leb_read(desc, lnum, buf, offset, len, check); 2688 return ubi_leb_read(desc, lnum, buf, offset, len, check);
2637} 2689}
2638 2690
@@ -2642,7 +2694,7 @@ int dbg_leb_write(struct ubi_volume_desc *desc, int lnum, const void *buf,
2642 int err, failing; 2694 int err, failing;
2643 2695
2644 if (in_failure_mode(desc)) 2696 if (in_failure_mode(desc))
2645 return -EIO; 2697 return -EROFS;
2646 failing = do_fail(desc, lnum, 1); 2698 failing = do_fail(desc, lnum, 1);
2647 if (failing) 2699 if (failing)
2648 cut_data(buf, len); 2700 cut_data(buf, len);
@@ -2650,7 +2702,7 @@ int dbg_leb_write(struct ubi_volume_desc *desc, int lnum, const void *buf,
2650 if (err) 2702 if (err)
2651 return err; 2703 return err;
2652 if (failing) 2704 if (failing)
2653 return -EIO; 2705 return -EROFS;
2654 return 0; 2706 return 0;
2655} 2707}
2656 2708
@@ -2660,12 +2712,12 @@ int dbg_leb_change(struct ubi_volume_desc *desc, int lnum, const void *buf,
2660 int err; 2712 int err;
2661 2713
2662 if (do_fail(desc, lnum, 1)) 2714 if (do_fail(desc, lnum, 1))
2663 return -EIO; 2715 return -EROFS;
2664 err = ubi_leb_change(desc, lnum, buf, len, dtype); 2716 err = ubi_leb_change(desc, lnum, buf, len, dtype);
2665 if (err) 2717 if (err)
2666 return err; 2718 return err;
2667 if (do_fail(desc, lnum, 1)) 2719 if (do_fail(desc, lnum, 1))
2668 return -EIO; 2720 return -EROFS;
2669 return 0; 2721 return 0;
2670} 2722}
2671 2723
@@ -2674,12 +2726,12 @@ int dbg_leb_erase(struct ubi_volume_desc *desc, int lnum)
2674 int err; 2726 int err;
2675 2727
2676 if (do_fail(desc, lnum, 0)) 2728 if (do_fail(desc, lnum, 0))
2677 return -EIO; 2729 return -EROFS;
2678 err = ubi_leb_erase(desc, lnum); 2730 err = ubi_leb_erase(desc, lnum);
2679 if (err) 2731 if (err)
2680 return err; 2732 return err;
2681 if (do_fail(desc, lnum, 0)) 2733 if (do_fail(desc, lnum, 0))
2682 return -EIO; 2734 return -EROFS;
2683 return 0; 2735 return 0;
2684} 2736}
2685 2737
@@ -2688,19 +2740,19 @@ int dbg_leb_unmap(struct ubi_volume_desc *desc, int lnum)
2688 int err; 2740 int err;
2689 2741
2690 if (do_fail(desc, lnum, 0)) 2742 if (do_fail(desc, lnum, 0))
2691 return -EIO; 2743 return -EROFS;
2692 err = ubi_leb_unmap(desc, lnum); 2744 err = ubi_leb_unmap(desc, lnum);
2693 if (err) 2745 if (err)
2694 return err; 2746 return err;
2695 if (do_fail(desc, lnum, 0)) 2747 if (do_fail(desc, lnum, 0))
2696 return -EIO; 2748 return -EROFS;
2697 return 0; 2749 return 0;
2698} 2750}
2699 2751
2700int dbg_is_mapped(struct ubi_volume_desc *desc, int lnum) 2752int dbg_is_mapped(struct ubi_volume_desc *desc, int lnum)
2701{ 2753{
2702 if (in_failure_mode(desc)) 2754 if (in_failure_mode(desc))
2703 return -EIO; 2755 return -EROFS;
2704 return ubi_is_mapped(desc, lnum); 2756 return ubi_is_mapped(desc, lnum);
2705} 2757}
2706 2758
@@ -2709,12 +2761,12 @@ int dbg_leb_map(struct ubi_volume_desc *desc, int lnum, int dtype)
2709 int err; 2761 int err;
2710 2762
2711 if (do_fail(desc, lnum, 0)) 2763 if (do_fail(desc, lnum, 0))
2712 return -EIO; 2764 return -EROFS;
2713 err = ubi_leb_map(desc, lnum, dtype); 2765 err = ubi_leb_map(desc, lnum, dtype);
2714 if (err) 2766 if (err)
2715 return err; 2767 return err;
2716 if (do_fail(desc, lnum, 0)) 2768 if (do_fail(desc, lnum, 0))
2717 return -EIO; 2769 return -EROFS;
2718 return 0; 2770 return 0;
2719} 2771}
2720 2772
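
Every dbg_leb_* wrapper in this run of hunks has the same shape, and all of them switch the injected error from -EIO to -EROFS; a plausible reading is that an emulated power cut should push UBIFS down its read-only error paths rather than look like media corruption, though the patch itself only shows the code change. The shared shape, factored into a hypothetical helper:

    /* hypothetical refactoring; the kernel code repeats this inline */
    static int dbg_leb_op(struct ubi_volume_desc *desc, int lnum,
                          int (*op)(struct ubi_volume_desc *, int))
    {
            int err;

            if (do_fail(desc, lnum, 0))
                    return -EROFS;  /* injected failure before the real op */
            err = op(desc, lnum);
            if (err)
                    return err;     /* genuine UBI errors pass through */
            if (do_fail(desc, lnum, 0))
                    return -EROFS;  /* injected failure after the real op */
            return 0;
    }
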
@@ -2784,7 +2836,7 @@ void dbg_debugfs_exit(void)
2784static int open_debugfs_file(struct inode *inode, struct file *file) 2836static int open_debugfs_file(struct inode *inode, struct file *file)
2785{ 2837{
2786 file->private_data = inode->i_private; 2838 file->private_data = inode->i_private;
2787 return 0; 2839 return nonseekable_open(inode, file);
2788} 2840}
2789 2841
2790static ssize_t write_debugfs_file(struct file *file, const char __user *buf, 2842static ssize_t write_debugfs_file(struct file *file, const char __user *buf,
@@ -2795,18 +2847,15 @@ static ssize_t write_debugfs_file(struct file *file, const char __user *buf,
2795 2847
2796 if (file->f_path.dentry == d->dfs_dump_lprops) 2848 if (file->f_path.dentry == d->dfs_dump_lprops)
2797 dbg_dump_lprops(c); 2849 dbg_dump_lprops(c);
2798 else if (file->f_path.dentry == d->dfs_dump_budg) { 2850 else if (file->f_path.dentry == d->dfs_dump_budg)
2799 spin_lock(&c->space_lock); 2851 dbg_dump_budg(c, &c->bi);
2800 dbg_dump_budg(c); 2852 else if (file->f_path.dentry == d->dfs_dump_tnc) {
2801 spin_unlock(&c->space_lock);
2802 } else if (file->f_path.dentry == d->dfs_dump_tnc) {
2803 mutex_lock(&c->tnc_mutex); 2853 mutex_lock(&c->tnc_mutex);
2804 dbg_dump_tnc(c); 2854 dbg_dump_tnc(c);
2805 mutex_unlock(&c->tnc_mutex); 2855 mutex_unlock(&c->tnc_mutex);
2806 } else 2856 } else
2807 return -EINVAL; 2857 return -EINVAL;
2808 2858
2809 *ppos += count;
2810 return count; 2859 return count;
2811} 2860}
2812 2861
@@ -2814,7 +2863,7 @@ static const struct file_operations dfs_fops = {
2814 .open = open_debugfs_file, 2863 .open = open_debugfs_file,
2815 .write = write_debugfs_file, 2864 .write = write_debugfs_file,
2816 .owner = THIS_MODULE, 2865 .owner = THIS_MODULE,
2817 .llseek = default_llseek, 2866 .llseek = no_llseek,
2818}; 2867};
2819 2868
2820/** 2869/**
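
The two llseek-related changes above belong together: nonseekable_open() marks the file non-seekable at open time, and no_llseek makes explicit lseek() calls fail with -ESPIPE, which is the right contract for write-only trigger files like these debugfs knobs. In outline (example_write is assumed to be defined elsewhere):

    static int example_open(struct inode *inode, struct file *file)
    {
            file->private_data = inode->i_private;
            return nonseekable_open(inode, file);   /* clear seekable modes */
    }

    static const struct file_operations example_fops = {
            .open   = example_open,
            .write  = example_write,
            .owner  = THIS_MODULE,
            .llseek = no_llseek,    /* lseek() now returns -ESPIPE */
    };
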
diff --git a/fs/ubifs/debug.h b/fs/ubifs/debug.h
index e6493cac193d..a811ac4a26bb 100644
--- a/fs/ubifs/debug.h
+++ b/fs/ubifs/debug.h
@@ -31,6 +31,8 @@ typedef int (*dbg_znode_callback)(struct ubifs_info *c,
31 31
32#ifdef CONFIG_UBIFS_FS_DEBUG 32#ifdef CONFIG_UBIFS_FS_DEBUG
33 33
34#include <linux/random.h>
35
34/** 36/**
35 * ubifs_debug_info - per-FS debugging information. 37 * ubifs_debug_info - per-FS debugging information.
36 * @old_zroot: old index root - used by 'dbg_check_old_index()' 38 * @old_zroot: old index root - used by 'dbg_check_old_index()'
@@ -50,13 +52,15 @@ typedef int (*dbg_znode_callback)(struct ubifs_info *c,
50 * @new_ihead_offs: used by debugging to check @c->ihead_offs 52 * @new_ihead_offs: used by debugging to check @c->ihead_offs
51 * 53 *
52 * @saved_lst: saved lprops statistics (used by 'dbg_save_space_info()') 54 * @saved_lst: saved lprops statistics (used by 'dbg_save_space_info()')
53 * @saved_free: saved free space (used by 'dbg_save_space_info()') 55 * @saved_bi: saved budgeting information
56 * @saved_free: saved amount of free space
57 * @saved_idx_gc_cnt: saved value of @c->idx_gc_cnt
54 * 58 *
55 * dfs_dir_name: name of debugfs directory containing this file-system's files 59 * @dfs_dir_name: name of debugfs directory containing this file-system's files
56 * dfs_dir: direntry object of the file-system debugfs directory 60 * @dfs_dir: direntry object of the file-system debugfs directory
57 * dfs_dump_lprops: "dump lprops" debugfs knob 61 * @dfs_dump_lprops: "dump lprops" debugfs knob
58 * dfs_dump_budg: "dump budgeting information" debugfs knob 62 * @dfs_dump_budg: "dump budgeting information" debugfs knob
59 * dfs_dump_tnc: "dump TNC" debugfs knob 63 * @dfs_dump_tnc: "dump TNC" debugfs knob
60 */ 64 */
61struct ubifs_debug_info { 65struct ubifs_debug_info {
62 struct ubifs_zbranch old_zroot; 66 struct ubifs_zbranch old_zroot;
@@ -76,7 +80,9 @@ struct ubifs_debug_info {
76 int new_ihead_offs; 80 int new_ihead_offs;
77 81
78 struct ubifs_lp_stats saved_lst; 82 struct ubifs_lp_stats saved_lst;
83 struct ubifs_budg_info saved_bi;
79 long long saved_free; 84 long long saved_free;
85 int saved_idx_gc_cnt;
80 86
81 char dfs_dir_name[100]; 87 char dfs_dir_name[100];
82 struct dentry *dfs_dir; 88 struct dentry *dfs_dir;
@@ -101,23 +107,7 @@ struct ubifs_debug_info {
101 } \ 107 } \
102} while (0) 108} while (0)
103 109
104#define dbg_dump_stack() do { \ 110#define dbg_dump_stack() dump_stack()
105 if (!dbg_failure_mode) \
106 dump_stack(); \
107} while (0)
108
109/* Generic debugging messages */
110#define dbg_msg(fmt, ...) do { \
111 spin_lock(&dbg_lock); \
112 printk(KERN_DEBUG "UBIFS DBG (pid %d): %s: " fmt "\n", current->pid, \
113 __func__, ##__VA_ARGS__); \
114 spin_unlock(&dbg_lock); \
115} while (0)
116
117#define dbg_do_msg(typ, fmt, ...) do { \
118 if (ubifs_msg_flags & typ) \
119 dbg_msg(fmt, ##__VA_ARGS__); \
120} while (0)
121 111
122#define dbg_err(fmt, ...) do { \ 112#define dbg_err(fmt, ...) do { \
123 spin_lock(&dbg_lock); \ 113 spin_lock(&dbg_lock); \
@@ -137,77 +127,40 @@ const char *dbg_key_str1(const struct ubifs_info *c,
137#define DBGKEY(key) dbg_key_str0(c, (key)) 127#define DBGKEY(key) dbg_key_str0(c, (key))
138#define DBGKEY1(key) dbg_key_str1(c, (key)) 128#define DBGKEY1(key) dbg_key_str1(c, (key))
139 129
140/* General messages */ 130#define ubifs_dbg_msg(type, fmt, ...) do { \
141#define dbg_gen(fmt, ...) dbg_do_msg(UBIFS_MSG_GEN, fmt, ##__VA_ARGS__) 131 spin_lock(&dbg_lock); \
132 pr_debug("UBIFS DBG " type ": " fmt "\n", ##__VA_ARGS__); \
133 spin_unlock(&dbg_lock); \
134} while (0)
142 135
136/* Just a debugging message not related to any specific UBIFS subsystem */
137#define dbg_msg(fmt, ...) ubifs_dbg_msg("msg", fmt, ##__VA_ARGS__)
138/* General messages */
139#define dbg_gen(fmt, ...) ubifs_dbg_msg("gen", fmt, ##__VA_ARGS__)
143/* Additional journal messages */ 140/* Additional journal messages */
144#define dbg_jnl(fmt, ...) dbg_do_msg(UBIFS_MSG_JNL, fmt, ##__VA_ARGS__) 141#define dbg_jnl(fmt, ...) ubifs_dbg_msg("jnl", fmt, ##__VA_ARGS__)
145
146/* Additional TNC messages */ 142/* Additional TNC messages */
147#define dbg_tnc(fmt, ...) dbg_do_msg(UBIFS_MSG_TNC, fmt, ##__VA_ARGS__) 143#define dbg_tnc(fmt, ...) ubifs_dbg_msg("tnc", fmt, ##__VA_ARGS__)
148
149/* Additional lprops messages */ 144/* Additional lprops messages */
150#define dbg_lp(fmt, ...) dbg_do_msg(UBIFS_MSG_LP, fmt, ##__VA_ARGS__) 145#define dbg_lp(fmt, ...) ubifs_dbg_msg("lp", fmt, ##__VA_ARGS__)
151
152/* Additional LEB find messages */ 146/* Additional LEB find messages */
153#define dbg_find(fmt, ...) dbg_do_msg(UBIFS_MSG_FIND, fmt, ##__VA_ARGS__) 147#define dbg_find(fmt, ...) ubifs_dbg_msg("find", fmt, ##__VA_ARGS__)
154
155/* Additional mount messages */ 148/* Additional mount messages */
156#define dbg_mnt(fmt, ...) dbg_do_msg(UBIFS_MSG_MNT, fmt, ##__VA_ARGS__) 149#define dbg_mnt(fmt, ...) ubifs_dbg_msg("mnt", fmt, ##__VA_ARGS__)
157
158/* Additional I/O messages */ 150/* Additional I/O messages */
159#define dbg_io(fmt, ...) dbg_do_msg(UBIFS_MSG_IO, fmt, ##__VA_ARGS__) 151#define dbg_io(fmt, ...) ubifs_dbg_msg("io", fmt, ##__VA_ARGS__)
160
161/* Additional commit messages */ 152/* Additional commit messages */
162#define dbg_cmt(fmt, ...) dbg_do_msg(UBIFS_MSG_CMT, fmt, ##__VA_ARGS__) 153#define dbg_cmt(fmt, ...) ubifs_dbg_msg("cmt", fmt, ##__VA_ARGS__)
163
164/* Additional budgeting messages */ 154/* Additional budgeting messages */
165#define dbg_budg(fmt, ...) dbg_do_msg(UBIFS_MSG_BUDG, fmt, ##__VA_ARGS__) 155#define dbg_budg(fmt, ...) ubifs_dbg_msg("budg", fmt, ##__VA_ARGS__)
166
167/* Additional log messages */ 156/* Additional log messages */
168#define dbg_log(fmt, ...) dbg_do_msg(UBIFS_MSG_LOG, fmt, ##__VA_ARGS__) 157#define dbg_log(fmt, ...) ubifs_dbg_msg("log", fmt, ##__VA_ARGS__)
169
170/* Additional gc messages */ 158/* Additional gc messages */
171#define dbg_gc(fmt, ...) dbg_do_msg(UBIFS_MSG_GC, fmt, ##__VA_ARGS__) 159#define dbg_gc(fmt, ...) ubifs_dbg_msg("gc", fmt, ##__VA_ARGS__)
172
173/* Additional scan messages */ 160/* Additional scan messages */
174#define dbg_scan(fmt, ...) dbg_do_msg(UBIFS_MSG_SCAN, fmt, ##__VA_ARGS__) 161#define dbg_scan(fmt, ...) ubifs_dbg_msg("scan", fmt, ##__VA_ARGS__)
175
176/* Additional recovery messages */ 162/* Additional recovery messages */
177#define dbg_rcvry(fmt, ...) dbg_do_msg(UBIFS_MSG_RCVRY, fmt, ##__VA_ARGS__) 163#define dbg_rcvry(fmt, ...) ubifs_dbg_msg("rcvry", fmt, ##__VA_ARGS__)
178
179/*
180 * Debugging message type flags.
181 *
182 * UBIFS_MSG_GEN: general messages
183 * UBIFS_MSG_JNL: journal messages
184 * UBIFS_MSG_MNT: mount messages
185 * UBIFS_MSG_CMT: commit messages
186 * UBIFS_MSG_FIND: LEB find messages
187 * UBIFS_MSG_BUDG: budgeting messages
188 * UBIFS_MSG_GC: garbage collection messages
189 * UBIFS_MSG_TNC: TNC messages
190 * UBIFS_MSG_LP: lprops messages
191 * UBIFS_MSG_IO: I/O messages
192 * UBIFS_MSG_LOG: log messages
193 * UBIFS_MSG_SCAN: scan messages
194 * UBIFS_MSG_RCVRY: recovery messages
195 */
196enum {
197 UBIFS_MSG_GEN = 0x1,
198 UBIFS_MSG_JNL = 0x2,
199 UBIFS_MSG_MNT = 0x4,
200 UBIFS_MSG_CMT = 0x8,
201 UBIFS_MSG_FIND = 0x10,
202 UBIFS_MSG_BUDG = 0x20,
203 UBIFS_MSG_GC = 0x40,
204 UBIFS_MSG_TNC = 0x80,
205 UBIFS_MSG_LP = 0x100,
206 UBIFS_MSG_IO = 0x200,
207 UBIFS_MSG_LOG = 0x400,
208 UBIFS_MSG_SCAN = 0x800,
209 UBIFS_MSG_RCVRY = 0x1000,
210};
211 164
212/* 165/*
213 * Debugging check flags. 166 * Debugging check flags.
@@ -233,11 +186,9 @@ enum {
233/* 186/*
234 * Special testing flags. 187 * Special testing flags.
235 * 188 *
236 * UBIFS_TST_FORCE_IN_THE_GAPS: force the use of in-the-gaps method
237 * UBIFS_TST_RCVRY: failure mode for recovery testing 189 * UBIFS_TST_RCVRY: failure mode for recovery testing
238 */ 190 */
239enum { 191enum {
240 UBIFS_TST_FORCE_IN_THE_GAPS = 0x2,
241 UBIFS_TST_RCVRY = 0x4, 192 UBIFS_TST_RCVRY = 0x4,
242}; 193};
243 194
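
With the message-type flags gone, every dbg_* macro funnels into ubifs_dbg_msg(), which tags the subsystem and defers to pr_debug(); on kernels with dynamic debug the removed debug_msgs module parameter is redundant because individual call sites can be toggled at runtime. Roughly, a call now expands as:

    /* dbg_gen("commit %d", n) after this patch becomes, in effect: */
    spin_lock(&dbg_lock);
    pr_debug("UBIFS DBG gen: " "commit %d" "\n", n);
    spin_unlock(&dbg_lock);
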
@@ -262,7 +213,7 @@ void dbg_dump_lpt_node(const struct ubifs_info *c, void *node, int lnum,
262 int offs); 213 int offs);
263void dbg_dump_budget_req(const struct ubifs_budget_req *req); 214void dbg_dump_budget_req(const struct ubifs_budget_req *req);
264void dbg_dump_lstats(const struct ubifs_lp_stats *lst); 215void dbg_dump_lstats(const struct ubifs_lp_stats *lst);
265void dbg_dump_budg(struct ubifs_info *c); 216void dbg_dump_budg(struct ubifs_info *c, const struct ubifs_budg_info *bi);
266void dbg_dump_lprop(const struct ubifs_info *c, const struct ubifs_lprops *lp); 217void dbg_dump_lprop(const struct ubifs_info *c, const struct ubifs_lprops *lp);
267void dbg_dump_lprops(struct ubifs_info *c); 218void dbg_dump_lprops(struct ubifs_info *c);
268void dbg_dump_lpt_info(struct ubifs_info *c); 219void dbg_dump_lpt_info(struct ubifs_info *c);
@@ -304,18 +255,16 @@ int dbg_check_data_nodes_order(struct ubifs_info *c, struct list_head *head);
304int dbg_check_nondata_nodes_order(struct ubifs_info *c, struct list_head *head); 255int dbg_check_nondata_nodes_order(struct ubifs_info *c, struct list_head *head);
305 256
306/* Force the use of in-the-gaps method for testing */ 257/* Force the use of in-the-gaps method for testing */
307 258static inline int dbg_force_in_the_gaps_enabled(void)
308#define dbg_force_in_the_gaps_enabled \ 259{
309 (ubifs_tst_flags & UBIFS_TST_FORCE_IN_THE_GAPS) 260 return ubifs_chk_flags & UBIFS_CHK_GEN;
310 261}
311int dbg_force_in_the_gaps(void); 262int dbg_force_in_the_gaps(void);
312 263
313/* Failure mode for recovery testing */ 264/* Failure mode for recovery testing */
314
315#define dbg_failure_mode (ubifs_tst_flags & UBIFS_TST_RCVRY) 265#define dbg_failure_mode (ubifs_tst_flags & UBIFS_TST_RCVRY)
316 266
317#ifndef UBIFS_DBG_PRESERVE_UBI 267#ifndef UBIFS_DBG_PRESERVE_UBI
318
319#define ubi_leb_read dbg_leb_read 268#define ubi_leb_read dbg_leb_read
320#define ubi_leb_write dbg_leb_write 269#define ubi_leb_write dbg_leb_write
321#define ubi_leb_change dbg_leb_change 270#define ubi_leb_change dbg_leb_change
@@ -323,7 +272,6 @@ int dbg_force_in_the_gaps(void);
323#define ubi_leb_unmap dbg_leb_unmap 272#define ubi_leb_unmap dbg_leb_unmap
324#define ubi_is_mapped dbg_is_mapped 273#define ubi_is_mapped dbg_is_mapped
325#define ubi_leb_map dbg_leb_map 274#define ubi_leb_map dbg_leb_map
326
327#endif 275#endif
328 276
329int dbg_leb_read(struct ubi_volume_desc *desc, int lnum, char *buf, int offset, 277int dbg_leb_read(struct ubi_volume_desc *desc, int lnum, char *buf, int offset,
@@ -370,33 +318,33 @@ void dbg_debugfs_exit_fs(struct ubifs_info *c);
370 __func__, __LINE__, current->pid); \ 318 __func__, __LINE__, current->pid); \
371} while (0) 319} while (0)
372 320
373#define dbg_err(fmt, ...) do { \ 321#define dbg_err(fmt, ...) do { \
374 if (0) \ 322 if (0) \
375 ubifs_err(fmt, ##__VA_ARGS__); \ 323 ubifs_err(fmt, ##__VA_ARGS__); \
376} while (0) 324} while (0)
377 325
378#define dbg_msg(fmt, ...) do { \ 326#define ubifs_dbg_msg(fmt, ...) do { \
379 if (0) \ 327 if (0) \
380 printk(KERN_DEBUG "UBIFS DBG (pid %d): %s: " fmt "\n", \ 328 pr_debug(fmt "\n", ##__VA_ARGS__); \
381 current->pid, __func__, ##__VA_ARGS__); \
382} while (0) 329} while (0)
383 330
384#define dbg_dump_stack() 331#define dbg_dump_stack()
385#define ubifs_assert_cmt_locked(c) 332#define ubifs_assert_cmt_locked(c)
386 333
387#define dbg_gen(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__) 334#define dbg_msg(fmt, ...) ubifs_dbg_msg(fmt, ##__VA_ARGS__)
388#define dbg_jnl(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__) 335#define dbg_gen(fmt, ...) ubifs_dbg_msg(fmt, ##__VA_ARGS__)
389#define dbg_tnc(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__) 336#define dbg_jnl(fmt, ...) ubifs_dbg_msg(fmt, ##__VA_ARGS__)
390#define dbg_lp(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__) 337#define dbg_tnc(fmt, ...) ubifs_dbg_msg(fmt, ##__VA_ARGS__)
391#define dbg_find(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__) 338#define dbg_lp(fmt, ...) ubifs_dbg_msg(fmt, ##__VA_ARGS__)
392#define dbg_mnt(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__) 339#define dbg_find(fmt, ...) ubifs_dbg_msg(fmt, ##__VA_ARGS__)
393#define dbg_io(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__) 340#define dbg_mnt(fmt, ...) ubifs_dbg_msg(fmt, ##__VA_ARGS__)
394#define dbg_cmt(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__) 341#define dbg_io(fmt, ...) ubifs_dbg_msg(fmt, ##__VA_ARGS__)
395#define dbg_budg(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__) 342#define dbg_cmt(fmt, ...) ubifs_dbg_msg(fmt, ##__VA_ARGS__)
396#define dbg_log(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__) 343#define dbg_budg(fmt, ...) ubifs_dbg_msg(fmt, ##__VA_ARGS__)
397#define dbg_gc(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__) 344#define dbg_log(fmt, ...) ubifs_dbg_msg(fmt, ##__VA_ARGS__)
398#define dbg_scan(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__) 345#define dbg_gc(fmt, ...) ubifs_dbg_msg(fmt, ##__VA_ARGS__)
399#define dbg_rcvry(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__) 346#define dbg_scan(fmt, ...) ubifs_dbg_msg(fmt, ##__VA_ARGS__)
347#define dbg_rcvry(fmt, ...) ubifs_dbg_msg(fmt, ##__VA_ARGS__)
400 348
401#define DBGKEY(key) ((char *)(key)) 349#define DBGKEY(key) ((char *)(key))
402#define DBGKEY1(key) ((char *)(key)) 350#define DBGKEY1(key) ((char *)(key))
@@ -420,7 +368,9 @@ static inline void
420dbg_dump_budget_req(const struct ubifs_budget_req *req) { return; } 368dbg_dump_budget_req(const struct ubifs_budget_req *req) { return; }
421static inline void 369static inline void
422dbg_dump_lstats(const struct ubifs_lp_stats *lst) { return; } 370dbg_dump_lstats(const struct ubifs_lp_stats *lst) { return; }
423static inline void dbg_dump_budg(struct ubifs_info *c) { return; } 371static inline void
372dbg_dump_budg(struct ubifs_info *c,
373 const struct ubifs_budg_info *bi) { return; }
424static inline void dbg_dump_lprop(const struct ubifs_info *c, 374static inline void dbg_dump_lprop(const struct ubifs_info *c,
425 const struct ubifs_lprops *lp) { return; } 375 const struct ubifs_lprops *lp) { return; }
426static inline void dbg_dump_lprops(struct ubifs_info *c) { return; } 376static inline void dbg_dump_lprops(struct ubifs_info *c) { return; }
@@ -482,8 +432,8 @@ dbg_check_nondata_nodes_order(struct ubifs_info *c,
482 struct list_head *head) { return 0; } 432 struct list_head *head) { return 0; }
483 433
484static inline int dbg_force_in_the_gaps(void) { return 0; } 434static inline int dbg_force_in_the_gaps(void) { return 0; }
485#define dbg_force_in_the_gaps_enabled 0 435#define dbg_force_in_the_gaps_enabled() 0
486#define dbg_failure_mode 0 436#define dbg_failure_mode 0
487 437
488static inline int dbg_debugfs_init(void) { return 0; } 438static inline int dbg_debugfs_init(void) { return 0; }
489static inline void dbg_debugfs_exit(void) { return; } 439static inline void dbg_debugfs_exit(void) { return; }
diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c
index 7217d67a80a6..ef5abd38f0bf 100644
--- a/fs/ubifs/dir.c
+++ b/fs/ubifs/dir.c
@@ -603,7 +603,7 @@ static int ubifs_unlink(struct inode *dir, struct dentry *dentry)
603 ubifs_release_budget(c, &req); 603 ubifs_release_budget(c, &req);
604 else { 604 else {
605 /* We've deleted something - clean the "no space" flags */ 605 /* We've deleted something - clean the "no space" flags */
606 c->nospace = c->nospace_rp = 0; 606 c->bi.nospace = c->bi.nospace_rp = 0;
607 smp_wmb(); 607 smp_wmb();
608 } 608 }
609 return 0; 609 return 0;
@@ -693,7 +693,7 @@ static int ubifs_rmdir(struct inode *dir, struct dentry *dentry)
693 ubifs_release_budget(c, &req); 693 ubifs_release_budget(c, &req);
694 else { 694 else {
695 /* We've deleted something - clean the "no space" flags */ 695 /* We've deleted something - clean the "no space" flags */
696 c->nospace = c->nospace_rp = 0; 696 c->bi.nospace = c->bi.nospace_rp = 0;
697 smp_wmb(); 697 smp_wmb();
698 } 698 }
699 return 0; 699 return 0;
diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c
index b286db79c686..5e7fccfc4b29 100644
--- a/fs/ubifs/file.c
+++ b/fs/ubifs/file.c
@@ -212,7 +212,7 @@ static void release_new_page_budget(struct ubifs_info *c)
212 */ 212 */
213static void release_existing_page_budget(struct ubifs_info *c) 213static void release_existing_page_budget(struct ubifs_info *c)
214{ 214{
215 struct ubifs_budget_req req = { .dd_growth = c->page_budget}; 215 struct ubifs_budget_req req = { .dd_growth = c->bi.page_budget};
216 216
217 ubifs_release_budget(c, &req); 217 ubifs_release_budget(c, &req);
218} 218}
@@ -971,11 +971,11 @@ static int do_writepage(struct page *page, int len)
971 * the page locked, and it locks @ui_mutex. However, write-back does take inode 971 * the page locked, and it locks @ui_mutex. However, write-back does take inode
972 * @i_mutex, which means other VFS operations may be run on this inode at the 972 * @i_mutex, which means other VFS operations may be run on this inode at the
973 * same time. And the problematic one is truncation to smaller size, from where 973 * same time. And the problematic one is truncation to smaller size, from where
974 * we have to call 'truncate_setsize()', which first changes @inode->i_size, then 974 * we have to call 'truncate_setsize()', which first changes @inode->i_size,
975 * drops the truncated pages. And while dropping the pages, it takes the page 975 * then drops the truncated pages. And while dropping the pages, it takes the
976 * lock. This means that 'do_truncation()' cannot call 'truncate_setsize()' with 976 * page lock. This means that 'do_truncation()' cannot call 'truncate_setsize()'
977 * @ui_mutex locked, because it would deadlock with 'ubifs_writepage()'. This 977 * with @ui_mutex locked, because it would deadlock with 'ubifs_writepage()'.
978 * means that @inode->i_size is changed while @ui_mutex is unlocked. 978 * This means that @inode->i_size is changed while @ui_mutex is unlocked.
979 * 979 *
980 * XXX(truncate): with the new truncate sequence this is not true anymore, 980 * XXX(truncate): with the new truncate sequence this is not true anymore,
981 * and the calls to truncate_setsize can be moved around freely. They should 981 * and the calls to truncate_setsize can be moved around freely. They should
@@ -1189,7 +1189,7 @@ out_budg:
1189 if (budgeted) 1189 if (budgeted)
1190 ubifs_release_budget(c, &req); 1190 ubifs_release_budget(c, &req);
1191 else { 1191 else {
1192 c->nospace = c->nospace_rp = 0; 1192 c->bi.nospace = c->bi.nospace_rp = 0;
1193 smp_wmb(); 1193 smp_wmb();
1194 } 1194 }
1195 return err; 1195 return err;
@@ -1312,7 +1312,11 @@ int ubifs_fsync(struct file *file, int datasync)
1312 1312
1313 dbg_gen("syncing inode %lu", inode->i_ino); 1313 dbg_gen("syncing inode %lu", inode->i_ino);
1314 1314
1315 if (inode->i_sb->s_flags & MS_RDONLY) 1315 if (c->ro_mount)
1316 /*
1317 * For some really strange reasons VFS does not filter out
1318 * 'fsync()' for R/O mounted file-systems as per 2.6.39.
1319 */
1316 return 0; 1320 return 0;
1317 1321
1318 /* 1322 /*
@@ -1432,10 +1436,11 @@ static int ubifs_releasepage(struct page *page, gfp_t unused_gfp_flags)
1432} 1436}
1433 1437
1434/* 1438/*
1435 * mmap()d file has taken write protection fault and is being made 1439 * mmap()d file has taken write protection fault and is being made writable.
1436 * writable. UBIFS must ensure page is budgeted for. 1440 * UBIFS must ensure page is budgeted for.
1437 */ 1441 */
1438static int ubifs_vm_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) 1442static int ubifs_vm_page_mkwrite(struct vm_area_struct *vma,
1443 struct vm_fault *vmf)
1439{ 1444{
1440 struct page *page = vmf->page; 1445 struct page *page = vmf->page;
1441 struct inode *inode = vma->vm_file->f_path.dentry->d_inode; 1446 struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
@@ -1536,7 +1541,6 @@ static int ubifs_file_mmap(struct file *file, struct vm_area_struct *vma)
1536{ 1541{
1537 int err; 1542 int err;
1538 1543
1539 /* 'generic_file_mmap()' takes care of NOMMU case */
1540 err = generic_file_mmap(file, vma); 1544 err = generic_file_mmap(file, vma);
1541 if (err) 1545 if (err)
1542 return err; 1546 return err;
diff --git a/fs/ubifs/find.c b/fs/ubifs/find.c
index 1d54383d1269..2559d174e004 100644
--- a/fs/ubifs/find.c
+++ b/fs/ubifs/find.c
@@ -252,8 +252,8 @@ int ubifs_find_dirty_leb(struct ubifs_info *c, struct ubifs_lprops *ret_lp,
252 * But if the index takes fewer LEBs than it is reserved for it, 252 * But if the index takes fewer LEBs than it is reserved for it,
253 * this function must avoid picking those reserved LEBs. 253 * this function must avoid picking those reserved LEBs.
254 */ 254 */
255 if (c->min_idx_lebs >= c->lst.idx_lebs) { 255 if (c->bi.min_idx_lebs >= c->lst.idx_lebs) {
256 rsvd_idx_lebs = c->min_idx_lebs - c->lst.idx_lebs; 256 rsvd_idx_lebs = c->bi.min_idx_lebs - c->lst.idx_lebs;
257 exclude_index = 1; 257 exclude_index = 1;
258 } 258 }
259 spin_unlock(&c->space_lock); 259 spin_unlock(&c->space_lock);
@@ -276,7 +276,7 @@ int ubifs_find_dirty_leb(struct ubifs_info *c, struct ubifs_lprops *ret_lp,
276 pick_free = 0; 276 pick_free = 0;
277 } else { 277 } else {
278 spin_lock(&c->space_lock); 278 spin_lock(&c->space_lock);
279 exclude_index = (c->min_idx_lebs >= c->lst.idx_lebs); 279 exclude_index = (c->bi.min_idx_lebs >= c->lst.idx_lebs);
280 spin_unlock(&c->space_lock); 280 spin_unlock(&c->space_lock);
281 } 281 }
282 282
@@ -501,8 +501,8 @@ int ubifs_find_free_space(struct ubifs_info *c, int min_space, int *offs,
501 501
502 /* Check if there are enough empty LEBs for commit */ 502 /* Check if there are enough empty LEBs for commit */
503 spin_lock(&c->space_lock); 503 spin_lock(&c->space_lock);
504 if (c->min_idx_lebs > c->lst.idx_lebs) 504 if (c->bi.min_idx_lebs > c->lst.idx_lebs)
505 rsvd_idx_lebs = c->min_idx_lebs - c->lst.idx_lebs; 505 rsvd_idx_lebs = c->bi.min_idx_lebs - c->lst.idx_lebs;
506 else 506 else
507 rsvd_idx_lebs = 0; 507 rsvd_idx_lebs = 0;
508 lebs = c->lst.empty_lebs + c->freeable_cnt + c->idx_gc_cnt - 508 lebs = c->lst.empty_lebs + c->freeable_cnt + c->idx_gc_cnt -
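The recurring c->nospace, c->min_idx_lebs and (further down) c->old_idx_sz renames to c->bi.* indicate that the budgeting state has been gathered into one embedded container. A minimal sketch of what that container looks like, reconstructed purely from the usages visible in this diff; the exact field set and types are assumptions, not the patch's actual definition:

/*
 * Hypothetical reconstruction (assumption, based only on the c->bi.*
 * accesses in this diff): the budgeting state now lives in an embedded
 * structure, roughly along these lines.
 */
struct ubifs_budg_info {
	long long old_idx_sz;      /* index size on flash (see master.c hunks) */
	int min_idx_lebs;          /* min. LEBs required for the index */
	unsigned int nospace:1;    /* set once "no space" has been reported */
	unsigned int nospace_rp:1; /* the same, counting the reserved pool */
	/* ... presumably further budgeting fields ... */
};

/* embedded in the description object, so c->min_idx_lebs becomes c->bi.min_idx_lebs */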
diff --git a/fs/ubifs/gc.c b/fs/ubifs/gc.c
index 151f10882820..ded29f6224c2 100644
--- a/fs/ubifs/gc.c
+++ b/fs/ubifs/gc.c
@@ -100,6 +100,10 @@ static int switch_gc_head(struct ubifs_info *c)
100 if (err) 100 if (err)
101 return err; 101 return err;
102 102
103 err = ubifs_wbuf_sync_nolock(wbuf);
104 if (err)
105 return err;
106
103 err = ubifs_add_bud_to_log(c, GCHD, gc_lnum, 0); 107 err = ubifs_add_bud_to_log(c, GCHD, gc_lnum, 0);
104 if (err) 108 if (err)
105 return err; 109 return err;
@@ -118,7 +122,7 @@ static int switch_gc_head(struct ubifs_info *c)
118 * This function compares data nodes @a and @b. Returns %1 if @a has greater 122 * This function compares data nodes @a and @b. Returns %1 if @a has greater
119 * inode or block number, and %-1 otherwise. 123 * inode or block number, and %-1 otherwise.
120 */ 124 */
121int data_nodes_cmp(void *priv, struct list_head *a, struct list_head *b) 125static int data_nodes_cmp(void *priv, struct list_head *a, struct list_head *b)
122{ 126{
123 ino_t inuma, inumb; 127 ino_t inuma, inumb;
124 struct ubifs_info *c = priv; 128 struct ubifs_info *c = priv;
@@ -161,7 +165,8 @@ int data_nodes_cmp(void *priv, struct list_head *a, struct list_head *b)
161 * first and sorted by length in descending order. Directory entry nodes go 165 * first and sorted by length in descending order. Directory entry nodes go
162 * after inode nodes and are sorted in ascending hash value order. 166 * after inode nodes and are sorted in ascending hash value order.
163 */ 167 */
164int nondata_nodes_cmp(void *priv, struct list_head *a, struct list_head *b) 168static int nondata_nodes_cmp(void *priv, struct list_head *a,
169 struct list_head *b)
165{ 170{
166 ino_t inuma, inumb; 171 ino_t inuma, inumb;
167 struct ubifs_info *c = priv; 172 struct ubifs_info *c = priv;
@@ -473,6 +478,37 @@ int ubifs_garbage_collect_leb(struct ubifs_info *c, struct ubifs_lprops *lp)
473 ubifs_assert(c->gc_lnum != lnum); 478 ubifs_assert(c->gc_lnum != lnum);
474 ubifs_assert(wbuf->lnum != lnum); 479 ubifs_assert(wbuf->lnum != lnum);
475 480
481 if (lp->free + lp->dirty == c->leb_size) {
482 /* Special case - a free LEB */
483 dbg_gc("LEB %d is free, return it", lp->lnum);
484 ubifs_assert(!(lp->flags & LPROPS_INDEX));
485
486 if (lp->free != c->leb_size) {
487 /*
488 * Write buffers must be sync'd before unmapping
489 * freeable LEBs, because one of them may contain data
490 * which obsoletes something in 'lp->pnum'.
491 */
492 err = gc_sync_wbufs(c);
493 if (err)
494 return err;
495 err = ubifs_change_one_lp(c, lp->lnum, c->leb_size,
496 0, 0, 0, 0);
497 if (err)
498 return err;
499 }
500 err = ubifs_leb_unmap(c, lp->lnum);
501 if (err)
502 return err;
503
504 if (c->gc_lnum == -1) {
505 c->gc_lnum = lnum;
506 return LEB_RETAINED;
507 }
508
509 return LEB_FREED;
510 }
511
476 /* 512 /*
477 * We scan the entire LEB even though we only really need to scan up to 513 * We scan the entire LEB even though we only really need to scan up to
478 * (c->leb_size - lp->free). 514 * (c->leb_size - lp->free).
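A worked example of the fast path added above, with hypothetical numbers (assuming c->leb_size == 126976): a truly empty LEB has lp->free == 126976 and is unmapped right away; a freeable LEB with, say, lp->free == 20480 and lp->dirty == 106496 still satisfies free + dirty == leb_size, but the write-buffers are synced and the lprops reset to fully free first, because a write-buffer may hold data that obsoletes nodes in this LEB. Either way the LEB is then retained as the new GC LEB (c->gc_lnum == -1, return LEB_RETAINED) or reported as LEB_FREED.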
@@ -682,37 +718,6 @@ int ubifs_garbage_collect(struct ubifs_info *c, int anyway)
682 "(min. space %d)", lp.lnum, lp.free, lp.dirty, 718 "(min. space %d)", lp.lnum, lp.free, lp.dirty,
683 lp.free + lp.dirty, min_space); 719 lp.free + lp.dirty, min_space);
684 720
685 if (lp.free + lp.dirty == c->leb_size) {
686 /* An empty LEB was returned */
687 dbg_gc("LEB %d is free, return it", lp.lnum);
688 /*
689 * ubifs_find_dirty_leb() doesn't return freeable index
690 * LEBs.
691 */
692 ubifs_assert(!(lp.flags & LPROPS_INDEX));
693 if (lp.free != c->leb_size) {
694 /*
695 * Write buffers must be sync'd before
696 * unmapping freeable LEBs, because one of them
697 * may contain data which obsoletes something
698 * in 'lp.pnum'.
699 */
700 ret = gc_sync_wbufs(c);
701 if (ret)
702 goto out;
703 ret = ubifs_change_one_lp(c, lp.lnum,
704 c->leb_size, 0, 0, 0,
705 0);
706 if (ret)
707 goto out;
708 }
709 ret = ubifs_leb_unmap(c, lp.lnum);
710 if (ret)
711 goto out;
712 ret = lp.lnum;
713 break;
714 }
715
716 space_before = c->leb_size - wbuf->offs - wbuf->used; 721 space_before = c->leb_size - wbuf->offs - wbuf->used;
717 if (wbuf->lnum == -1) 722 if (wbuf->lnum == -1)
718 space_before = 0; 723 space_before = 0;
diff --git a/fs/ubifs/io.c b/fs/ubifs/io.c
index dfd168b7807e..166951e0dcd3 100644
--- a/fs/ubifs/io.c
+++ b/fs/ubifs/io.c
@@ -393,7 +393,7 @@ int ubifs_wbuf_sync_nolock(struct ubifs_wbuf *wbuf)
393 ubifs_assert(wbuf->size % c->min_io_size == 0); 393 ubifs_assert(wbuf->size % c->min_io_size == 0);
394 ubifs_assert(!c->ro_media && !c->ro_mount); 394 ubifs_assert(!c->ro_media && !c->ro_mount);
395 if (c->leb_size - wbuf->offs >= c->max_write_size) 395 if (c->leb_size - wbuf->offs >= c->max_write_size)
396 ubifs_assert(!((wbuf->offs + wbuf->size) % c->max_write_size )); 396 ubifs_assert(!((wbuf->offs + wbuf->size) % c->max_write_size));
397 397
398 if (c->ro_error) 398 if (c->ro_error)
399 return -EROFS; 399 return -EROFS;
@@ -452,8 +452,8 @@ int ubifs_wbuf_sync_nolock(struct ubifs_wbuf *wbuf)
452 * @dtype: data type 452 * @dtype: data type
453 * 453 *
454 * This function targets the write-buffer to logical eraseblock @lnum:@offs. 454 * This function targets the write-buffer to logical eraseblock @lnum:@offs.
455 * The write-buffer is synchronized if it is not empty. Returns zero in case of 455 * The write-buffer has to be empty. Returns zero in case of success and a
456 * success and a negative error code in case of failure. 456 * negative error code in case of failure.
457 */ 457 */
458int ubifs_wbuf_seek_nolock(struct ubifs_wbuf *wbuf, int lnum, int offs, 458int ubifs_wbuf_seek_nolock(struct ubifs_wbuf *wbuf, int lnum, int offs,
459 int dtype) 459 int dtype)
@@ -465,13 +465,7 @@ int ubifs_wbuf_seek_nolock(struct ubifs_wbuf *wbuf, int lnum, int offs,
465 ubifs_assert(offs >= 0 && offs <= c->leb_size); 465 ubifs_assert(offs >= 0 && offs <= c->leb_size);
466 ubifs_assert(offs % c->min_io_size == 0 && !(offs & 7)); 466 ubifs_assert(offs % c->min_io_size == 0 && !(offs & 7));
467 ubifs_assert(lnum != wbuf->lnum); 467 ubifs_assert(lnum != wbuf->lnum);
468 468 ubifs_assert(wbuf->used == 0);
469 if (wbuf->used > 0) {
470 int err = ubifs_wbuf_sync_nolock(wbuf);
471
472 if (err)
473 return err;
474 }
475 469
476 spin_lock(&wbuf->lock); 470 spin_lock(&wbuf->lock);
477 wbuf->lnum = lnum; 471 wbuf->lnum = lnum;
@@ -573,7 +567,7 @@ out_timers:
573int ubifs_wbuf_write_nolock(struct ubifs_wbuf *wbuf, void *buf, int len) 567int ubifs_wbuf_write_nolock(struct ubifs_wbuf *wbuf, void *buf, int len)
574{ 568{
575 struct ubifs_info *c = wbuf->c; 569 struct ubifs_info *c = wbuf->c;
576 int err, written, n, aligned_len = ALIGN(len, 8), offs; 570 int err, written, n, aligned_len = ALIGN(len, 8);
577 571
578 dbg_io("%d bytes (%s) to jhead %s wbuf at LEB %d:%d", len, 572 dbg_io("%d bytes (%s) to jhead %s wbuf at LEB %d:%d", len,
579 dbg_ntype(((struct ubifs_ch *)buf)->node_type), 573 dbg_ntype(((struct ubifs_ch *)buf)->node_type),
@@ -588,7 +582,7 @@ int ubifs_wbuf_write_nolock(struct ubifs_wbuf *wbuf, void *buf, int len)
588 ubifs_assert(mutex_is_locked(&wbuf->io_mutex)); 582 ubifs_assert(mutex_is_locked(&wbuf->io_mutex));
589 ubifs_assert(!c->ro_media && !c->ro_mount); 583 ubifs_assert(!c->ro_media && !c->ro_mount);
590 if (c->leb_size - wbuf->offs >= c->max_write_size) 584 if (c->leb_size - wbuf->offs >= c->max_write_size)
591 ubifs_assert(!((wbuf->offs + wbuf->size) % c->max_write_size )); 585 ubifs_assert(!((wbuf->offs + wbuf->size) % c->max_write_size));
592 586
593 if (c->leb_size - wbuf->offs - wbuf->used < aligned_len) { 587 if (c->leb_size - wbuf->offs - wbuf->used < aligned_len) {
594 err = -ENOSPC; 588 err = -ENOSPC;
@@ -636,7 +630,6 @@ int ubifs_wbuf_write_nolock(struct ubifs_wbuf *wbuf, void *buf, int len)
636 goto exit; 630 goto exit;
637 } 631 }
638 632
639 offs = wbuf->offs;
640 written = 0; 633 written = 0;
641 634
642 if (wbuf->used) { 635 if (wbuf->used) {
@@ -653,7 +646,7 @@ int ubifs_wbuf_write_nolock(struct ubifs_wbuf *wbuf, void *buf, int len)
653 if (err) 646 if (err)
654 goto out; 647 goto out;
655 648
656 offs += wbuf->size; 649 wbuf->offs += wbuf->size;
657 len -= wbuf->avail; 650 len -= wbuf->avail;
658 aligned_len -= wbuf->avail; 651 aligned_len -= wbuf->avail;
659 written += wbuf->avail; 652 written += wbuf->avail;
@@ -672,7 +665,7 @@ int ubifs_wbuf_write_nolock(struct ubifs_wbuf *wbuf, void *buf, int len)
672 if (err) 665 if (err)
673 goto out; 666 goto out;
674 667
675 offs += wbuf->size; 668 wbuf->offs += wbuf->size;
676 len -= wbuf->size; 669 len -= wbuf->size;
677 aligned_len -= wbuf->size; 670 aligned_len -= wbuf->size;
678 written += wbuf->size; 671 written += wbuf->size;
@@ -687,12 +680,13 @@ int ubifs_wbuf_write_nolock(struct ubifs_wbuf *wbuf, void *buf, int len)
687 n = aligned_len >> c->max_write_shift; 680 n = aligned_len >> c->max_write_shift;
688 if (n) { 681 if (n) {
689 n <<= c->max_write_shift; 682 n <<= c->max_write_shift;
690 dbg_io("write %d bytes to LEB %d:%d", n, wbuf->lnum, offs); 683 dbg_io("write %d bytes to LEB %d:%d", n, wbuf->lnum,
691 err = ubi_leb_write(c->ubi, wbuf->lnum, buf + written, offs, n, 684 wbuf->offs);
692 wbuf->dtype); 685 err = ubi_leb_write(c->ubi, wbuf->lnum, buf + written,
686 wbuf->offs, n, wbuf->dtype);
693 if (err) 687 if (err)
694 goto out; 688 goto out;
695 offs += n; 689 wbuf->offs += n;
696 aligned_len -= n; 690 aligned_len -= n;
697 len -= n; 691 len -= n;
698 written += n; 692 written += n;
@@ -707,7 +701,6 @@ int ubifs_wbuf_write_nolock(struct ubifs_wbuf *wbuf, void *buf, int len)
707 */ 701 */
708 memcpy(wbuf->buf, buf + written, len); 702 memcpy(wbuf->buf, buf + written, len);
709 703
710 wbuf->offs = offs;
711 if (c->leb_size - wbuf->offs >= c->max_write_size) 704 if (c->leb_size - wbuf->offs >= c->max_write_size)
712 wbuf->size = c->max_write_size; 705 wbuf->size = c->max_write_size;
713 else 706 else
diff --git a/fs/ubifs/journal.c b/fs/ubifs/journal.c
index aed25e864227..34b1679e6e3a 100644
--- a/fs/ubifs/journal.c
+++ b/fs/ubifs/journal.c
@@ -141,14 +141,8 @@ again:
141 * LEB with some empty space. 141 * LEB with some empty space.
142 */ 142 */
143 lnum = ubifs_find_free_space(c, len, &offs, squeeze); 143 lnum = ubifs_find_free_space(c, len, &offs, squeeze);
144 if (lnum >= 0) { 144 if (lnum >= 0)
145 /* Found an LEB, add it to the journal head */
146 err = ubifs_add_bud_to_log(c, jhead, lnum, offs);
147 if (err)
148 goto out_return;
149 /* A new bud was successfully allocated and added to the log */
150 goto out; 145 goto out;
151 }
152 146
153 err = lnum; 147 err = lnum;
154 if (err != -ENOSPC) 148 if (err != -ENOSPC)
@@ -203,12 +197,23 @@ again:
203 return 0; 197 return 0;
204 } 198 }
205 199
206 err = ubifs_add_bud_to_log(c, jhead, lnum, 0);
207 if (err)
208 goto out_return;
209 offs = 0; 200 offs = 0;
210 201
211out: 202out:
203 /*
204 * Make sure we synchronize the write-buffer before we add the new bud
205 * to the log. Otherwise we may have a power cut after the log
206 * reference node for the last bud (@lnum) is written but before the
207 * write-buffer data are written to the next-to-last bud
208 * (@wbuf->lnum). And the effect would be that the recovery would see
209 * that there is corruption in the next-to-last bud.
210 */
211 err = ubifs_wbuf_sync_nolock(wbuf);
212 if (err)
213 goto out_return;
214 err = ubifs_add_bud_to_log(c, jhead, lnum, offs);
215 if (err)
216 goto out_return;
212 err = ubifs_wbuf_seek_nolock(wbuf, lnum, offs, wbuf->dtype); 217 err = ubifs_wbuf_seek_nolock(wbuf, lnum, offs, wbuf->dtype);
213 if (err) 218 if (err)
214 goto out_unlock; 219 goto out_unlock;
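Taken together with the switch_gc_head() hunk in gc.c, the ordering this hunk establishes is: flush the write-buffer, then write the log reference node, and only then seek the write-buffer to the new bud. A condensed sketch of that sequence (the helper name is hypothetical; the calls and their order are as in the patch):

static int attach_new_bud(struct ubifs_info *c, struct ubifs_wbuf *wbuf,
			  int jhead, int lnum, int offs)
{
	int err;

	/* 1. Old bud data must reach the flash first */
	err = ubifs_wbuf_sync_nolock(wbuf);
	if (err)
		return err;
	/* 2. Now the reference node for the new bud may be logged */
	err = ubifs_add_bud_to_log(c, jhead, lnum, offs);
	if (err)
		return err;
	/* 3. Only then may the write-buffer move to the new bud */
	return ubifs_wbuf_seek_nolock(wbuf, lnum, offs, wbuf->dtype);
}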
@@ -380,10 +385,8 @@ out:
380 if (err == -ENOSPC) { 385 if (err == -ENOSPC) {
381 /* These are some budgeting problems, print useful information */ 386 /* These are some budgeting problems, print useful information */
382 down_write(&c->commit_sem); 387 down_write(&c->commit_sem);
383 spin_lock(&c->space_lock);
384 dbg_dump_stack(); 388 dbg_dump_stack();
385 dbg_dump_budg(c); 389 dbg_dump_budg(c, &c->bi);
386 spin_unlock(&c->space_lock);
387 dbg_dump_lprops(c); 390 dbg_dump_lprops(c);
388 cmt_retries = dbg_check_lprops(c); 391 cmt_retries = dbg_check_lprops(c);
389 up_write(&c->commit_sem); 392 up_write(&c->commit_sem);
diff --git a/fs/ubifs/log.c b/fs/ubifs/log.c
index 40fa780ebea7..affea9494ae2 100644
--- a/fs/ubifs/log.c
+++ b/fs/ubifs/log.c
@@ -100,20 +100,6 @@ struct ubifs_wbuf *ubifs_get_wbuf(struct ubifs_info *c, int lnum)
100} 100}
101 101
102/** 102/**
103 * next_log_lnum - switch to the next log LEB.
104 * @c: UBIFS file-system description object
105 * @lnum: current log LEB
106 */
107static inline int next_log_lnum(const struct ubifs_info *c, int lnum)
108{
109 lnum += 1;
110 if (lnum > c->log_last)
111 lnum = UBIFS_LOG_LNUM;
112
113 return lnum;
114}
115
116/**
117 * empty_log_bytes - calculate amount of empty space in the log. 103 * empty_log_bytes - calculate amount of empty space in the log.
118 * @c: UBIFS file-system description object 104 * @c: UBIFS file-system description object
119 */ 105 */
@@ -257,7 +243,7 @@ int ubifs_add_bud_to_log(struct ubifs_info *c, int jhead, int lnum, int offs)
257 ref->jhead = cpu_to_le32(jhead); 243 ref->jhead = cpu_to_le32(jhead);
258 244
259 if (c->lhead_offs > c->leb_size - c->ref_node_alsz) { 245 if (c->lhead_offs > c->leb_size - c->ref_node_alsz) {
260 c->lhead_lnum = next_log_lnum(c, c->lhead_lnum); 246 c->lhead_lnum = ubifs_next_log_lnum(c, c->lhead_lnum);
261 c->lhead_offs = 0; 247 c->lhead_offs = 0;
262 } 248 }
263 249
@@ -425,7 +411,7 @@ int ubifs_log_start_commit(struct ubifs_info *c, int *ltail_lnum)
425 411
426 /* Switch to the next log LEB */ 412 /* Switch to the next log LEB */
427 if (c->lhead_offs) { 413 if (c->lhead_offs) {
428 c->lhead_lnum = next_log_lnum(c, c->lhead_lnum); 414 c->lhead_lnum = ubifs_next_log_lnum(c, c->lhead_lnum);
429 c->lhead_offs = 0; 415 c->lhead_offs = 0;
430 } 416 }
431 417
@@ -446,7 +432,7 @@ int ubifs_log_start_commit(struct ubifs_info *c, int *ltail_lnum)
446 432
447 c->lhead_offs += len; 433 c->lhead_offs += len;
448 if (c->lhead_offs == c->leb_size) { 434 if (c->lhead_offs == c->leb_size) {
449 c->lhead_lnum = next_log_lnum(c, c->lhead_lnum); 435 c->lhead_lnum = ubifs_next_log_lnum(c, c->lhead_lnum);
450 c->lhead_offs = 0; 436 c->lhead_offs = 0;
451 } 437 }
452 438
@@ -533,7 +519,7 @@ int ubifs_log_post_commit(struct ubifs_info *c, int old_ltail_lnum)
533 } 519 }
534 mutex_lock(&c->log_mutex); 520 mutex_lock(&c->log_mutex);
535 for (lnum = old_ltail_lnum; lnum != c->ltail_lnum; 521 for (lnum = old_ltail_lnum; lnum != c->ltail_lnum;
536 lnum = next_log_lnum(c, lnum)) { 522 lnum = ubifs_next_log_lnum(c, lnum)) {
537 dbg_log("unmap log LEB %d", lnum); 523 dbg_log("unmap log LEB %d", lnum);
538 err = ubifs_leb_unmap(c, lnum); 524 err = ubifs_leb_unmap(c, lnum);
539 if (err) 525 if (err)
@@ -642,7 +628,7 @@ static int add_node(struct ubifs_info *c, void *buf, int *lnum, int *offs,
642 err = ubifs_leb_change(c, *lnum, buf, sz, UBI_SHORTTERM); 628 err = ubifs_leb_change(c, *lnum, buf, sz, UBI_SHORTTERM);
643 if (err) 629 if (err)
644 return err; 630 return err;
645 *lnum = next_log_lnum(c, *lnum); 631 *lnum = ubifs_next_log_lnum(c, *lnum);
646 *offs = 0; 632 *offs = 0;
647 } 633 }
648 memcpy(buf + *offs, node, len); 634 memcpy(buf + *offs, node, len);
@@ -712,7 +698,7 @@ int ubifs_consolidate_log(struct ubifs_info *c)
712 ubifs_scan_destroy(sleb); 698 ubifs_scan_destroy(sleb);
713 if (lnum == c->lhead_lnum) 699 if (lnum == c->lhead_lnum)
714 break; 700 break;
715 lnum = next_log_lnum(c, lnum); 701 lnum = ubifs_next_log_lnum(c, lnum);
716 } 702 }
717 if (offs) { 703 if (offs) {
718 int sz = ALIGN(offs, c->min_io_size); 704 int sz = ALIGN(offs, c->min_io_size);
@@ -732,7 +718,7 @@ int ubifs_consolidate_log(struct ubifs_info *c)
732 /* Unmap remaining LEBs */ 718 /* Unmap remaining LEBs */
733 lnum = write_lnum; 719 lnum = write_lnum;
734 do { 720 do {
735 lnum = next_log_lnum(c, lnum); 721 lnum = ubifs_next_log_lnum(c, lnum);
736 err = ubifs_leb_unmap(c, lnum); 722 err = ubifs_leb_unmap(c, lnum);
737 if (err) 723 if (err)
738 return err; 724 return err;
diff --git a/fs/ubifs/lprops.c b/fs/ubifs/lprops.c
index 0ee0847f2421..667884f4a615 100644
--- a/fs/ubifs/lprops.c
+++ b/fs/ubifs/lprops.c
@@ -1007,21 +1007,11 @@ out:
1007} 1007}
1008 1008
1009/** 1009/**
1010 * struct scan_check_data - data provided to scan callback function.
1011 * @lst: LEB properties statistics
1012 * @err: error code
1013 */
1014struct scan_check_data {
1015 struct ubifs_lp_stats lst;
1016 int err;
1017};
1018
1019/**
1020 * scan_check_cb - scan callback. 1010 * scan_check_cb - scan callback.
1021 * @c: the UBIFS file-system description object 1011 * @c: the UBIFS file-system description object
1022 * @lp: LEB properties to scan 1012 * @lp: LEB properties to scan
1023 * @in_tree: whether the LEB properties are in main memory 1013 * @in_tree: whether the LEB properties are in main memory
1024 * @data: information passed to and from the caller of the scan 1014 * @lst: lprops statistics to update
1025 * 1015 *
1026 * This function returns a code that indicates whether the scan should continue 1016 * This function returns a code that indicates whether the scan should continue
1027 * (%LPT_SCAN_CONTINUE), whether the LEB properties should be added to the tree 1017 * (%LPT_SCAN_CONTINUE), whether the LEB properties should be added to the tree
@@ -1030,11 +1020,10 @@ struct scan_check_data {
1030 */ 1020 */
1031static int scan_check_cb(struct ubifs_info *c, 1021static int scan_check_cb(struct ubifs_info *c,
1032 const struct ubifs_lprops *lp, int in_tree, 1022 const struct ubifs_lprops *lp, int in_tree,
1033 struct scan_check_data *data) 1023 struct ubifs_lp_stats *lst)
1034{ 1024{
1035 struct ubifs_scan_leb *sleb; 1025 struct ubifs_scan_leb *sleb;
1036 struct ubifs_scan_node *snod; 1026 struct ubifs_scan_node *snod;
1037 struct ubifs_lp_stats *lst = &data->lst;
1038 int cat, lnum = lp->lnum, is_idx = 0, used = 0, free, dirty, ret; 1027 int cat, lnum = lp->lnum, is_idx = 0, used = 0, free, dirty, ret;
1039 void *buf = NULL; 1028 void *buf = NULL;
1040 1029
@@ -1044,7 +1033,7 @@ static int scan_check_cb(struct ubifs_info *c,
1044 if (cat != (lp->flags & LPROPS_CAT_MASK)) { 1033 if (cat != (lp->flags & LPROPS_CAT_MASK)) {
1045 ubifs_err("bad LEB category %d expected %d", 1034 ubifs_err("bad LEB category %d expected %d",
1046 (lp->flags & LPROPS_CAT_MASK), cat); 1035 (lp->flags & LPROPS_CAT_MASK), cat);
1047 goto out; 1036 return -EINVAL;
1048 } 1037 }
1049 } 1038 }
1050 1039
@@ -1078,7 +1067,7 @@ static int scan_check_cb(struct ubifs_info *c,
1078 } 1067 }
1079 if (!found) { 1068 if (!found) {
1080 ubifs_err("bad LPT list (category %d)", cat); 1069 ubifs_err("bad LPT list (category %d)", cat);
1081 goto out; 1070 return -EINVAL;
1082 } 1071 }
1083 } 1072 }
1084 } 1073 }
@@ -1090,45 +1079,40 @@ static int scan_check_cb(struct ubifs_info *c,
1090 if ((lp->hpos != -1 && heap->arr[lp->hpos]->lnum != lnum) || 1079 if ((lp->hpos != -1 && heap->arr[lp->hpos]->lnum != lnum) ||
1091 lp != heap->arr[lp->hpos]) { 1080 lp != heap->arr[lp->hpos]) {
1092 ubifs_err("bad LPT heap (category %d)", cat); 1081 ubifs_err("bad LPT heap (category %d)", cat);
1093 goto out; 1082 return -EINVAL;
1094 } 1083 }
1095 } 1084 }
1096 1085
1097 buf = __vmalloc(c->leb_size, GFP_NOFS, PAGE_KERNEL); 1086 buf = __vmalloc(c->leb_size, GFP_NOFS, PAGE_KERNEL);
1098 if (!buf) { 1087 if (!buf)
1099 ubifs_err("cannot allocate memory to scan LEB %d", lnum); 1088 return -ENOMEM;
1100 goto out; 1089
1090 /*
1091 * After an unclean unmount, empty and freeable LEBs
1092 * may contain garbage - do not scan them.
1093 */
1094 if (lp->free == c->leb_size) {
1095 lst->empty_lebs += 1;
1096 lst->total_free += c->leb_size;
1097 lst->total_dark += ubifs_calc_dark(c, c->leb_size);
1098 return LPT_SCAN_CONTINUE;
1099 }
1100 if (lp->free + lp->dirty == c->leb_size &&
1101 !(lp->flags & LPROPS_INDEX)) {
1102 lst->total_free += lp->free;
1103 lst->total_dirty += lp->dirty;
1104 lst->total_dark += ubifs_calc_dark(c, c->leb_size);
1105 return LPT_SCAN_CONTINUE;
1101 } 1106 }
1102 1107
1103 sleb = ubifs_scan(c, lnum, 0, buf, 0); 1108 sleb = ubifs_scan(c, lnum, 0, buf, 0);
1104 if (IS_ERR(sleb)) { 1109 if (IS_ERR(sleb)) {
1105 /* 1110 ret = PTR_ERR(sleb);
1106 * After an unclean unmount, empty and freeable LEBs 1111 if (ret == -EUCLEAN) {
1107 * may contain garbage. 1112 dbg_dump_lprops(c);
1108 */ 1113 dbg_dump_budg(c, &c->bi);
1109 if (lp->free == c->leb_size) {
1110 ubifs_err("scan errors were in empty LEB "
1111 "- continuing checking");
1112 lst->empty_lebs += 1;
1113 lst->total_free += c->leb_size;
1114 lst->total_dark += ubifs_calc_dark(c, c->leb_size);
1115 ret = LPT_SCAN_CONTINUE;
1116 goto exit;
1117 }
1118
1119 if (lp->free + lp->dirty == c->leb_size &&
1120 !(lp->flags & LPROPS_INDEX)) {
1121 ubifs_err("scan errors were in freeable LEB "
1122 "- continuing checking");
1123 lst->total_free += lp->free;
1124 lst->total_dirty += lp->dirty;
1125 lst->total_dark += ubifs_calc_dark(c, c->leb_size);
1126 ret = LPT_SCAN_CONTINUE;
1127 goto exit;
1128 } 1114 }
1129 data->err = PTR_ERR(sleb); 1115 goto out;
1130 ret = LPT_SCAN_STOP;
1131 goto exit;
1132 } 1116 }
1133 1117
1134 is_idx = -1; 1118 is_idx = -1;
@@ -1246,10 +1230,8 @@ static int scan_check_cb(struct ubifs_info *c,
1246 } 1230 }
1247 1231
1248 ubifs_scan_destroy(sleb); 1232 ubifs_scan_destroy(sleb);
1249 ret = LPT_SCAN_CONTINUE;
1250exit:
1251 vfree(buf); 1233 vfree(buf);
1252 return ret; 1234 return LPT_SCAN_CONTINUE;
1253 1235
1254out_print: 1236out_print:
1255 ubifs_err("bad accounting of LEB %d: free %d, dirty %d flags %#x, " 1237 ubifs_err("bad accounting of LEB %d: free %d, dirty %d flags %#x, "
@@ -1258,10 +1240,10 @@ out_print:
1258 dbg_dump_leb(c, lnum); 1240 dbg_dump_leb(c, lnum);
1259out_destroy: 1241out_destroy:
1260 ubifs_scan_destroy(sleb); 1242 ubifs_scan_destroy(sleb);
1243 ret = -EINVAL;
1261out: 1244out:
1262 vfree(buf); 1245 vfree(buf);
1263 data->err = -EINVAL; 1246 return ret;
1264 return LPT_SCAN_STOP;
1265} 1247}
1266 1248
1267/** 1249/**
@@ -1278,8 +1260,7 @@ out:
1278int dbg_check_lprops(struct ubifs_info *c) 1260int dbg_check_lprops(struct ubifs_info *c)
1279{ 1261{
1280 int i, err; 1262 int i, err;
1281 struct scan_check_data data; 1263 struct ubifs_lp_stats lst;
1282 struct ubifs_lp_stats *lst = &data.lst;
1283 1264
1284 if (!(ubifs_chk_flags & UBIFS_CHK_LPROPS)) 1265 if (!(ubifs_chk_flags & UBIFS_CHK_LPROPS))
1285 return 0; 1266 return 0;
@@ -1294,29 +1275,23 @@ int dbg_check_lprops(struct ubifs_info *c)
1294 return err; 1275 return err;
1295 } 1276 }
1296 1277
1297 memset(lst, 0, sizeof(struct ubifs_lp_stats)); 1278 memset(&lst, 0, sizeof(struct ubifs_lp_stats));
1298
1299 data.err = 0;
1300 err = ubifs_lpt_scan_nolock(c, c->main_first, c->leb_cnt - 1, 1279 err = ubifs_lpt_scan_nolock(c, c->main_first, c->leb_cnt - 1,
1301 (ubifs_lpt_scan_callback)scan_check_cb, 1280 (ubifs_lpt_scan_callback)scan_check_cb,
1302 &data); 1281 &lst);
1303 if (err && err != -ENOSPC) 1282 if (err && err != -ENOSPC)
1304 goto out; 1283 goto out;
1305 if (data.err) {
1306 err = data.err;
1307 goto out;
1308 }
1309 1284
1310 if (lst->empty_lebs != c->lst.empty_lebs || 1285 if (lst.empty_lebs != c->lst.empty_lebs ||
1311 lst->idx_lebs != c->lst.idx_lebs || 1286 lst.idx_lebs != c->lst.idx_lebs ||
1312 lst->total_free != c->lst.total_free || 1287 lst.total_free != c->lst.total_free ||
1313 lst->total_dirty != c->lst.total_dirty || 1288 lst.total_dirty != c->lst.total_dirty ||
1314 lst->total_used != c->lst.total_used) { 1289 lst.total_used != c->lst.total_used) {
1315 ubifs_err("bad overall accounting"); 1290 ubifs_err("bad overall accounting");
1316 ubifs_err("calculated: empty_lebs %d, idx_lebs %d, " 1291 ubifs_err("calculated: empty_lebs %d, idx_lebs %d, "
1317 "total_free %lld, total_dirty %lld, total_used %lld", 1292 "total_free %lld, total_dirty %lld, total_used %lld",
1318 lst->empty_lebs, lst->idx_lebs, lst->total_free, 1293 lst.empty_lebs, lst.idx_lebs, lst.total_free,
1319 lst->total_dirty, lst->total_used); 1294 lst.total_dirty, lst.total_used);
1320 ubifs_err("read from lprops: empty_lebs %d, idx_lebs %d, " 1295 ubifs_err("read from lprops: empty_lebs %d, idx_lebs %d, "
1321 "total_free %lld, total_dirty %lld, total_used %lld", 1296 "total_free %lld, total_dirty %lld, total_used %lld",
1322 c->lst.empty_lebs, c->lst.idx_lebs, c->lst.total_free, 1297 c->lst.empty_lebs, c->lst.idx_lebs, c->lst.total_free,
@@ -1325,11 +1300,11 @@ int dbg_check_lprops(struct ubifs_info *c)
1325 goto out; 1300 goto out;
1326 } 1301 }
1327 1302
1328 if (lst->total_dead != c->lst.total_dead || 1303 if (lst.total_dead != c->lst.total_dead ||
1329 lst->total_dark != c->lst.total_dark) { 1304 lst.total_dark != c->lst.total_dark) {
1330 ubifs_err("bad dead/dark space accounting"); 1305 ubifs_err("bad dead/dark space accounting");
1331 ubifs_err("calculated: total_dead %lld, total_dark %lld", 1306 ubifs_err("calculated: total_dead %lld, total_dark %lld",
1332 lst->total_dead, lst->total_dark); 1307 lst.total_dead, lst.total_dark);
1333 ubifs_err("read from lprops: total_dead %lld, total_dark %lld", 1308 ubifs_err("read from lprops: total_dead %lld, total_dark %lld",
1334 c->lst.total_dead, c->lst.total_dark); 1309 c->lst.total_dead, c->lst.total_dark);
1335 err = -EINVAL; 1310 err = -EINVAL;
diff --git a/fs/ubifs/lpt_commit.c b/fs/ubifs/lpt_commit.c
index 0c9c69bd983a..dfcb5748a7dc 100644
--- a/fs/ubifs/lpt_commit.c
+++ b/fs/ubifs/lpt_commit.c
@@ -29,6 +29,12 @@
29#include <linux/slab.h> 29#include <linux/slab.h>
30#include "ubifs.h" 30#include "ubifs.h"
31 31
32#ifdef CONFIG_UBIFS_FS_DEBUG
33static int dbg_populate_lsave(struct ubifs_info *c);
34#else
35#define dbg_populate_lsave(c) 0
36#endif
37
32/** 38/**
33 * first_dirty_cnode - find first dirty cnode. 39 * first_dirty_cnode - find first dirty cnode.
34 * @c: UBIFS file-system description object 40 * @c: UBIFS file-system description object
@@ -586,7 +592,7 @@ static struct ubifs_pnode *next_pnode_to_dirty(struct ubifs_info *c,
586 if (nnode->nbranch[iip].lnum) 592 if (nnode->nbranch[iip].lnum)
587 break; 593 break;
588 } 594 }
589 } while (iip >= UBIFS_LPT_FANOUT); 595 } while (iip >= UBIFS_LPT_FANOUT);
590 596
591 /* Go right */ 597 /* Go right */
592 nnode = ubifs_get_nnode(c, nnode, iip); 598 nnode = ubifs_get_nnode(c, nnode, iip);
@@ -815,6 +821,10 @@ static void populate_lsave(struct ubifs_info *c)
815 c->lpt_drty_flgs |= LSAVE_DIRTY; 821 c->lpt_drty_flgs |= LSAVE_DIRTY;
816 ubifs_add_lpt_dirt(c, c->lsave_lnum, c->lsave_sz); 822 ubifs_add_lpt_dirt(c, c->lsave_lnum, c->lsave_sz);
817 } 823 }
824
825 if (dbg_populate_lsave(c))
826 return;
827
818 list_for_each_entry(lprops, &c->empty_list, list) { 828 list_for_each_entry(lprops, &c->empty_list, list) {
819 c->lsave[cnt++] = lprops->lnum; 829 c->lsave[cnt++] = lprops->lnum;
820 if (cnt >= c->lsave_cnt) 830 if (cnt >= c->lsave_cnt)
@@ -1994,4 +2004,47 @@ void dbg_dump_lpt_lebs(const struct ubifs_info *c)
1994 current->pid); 2004 current->pid);
1995} 2005}
1996 2006
2007/**
2008 * dbg_populate_lsave - debugging version of 'populate_lsave()'
2009 * @c: UBIFS file-system description object
2010 *
2011 * This is a debugging version of 'populate_lsave()' which populates lsave
2012 * with random LEBs instead of useful LEBs, which is good for test coverage.
2013 * Returns zero if lsave has not been populated (this debugging feature is
2014 * disabled) and non-zero if lsave has been populated.
2015 */
2016static int dbg_populate_lsave(struct ubifs_info *c)
2017{
2018 struct ubifs_lprops *lprops;
2019 struct ubifs_lpt_heap *heap;
2020 int i;
2021
2022 if (!(ubifs_chk_flags & UBIFS_CHK_GEN))
2023 return 0;
2024 if (random32() & 3)
2025 return 0;
2026
2027 for (i = 0; i < c->lsave_cnt; i++)
2028 c->lsave[i] = c->main_first;
2029
2030 list_for_each_entry(lprops, &c->empty_list, list)
2031 c->lsave[random32() % c->lsave_cnt] = lprops->lnum;
2032 list_for_each_entry(lprops, &c->freeable_list, list)
2033 c->lsave[random32() % c->lsave_cnt] = lprops->lnum;
2034 list_for_each_entry(lprops, &c->frdi_idx_list, list)
2035 c->lsave[random32() % c->lsave_cnt] = lprops->lnum;
2036
2037 heap = &c->lpt_heap[LPROPS_DIRTY_IDX - 1];
2038 for (i = 0; i < heap->cnt; i++)
2039 c->lsave[random32() % c->lsave_cnt] = heap->arr[i]->lnum;
2040 heap = &c->lpt_heap[LPROPS_DIRTY - 1];
2041 for (i = 0; i < heap->cnt; i++)
2042 c->lsave[random32() % c->lsave_cnt] = heap->arr[i]->lnum;
2043 heap = &c->lpt_heap[LPROPS_FREE - 1];
2044 for (i = 0; i < heap->cnt; i++)
2045 c->lsave[random32() % c->lsave_cnt] = heap->arr[i]->lnum;
2046
2047 return 1;
2048}
2049
1997#endif /* CONFIG_UBIFS_FS_DEBUG */ 2050#endif /* CONFIG_UBIFS_FS_DEBUG */
diff --git a/fs/ubifs/master.c b/fs/ubifs/master.c
index 21f47afdacff..278c2382e8c2 100644
--- a/fs/ubifs/master.c
+++ b/fs/ubifs/master.c
@@ -148,7 +148,7 @@ static int validate_master(const struct ubifs_info *c)
148 } 148 }
149 149
150 main_sz = (long long)c->main_lebs * c->leb_size; 150 main_sz = (long long)c->main_lebs * c->leb_size;
151 if (c->old_idx_sz & 7 || c->old_idx_sz >= main_sz) { 151 if (c->bi.old_idx_sz & 7 || c->bi.old_idx_sz >= main_sz) {
152 err = 9; 152 err = 9;
153 goto out; 153 goto out;
154 } 154 }
@@ -218,7 +218,7 @@ static int validate_master(const struct ubifs_info *c)
218 } 218 }
219 219
220 if (c->lst.total_dead + c->lst.total_dark + 220 if (c->lst.total_dead + c->lst.total_dark +
221 c->lst.total_used + c->old_idx_sz > main_sz) { 221 c->lst.total_used + c->bi.old_idx_sz > main_sz) {
222 err = 21; 222 err = 21;
223 goto out; 223 goto out;
224 } 224 }
@@ -286,7 +286,7 @@ int ubifs_read_master(struct ubifs_info *c)
286 c->gc_lnum = le32_to_cpu(c->mst_node->gc_lnum); 286 c->gc_lnum = le32_to_cpu(c->mst_node->gc_lnum);
287 c->ihead_lnum = le32_to_cpu(c->mst_node->ihead_lnum); 287 c->ihead_lnum = le32_to_cpu(c->mst_node->ihead_lnum);
288 c->ihead_offs = le32_to_cpu(c->mst_node->ihead_offs); 288 c->ihead_offs = le32_to_cpu(c->mst_node->ihead_offs);
289 c->old_idx_sz = le64_to_cpu(c->mst_node->index_size); 289 c->bi.old_idx_sz = le64_to_cpu(c->mst_node->index_size);
290 c->lpt_lnum = le32_to_cpu(c->mst_node->lpt_lnum); 290 c->lpt_lnum = le32_to_cpu(c->mst_node->lpt_lnum);
291 c->lpt_offs = le32_to_cpu(c->mst_node->lpt_offs); 291 c->lpt_offs = le32_to_cpu(c->mst_node->lpt_offs);
292 c->nhead_lnum = le32_to_cpu(c->mst_node->nhead_lnum); 292 c->nhead_lnum = le32_to_cpu(c->mst_node->nhead_lnum);
@@ -305,7 +305,7 @@ int ubifs_read_master(struct ubifs_info *c)
305 c->lst.total_dead = le64_to_cpu(c->mst_node->total_dead); 305 c->lst.total_dead = le64_to_cpu(c->mst_node->total_dead);
306 c->lst.total_dark = le64_to_cpu(c->mst_node->total_dark); 306 c->lst.total_dark = le64_to_cpu(c->mst_node->total_dark);
307 307
308 c->calc_idx_sz = c->old_idx_sz; 308 c->calc_idx_sz = c->bi.old_idx_sz;
309 309
310 if (c->mst_node->flags & cpu_to_le32(UBIFS_MST_NO_ORPHS)) 310 if (c->mst_node->flags & cpu_to_le32(UBIFS_MST_NO_ORPHS))
311 c->no_orphs = 1; 311 c->no_orphs = 1;
diff --git a/fs/ubifs/misc.h b/fs/ubifs/misc.h
index c3de04dc952a..0b5296a9a4c5 100644
--- a/fs/ubifs/misc.h
+++ b/fs/ubifs/misc.h
@@ -340,4 +340,21 @@ static inline void ubifs_release_lprops(struct ubifs_info *c)
340 mutex_unlock(&c->lp_mutex); 340 mutex_unlock(&c->lp_mutex);
341} 341}
342 342
343/**
344 * ubifs_next_log_lnum - switch to the next log LEB.
345 * @c: UBIFS file-system description object
346 * @lnum: current log LEB
347 *
348 * This helper function returns the log LEB number which goes next after LEB
349 * 'lnum'.
350 */
351static inline int ubifs_next_log_lnum(const struct ubifs_info *c, int lnum)
352{
353 lnum += 1;
354 if (lnum > c->log_last)
355 lnum = UBIFS_LOG_LNUM;
356
357 return lnum;
358}
359
343#endif /* __UBIFS_MISC_H__ */ 360#endif /* __UBIFS_MISC_H__ */
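A usage sketch of the now-shared helper, mirroring the loop in ubifs_log_post_commit() converted above; the log LEBs form a circular range [UBIFS_LOG_LNUM, c->log_last]:

int lnum;

for (lnum = old_ltail_lnum; lnum != c->ltail_lnum;
     lnum = ubifs_next_log_lnum(c, lnum))
	dbg_log("unmap log LEB %d", lnum);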
diff --git a/fs/ubifs/orphan.c b/fs/ubifs/orphan.c
index 09df318e368f..bd644bf587a8 100644
--- a/fs/ubifs/orphan.c
+++ b/fs/ubifs/orphan.c
@@ -673,7 +673,8 @@ static int kill_orphans(struct ubifs_info *c)
673 sleb = ubifs_scan(c, lnum, 0, c->sbuf, 1); 673 sleb = ubifs_scan(c, lnum, 0, c->sbuf, 1);
674 if (IS_ERR(sleb)) { 674 if (IS_ERR(sleb)) {
675 if (PTR_ERR(sleb) == -EUCLEAN) 675 if (PTR_ERR(sleb) == -EUCLEAN)
676 sleb = ubifs_recover_leb(c, lnum, 0, c->sbuf, 0); 676 sleb = ubifs_recover_leb(c, lnum, 0,
677 c->sbuf, 0);
677 if (IS_ERR(sleb)) { 678 if (IS_ERR(sleb)) {
678 err = PTR_ERR(sleb); 679 err = PTR_ERR(sleb);
679 break; 680 break;
diff --git a/fs/ubifs/recovery.c b/fs/ubifs/recovery.c
index 3dbad6fbd1eb..731d9e2e7b50 100644
--- a/fs/ubifs/recovery.c
+++ b/fs/ubifs/recovery.c
@@ -564,13 +564,16 @@ static int fix_unclean_leb(struct ubifs_info *c, struct ubifs_scan_leb *sleb,
564} 564}
565 565
566/** 566/**
567 * drop_incomplete_group - drop nodes from an incomplete group. 567 * drop_last_node - drop the last node or group of nodes.
568 * @sleb: scanned LEB information 568 * @sleb: scanned LEB information
569 * @offs: offset of dropped nodes is returned here 569 * @offs: offset of dropped nodes is returned here
570 * @grouped: non-zero if whole group of nodes have to be dropped
570 * 571 *
571 * This function returns %1 if nodes are dropped and %0 otherwise. 572 * This is a helper function for 'ubifs_recover_leb()' which drops the last
573 * node of the scanned LEB or the last group of nodes if @grouped is not zero.
574 * This function returns %1 if a node was dropped and %0 otherwise.
572 */ 575 */
573static int drop_incomplete_group(struct ubifs_scan_leb *sleb, int *offs) 576static int drop_last_node(struct ubifs_scan_leb *sleb, int *offs, int grouped)
574{ 577{
575 int dropped = 0; 578 int dropped = 0;
576 579
@@ -589,6 +592,8 @@ static int drop_incomplete_group(struct ubifs_scan_leb *sleb, int *offs)
589 kfree(snod); 592 kfree(snod);
590 sleb->nodes_cnt -= 1; 593 sleb->nodes_cnt -= 1;
591 dropped = 1; 594 dropped = 1;
595 if (!grouped)
596 break;
592 } 597 }
593 return dropped; 598 return dropped;
594} 599}
@@ -609,8 +614,7 @@ static int drop_incomplete_group(struct ubifs_scan_leb *sleb, int *offs)
609struct ubifs_scan_leb *ubifs_recover_leb(struct ubifs_info *c, int lnum, 614struct ubifs_scan_leb *ubifs_recover_leb(struct ubifs_info *c, int lnum,
610 int offs, void *sbuf, int grouped) 615 int offs, void *sbuf, int grouped)
611{ 616{
612 int err, len = c->leb_size - offs, need_clean = 0, quiet = 1; 617 int ret = 0, err, len = c->leb_size - offs, start = offs, min_io_unit;
613 int empty_chkd = 0, start = offs;
614 struct ubifs_scan_leb *sleb; 618 struct ubifs_scan_leb *sleb;
615 void *buf = sbuf + offs; 619 void *buf = sbuf + offs;
616 620
@@ -620,12 +624,8 @@ struct ubifs_scan_leb *ubifs_recover_leb(struct ubifs_info *c, int lnum,
620 if (IS_ERR(sleb)) 624 if (IS_ERR(sleb))
621 return sleb; 625 return sleb;
622 626
623 if (sleb->ecc) 627 ubifs_assert(len >= 8);
624 need_clean = 1;
625
626 while (len >= 8) { 628 while (len >= 8) {
627 int ret;
628
629 dbg_scan("look at LEB %d:%d (%d bytes left)", 629 dbg_scan("look at LEB %d:%d (%d bytes left)",
630 lnum, offs, len); 630 lnum, offs, len);
631 631
@@ -635,8 +635,7 @@ struct ubifs_scan_leb *ubifs_recover_leb(struct ubifs_info *c, int lnum,
635 * Scan quietly until there is an error from which we cannot 635 * Scan quietly until there is an error from which we cannot
636 * recover 636 * recover
637 */ 637 */
638 ret = ubifs_scan_a_node(c, buf, len, lnum, offs, quiet); 638 ret = ubifs_scan_a_node(c, buf, len, lnum, offs, 0);
639
640 if (ret == SCANNED_A_NODE) { 639 if (ret == SCANNED_A_NODE) {
641 /* A valid node, and not a padding node */ 640 /* A valid node, and not a padding node */
642 struct ubifs_ch *ch = buf; 641 struct ubifs_ch *ch = buf;
@@ -649,70 +648,32 @@ struct ubifs_scan_leb *ubifs_recover_leb(struct ubifs_info *c, int lnum,
649 offs += node_len; 648 offs += node_len;
650 buf += node_len; 649 buf += node_len;
651 len -= node_len; 650 len -= node_len;
652 continue; 651 } else if (ret > 0) {
653 }
654
655 if (ret > 0) {
656 /* Padding bytes or a valid padding node */ 652 /* Padding bytes or a valid padding node */
657 offs += ret; 653 offs += ret;
658 buf += ret; 654 buf += ret;
659 len -= ret; 655 len -= ret;
660 continue; 656 } else if (ret == SCANNED_EMPTY_SPACE ||
661 } 657 ret == SCANNED_GARBAGE ||
662 658 ret == SCANNED_A_BAD_PAD_NODE ||
663 if (ret == SCANNED_EMPTY_SPACE) { 659 ret == SCANNED_A_CORRUPT_NODE) {
664 if (!is_empty(buf, len)) { 660 dbg_rcvry("found corruption - %d", ret);
665 if (!is_last_write(c, buf, offs))
666 break;
667 clean_buf(c, &buf, lnum, &offs, &len);
668 need_clean = 1;
669 }
670 empty_chkd = 1;
671 break; 661 break;
672 } 662 } else {
673 663 dbg_err("unexpected return value %d", ret);
674 if (ret == SCANNED_GARBAGE || ret == SCANNED_A_BAD_PAD_NODE)
675 if (is_last_write(c, buf, offs)) {
676 clean_buf(c, &buf, lnum, &offs, &len);
677 need_clean = 1;
678 empty_chkd = 1;
679 break;
680 }
681
682 if (ret == SCANNED_A_CORRUPT_NODE)
683 if (no_more_nodes(c, buf, len, lnum, offs)) {
684 clean_buf(c, &buf, lnum, &offs, &len);
685 need_clean = 1;
686 empty_chkd = 1;
687 break;
688 }
689
690 if (quiet) {
691 /* Redo the last scan but noisily */
692 quiet = 0;
693 continue;
694 }
695
696 switch (ret) {
697 case SCANNED_GARBAGE:
698 dbg_err("garbage");
699 goto corrupted;
700 case SCANNED_A_CORRUPT_NODE:
701 case SCANNED_A_BAD_PAD_NODE:
702 dbg_err("bad node");
703 goto corrupted;
704 default:
705 dbg_err("unknown");
706 err = -EINVAL; 664 err = -EINVAL;
707 goto error; 665 goto error;
708 } 666 }
709 } 667 }
710 668
711 if (!empty_chkd && !is_empty(buf, len)) { 669 if (ret == SCANNED_GARBAGE || ret == SCANNED_A_BAD_PAD_NODE) {
712 if (is_last_write(c, buf, offs)) { 670 if (!is_last_write(c, buf, offs))
713 clean_buf(c, &buf, lnum, &offs, &len); 671 goto corrupted_rescan;
714 need_clean = 1; 672 } else if (ret == SCANNED_A_CORRUPT_NODE) {
715 } else { 673 if (!no_more_nodes(c, buf, len, lnum, offs))
674 goto corrupted_rescan;
675 } else if (!is_empty(buf, len)) {
676 if (!is_last_write(c, buf, offs)) {
716 int corruption = first_non_ff(buf, len); 677 int corruption = first_non_ff(buf, len);
717 678
718 /* 679 /*
@@ -728,29 +689,82 @@ struct ubifs_scan_leb *ubifs_recover_leb(struct ubifs_info *c, int lnum,
728 } 689 }
729 } 690 }
730 691
731 /* Drop nodes from incomplete group */ 692 min_io_unit = round_down(offs, c->min_io_size);
732 if (grouped && drop_incomplete_group(sleb, &offs)) { 693 if (grouped)
733 buf = sbuf + offs; 694 /*
734 len = c->leb_size - offs; 695 * If nodes are grouped, always drop the incomplete group at
735 clean_buf(c, &buf, lnum, &offs, &len); 696 * the end.
736 need_clean = 1; 697 */
737 } 698 drop_last_node(sleb, &offs, 1);
738 699
739 if (offs % c->min_io_size) { 700 /*
740 clean_buf(c, &buf, lnum, &offs, &len); 701 * While we are in the middle of the same min. I/O unit, keep dropping
741 need_clean = 1; 702 * nodes. So basically, what we want is to make sure that the last min.
742 } 703 * I/O unit where we saw the corruption is dropped completely with all
704 * the uncorrupted nodes which may possibly sit there.
705 *
706 * In other words, let's name the min. I/O unit where the corruption
707 * starts B, and the previous min. I/O unit A. The below code tries to
708 * deal with a situation when half of B contains valid nodes or the end
709 * of a valid node, and the second half of B contains corrupted data or
710 * garbage. This means that UBIFS had been writing to B just before the
711 * power cut happened. I do not know how realistic this scenario is
712 * (half of the min. I/O unit written successfully, the other half
713 * not), but it is possible in our 'failure mode emulation'
714 * infrastructure at least.
715 *
716 * So what is the problem, why do we need to drop those nodes? Why can't
717 * we just clean up the second half of B by putting a padding node
718 * there? We can, and this works fine with one exception which was
719 * reproduced with power cut emulation testing and happens extremely
720 * rarely. The description follows, but it is worth noting that this is
721 * only about the GC head, so we could do this trick only if the bud
722 * belongs to the GC head, but it does not seem to be worth an
723 * additional "if" statement.
724 *
725 * So, imagine the file-system is full, we run GC which is moving valid
726 * nodes from LEB X to LEB Y (obviously, LEB Y is the current GC head
727 * LEB). The @c->gc_lnum is -1, which means that GC will retain LEB X
728 * and will try to continue. Imagine that LEB X is currently the
729 * dirtiest LEB, and the amount of used space in LEB Y is exactly the
730 * same as the amount of free space in LEB X.
731 *
732 * And a power cut happens when nodes are moved from LEB X to LEB Y. We
733 * are here trying to recover LEB Y which is the GC head LEB. We find
734 * the min. I/O unit B as described above. Then we clean up LEB Y by
735 * padding the min. I/O unit. And later the 'ubifs_rcvry_gc_commit()' function
736 * fails, because it cannot find a dirty LEB which could be GC'd into
737 * LEB Y! Even LEB X does not match because the amount of valid nodes
738 * there does not fit the free space in LEB Y any more! And this is
739 * because of the padding node which we added to LEB Y. The
740 * user-visible effect of this, which I once observed and analysed, is
741 * that mounting the file-system fails with an -ENOSPC error.
742 *
743 * So obviously, to make sure that situation does not happen we should
744 * free min. I/O unit B in LEB Y completely and the last used min. I/O
745 * unit in LEB Y should be A. This is basically what the below code
746 * tries to do.
747 */
748 while (min_io_unit == round_down(offs, c->min_io_size) &&
749 min_io_unit != offs &&
750 drop_last_node(sleb, &offs, grouped));
751
752 buf = sbuf + offs;
753 len = c->leb_size - offs;
743 754
755 clean_buf(c, &buf, lnum, &offs, &len);
744 ubifs_end_scan(c, sleb, lnum, offs); 756 ubifs_end_scan(c, sleb, lnum, offs);
745 757
746 if (need_clean) { 758 err = fix_unclean_leb(c, sleb, start);
747 err = fix_unclean_leb(c, sleb, start); 759 if (err)
748 if (err) 760 goto error;
749 goto error;
750 }
751 761
752 return sleb; 762 return sleb;
753 763
764corrupted_rescan:
765 /* Re-scan the corrupted data with verbose messages */
766 dbg_err("corruptio %d", ret);
767 ubifs_scan_a_node(c, buf, len, lnum, offs, 1);
754corrupted: 768corrupted:
755 ubifs_scanned_corruption(c, lnum, offs, buf); 769 ubifs_scanned_corruption(c, lnum, offs, buf);
756 err = -EUCLEAN; 770 err = -EUCLEAN;
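A worked example of the drop loop above, with hypothetical numbers: assume c->min_io_size == 2048 and scanning stopped at offs == 5000, so min_io_unit == round_down(5000, 2048) == 4096. While the shrinking offs still rounds down to 4096 and is not exactly 4096 (say it falls to 4300), drop_last_node() keeps removing the trailing node; once offs reaches 4096 itself, or drops into the previous unit (e.g. 3500, which rounds down to 2048), the loop stops. Min. I/O unit B is then guaranteed to be entirely unused, leaving A as the last used unit.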
@@ -1070,6 +1084,53 @@ int ubifs_clean_lebs(const struct ubifs_info *c, void *sbuf)
1070} 1084}
1071 1085
1072/** 1086/**
1087 * grab_empty_leb - grab an empty LEB to use as GC LEB and run commit.
1088 * @c: UBIFS file-system description object
1089 *
1090 * This is a helper function for 'ubifs_rcvry_gc_commit()' which grabs an empty
1091 * LEB to be used as GC LEB (@c->gc_lnum), and then runs the commit. Returns
1092 * zero in case of success and a negative error code in case of failure.
1093 */
1094static int grab_empty_leb(struct ubifs_info *c)
1095{
1096 int lnum, err;
1097
1098 /*
1099 * Note, it is very important to first search for an empty LEB and then
1100 * run the commit, not vice-versa. The reason is that there might be
1101 * only one empty LEB at the moment, the one which has been the
1102 * @c->gc_lnum just before the power cut happened. During the regular
1103 * UBIFS operation (not now) @c->gc_lnum is marked as "taken", so no
1104 * one but GC can grab it. But at this moment this single empty LEB is
1105 * not marked as taken, so if we run commit - what happens? Right, the
1106 * commit will grab it and write the index there. Remember that the
1107 * index always expands as long as there is free space, and it only
1108 * starts consolidating when we run out of space.
1109 *
1110 * IOW, if we run commit now, we might not be able to find a free LEB
1111 * after this.
1112 */
1113 lnum = ubifs_find_free_leb_for_idx(c);
1114 if (lnum < 0) {
1115 dbg_err("could not find an empty LEB");
1116 dbg_dump_lprops(c);
1117 dbg_dump_budg(c, &c->bi);
1118 return lnum;
1119 }
1120
1121 /* Reset the index flag */
1122 err = ubifs_change_one_lp(c, lnum, LPROPS_NC, LPROPS_NC, 0,
1123 LPROPS_INDEX, 0);
1124 if (err)
1125 return err;
1126
1127 c->gc_lnum = lnum;
1128 dbg_rcvry("found empty LEB %d, run commit", lnum);
1129
1130 return ubifs_run_commit(c);
1131}
1132
1133/**
1073 * ubifs_rcvry_gc_commit - recover the GC LEB number and run the commit. 1134 * ubifs_rcvry_gc_commit - recover the GC LEB number and run the commit.
1074 * @c: UBIFS file-system description object 1135 * @c: UBIFS file-system description object
1075 * 1136 *
@@ -1091,71 +1152,26 @@ int ubifs_rcvry_gc_commit(struct ubifs_info *c)
1091{ 1152{
1092 struct ubifs_wbuf *wbuf = &c->jheads[GCHD].wbuf; 1153 struct ubifs_wbuf *wbuf = &c->jheads[GCHD].wbuf;
1093 struct ubifs_lprops lp; 1154 struct ubifs_lprops lp;
1094 int lnum, err; 1155 int err;
1156
1157 dbg_rcvry("GC head LEB %d, offs %d", wbuf->lnum, wbuf->offs);
1095 1158
1096 c->gc_lnum = -1; 1159 c->gc_lnum = -1;
1097 if (wbuf->lnum == -1) { 1160 if (wbuf->lnum == -1 || wbuf->offs == c->leb_size)
1098 dbg_rcvry("no GC head LEB"); 1161 return grab_empty_leb(c);
1099 goto find_free; 1162
1100 }
1101 /*
1102 * See whether the used space in the dirtiest LEB fits in the GC head
1103 * LEB.
1104 */
1105 if (wbuf->offs == c->leb_size) {
1106 dbg_rcvry("no room in GC head LEB");
1107 goto find_free;
1108 }
1109 err = ubifs_find_dirty_leb(c, &lp, wbuf->offs, 2); 1163 err = ubifs_find_dirty_leb(c, &lp, wbuf->offs, 2);
1110 if (err) { 1164 if (err) {
1111 /* 1165 if (err != -ENOSPC)
1112 * There are no dirty or empty LEBs subject to here being
1113 * enough for the index. Try to use
1114 * 'ubifs_find_free_leb_for_idx()', which will return any empty
1115 * LEBs (ignoring index requirements). If the index then
1116 * doesn't have enough LEBs the recovery commit will fail -
1117 * which is the same result anyway i.e. recovery fails. So
1118 * there is no problem ignoring index requirements and just
1119 * grabbing a free LEB since we have already established there
1120 * is not a dirty LEB we could have used instead.
1121 */
1122 if (err == -ENOSPC) {
1123 dbg_rcvry("could not find a dirty LEB");
1124 goto find_free;
1125 }
1126 return err;
1127 }
1128 ubifs_assert(!(lp.flags & LPROPS_INDEX));
1129 lnum = lp.lnum;
1130 if (lp.free + lp.dirty == c->leb_size) {
1131 /* An empty LEB was returned */
1132 if (lp.free != c->leb_size) {
1133 err = ubifs_change_one_lp(c, lnum, c->leb_size,
1134 0, 0, 0, 0);
1135 if (err)
1136 return err;
1137 }
1138 err = ubifs_leb_unmap(c, lnum);
1139 if (err)
1140 return err; 1166 return err;
1141 c->gc_lnum = lnum; 1167
1142 dbg_rcvry("allocated LEB %d for GC", lnum); 1168 dbg_rcvry("could not find a dirty LEB");
1143 /* Run the commit */ 1169 return grab_empty_leb(c);
1144 dbg_rcvry("committing");
1145 return ubifs_run_commit(c);
1146 }
1147 /*
1148 * There was no empty LEB so the used space in the dirtiest LEB must fit
1149 * in the GC head LEB.
1150 */
1151 if (lp.free + lp.dirty < wbuf->offs) {
1152 dbg_rcvry("LEB %d doesn't fit in GC head LEB %d:%d",
1153 lnum, wbuf->lnum, wbuf->offs);
1154 err = ubifs_return_leb(c, lnum);
1155 if (err)
1156 return err;
1157 goto find_free;
1158 } 1170 }
1171
1172 ubifs_assert(!(lp.flags & LPROPS_INDEX));
1173 ubifs_assert(lp.free + lp.dirty >= wbuf->offs);
1174
1159 /* 1175 /*
1160 * We run the commit before garbage collection otherwise subsequent 1176 * We run the commit before garbage collection otherwise subsequent
1161 * mounts will see the GC and orphan deletion in a different order. 1177 * mounts will see the GC and orphan deletion in a different order.
@@ -1164,11 +1180,8 @@ int ubifs_rcvry_gc_commit(struct ubifs_info *c)
1164 err = ubifs_run_commit(c); 1180 err = ubifs_run_commit(c);
1165 if (err) 1181 if (err)
1166 return err; 1182 return err;
1167 /* 1183
1168 * The data in the dirtiest LEB fits in the GC head LEB, so do the GC 1184 dbg_rcvry("GC'ing LEB %d", lp.lnum);
1169 * - use locking to keep 'ubifs_assert()' happy.
1170 */
1171 dbg_rcvry("GC'ing LEB %d", lnum);
1172 mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead); 1185 mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead);
1173 err = ubifs_garbage_collect_leb(c, &lp); 1186 err = ubifs_garbage_collect_leb(c, &lp);
1174 if (err >= 0) { 1187 if (err >= 0) {
@@ -1184,37 +1197,17 @@ int ubifs_rcvry_gc_commit(struct ubifs_info *c)
1184 err = -EINVAL; 1197 err = -EINVAL;
1185 return err; 1198 return err;
1186 } 1199 }
1187 if (err != LEB_RETAINED) { 1200
1188 dbg_err("GC returned %d", err); 1201 ubifs_assert(err == LEB_RETAINED);
1202 if (err != LEB_RETAINED)
1189 return -EINVAL; 1203 return -EINVAL;
1190 } 1204
1191 err = ubifs_leb_unmap(c, c->gc_lnum); 1205 err = ubifs_leb_unmap(c, c->gc_lnum);
1192 if (err) 1206 if (err)
1193 return err; 1207 return err;
1194 dbg_rcvry("allocated LEB %d for GC", lnum);
1195 return 0;
1196 1208
1197find_free: 1209 dbg_rcvry("allocated LEB %d for GC", lp.lnum);
1198 /* 1210 return 0;
1199 * There is no GC head LEB or the free space in the GC head LEB is too
1200 * small, or there are not dirty LEBs. Allocate gc_lnum by calling
1201 * 'ubifs_find_free_leb_for_idx()' so GC is not run.
1202 */
1203 lnum = ubifs_find_free_leb_for_idx(c);
1204 if (lnum < 0) {
1205 dbg_err("could not find an empty LEB");
1206 return lnum;
1207 }
1208 /* And reset the index flag */
1209 err = ubifs_change_one_lp(c, lnum, LPROPS_NC, LPROPS_NC, 0,
1210 LPROPS_INDEX, 0);
1211 if (err)
1212 return err;
1213 c->gc_lnum = lnum;
1214 dbg_rcvry("allocated LEB %d for GC", lnum);
1215 /* Run the commit */
1216 dbg_rcvry("committing");
1217 return ubifs_run_commit(c);
1218} 1211}
1219 1212
1220/** 1213/**
@@ -1456,7 +1449,7 @@ static int fix_size_in_place(struct ubifs_info *c, struct size_entry *e)
1456 err = ubi_leb_change(c->ubi, lnum, c->sbuf, len, UBI_UNKNOWN); 1449 err = ubi_leb_change(c->ubi, lnum, c->sbuf, len, UBI_UNKNOWN);
1457 if (err) 1450 if (err)
1458 goto out; 1451 goto out;
1459 dbg_rcvry("inode %lu at %d:%d size %lld -> %lld ", 1452 dbg_rcvry("inode %lu at %d:%d size %lld -> %lld",
1460 (unsigned long)e->inum, lnum, offs, i_size, e->d_size); 1453 (unsigned long)e->inum, lnum, offs, i_size, e->d_size);
1461 return 0; 1454 return 0;
1462 1455
@@ -1505,20 +1498,27 @@ int ubifs_recover_size(struct ubifs_info *c)
1505 e->i_size = le64_to_cpu(ino->size); 1498 e->i_size = le64_to_cpu(ino->size);
1506 } 1499 }
1507 } 1500 }
1501
1508 if (e->exists && e->i_size < e->d_size) { 1502 if (e->exists && e->i_size < e->d_size) {
1509 if (!e->inode && c->ro_mount) { 1503 if (c->ro_mount) {
1510 /* Fix the inode size and pin it in memory */ 1504 /* Fix the inode size and pin it in memory */
1511 struct inode *inode; 1505 struct inode *inode;
1506 struct ubifs_inode *ui;
1507
1508 ubifs_assert(!e->inode);
1512 1509
1513 inode = ubifs_iget(c->vfs_sb, e->inum); 1510 inode = ubifs_iget(c->vfs_sb, e->inum);
1514 if (IS_ERR(inode)) 1511 if (IS_ERR(inode))
1515 return PTR_ERR(inode); 1512 return PTR_ERR(inode);
1513
1514 ui = ubifs_inode(inode);
1516 if (inode->i_size < e->d_size) { 1515 if (inode->i_size < e->d_size) {
1517 dbg_rcvry("ino %lu size %lld -> %lld", 1516 dbg_rcvry("ino %lu size %lld -> %lld",
1518 (unsigned long)e->inum, 1517 (unsigned long)e->inum,
1519 e->d_size, inode->i_size); 1518 inode->i_size, e->d_size);
1520 inode->i_size = e->d_size; 1519 inode->i_size = e->d_size;
1521 ubifs_inode(inode)->ui_size = e->d_size; 1520 ui->ui_size = e->d_size;
1521 ui->synced_i_size = e->d_size;
1522 e->inode = inode; 1522 e->inode = inode;
1523 this = rb_next(this); 1523 this = rb_next(this);
1524 continue; 1524 continue;
@@ -1533,9 +1533,11 @@ int ubifs_recover_size(struct ubifs_info *c)
1533 iput(e->inode); 1533 iput(e->inode);
1534 } 1534 }
1535 } 1535 }
1536
1536 this = rb_next(this); 1537 this = rb_next(this);
1537 rb_erase(&e->rb, &c->size_tree); 1538 rb_erase(&e->rb, &c->size_tree);
1538 kfree(e); 1539 kfree(e);
1539 } 1540 }
1541
1540 return 0; 1542 return 0;
1541} 1543}
diff --git a/fs/ubifs/replay.c b/fs/ubifs/replay.c
index d3d6d365bfc1..6617280d1679 100644
--- a/fs/ubifs/replay.c
+++ b/fs/ubifs/replay.c
@@ -33,44 +33,32 @@
33 */ 33 */
34 34
35#include "ubifs.h" 35#include "ubifs.h"
36 36#include <linux/list_sort.h>
37/*
38 * Replay flags.
39 *
40 * REPLAY_DELETION: node was deleted
41 * REPLAY_REF: node is a reference node
42 */
43enum {
44 REPLAY_DELETION = 1,
45 REPLAY_REF = 2,
46};
47 37
48/** 38/**
49 * struct replay_entry - replay tree entry. 39 * struct replay_entry - replay list entry.
50 * @lnum: logical eraseblock number of the node 40 * @lnum: logical eraseblock number of the node
51 * @offs: node offset 41 * @offs: node offset
52 * @len: node length 42 * @len: node length
43 * @deletion: non-zero if this entry corresponds to a node deletion
53 * @sqnum: node sequence number 44 * @sqnum: node sequence number
54 * @flags: replay flags 45 * @list: links the replay list
55 * @rb: links the replay tree
56 * @key: node key 46 * @key: node key
57 * @nm: directory entry name 47 * @nm: directory entry name
58 * @old_size: truncation old size 48 * @old_size: truncation old size
59 * @new_size: truncation new size 49 * @new_size: truncation new size
60 * @free: amount of free space in a bud
61 * @dirty: amount of dirty space in a bud from padding and deletion nodes
62 * @jhead: journal head number of the bud
63 * 50 *
64 * UBIFS journal replay must compare node sequence numbers, which means it must 51 * The replay process first scans all buds and builds the replay list, then
65 * build a tree of node information to insert into the TNC. 52 * sorts the replay list in node sequence number order, and then inserts all
53 * the replay entries into the TNC.
66 */ 54 */
67struct replay_entry { 55struct replay_entry {
68 int lnum; 56 int lnum;
69 int offs; 57 int offs;
70 int len; 58 int len;
59 unsigned int deletion:1;
71 unsigned long long sqnum; 60 unsigned long long sqnum;
72 int flags; 61 struct list_head list;
73 struct rb_node rb;
74 union ubifs_key key; 62 union ubifs_key key;
75 union { 63 union {
76 struct qstr nm; 64 struct qstr nm;
@@ -78,11 +66,6 @@ struct replay_entry {
78 loff_t old_size; 66 loff_t old_size;
79 loff_t new_size; 67 loff_t new_size;
80 }; 68 };
81 struct {
82 int free;
83 int dirty;
84 int jhead;
85 };
86 }; 69 };
87}; 70};
88 71
@@ -90,57 +73,64 @@ struct replay_entry {
90 * struct bud_entry - entry in the list of buds to replay. 73 * struct bud_entry - entry in the list of buds to replay.
91 * @list: next bud in the list 74 * @list: next bud in the list
92 * @bud: bud description object 75 * @bud: bud description object
93 * @free: free bytes in the bud
94 * @sqnum: reference node sequence number 76 * @sqnum: reference node sequence number
77 * @free: free bytes in the bud
78 * @dirty: dirty bytes in the bud
95 */ 79 */
96struct bud_entry { 80struct bud_entry {
97 struct list_head list; 81 struct list_head list;
98 struct ubifs_bud *bud; 82 struct ubifs_bud *bud;
99 int free;
100 unsigned long long sqnum; 83 unsigned long long sqnum;
84 int free;
85 int dirty;
101}; 86};
102 87
103/** 88/**
104 * set_bud_lprops - set free and dirty space used by a bud. 89 * set_bud_lprops - set free and dirty space used by a bud.
105 * @c: UBIFS file-system description object 90 * @c: UBIFS file-system description object
106 * @r: replay entry of bud 91 * @b: bud entry which describes the bud
92 *
93 * This function makes sure the LEB properties of bud @b are set correctly
94 * after the replay. Returns zero in case of success and a negative error code
95 * in case of failure.
107 */ 96 */
108static int set_bud_lprops(struct ubifs_info *c, struct replay_entry *r) 97static int set_bud_lprops(struct ubifs_info *c, struct bud_entry *b)
109{ 98{
110 const struct ubifs_lprops *lp; 99 const struct ubifs_lprops *lp;
111 int err = 0, dirty; 100 int err = 0, dirty;
112 101
113 ubifs_get_lprops(c); 102 ubifs_get_lprops(c);
114 103
115 lp = ubifs_lpt_lookup_dirty(c, r->lnum); 104 lp = ubifs_lpt_lookup_dirty(c, b->bud->lnum);
116 if (IS_ERR(lp)) { 105 if (IS_ERR(lp)) {
117 err = PTR_ERR(lp); 106 err = PTR_ERR(lp);
118 goto out; 107 goto out;
119 } 108 }
120 109
121 dirty = lp->dirty; 110 dirty = lp->dirty;
122 if (r->offs == 0 && (lp->free != c->leb_size || lp->dirty != 0)) { 111 if (b->bud->start == 0 && (lp->free != c->leb_size || lp->dirty != 0)) {
123 /* 112 /*
124 * The LEB was added to the journal with a starting offset of 113 * The LEB was added to the journal with a starting offset of
125 * zero which means the LEB must have been empty. The LEB 114 * zero which means the LEB must have been empty. The LEB
126 * property values should be lp->free == c->leb_size and 115 * property values should be @lp->free == @c->leb_size and
127 * lp->dirty == 0, but that is not the case. The reason is that 116 * @lp->dirty == 0, but that is not the case. The reason is that
128 * the LEB was garbage collected. The garbage collector resets 117 * the LEB had been garbage collected before it became the bud,
129 * the free and dirty space without recording it anywhere except 118 * and there was no commit in between. The garbage collector
130 * lprops, so if there is not a commit then lprops does not have 119 * resets the free and dirty space without recording it
131 * that information next time the file system is mounted. 120 * anywhere except lprops, so if there was no commit then
121 * lprops does not have that information.
132 * 122 *
133 * We do not need to adjust free space because the scan has told 123 * We do not need to adjust free space because the scan has told
134 * us the exact value which is recorded in the replay entry as 124 * us the exact value which is recorded in the replay entry as
135 * r->free. 125 * @b->free.
136 * 126 *
137 * However we do need to subtract from the dirty space the 127 * However we do need to subtract from the dirty space the
138 * amount of space that the garbage collector reclaimed, which 128 * amount of space that the garbage collector reclaimed, which
139 * is the whole LEB minus the amount of space that was free. 129 * is the whole LEB minus the amount of space that was free.
140 */ 130 */
141 dbg_mnt("bud LEB %d was GC'd (%d free, %d dirty)", r->lnum, 131 dbg_mnt("bud LEB %d was GC'd (%d free, %d dirty)", b->bud->lnum,
142 lp->free, lp->dirty); 132 lp->free, lp->dirty);
143 dbg_gc("bud LEB %d was GC'd (%d free, %d dirty)", r->lnum, 133 dbg_gc("bud LEB %d was GC'd (%d free, %d dirty)", b->bud->lnum,
144 lp->free, lp->dirty); 134 lp->free, lp->dirty);
145 dirty -= c->leb_size - lp->free; 135 dirty -= c->leb_size - lp->free;
146 /* 136 /*
@@ -152,10 +142,10 @@ static int set_bud_lprops(struct ubifs_info *c, struct replay_entry *r)
152 */ 142 */
153 if (dirty != 0) 143 if (dirty != 0)
154 dbg_msg("LEB %d lp: %d free %d dirty " 144 dbg_msg("LEB %d lp: %d free %d dirty "
155 "replay: %d free %d dirty", r->lnum, lp->free, 145 "replay: %d free %d dirty", b->bud->lnum,
156 lp->dirty, r->free, r->dirty); 146 lp->free, lp->dirty, b->free, b->dirty);
157 } 147 }
158 lp = ubifs_change_lp(c, lp, r->free, dirty + r->dirty, 148 lp = ubifs_change_lp(c, lp, b->free, dirty + b->dirty,
159 lp->flags | LPROPS_TAKEN, 0); 149 lp->flags | LPROPS_TAKEN, 0);
160 if (IS_ERR(lp)) { 150 if (IS_ERR(lp)) {
161 err = PTR_ERR(lp); 151 err = PTR_ERR(lp);
@@ -163,8 +153,9 @@ static int set_bud_lprops(struct ubifs_info *c, struct replay_entry *r)
163 } 153 }
164 154
165 /* Make sure the journal head points to the latest bud */ 155 /* Make sure the journal head points to the latest bud */
166 err = ubifs_wbuf_seek_nolock(&c->jheads[r->jhead].wbuf, r->lnum, 156 err = ubifs_wbuf_seek_nolock(&c->jheads[b->bud->jhead].wbuf,
167 c->leb_size - r->free, UBI_SHORTTERM); 157 b->bud->lnum, c->leb_size - b->free,
158 UBI_SHORTTERM);
168 159
169out: 160out:
170 ubifs_release_lprops(c); 161 ubifs_release_lprops(c);
@@ -172,6 +163,27 @@ out:
172} 163}
173 164
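For context, the GC adjustment in set_bud_lprops() is easiest to follow with
concrete numbers; a worked example (all figures illustrative, not from this
patch), assuming c->leb_size = 131072 and lprops left over from GC with
lp->free = 98304 and lp->dirty = 40960:

	dirty  = lp->dirty;                /* 40960 */
	dirty -= c->leb_size - lp->free;   /* 40960 - 32768 = 8192 */

GC reclaimed leb_size - lp->free = 32768 bytes, so only 8192 bytes of dirty
space remain to be combined with b->dirty in the ubifs_change_lp() call.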
174/** 165/**
166 * set_buds_lprops - set free and dirty space for all replayed buds.
167 * @c: UBIFS file-system description object
168 *
169 * This function sets LEB properties for all replayed buds. Returns zero in
170 * case of success and a negative error code in case of failure.
171 */
172static int set_buds_lprops(struct ubifs_info *c)
173{
174 struct bud_entry *b;
175 int err;
176
177 list_for_each_entry(b, &c->replay_buds, list) {
178 err = set_bud_lprops(c, b);
179 if (err)
180 return err;
181 }
182
183 return 0;
184}
185
186/**
175 * trun_remove_range - apply a replay entry for a truncation to the TNC. 187 * trun_remove_range - apply a replay entry for a truncation to the TNC.
176 * @c: UBIFS file-system description object 188 * @c: UBIFS file-system description object
177 * @r: replay entry of truncation 189 * @r: replay entry of truncation
@@ -207,24 +219,22 @@ static int trun_remove_range(struct ubifs_info *c, struct replay_entry *r)
207 */ 219 */
208static int apply_replay_entry(struct ubifs_info *c, struct replay_entry *r) 220static int apply_replay_entry(struct ubifs_info *c, struct replay_entry *r)
209{ 221{
210 int err, deletion = ((r->flags & REPLAY_DELETION) != 0); 222 int err;
211 223
212 dbg_mnt("LEB %d:%d len %d flgs %d sqnum %llu %s", r->lnum, 224 dbg_mnt("LEB %d:%d len %d deletion %d sqnum %llu %s", r->lnum,
213 r->offs, r->len, r->flags, r->sqnum, DBGKEY(&r->key)); 225 r->offs, r->len, r->deletion, r->sqnum, DBGKEY(&r->key));
214 226
215 /* Set c->replay_sqnum to help deal with dangling branches. */ 227 /* Set c->replay_sqnum to help deal with dangling branches. */
216 c->replay_sqnum = r->sqnum; 228 c->replay_sqnum = r->sqnum;
217 229
218 if (r->flags & REPLAY_REF) 230 if (is_hash_key(c, &r->key)) {
219 err = set_bud_lprops(c, r); 231 if (r->deletion)
220 else if (is_hash_key(c, &r->key)) {
221 if (deletion)
222 err = ubifs_tnc_remove_nm(c, &r->key, &r->nm); 232 err = ubifs_tnc_remove_nm(c, &r->key, &r->nm);
223 else 233 else
224 err = ubifs_tnc_add_nm(c, &r->key, r->lnum, r->offs, 234 err = ubifs_tnc_add_nm(c, &r->key, r->lnum, r->offs,
225 r->len, &r->nm); 235 r->len, &r->nm);
226 } else { 236 } else {
227 if (deletion) 237 if (r->deletion)
228 switch (key_type(c, &r->key)) { 238 switch (key_type(c, &r->key)) {
229 case UBIFS_INO_KEY: 239 case UBIFS_INO_KEY:
230 { 240 {
@@ -247,7 +257,7 @@ static int apply_replay_entry(struct ubifs_info *c, struct replay_entry *r)
247 return err; 257 return err;
248 258
249 if (c->need_recovery) 259 if (c->need_recovery)
250 err = ubifs_recover_size_accum(c, &r->key, deletion, 260 err = ubifs_recover_size_accum(c, &r->key, r->deletion,
251 r->new_size); 261 r->new_size);
252 } 262 }
253 263
@@ -255,68 +265,77 @@ static int apply_replay_entry(struct ubifs_info *c, struct replay_entry *r)
255} 265}
256 266
257/** 267/**
258 * destroy_replay_tree - destroy the replay. 268 * replay_entries_cmp - compare 2 replay entries.
259 * @c: UBIFS file-system description object 269 * @priv: UBIFS file-system description object
270 * @a: first replay entry
271 * @b: second replay entry
260 * 272 *
261 * Destroy the replay tree. 273 * This is a comparison function for 'list_sort()' which compares 2 replay
274 * entries @a and @b by comparing their sequence numbers. Returns %1 if @a has
275 * greater sequence number and %-1 otherwise.
262 */ 276 */
263static void destroy_replay_tree(struct ubifs_info *c) 277static int replay_entries_cmp(void *priv, struct list_head *a,
278 struct list_head *b)
264{ 279{
265 struct rb_node *this = c->replay_tree.rb_node; 280 struct replay_entry *ra, *rb;
266 struct replay_entry *r; 281
267 282 cond_resched();
268 while (this) { 283 if (a == b)
269 if (this->rb_left) { 284 return 0;
270 this = this->rb_left; 285
271 continue; 286 ra = list_entry(a, struct replay_entry, list);
272 } else if (this->rb_right) { 287 rb = list_entry(b, struct replay_entry, list);
273 this = this->rb_right; 288 ubifs_assert(ra->sqnum != rb->sqnum);
274 continue; 289 if (ra->sqnum > rb->sqnum)
275 } 290 return 1;
276 r = rb_entry(this, struct replay_entry, rb); 291 return -1;
277 this = rb_parent(this);
278 if (this) {
279 if (this->rb_left == &r->rb)
280 this->rb_left = NULL;
281 else
282 this->rb_right = NULL;
283 }
284 if (is_hash_key(c, &r->key))
285 kfree(r->nm.name);
286 kfree(r);
287 }
288 c->replay_tree = RB_ROOT;
289} 292}
290 293
291/** 294/**
292 * apply_replay_tree - apply the replay tree to the TNC. 295 * apply_replay_list - apply the replay list to the TNC.
293 * @c: UBIFS file-system description object 296 * @c: UBIFS file-system description object
294 * 297 *
295 * Apply the replay tree. 298 * Apply all entries in the replay list to the TNC. Returns zero in case of
296 * Returns zero in case of success and a negative error code in case of 299 * success and a negative error code in case of failure.
297 * failure.
298 */ 300 */
299static int apply_replay_tree(struct ubifs_info *c) 301static int apply_replay_list(struct ubifs_info *c)
300{ 302{
301 struct rb_node *this = rb_first(&c->replay_tree); 303 struct replay_entry *r;
304 int err;
302 305
303 while (this) { 306 list_sort(c, &c->replay_list, &replay_entries_cmp);
304 struct replay_entry *r;
305 int err;
306 307
308 list_for_each_entry(r, &c->replay_list, list) {
307 cond_resched(); 309 cond_resched();
308 310
309 r = rb_entry(this, struct replay_entry, rb);
310 err = apply_replay_entry(c, r); 311 err = apply_replay_entry(c, r);
311 if (err) 312 if (err)
312 return err; 313 return err;
313 this = rb_next(this);
314 } 314 }
315
315 return 0; 316 return 0;
316} 317}
317 318
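For readers unfamiliar with list_sort(), a minimal self-contained sketch of
the sort-then-iterate pattern used by apply_replay_list() above; the element
type and function names are hypothetical, only the list_sort() signature is
taken from <linux/list_sort.h>:

	#include <linux/list.h>
	#include <linux/list_sort.h>

	struct item {
		unsigned long long sqnum;
		struct list_head list;
	};

	/* Only the sign of the return value matters to list_sort(). */
	static int item_cmp(void *priv, struct list_head *a,
			    struct list_head *b)
	{
		struct item *ia = list_entry(a, struct item, list);
		struct item *ib = list_entry(b, struct item, list);

		return ia->sqnum > ib->sqnum ? 1 : -1;
	}

	static void apply_items(struct list_head *head)
	{
		struct item *i;

		list_sort(NULL, head, item_cmp); /* lowest sqnum first */
		list_for_each_entry(i, head, list)
			; /* apply entry @i here, oldest first */
	}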
318/** 319/**
319 * insert_node - insert a node to the replay tree. 320 * destroy_replay_list - destroy the replay.
321 * @c: UBIFS file-system description object
322 *
323 * Destroy the replay list.
324 */
325static void destroy_replay_list(struct ubifs_info *c)
326{
327 struct replay_entry *r, *tmp;
328
329 list_for_each_entry_safe(r, tmp, &c->replay_list, list) {
330 if (is_hash_key(c, &r->key))
331 kfree(r->nm.name);
332 list_del(&r->list);
333 kfree(r);
334 }
335}
336
337/**
338 * insert_node - insert a node to the replay list
320 * @c: UBIFS file-system description object 339 * @c: UBIFS file-system description object
321 * @lnum: node logical eraseblock number 340 * @lnum: node logical eraseblock number
322 * @offs: node offset 341 * @offs: node offset
@@ -328,39 +347,25 @@ static int apply_replay_tree(struct ubifs_info *c)
328 * @old_size: truncation old size 347 * @old_size: truncation old size
329 * @new_size: truncation new size 348 * @new_size: truncation new size
330 * 349 *
331 * This function inserts a scanned non-direntry node to the replay tree. The 350 * This function inserts a scanned non-direntry node to the replay list. The
332 * replay tree is an RB-tree containing @struct replay_entry elements which are 351 * replay list contains @struct replay_entry elements, and we sort this list in
333 * indexed by the sequence number. The replay tree is applied at the very end 352 * sequence number order before applying it. The replay list is applied at the
334 * of the replay process. Since the tree is sorted in sequence number order, 353 * very end of the replay process. Since the list is sorted in sequence number
335 * the older modifications are applied first. This function returns zero in 354 * order, the older modifications are applied first. This function returns zero
336 * case of success and a negative error code in case of failure. 355 * in case of success and a negative error code in case of failure.
337 */ 356 */
338static int insert_node(struct ubifs_info *c, int lnum, int offs, int len, 357static int insert_node(struct ubifs_info *c, int lnum, int offs, int len,
339 union ubifs_key *key, unsigned long long sqnum, 358 union ubifs_key *key, unsigned long long sqnum,
340 int deletion, int *used, loff_t old_size, 359 int deletion, int *used, loff_t old_size,
341 loff_t new_size) 360 loff_t new_size)
342{ 361{
343 struct rb_node **p = &c->replay_tree.rb_node, *parent = NULL;
344 struct replay_entry *r; 362 struct replay_entry *r;
345 363
364 dbg_mnt("add LEB %d:%d, key %s", lnum, offs, DBGKEY(key));
365
346 if (key_inum(c, key) >= c->highest_inum) 366 if (key_inum(c, key) >= c->highest_inum)
347 c->highest_inum = key_inum(c, key); 367 c->highest_inum = key_inum(c, key);
348 368
349 dbg_mnt("add LEB %d:%d, key %s", lnum, offs, DBGKEY(key));
350 while (*p) {
351 parent = *p;
352 r = rb_entry(parent, struct replay_entry, rb);
353 if (sqnum < r->sqnum) {
354 p = &(*p)->rb_left;
355 continue;
356 } else if (sqnum > r->sqnum) {
357 p = &(*p)->rb_right;
358 continue;
359 }
360 ubifs_err("duplicate sqnum in replay");
361 return -EINVAL;
362 }
363
364 r = kzalloc(sizeof(struct replay_entry), GFP_KERNEL); 369 r = kzalloc(sizeof(struct replay_entry), GFP_KERNEL);
365 if (!r) 370 if (!r)
366 return -ENOMEM; 371 return -ENOMEM;
@@ -370,19 +375,18 @@ static int insert_node(struct ubifs_info *c, int lnum, int offs, int len,
370 r->lnum = lnum; 375 r->lnum = lnum;
371 r->offs = offs; 376 r->offs = offs;
372 r->len = len; 377 r->len = len;
378 r->deletion = !!deletion;
373 r->sqnum = sqnum; 379 r->sqnum = sqnum;
374 r->flags = (deletion ? REPLAY_DELETION : 0); 380 key_copy(c, key, &r->key);
375 r->old_size = old_size; 381 r->old_size = old_size;
376 r->new_size = new_size; 382 r->new_size = new_size;
377 key_copy(c, key, &r->key);
378 383
379 rb_link_node(&r->rb, parent, p); 384 list_add_tail(&r->list, &c->replay_list);
380 rb_insert_color(&r->rb, &c->replay_tree);
381 return 0; 385 return 0;
382} 386}
383 387
384/** 388/**
385 * insert_dent - insert a directory entry node into the replay tree. 389 * insert_dent - insert a directory entry node into the replay list.
386 * @c: UBIFS file-system description object 390 * @c: UBIFS file-system description object
387 * @lnum: node logical eraseblock number 391 * @lnum: node logical eraseblock number
388 * @offs: node offset 392 * @offs: node offset
@@ -394,43 +398,25 @@ static int insert_node(struct ubifs_info *c, int lnum, int offs, int len,
394 * @deletion: non-zero if this is a deletion 398 * @deletion: non-zero if this is a deletion
395 * @used: number of bytes in use in a LEB 399 * @used: number of bytes in use in a LEB
396 * 400 *
397 * This function inserts a scanned directory entry node to the replay tree. 401 * This function inserts a scanned directory entry node or an extended
398 * Returns zero in case of success and a negative error code in case of 402 * attribute entry to the replay list. Returns zero in case of success and a
399 * failure. 403 * negative error code in case of failure.
400 *
401 * This function is also used for extended attribute entries because they are
402 * implemented as directory entry nodes.
403 */ 404 */
404static int insert_dent(struct ubifs_info *c, int lnum, int offs, int len, 405static int insert_dent(struct ubifs_info *c, int lnum, int offs, int len,
405 union ubifs_key *key, const char *name, int nlen, 406 union ubifs_key *key, const char *name, int nlen,
406 unsigned long long sqnum, int deletion, int *used) 407 unsigned long long sqnum, int deletion, int *used)
407{ 408{
408 struct rb_node **p = &c->replay_tree.rb_node, *parent = NULL;
409 struct replay_entry *r; 409 struct replay_entry *r;
410 char *nbuf; 410 char *nbuf;
411 411
412 dbg_mnt("add LEB %d:%d, key %s", lnum, offs, DBGKEY(key));
412 if (key_inum(c, key) >= c->highest_inum) 413 if (key_inum(c, key) >= c->highest_inum)
413 c->highest_inum = key_inum(c, key); 414 c->highest_inum = key_inum(c, key);
414 415
415 dbg_mnt("add LEB %d:%d, key %s", lnum, offs, DBGKEY(key));
416 while (*p) {
417 parent = *p;
418 r = rb_entry(parent, struct replay_entry, rb);
419 if (sqnum < r->sqnum) {
420 p = &(*p)->rb_left;
421 continue;
422 }
423 if (sqnum > r->sqnum) {
424 p = &(*p)->rb_right;
425 continue;
426 }
427 ubifs_err("duplicate sqnum in replay");
428 return -EINVAL;
429 }
430
431 r = kzalloc(sizeof(struct replay_entry), GFP_KERNEL); 416 r = kzalloc(sizeof(struct replay_entry), GFP_KERNEL);
432 if (!r) 417 if (!r)
433 return -ENOMEM; 418 return -ENOMEM;
419
434 nbuf = kmalloc(nlen + 1, GFP_KERNEL); 420 nbuf = kmalloc(nlen + 1, GFP_KERNEL);
435 if (!nbuf) { 421 if (!nbuf) {
436 kfree(r); 422 kfree(r);
@@ -442,17 +428,15 @@ static int insert_dent(struct ubifs_info *c, int lnum, int offs, int len,
442 r->lnum = lnum; 428 r->lnum = lnum;
443 r->offs = offs; 429 r->offs = offs;
444 r->len = len; 430 r->len = len;
431 r->deletion = !!deletion;
445 r->sqnum = sqnum; 432 r->sqnum = sqnum;
433 key_copy(c, key, &r->key);
446 r->nm.len = nlen; 434 r->nm.len = nlen;
447 memcpy(nbuf, name, nlen); 435 memcpy(nbuf, name, nlen);
448 nbuf[nlen] = '\0'; 436 nbuf[nlen] = '\0';
449 r->nm.name = nbuf; 437 r->nm.name = nbuf;
450 r->flags = (deletion ? REPLAY_DELETION : 0);
451 key_copy(c, key, &r->key);
452 438
453 ubifs_assert(!*p); 439 list_add_tail(&r->list, &c->replay_list);
454 rb_link_node(&r->rb, parent, p);
455 rb_insert_color(&r->rb, &c->replay_tree);
456 return 0; 440 return 0;
457} 441}
458 442
@@ -489,29 +473,92 @@ int ubifs_validate_entry(struct ubifs_info *c,
489} 473}
490 474
491/** 475/**
476 * is_last_bud - check if the bud is the last in the journal head.
477 * @c: UBIFS file-system description object
478 * @bud: bud description object
479 *
480 * This function checks if bud @bud is the last bud in its journal head. This
481 * information is then used by 'replay_bud()' to decide whether the bud can
482 * have corruptions or not. Indeed, only last buds can be corrupted by power
483 * cuts. Returns %1 if this is the last bud, and %0 if not.
484 */
485static int is_last_bud(struct ubifs_info *c, struct ubifs_bud *bud)
486{
487 struct ubifs_jhead *jh = &c->jheads[bud->jhead];
488 struct ubifs_bud *next;
489 uint32_t data;
490 int err;
491
492 if (list_is_last(&bud->list, &jh->buds_list))
493 return 1;
494
495 /*
496 * The following is a quirk to make sure we work correctly with UBIFS
497 * images used with older UBIFS.
498 *
499 * Normally, the last bud will be the last in the journal head's list
500 * of buds. However, there is one exception if the UBIFS image belongs
501 * to older UBIFS. This is fairly unlikely: one would need to use old
502 * UBIFS, then have a power cut exactly at the right point, and then
503 * try to mount this image with new UBIFS.
504 *
505 * The exception is: it is possible to have 2 buds A and B, A goes
506 * before B, and B is the last, bud B contains no data, and bud A is
507 * corrupted at the end. The reason is that in older versions when the
508 * journal code switched to the next bud (from A to B), it first added a
509 * log reference node for the new bud (B), and only after this it
510 * synchronized the write-buffer of current bud (A). But later this was
511 * changed and UBIFS started to always synchronize the write-buffer of
512 * the bud (A) before writing the log reference for the new bud (B).
513 *
514 * But because older UBIFS always synchronized A's write-buffer before
515 * writing to B, we can recognize this exceptional situation by
516 * checking the contents of bud B - if it is empty, then A can be
517 * treated as the last and we can recover it.
518 *
519 * TODO: remove this piece of code in a couple of years (today it is
520 * 16.05.2011).
521 */
522 next = list_entry(bud->list.next, struct ubifs_bud, list);
523 if (!list_is_last(&next->list, &jh->buds_list))
524 return 0;
525
526 err = ubi_read(c->ubi, next->lnum, (char *)&data,
527 next->start, 4);
528 if (err)
529 return 0;
530
531 return data == 0xFFFFFFFF;
532}
533
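The final check above relies on the fact that erased flash reads back as all
0xff bytes, so a bud that was never written to starts with the 32-bit word
0xFFFFFFFF. A hedged restatement of the probe, using the same names as in
is_last_bud():

	uint32_t data;
	int err, empty;

	/* Reading the first 4 bytes of the next bud's area is enough. */
	err = ubi_read(c->ubi, next->lnum, (char *)&data, next->start, 4);
	empty = !err && data == 0xFFFFFFFF;  /* B empty => treat A as last */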
534/**
492 * replay_bud - replay a bud logical eraseblock. 535 * replay_bud - replay a bud logical eraseblock.
493 * @c: UBIFS file-system description object 536 * @c: UBIFS file-system description object
494 * @lnum: bud logical eraseblock number to replay 537 * @b: bud entry which describes the bud
495 * @offs: bud start offset
496 * @jhead: journal head to which this bud belongs
497 * @free: amount of free space in the bud is returned here
498 * @dirty: amount of dirty space from padding and deletion nodes is returned
499 * here
500 * 538 *
501 * This function returns zero in case of success and a negative error code in 539 * This function replays bud @bud, recovers it if needed, and adds all nodes
502 * case of failure. 540 * from this bud to the replay list. Returns zero in case of success and a
541 * negative error code in case of failure.
503 */ 542 */
504static int replay_bud(struct ubifs_info *c, int lnum, int offs, int jhead, 543static int replay_bud(struct ubifs_info *c, struct bud_entry *b)
505 int *free, int *dirty)
506{ 544{
507 int err = 0, used = 0; 545 int is_last = is_last_bud(c, b->bud);
546 int err = 0, used = 0, lnum = b->bud->lnum, offs = b->bud->start;
508 struct ubifs_scan_leb *sleb; 547 struct ubifs_scan_leb *sleb;
509 struct ubifs_scan_node *snod; 548 struct ubifs_scan_node *snod;
510 struct ubifs_bud *bud;
511 549
512 dbg_mnt("replay bud LEB %d, head %d", lnum, jhead); 550 dbg_mnt("replay bud LEB %d, head %d, offs %d, is_last %d",
513 if (c->need_recovery) 551 lnum, b->bud->jhead, offs, is_last);
514 sleb = ubifs_recover_leb(c, lnum, offs, c->sbuf, jhead != GCHD); 552
553 if (c->need_recovery && is_last)
554 /*
555 * Recover only the last LEBs in the journal heads, because
556 * power cuts may cause corruption only in these LEBs: only
557 * these LEBs could have been written to at the time of the
558 * power cut.
559 */
560 sleb = ubifs_recover_leb(c, lnum, offs, c->sbuf,
561 b->bud->jhead != GCHD);
515 else 562 else
516 sleb = ubifs_scan(c, lnum, offs, c->sbuf, 0); 563 sleb = ubifs_scan(c, lnum, offs, c->sbuf, 0);
517 if (IS_ERR(sleb)) 564 if (IS_ERR(sleb))
@@ -627,15 +674,13 @@ static int replay_bud(struct ubifs_info *c, int lnum, int offs, int jhead,
627 goto out; 674 goto out;
628 } 675 }
629 676
630 bud = ubifs_search_bud(c, lnum); 677 ubifs_assert(ubifs_search_bud(c, lnum));
631 if (!bud)
632 BUG();
633
634 ubifs_assert(sleb->endpt - offs >= used); 678 ubifs_assert(sleb->endpt - offs >= used);
635 ubifs_assert(sleb->endpt % c->min_io_size == 0); 679 ubifs_assert(sleb->endpt % c->min_io_size == 0);
636 680
637 *dirty = sleb->endpt - offs - used; 681 b->dirty = sleb->endpt - offs - used;
638 *free = c->leb_size - sleb->endpt; 682 b->free = c->leb_size - sleb->endpt;
683 dbg_mnt("bud LEB %d replayed: dirty %d, free %d", lnum, b->dirty, b->free);
639 684
640out: 685out:
641 ubifs_scan_destroy(sleb); 686 ubifs_scan_destroy(sleb);
@@ -649,58 +694,6 @@ out_dump:
649} 694}
650 695
651/** 696/**
652 * insert_ref_node - insert a reference node to the replay tree.
653 * @c: UBIFS file-system description object
654 * @lnum: node logical eraseblock number
655 * @offs: node offset
656 * @sqnum: sequence number
657 * @free: amount of free space in bud
658 * @dirty: amount of dirty space from padding and deletion nodes
659 * @jhead: journal head number for the bud
660 *
661 * This function inserts a reference node to the replay tree and returns zero
662 * in case of success or a negative error code in case of failure.
663 */
664static int insert_ref_node(struct ubifs_info *c, int lnum, int offs,
665 unsigned long long sqnum, int free, int dirty,
666 int jhead)
667{
668 struct rb_node **p = &c->replay_tree.rb_node, *parent = NULL;
669 struct replay_entry *r;
670
671 dbg_mnt("add ref LEB %d:%d", lnum, offs);
672 while (*p) {
673 parent = *p;
674 r = rb_entry(parent, struct replay_entry, rb);
675 if (sqnum < r->sqnum) {
676 p = &(*p)->rb_left;
677 continue;
678 } else if (sqnum > r->sqnum) {
679 p = &(*p)->rb_right;
680 continue;
681 }
682 ubifs_err("duplicate sqnum in replay tree");
683 return -EINVAL;
684 }
685
686 r = kzalloc(sizeof(struct replay_entry), GFP_KERNEL);
687 if (!r)
688 return -ENOMEM;
689
690 r->lnum = lnum;
691 r->offs = offs;
692 r->sqnum = sqnum;
693 r->flags = REPLAY_REF;
694 r->free = free;
695 r->dirty = dirty;
696 r->jhead = jhead;
697
698 rb_link_node(&r->rb, parent, p);
699 rb_insert_color(&r->rb, &c->replay_tree);
700 return 0;
701}
702
703/**
704 * replay_buds - replay all buds. 697 * replay_buds - replay all buds.
705 * @c: UBIFS file-system description object 698 * @c: UBIFS file-system description object
706 * 699 *
@@ -710,17 +703,16 @@ static int insert_ref_node(struct ubifs_info *c, int lnum, int offs,
710static int replay_buds(struct ubifs_info *c) 703static int replay_buds(struct ubifs_info *c)
711{ 704{
712 struct bud_entry *b; 705 struct bud_entry *b;
713 int err, uninitialized_var(free), uninitialized_var(dirty); 706 int err;
707 unsigned long long prev_sqnum = 0;
714 708
715 list_for_each_entry(b, &c->replay_buds, list) { 709 list_for_each_entry(b, &c->replay_buds, list) {
716 err = replay_bud(c, b->bud->lnum, b->bud->start, b->bud->jhead, 710 err = replay_bud(c, b);
717 &free, &dirty);
718 if (err)
719 return err;
720 err = insert_ref_node(c, b->bud->lnum, b->bud->start, b->sqnum,
721 free, dirty, b->bud->jhead);
722 if (err) 711 if (err)
723 return err; 712 return err;
713
714 ubifs_assert(b->sqnum > prev_sqnum);
715 prev_sqnum = b->sqnum;
724 } 716 }
725 717
726 return 0; 718 return 0;
@@ -1060,25 +1052,29 @@ int ubifs_replay_journal(struct ubifs_info *c)
1060 if (err) 1052 if (err)
1061 goto out; 1053 goto out;
1062 1054
1063 err = apply_replay_tree(c); 1055 err = apply_replay_list(c);
1056 if (err)
1057 goto out;
1058
1059 err = set_buds_lprops(c);
1064 if (err) 1060 if (err)
1065 goto out; 1061 goto out;
1066 1062
1067 /* 1063 /*
1068 * UBIFS budgeting calculations use @c->budg_uncommitted_idx variable 1064 * UBIFS budgeting calculations use @c->bi.uncommitted_idx variable
1069 * to roughly estimate index growth. Things like @c->min_idx_lebs 1065 * to roughly estimate index growth. Things like @c->bi.min_idx_lebs
1070 * depend on it. This means we have to initialize it to make sure 1066 * depend on it. This means we have to initialize it to make sure
1071 * budgeting works properly. 1067 * budgeting works properly.
1072 */ 1068 */
1073 c->budg_uncommitted_idx = atomic_long_read(&c->dirty_zn_cnt); 1069 c->bi.uncommitted_idx = atomic_long_read(&c->dirty_zn_cnt);
1074 c->budg_uncommitted_idx *= c->max_idx_node_sz; 1070 c->bi.uncommitted_idx *= c->max_idx_node_sz;
1075 1071
1076 ubifs_assert(c->bud_bytes <= c->max_bud_bytes || c->need_recovery); 1072 ubifs_assert(c->bud_bytes <= c->max_bud_bytes || c->need_recovery);
1077 dbg_mnt("finished, log head LEB %d:%d, max_sqnum %llu, " 1073 dbg_mnt("finished, log head LEB %d:%d, max_sqnum %llu, "
1078 "highest_inum %lu", c->lhead_lnum, c->lhead_offs, c->max_sqnum, 1074 "highest_inum %lu", c->lhead_lnum, c->lhead_offs, c->max_sqnum,
1079 (unsigned long)c->highest_inum); 1075 (unsigned long)c->highest_inum);
1080out: 1076out:
1081 destroy_replay_tree(c); 1077 destroy_replay_list(c);
1082 destroy_bud_list(c); 1078 destroy_bud_list(c);
1083 c->replaying = 0; 1079 c->replaying = 0;
1084 return err; 1080 return err;
diff --git a/fs/ubifs/sb.c b/fs/ubifs/sb.c
index bf31b4729e51..c606f010e8df 100644
--- a/fs/ubifs/sb.c
+++ b/fs/ubifs/sb.c
@@ -475,7 +475,8 @@ failed:
475 * @c: UBIFS file-system description object 475 * @c: UBIFS file-system description object
476 * 476 *
477 * This function returns a pointer to the superblock node or a negative error 477 * This function returns a pointer to the superblock node or a negative error
478 * code. 478 * code. Note, the user of this function is responsible for kfree()'ing the
479 * returned superblock buffer.
479 */ 480 */
480struct ubifs_sb_node *ubifs_read_sb_node(struct ubifs_info *c) 481struct ubifs_sb_node *ubifs_read_sb_node(struct ubifs_info *c)
481{ 482{
@@ -616,6 +617,7 @@ int ubifs_read_superblock(struct ubifs_info *c)
616 c->vfs_sb->s_time_gran = le32_to_cpu(sup->time_gran); 617 c->vfs_sb->s_time_gran = le32_to_cpu(sup->time_gran);
617 memcpy(&c->uuid, &sup->uuid, 16); 618 memcpy(&c->uuid, &sup->uuid, 16);
618 c->big_lpt = !!(sup_flags & UBIFS_FLG_BIGLPT); 619 c->big_lpt = !!(sup_flags & UBIFS_FLG_BIGLPT);
620 c->space_fixup = !!(sup_flags & UBIFS_FLG_SPACE_FIXUP);
619 621
620 /* Automatically increase file system size to the maximum size */ 622 /* Automatically increase file system size to the maximum size */
621 c->old_leb_cnt = c->leb_cnt; 623 c->old_leb_cnt = c->leb_cnt;
@@ -650,3 +652,152 @@ out:
650 kfree(sup); 652 kfree(sup);
651 return err; 653 return err;
652} 654}
655
656/**
657 * fixup_leb - fixup/unmap an LEB containing free space.
658 * @c: UBIFS file-system description object
659 * @lnum: the LEB number to fix up
660 * @len: number of used bytes in LEB (starting at offset 0)
661 *
662 * This function reads the contents of the given LEB number @lnum, then fixes
663 * it up, so that empty min. I/O units at the end of the LEB are actually erased on
664 * flash (rather than being just all-0xff real data). If the LEB is completely
665 * empty, it is simply unmapped.
666 */
667static int fixup_leb(struct ubifs_info *c, int lnum, int len)
668{
669 int err;
670
671 ubifs_assert(len >= 0);
672 ubifs_assert(len % c->min_io_size == 0);
673 ubifs_assert(len < c->leb_size);
674
675 if (len == 0) {
676 dbg_mnt("unmap empty LEB %d", lnum);
677 return ubi_leb_unmap(c->ubi, lnum);
678 }
679
680 dbg_mnt("fixup LEB %d, data len %d", lnum, len);
681 err = ubi_read(c->ubi, lnum, c->sbuf, 0, len);
682 if (err)
683 return err;
684
685 return ubi_leb_change(c->ubi, lnum, c->sbuf, len, UBI_UNKNOWN);
686}
687
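A worked example of how the callers below compute @len (figures illustrative,
not from this patch): for a 128 KiB LEB whose lprops report 96 KiB free, only
the 32 KiB prefix holds data, so the fixup re-writes exactly that prefix and
lets UBI erase the remainder:

	int len = c->leb_size - lprops->free;  /* 131072 - 98304 = 32768 */

	err = fixup_leb(c, lnum, len);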
688/**
689 * fixup_free_space - find & remap all LEBs containing free space.
690 * @c: UBIFS file-system description object
691 *
692 * This function walks through all LEBs in the filesystem and fixes up those
693 * containing free/empty space.
694 */
695static int fixup_free_space(struct ubifs_info *c)
696{
697 int lnum, err = 0;
698 struct ubifs_lprops *lprops;
699
700 ubifs_get_lprops(c);
701
702 /* Fixup LEBs in the master area */
703 for (lnum = UBIFS_MST_LNUM; lnum < UBIFS_LOG_LNUM; lnum++) {
704 err = fixup_leb(c, lnum, c->mst_offs + c->mst_node_alsz);
705 if (err)
706 goto out;
707 }
708
709 /* Unmap unused log LEBs */
710 lnum = ubifs_next_log_lnum(c, c->lhead_lnum);
711 while (lnum != c->ltail_lnum) {
712 err = fixup_leb(c, lnum, 0);
713 if (err)
714 goto out;
715 lnum = ubifs_next_log_lnum(c, lnum);
716 }
717
718 /* Fixup the current log head */
719 err = fixup_leb(c, c->lhead_lnum, c->lhead_offs);
720 if (err)
721 goto out;
722
723 /* Fixup LEBs in the LPT area */
724 for (lnum = c->lpt_first; lnum <= c->lpt_last; lnum++) {
725 int free = c->ltab[lnum - c->lpt_first].free;
726
727 if (free > 0) {
728 err = fixup_leb(c, lnum, c->leb_size - free);
729 if (err)
730 goto out;
731 }
732 }
733
734 /* Unmap LEBs in the orphans area */
735 for (lnum = c->orph_first; lnum <= c->orph_last; lnum++) {
736 err = fixup_leb(c, lnum, 0);
737 if (err)
738 goto out;
739 }
740
741 /* Fixup LEBs in the main area */
742 for (lnum = c->main_first; lnum < c->leb_cnt; lnum++) {
743 lprops = ubifs_lpt_lookup(c, lnum);
744 if (IS_ERR(lprops)) {
745 err = PTR_ERR(lprops);
746 goto out;
747 }
748
749 if (lprops->free > 0) {
750 err = fixup_leb(c, lnum, c->leb_size - lprops->free);
751 if (err)
752 goto out;
753 }
754 }
755
756out:
757 ubifs_release_lprops(c);
758 return err;
759}
760
761/**
762 * ubifs_fixup_free_space - find & fix all LEBs with free space.
763 * @c: UBIFS file-system description object
764 *
765 * This function fixes up LEBs containing free space on first mount, if the
766 * appropriate flag was set when the FS was created. Each LEB with one or more
767 * empty min. I/O units (i.e. free-space-count > 0) is re-written, to make sure
768 * the free space is actually erased. E.g., this is necessary for some NAND
769 * chips, since the free space may have been programmed like real "0xff" data
770 * (generating a non-0xff ECC), causing future writes to the not-really-erased
771 * NAND pages to behave badly. After the space is fixed up, the superblock flag
772 * is cleared, so that this is skipped for all future mounts.
773 */
774int ubifs_fixup_free_space(struct ubifs_info *c)
775{
776 int err;
777 struct ubifs_sb_node *sup;
778
779 ubifs_assert(c->space_fixup);
780 ubifs_assert(!c->ro_mount);
781
782 ubifs_msg("start fixing up free space");
783
784 err = fixup_free_space(c);
785 if (err)
786 return err;
787
788 sup = ubifs_read_sb_node(c);
789 if (IS_ERR(sup))
790 return PTR_ERR(sup);
791
792 /* Free-space fixup is no longer required */
793 c->space_fixup = 0;
794 sup->flags &= cpu_to_le32(~UBIFS_FLG_SPACE_FIXUP);
795
796 err = ubifs_write_sb_node(c, sup);
797 kfree(sup);
798 if (err)
799 return err;
800
801 ubifs_msg("free space fixup complete");
802 return err;
803}
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index 04ad07f4fcc3..6db0bdaa9f74 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -375,7 +375,7 @@ out:
375 ubifs_release_dirty_inode_budget(c, ui); 375 ubifs_release_dirty_inode_budget(c, ui);
376 else { 376 else {
377 /* We've deleted something - clean the "no space" flags */ 377 /* We've deleted something - clean the "no space" flags */
378 c->nospace = c->nospace_rp = 0; 378 c->bi.nospace = c->bi.nospace_rp = 0;
379 smp_wmb(); 379 smp_wmb();
380 } 380 }
381done: 381done:
@@ -694,11 +694,11 @@ static int init_constants_sb(struct ubifs_info *c)
694 * be compressed and direntries are of the maximum size. 694 * be compressed and direntries are of the maximum size.
695 * 695 *
696 * Note, data, which may be stored in inodes is budgeted separately, so 696 * Note, data, which may be stored in inodes is budgeted separately, so
697 * it is not included into 'c->inode_budget'. 697 * it is not included into 'c->bi.inode_budget'.
698 */ 698 */
699 c->page_budget = UBIFS_MAX_DATA_NODE_SZ * UBIFS_BLOCKS_PER_PAGE; 699 c->bi.page_budget = UBIFS_MAX_DATA_NODE_SZ * UBIFS_BLOCKS_PER_PAGE;
700 c->inode_budget = UBIFS_INO_NODE_SZ; 700 c->bi.inode_budget = UBIFS_INO_NODE_SZ;
701 c->dent_budget = UBIFS_MAX_DENT_NODE_SZ; 701 c->bi.dent_budget = UBIFS_MAX_DENT_NODE_SZ;
702 702
703 /* 703 /*
704 * When the amount of flash space used by buds becomes 704 * When the amount of flash space used by buds becomes
@@ -742,7 +742,7 @@ static void init_constants_master(struct ubifs_info *c)
742{ 742{
743 long long tmp64; 743 long long tmp64;
744 744
745 c->min_idx_lebs = ubifs_calc_min_idx_lebs(c); 745 c->bi.min_idx_lebs = ubifs_calc_min_idx_lebs(c);
746 c->report_rp_size = ubifs_reported_space(c, c->rp_size); 746 c->report_rp_size = ubifs_reported_space(c, c->rp_size);
747 747
748 /* 748 /*
@@ -1144,8 +1144,8 @@ static int check_free_space(struct ubifs_info *c)
1144{ 1144{
1145 ubifs_assert(c->dark_wm > 0); 1145 ubifs_assert(c->dark_wm > 0);
1146 if (c->lst.total_free + c->lst.total_dirty < c->dark_wm) { 1146 if (c->lst.total_free + c->lst.total_dirty < c->dark_wm) {
1147 ubifs_err("insufficient free space to mount in read/write mode"); 1147 ubifs_err("insufficient free space to mount in R/W mode");
1148 dbg_dump_budg(c); 1148 dbg_dump_budg(c, &c->bi);
1149 dbg_dump_lprops(c); 1149 dbg_dump_lprops(c);
1150 return -ENOSPC; 1150 return -ENOSPC;
1151 } 1151 }
@@ -1304,7 +1304,7 @@ static int mount_ubifs(struct ubifs_info *c)
1304 if (err) 1304 if (err)
1305 goto out_lpt; 1305 goto out_lpt;
1306 1306
1307 err = dbg_check_idx_size(c, c->old_idx_sz); 1307 err = dbg_check_idx_size(c, c->bi.old_idx_sz);
1308 if (err) 1308 if (err)
1309 goto out_lpt; 1309 goto out_lpt;
1310 1310
@@ -1313,7 +1313,7 @@ static int mount_ubifs(struct ubifs_info *c)
1313 goto out_journal; 1313 goto out_journal;
1314 1314
1315 /* Calculate 'min_idx_lebs' after journal replay */ 1315 /* Calculate 'min_idx_lebs' after journal replay */
1316 c->min_idx_lebs = ubifs_calc_min_idx_lebs(c); 1316 c->bi.min_idx_lebs = ubifs_calc_min_idx_lebs(c);
1317 1317
1318 err = ubifs_mount_orphans(c, c->need_recovery, c->ro_mount); 1318 err = ubifs_mount_orphans(c, c->need_recovery, c->ro_mount);
1319 if (err) 1319 if (err)
@@ -1396,6 +1396,12 @@ static int mount_ubifs(struct ubifs_info *c)
1396 } else 1396 } else
1397 ubifs_assert(c->lst.taken_empty_lebs > 0); 1397 ubifs_assert(c->lst.taken_empty_lebs > 0);
1398 1398
1399 if (!c->ro_mount && c->space_fixup) {
1400 err = ubifs_fixup_free_space(c);
1401 if (err)
1402 goto out_infos;
1403 }
1404
1399 err = dbg_check_filesystem(c); 1405 err = dbg_check_filesystem(c);
1400 if (err) 1406 if (err)
1401 goto out_infos; 1407 goto out_infos;
@@ -1442,7 +1448,8 @@ static int mount_ubifs(struct ubifs_info *c)
1442 c->main_lebs, c->main_first, c->leb_cnt - 1); 1448 c->main_lebs, c->main_first, c->leb_cnt - 1);
1443 dbg_msg("index LEBs: %d", c->lst.idx_lebs); 1449 dbg_msg("index LEBs: %d", c->lst.idx_lebs);
1444 dbg_msg("total index bytes: %lld (%lld KiB, %lld MiB)", 1450 dbg_msg("total index bytes: %lld (%lld KiB, %lld MiB)",
1445 c->old_idx_sz, c->old_idx_sz >> 10, c->old_idx_sz >> 20); 1451 c->bi.old_idx_sz, c->bi.old_idx_sz >> 10,
1452 c->bi.old_idx_sz >> 20);
1446 dbg_msg("key hash type: %d", c->key_hash_type); 1453 dbg_msg("key hash type: %d", c->key_hash_type);
1447 dbg_msg("tree fanout: %d", c->fanout); 1454 dbg_msg("tree fanout: %d", c->fanout);
1448 dbg_msg("reserved GC LEB: %d", c->gc_lnum); 1455 dbg_msg("reserved GC LEB: %d", c->gc_lnum);
@@ -1456,7 +1463,7 @@ static int mount_ubifs(struct ubifs_info *c)
1456 dbg_msg("node sizes: ref %zu, cmt. start %zu, orph %zu", 1463 dbg_msg("node sizes: ref %zu, cmt. start %zu, orph %zu",
1457 UBIFS_REF_NODE_SZ, UBIFS_CS_NODE_SZ, UBIFS_ORPH_NODE_SZ); 1464 UBIFS_REF_NODE_SZ, UBIFS_CS_NODE_SZ, UBIFS_ORPH_NODE_SZ);
1458 dbg_msg("max. node sizes: data %zu, inode %zu dentry %zu, idx %d", 1465 dbg_msg("max. node sizes: data %zu, inode %zu dentry %zu, idx %d",
1459 UBIFS_MAX_DATA_NODE_SZ, UBIFS_MAX_INO_NODE_SZ, 1466 UBIFS_MAX_DATA_NODE_SZ, UBIFS_MAX_INO_NODE_SZ,
1460 UBIFS_MAX_DENT_NODE_SZ, ubifs_idx_node_sz(c, c->fanout)); 1467 UBIFS_MAX_DENT_NODE_SZ, ubifs_idx_node_sz(c, c->fanout));
1461 dbg_msg("dead watermark: %d", c->dead_wm); 1468 dbg_msg("dead watermark: %d", c->dead_wm);
1462 dbg_msg("dark watermark: %d", c->dark_wm); 1469 dbg_msg("dark watermark: %d", c->dark_wm);
@@ -1584,6 +1591,7 @@ static int ubifs_remount_rw(struct ubifs_info *c)
1584 } 1591 }
1585 sup->leb_cnt = cpu_to_le32(c->leb_cnt); 1592 sup->leb_cnt = cpu_to_le32(c->leb_cnt);
1586 err = ubifs_write_sb_node(c, sup); 1593 err = ubifs_write_sb_node(c, sup);
1594 kfree(sup);
1587 if (err) 1595 if (err)
1588 goto out; 1596 goto out;
1589 } 1597 }
@@ -1684,6 +1692,13 @@ static int ubifs_remount_rw(struct ubifs_info *c)
1684 */ 1692 */
1685 err = dbg_check_space_info(c); 1693 err = dbg_check_space_info(c);
1686 } 1694 }
1695
1696 if (c->space_fixup) {
1697 err = ubifs_fixup_free_space(c);
1698 if (err)
1699 goto out;
1700 }
1701
1687 mutex_unlock(&c->umount_mutex); 1702 mutex_unlock(&c->umount_mutex);
1688 return err; 1703 return err;
1689 1704
@@ -1766,10 +1781,9 @@ static void ubifs_put_super(struct super_block *sb)
1766 * to write them back because of I/O errors. 1781 * to write them back because of I/O errors.
1767 */ 1782 */
1768 if (!c->ro_error) { 1783 if (!c->ro_error) {
1769 ubifs_assert(atomic_long_read(&c->dirty_pg_cnt) == 0); 1784 ubifs_assert(c->bi.idx_growth == 0);
1770 ubifs_assert(c->budg_idx_growth == 0); 1785 ubifs_assert(c->bi.dd_growth == 0);
1771 ubifs_assert(c->budg_dd_growth == 0); 1786 ubifs_assert(c->bi.data_growth == 0);
1772 ubifs_assert(c->budg_data_growth == 0);
1773 } 1787 }
1774 1788
1775 /* 1789 /*
diff --git a/fs/ubifs/tnc.c b/fs/ubifs/tnc.c
index de485979ca39..8119b1fd8d94 100644
--- a/fs/ubifs/tnc.c
+++ b/fs/ubifs/tnc.c
@@ -2557,11 +2557,11 @@ int ubifs_tnc_remove_nm(struct ubifs_info *c, const union ubifs_key *key,
2557 if (err) { 2557 if (err) {
2558 /* Ensure the znode is dirtied */ 2558 /* Ensure the znode is dirtied */
2559 if (znode->cnext || !ubifs_zn_dirty(znode)) { 2559 if (znode->cnext || !ubifs_zn_dirty(znode)) {
2560 znode = dirty_cow_bottom_up(c, znode); 2560 znode = dirty_cow_bottom_up(c, znode);
2561 if (IS_ERR(znode)) { 2561 if (IS_ERR(znode)) {
2562 err = PTR_ERR(znode); 2562 err = PTR_ERR(znode);
2563 goto out_unlock; 2563 goto out_unlock;
2564 } 2564 }
2565 } 2565 }
2566 err = tnc_delete(c, znode, n); 2566 err = tnc_delete(c, znode, n);
2567 } 2567 }
diff --git a/fs/ubifs/tnc_commit.c b/fs/ubifs/tnc_commit.c
index 53288e5d604e..41920f357bbf 100644
--- a/fs/ubifs/tnc_commit.c
+++ b/fs/ubifs/tnc_commit.c
@@ -377,15 +377,13 @@ static int layout_in_gaps(struct ubifs_info *c, int cnt)
377 c->gap_lebs = NULL; 377 c->gap_lebs = NULL;
378 return err; 378 return err;
379 } 379 }
380 if (!dbg_force_in_the_gaps_enabled) { 380 if (dbg_force_in_the_gaps_enabled()) {
381 /* 381 /*
382 * Do not print scary warnings if the debugging 382 * Do not print scary warnings if the debugging
383 * option which forces in-the-gaps is enabled. 383 * option which forces in-the-gaps is enabled.
384 */ 384 */
385 ubifs_err("out of space"); 385 ubifs_warn("out of space");
386 spin_lock(&c->space_lock); 386 dbg_dump_budg(c, &c->bi);
387 dbg_dump_budg(c);
388 spin_unlock(&c->space_lock);
389 dbg_dump_lprops(c); 387 dbg_dump_lprops(c);
390 } 388 }
391 /* Try to commit anyway */ 389 /* Try to commit anyway */
@@ -796,16 +794,16 @@ int ubifs_tnc_start_commit(struct ubifs_info *c, struct ubifs_zbranch *zroot)
796 spin_lock(&c->space_lock); 794 spin_lock(&c->space_lock);
797 /* 795 /*
798 * Although we have not finished committing yet, update size of the 796 * Although we have not finished committing yet, update size of the
799 * committed index ('c->old_idx_sz') and zero out the index growth 797 * committed index ('c->bi.old_idx_sz') and zero out the index growth
800 * budget. It is OK to do this now, because we've reserved all the 798 * budget. It is OK to do this now, because we've reserved all the
801 * space which is needed to commit the index, and it is safe for the 799 * space which is needed to commit the index, and it is safe for the
802 * budgeting subsystem to assume the index is already committed, 800 * budgeting subsystem to assume the index is already committed,
803 * even though it is not. 801 * even though it is not.
804 */ 802 */
805 ubifs_assert(c->min_idx_lebs == ubifs_calc_min_idx_lebs(c)); 803 ubifs_assert(c->bi.min_idx_lebs == ubifs_calc_min_idx_lebs(c));
806 c->old_idx_sz = c->calc_idx_sz; 804 c->bi.old_idx_sz = c->calc_idx_sz;
807 c->budg_uncommitted_idx = 0; 805 c->bi.uncommitted_idx = 0;
808 c->min_idx_lebs = ubifs_calc_min_idx_lebs(c); 806 c->bi.min_idx_lebs = ubifs_calc_min_idx_lebs(c);
809 spin_unlock(&c->space_lock); 807 spin_unlock(&c->space_lock);
810 mutex_unlock(&c->tnc_mutex); 808 mutex_unlock(&c->tnc_mutex);
811 809
diff --git a/fs/ubifs/ubifs-media.h b/fs/ubifs/ubifs-media.h
index 191ca7863fe7..e24380cf46ed 100644
--- a/fs/ubifs/ubifs-media.h
+++ b/fs/ubifs/ubifs-media.h
@@ -408,9 +408,11 @@ enum {
408 * Superblock flags. 408 * Superblock flags.
409 * 409 *
410 * UBIFS_FLG_BIGLPT: if "big" LPT model is used if set 410 * UBIFS_FLG_BIGLPT: if "big" LPT model is used if set
411 * UBIFS_FLG_SPACE_FIXUP: first-mount "fixup" of free space within LEBs needed
411 */ 412 */
412enum { 413enum {
413 UBIFS_FLG_BIGLPT = 0x02, 414 UBIFS_FLG_BIGLPT = 0x02,
415 UBIFS_FLG_SPACE_FIXUP = 0x04,
414}; 416};
415 417
416/** 418/**
@@ -434,7 +436,7 @@ struct ubifs_ch {
434 __u8 node_type; 436 __u8 node_type;
435 __u8 group_type; 437 __u8 group_type;
436 __u8 padding[2]; 438 __u8 padding[2];
437} __attribute__ ((packed)); 439} __packed;
438 440
439/** 441/**
440 * union ubifs_dev_desc - device node descriptor. 442 * union ubifs_dev_desc - device node descriptor.
@@ -448,7 +450,7 @@ struct ubifs_ch {
448union ubifs_dev_desc { 450union ubifs_dev_desc {
449 __le32 new; 451 __le32 new;
450 __le64 huge; 452 __le64 huge;
451} __attribute__ ((packed)); 453} __packed;
452 454
453/** 455/**
454 * struct ubifs_ino_node - inode node. 456 * struct ubifs_ino_node - inode node.
@@ -509,7 +511,7 @@ struct ubifs_ino_node {
509 __le16 compr_type; 511 __le16 compr_type;
510 __u8 padding2[26]; /* Watch 'zero_ino_node_unused()' if changing! */ 512 __u8 padding2[26]; /* Watch 'zero_ino_node_unused()' if changing! */
511 __u8 data[]; 513 __u8 data[];
512} __attribute__ ((packed)); 514} __packed;
513 515
514/** 516/**
515 * struct ubifs_dent_node - directory entry node. 517 * struct ubifs_dent_node - directory entry node.
@@ -534,7 +536,7 @@ struct ubifs_dent_node {
534 __le16 nlen; 536 __le16 nlen;
535 __u8 padding2[4]; /* Watch 'zero_dent_node_unused()' if changing! */ 537 __u8 padding2[4]; /* Watch 'zero_dent_node_unused()' if changing! */
536 __u8 name[]; 538 __u8 name[];
537} __attribute__ ((packed)); 539} __packed;
538 540
539/** 541/**
540 * struct ubifs_data_node - data node. 542 * struct ubifs_data_node - data node.
@@ -555,7 +557,7 @@ struct ubifs_data_node {
555 __le16 compr_type; 557 __le16 compr_type;
556 __u8 padding[2]; /* Watch 'zero_data_node_unused()' if changing! */ 558 __u8 padding[2]; /* Watch 'zero_data_node_unused()' if changing! */
557 __u8 data[]; 559 __u8 data[];
558} __attribute__ ((packed)); 560} __packed;
559 561
560/** 562/**
561 * struct ubifs_trun_node - truncation node. 563 * struct ubifs_trun_node - truncation node.
@@ -575,7 +577,7 @@ struct ubifs_trun_node {
575 __u8 padding[12]; /* Watch 'zero_trun_node_unused()' if changing! */ 577 __u8 padding[12]; /* Watch 'zero_trun_node_unused()' if changing! */
576 __le64 old_size; 578 __le64 old_size;
577 __le64 new_size; 579 __le64 new_size;
578} __attribute__ ((packed)); 580} __packed;
579 581
580/** 582/**
581 * struct ubifs_pad_node - padding node. 583 * struct ubifs_pad_node - padding node.
@@ -586,7 +588,7 @@ struct ubifs_trun_node {
586struct ubifs_pad_node { 588struct ubifs_pad_node {
587 struct ubifs_ch ch; 589 struct ubifs_ch ch;
588 __le32 pad_len; 590 __le32 pad_len;
589} __attribute__ ((packed)); 591} __packed;
590 592
591/** 593/**
592 * struct ubifs_sb_node - superblock node. 594 * struct ubifs_sb_node - superblock node.
@@ -644,7 +646,7 @@ struct ubifs_sb_node {
644 __u8 uuid[16]; 646 __u8 uuid[16];
645 __le32 ro_compat_version; 647 __le32 ro_compat_version;
646 __u8 padding2[3968]; 648 __u8 padding2[3968];
647} __attribute__ ((packed)); 649} __packed;
648 650
649/** 651/**
650 * struct ubifs_mst_node - master node. 652 * struct ubifs_mst_node - master node.
@@ -711,7 +713,7 @@ struct ubifs_mst_node {
711 __le32 idx_lebs; 713 __le32 idx_lebs;
712 __le32 leb_cnt; 714 __le32 leb_cnt;
713 __u8 padding[344]; 715 __u8 padding[344];
714} __attribute__ ((packed)); 716} __packed;
715 717
716/** 718/**
717 * struct ubifs_ref_node - logical eraseblock reference node. 719 * struct ubifs_ref_node - logical eraseblock reference node.
@@ -727,7 +729,7 @@ struct ubifs_ref_node {
727 __le32 offs; 729 __le32 offs;
728 __le32 jhead; 730 __le32 jhead;
729 __u8 padding[28]; 731 __u8 padding[28];
730} __attribute__ ((packed)); 732} __packed;
731 733
732/** 734/**
733 * struct ubifs_branch - key/reference/length branch 735 * struct ubifs_branch - key/reference/length branch
@@ -741,7 +743,7 @@ struct ubifs_branch {
741 __le32 offs; 743 __le32 offs;
742 __le32 len; 744 __le32 len;
743 __u8 key[]; 745 __u8 key[];
744} __attribute__ ((packed)); 746} __packed;
745 747
746/** 748/**
747 * struct ubifs_idx_node - indexing node. 749 * struct ubifs_idx_node - indexing node.
@@ -755,7 +757,7 @@ struct ubifs_idx_node {
755 __le16 child_cnt; 757 __le16 child_cnt;
756 __le16 level; 758 __le16 level;
757 __u8 branches[]; 759 __u8 branches[];
758} __attribute__ ((packed)); 760} __packed;
759 761
760/** 762/**
761 * struct ubifs_cs_node - commit start node. 763 * struct ubifs_cs_node - commit start node.
@@ -765,7 +767,7 @@ struct ubifs_idx_node {
765struct ubifs_cs_node { 767struct ubifs_cs_node {
766 struct ubifs_ch ch; 768 struct ubifs_ch ch;
767 __le64 cmt_no; 769 __le64 cmt_no;
768} __attribute__ ((packed)); 770} __packed;
769 771
770/** 772/**
771 * struct ubifs_orph_node - orphan node. 773 * struct ubifs_orph_node - orphan node.
@@ -777,6 +779,6 @@ struct ubifs_orph_node {
777 struct ubifs_ch ch; 779 struct ubifs_ch ch;
778 __le64 cmt_no; 780 __le64 cmt_no;
779 __le64 inos[]; 781 __le64 inos[];
780} __attribute__ ((packed)); 782} __packed;
781 783
782#endif /* __UBIFS_MEDIA_H__ */ 784#endif /* __UBIFS_MEDIA_H__ */
diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h
index 8c40ad3c6721..93d1412a06f0 100644
--- a/fs/ubifs/ubifs.h
+++ b/fs/ubifs/ubifs.h
@@ -389,9 +389,9 @@ struct ubifs_gced_idx_leb {
389 * The @ui_size is a "shadow" variable for @inode->i_size and UBIFS uses 389 * The @ui_size is a "shadow" variable for @inode->i_size and UBIFS uses
390 * @ui_size instead of @inode->i_size. The reason for this is that UBIFS cannot 390 * @ui_size instead of @inode->i_size. The reason for this is that UBIFS cannot
391 * make sure @inode->i_size is always changed under @ui_mutex, because it 391 * make sure @inode->i_size is always changed under @ui_mutex, because it
392 * cannot call 'truncate_setsize()' with @ui_mutex locked, because it would deadlock 392 * cannot call 'truncate_setsize()' with @ui_mutex locked, because it would
393 * with 'ubifs_writepage()' (see file.c). All the other inode fields are 393 * deadlock with 'ubifs_writepage()' (see file.c). All the other inode fields
394 * changed under @ui_mutex, so they do not need "shadow" fields. Note, one 394 * are changed under @ui_mutex, so they do not need "shadow" fields. Note, one
395 * could consider to rework locking and base it on "shadow" fields. 395 * could consider to rework locking and base it on "shadow" fields.
396 */ 396 */
397struct ubifs_inode { 397struct ubifs_inode {
@@ -937,6 +937,40 @@ struct ubifs_mount_opts {
937 unsigned int compr_type:2; 937 unsigned int compr_type:2;
938}; 938};
939 939
940/**
941 * struct ubifs_budg_info - UBIFS budgeting information.
942 * @idx_growth: amount of bytes budgeted for index growth
943 * @data_growth: amount of bytes budgeted for cached data
944 * @dd_growth: amount of bytes budgeted for cached data that will make
945 * other data dirty
946 * @uncommitted_idx: amount of bytes that were budgeted for growth of the index, but
947 * which still have to be taken into account because the index
948 * has not been committed so far
949 * @old_idx_sz: size of index on flash
950 * @min_idx_lebs: minimum number of LEBs required for the index
951 * @nospace: non-zero if the file-system does not have flash space (used as
952 * optimization)
953 * @nospace_rp: the same as @nospace, but additionally means that even reserved
954 * pool is full
955 * @page_budget: budget for a page (constant, never changed after mount)
956 * @inode_budget: budget for an inode (constant, never changed after mount)
957 * @dent_budget: budget for a directory entry (constant, never changed after
958 * mount)
959 */
960struct ubifs_budg_info {
961 long long idx_growth;
962 long long data_growth;
963 long long dd_growth;
964 long long uncommitted_idx;
965 unsigned long long old_idx_sz;
966 int min_idx_lebs;
967 unsigned int nospace:1;
968 unsigned int nospace_rp:1;
969 int page_budget;
970 int inode_budget;
971 int dent_budget;
972};
973
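As documented for @space_lock further down in this patch, @c->bi may only be
accessed while holding that spinlock; a minimal sketch of a reader (the
variable name is illustrative):

	long long outstanding;

	spin_lock(&c->space_lock);
	outstanding = c->bi.data_growth + c->bi.dd_growth;
	spin_unlock(&c->space_lock);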
940struct ubifs_debug_info; 974struct ubifs_debug_info;
941 975
942/** 976/**
@@ -980,6 +1014,7 @@ struct ubifs_debug_info;
980 * @cmt_wq: wait queue to sleep on if the log is full and a commit is running 1014 * @cmt_wq: wait queue to sleep on if the log is full and a commit is running
981 * 1015 *
982 * @big_lpt: flag that LPT is too big to write whole during commit 1016 * @big_lpt: flag that LPT is too big to write whole during commit
1017 * @space_fixup: flag indicating that free space in LEBs needs to be cleaned up
983 * @no_chk_data_crc: do not check CRCs when reading data nodes (except during 1018 * @no_chk_data_crc: do not check CRCs when reading data nodes (except during
984 * recovery) 1019 * recovery)
985 * @bulk_read: enable bulk-reads 1020 * @bulk_read: enable bulk-reads
@@ -1057,32 +1092,14 @@ struct ubifs_debug_info;
1057 * @dirty_zn_cnt: number of dirty znodes 1092 * @dirty_zn_cnt: number of dirty znodes
1058 * @clean_zn_cnt: number of clean znodes 1093 * @clean_zn_cnt: number of clean znodes
1059 * 1094 *
1060 * @budg_idx_growth: amount of bytes budgeted for index growth 1095 * @space_lock: protects @bi and @lst
1061 * @budg_data_growth: amount of bytes budgeted for cached data 1096 * @lst: lprops statistics
1062 * @budg_dd_growth: amount of bytes budgeted for cached data that will make 1097 * @bi: budgeting information
1063 * other data dirty
1064 * @budg_uncommitted_idx: amount of bytes were budgeted for growth of the index,
1065 * but which still have to be taken into account because
1066 * the index has not been committed so far
1067 * @space_lock: protects @budg_idx_growth, @budg_data_growth, @budg_dd_growth,
1068 * @budg_uncommited_idx, @min_idx_lebs, @old_idx_sz, @lst,
1069 * @nospace, and @nospace_rp;
1070 * @min_idx_lebs: minimum number of LEBs required for the index
1071 * @old_idx_sz: size of index on flash
1072 * @calc_idx_sz: temporary variable which is used to calculate new index size 1098 * @calc_idx_sz: temporary variable which is used to calculate new index size
1073 * (contains accurate new index size at end of TNC commit start) 1099 * (contains accurate new index size at end of TNC commit start)
1074 * @lst: lprops statistics
1075 * @nospace: non-zero if the file-system does not have flash space (used as
1076 * optimization)
1077 * @nospace_rp: the same as @nospace, but additionally means that even reserved
1078 * pool is full
1079 *
1080 * @page_budget: budget for a page
1081 * @inode_budget: budget for an inode
1082 * @dent_budget: budget for a directory entry
1083 * 1100 *
1084 * @ref_node_alsz: size of the LEB reference node aligned to the min. flash 1101 * @ref_node_alsz: size of the LEB reference node aligned to the min. flash
1085 * I/O unit 1102 * I/O unit
1086 * @mst_node_alsz: master node aligned size 1103 * @mst_node_alsz: master node aligned size
1087 * @min_idx_node_sz: minimum indexing node aligned on 8-bytes boundary 1104 * @min_idx_node_sz: minimum indexing node aligned on 8-bytes boundary
1088 * @max_idx_node_sz: maximum indexing node aligned on 8-bytes boundary 1105 * @max_idx_node_sz: maximum indexing node aligned on 8-bytes boundary
@@ -1189,7 +1206,6 @@ struct ubifs_debug_info;
1189 * @replaying: %1 during journal replay 1206 * @replaying: %1 during journal replay
1190 * @mounting: %1 while mounting 1207 * @mounting: %1 while mounting
1191 * @remounting_rw: %1 while re-mounting from R/O mode to R/W mode 1208 * @remounting_rw: %1 while re-mounting from R/O mode to R/W mode
1192 * @replay_tree: temporary tree used during journal replay
1193 * @replay_list: temporary list used during journal replay 1209 * @replay_list: temporary list used during journal replay
1194 * @replay_buds: list of buds to replay 1210 * @replay_buds: list of buds to replay
1195 * @cs_sqnum: sequence number of first node in the log (commit start node) 1211 * @cs_sqnum: sequence number of first node in the log (commit start node)
@@ -1238,6 +1254,7 @@ struct ubifs_info {
1238 wait_queue_head_t cmt_wq; 1254 wait_queue_head_t cmt_wq;
1239 1255
1240 unsigned int big_lpt:1; 1256 unsigned int big_lpt:1;
1257 unsigned int space_fixup:1;
1241 unsigned int no_chk_data_crc:1; 1258 unsigned int no_chk_data_crc:1;
1242 unsigned int bulk_read:1; 1259 unsigned int bulk_read:1;
1243 unsigned int default_compr:2; 1260 unsigned int default_compr:2;
@@ -1308,21 +1325,10 @@ struct ubifs_info {
1308 atomic_long_t dirty_zn_cnt; 1325 atomic_long_t dirty_zn_cnt;
1309 atomic_long_t clean_zn_cnt; 1326 atomic_long_t clean_zn_cnt;
1310 1327
1311 long long budg_idx_growth;
1312 long long budg_data_growth;
1313 long long budg_dd_growth;
1314 long long budg_uncommitted_idx;
1315 spinlock_t space_lock; 1328 spinlock_t space_lock;
1316 int min_idx_lebs;
1317 unsigned long long old_idx_sz;
1318 unsigned long long calc_idx_sz;
1319 struct ubifs_lp_stats lst; 1329 struct ubifs_lp_stats lst;
1320 unsigned int nospace:1; 1330 struct ubifs_budg_info bi;
1321 unsigned int nospace_rp:1; 1331 unsigned long long calc_idx_sz;
1322
1323 int page_budget;
1324 int inode_budget;
1325 int dent_budget;
1326 1332
1327 int ref_node_alsz; 1333 int ref_node_alsz;
1328 int mst_node_alsz; 1334 int mst_node_alsz;
@@ -1430,7 +1436,6 @@ struct ubifs_info {
1430 unsigned int replaying:1; 1436 unsigned int replaying:1;
1431 unsigned int mounting:1; 1437 unsigned int mounting:1;
1432 unsigned int remounting_rw:1; 1438 unsigned int remounting_rw:1;
1433 struct rb_root replay_tree;
1434 struct list_head replay_list; 1439 struct list_head replay_list;
1435 struct list_head replay_buds; 1440 struct list_head replay_buds;
1436 unsigned long long cs_sqnum; 1441 unsigned long long cs_sqnum;
@@ -1628,6 +1633,7 @@ int ubifs_write_master(struct ubifs_info *c);
1628int ubifs_read_superblock(struct ubifs_info *c); 1633int ubifs_read_superblock(struct ubifs_info *c);
1629struct ubifs_sb_node *ubifs_read_sb_node(struct ubifs_info *c); 1634struct ubifs_sb_node *ubifs_read_sb_node(struct ubifs_info *c);
1630int ubifs_write_sb_node(struct ubifs_info *c, struct ubifs_sb_node *sup); 1635int ubifs_write_sb_node(struct ubifs_info *c, struct ubifs_sb_node *sup);
1636int ubifs_fixup_free_space(struct ubifs_info *c);
1631 1637
1632/* replay.c */ 1638/* replay.c */
1633int ubifs_validate_entry(struct ubifs_info *c, 1639int ubifs_validate_entry(struct ubifs_info *c,
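Editorially, the budgeting fields removed above are regrouped into the new struct ubifs_budg_info, reachable as c->bi, while @space_fixup and ubifs_fixup_free_space() add the free-space fixup pass. A hedged sketch of the access-pattern change; the struct definition is not in this hunk, so the field names (assumed to drop the budg_ prefix) are illustrative:

	/* before: c->budg_data_growth, c->budg_dd_growth, c->min_idx_lebs */
	long long outstanding;
	int idx_lebs;

	spin_lock(&c->space_lock);
	outstanding = c->bi.data_growth + c->bi.dd_growth;	/* assumed names */
	idx_lebs = c->bi.min_idx_lebs;
	spin_unlock(&c->space_lock);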
diff --git a/fs/ubifs/xattr.c b/fs/ubifs/xattr.c
index 3299f469e712..16f19f55e63f 100644
--- a/fs/ubifs/xattr.c
+++ b/fs/ubifs/xattr.c
@@ -80,8 +80,8 @@ enum {
80 SECURITY_XATTR, 80 SECURITY_XATTR,
81}; 81};
82 82
83static const struct inode_operations none_inode_operations; 83static const struct inode_operations empty_iops;
84static const struct file_operations none_file_operations; 84static const struct file_operations empty_fops;
85 85
86/** 86/**
87 * create_xattr - create an extended attribute. 87 * create_xattr - create an extended attribute.
@@ -131,8 +131,8 @@ static int create_xattr(struct ubifs_info *c, struct inode *host,
131 131
132 /* Re-define all operations to be "nothing" */ 132 /* Re-define all operations to be "nothing" */
133 inode->i_mapping->a_ops = &empty_aops; 133 inode->i_mapping->a_ops = &empty_aops;
134 inode->i_op = &none_inode_operations; 134 inode->i_op = &empty_iops;
135 inode->i_fop = &none_file_operations; 135 inode->i_fop = &empty_fops;
136 136
137 inode->i_flags |= S_SYNC | S_NOATIME | S_NOCMTIME | S_NOQUOTA; 137 inode->i_flags |= S_SYNC | S_NOATIME | S_NOCMTIME | S_NOQUOTA;
138 ui = ubifs_inode(inode); 138 ui = ubifs_inode(inode);
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index b4bfe338ea0e..e9b8e5926bef 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -184,22 +184,18 @@ static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
184#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 184#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
185#endif 185#endif
186 186
187#ifndef __HAVE_ARCH_PAGE_TEST_DIRTY 187#ifndef __HAVE_ARCH_PAGE_TEST_AND_CLEAR_DIRTY
188#define page_test_dirty(page) (0) 188#define page_test_and_clear_dirty(pfn, mapped) (0)
189#endif 189#endif
190 190
191#ifndef __HAVE_ARCH_PAGE_CLEAR_DIRTY 191#ifndef __HAVE_ARCH_PAGE_TEST_AND_CLEAR_DIRTY
192#define page_clear_dirty(page, mapped) do { } while (0)
193#endif
194
195#ifndef __HAVE_ARCH_PAGE_TEST_DIRTY
196#define pte_maybe_dirty(pte) pte_dirty(pte) 192#define pte_maybe_dirty(pte) pte_dirty(pte)
197#else 193#else
198#define pte_maybe_dirty(pte) (1) 194#define pte_maybe_dirty(pte) (1)
199#endif 195#endif
200 196
201#ifndef __HAVE_ARCH_PAGE_TEST_AND_CLEAR_YOUNG 197#ifndef __HAVE_ARCH_PAGE_TEST_AND_CLEAR_YOUNG
202#define page_test_and_clear_young(page) (0) 198#define page_test_and_clear_young(pfn) (0)
203#endif 199#endif
204 200
205#ifndef __HAVE_ARCH_PGD_OFFSET_GATE 201#ifndef __HAVE_ARCH_PGD_OFFSET_GATE
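The split page_test_dirty()/page_clear_dirty() pair collapses here into a single page_test_and_clear_dirty() keyed by pfn rather than by struct page, mirroring page_test_and_clear_young(). A sketch of the rmap-side caller this interface implies (the actual call site is not shown in this hunk):

	/* propagate a storage-key dirty bit into the generic dirty state */
	if (page_test_and_clear_dirty(page_to_pfn(page), 1))
		set_page_dirty(page);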
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 077c00d94f6e..db22d136ad08 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -15,7 +15,7 @@
15 * HEAD_TEXT_SECTION 15 * HEAD_TEXT_SECTION
16 * INIT_TEXT_SECTION(PAGE_SIZE) 16 * INIT_TEXT_SECTION(PAGE_SIZE)
17 * INIT_DATA_SECTION(...) 17 * INIT_DATA_SECTION(...)
18 * PERCPU(CACHELINE_SIZE, PAGE_SIZE) 18 * PERCPU_SECTION(CACHELINE_SIZE)
19 * __init_end = .; 19 * __init_end = .;
20 * 20 *
21 * _stext = .; 21 * _stext = .;
@@ -682,6 +682,28 @@
682 } 682 }
683 683
684/** 684/**
685 * PERCPU_INPUT - the percpu input sections
686 * @cacheline: cacheline size
687 *
688 * The core percpu section names and core symbols which do not rely
689 * directly upon load addresses.
690 *
691 * @cacheline is used to align subsections to avoid false cacheline
692 * sharing between subsections for different purposes.
693 */
694#define PERCPU_INPUT(cacheline) \
695 VMLINUX_SYMBOL(__per_cpu_start) = .; \
696 *(.data..percpu..first) \
697 . = ALIGN(PAGE_SIZE); \
698 *(.data..percpu..page_aligned) \
699 . = ALIGN(cacheline); \
700 *(.data..percpu..readmostly) \
701 . = ALIGN(cacheline); \
702 *(.data..percpu) \
703 *(.data..percpu..shared_aligned) \
704 VMLINUX_SYMBOL(__per_cpu_end) = .;
705
706/**
685 * PERCPU_VADDR - define output section for percpu area 707 * PERCPU_VADDR - define output section for percpu area
686 * @cacheline: cacheline size 708 * @cacheline: cacheline size
687 * @vaddr: explicit base address (optional) 709 * @vaddr: explicit base address (optional)
@@ -703,52 +725,33 @@
703 * 725 *
704 * Note that this macro defines __per_cpu_load as an absolute symbol. 726 * Note that this macro defines __per_cpu_load as an absolute symbol.

705 * If there is no need to put the percpu section at a predetermined 727 * If there is no need to put the percpu section at a predetermined
706 * address, use PERCPU(). 728 * address, use PERCPU_SECTION.
707 */ 729 */
708#define PERCPU_VADDR(cacheline, vaddr, phdr) \ 730#define PERCPU_VADDR(cacheline, vaddr, phdr) \
709 VMLINUX_SYMBOL(__per_cpu_load) = .; \ 731 VMLINUX_SYMBOL(__per_cpu_load) = .; \
710 .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \ 732 .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
711 - LOAD_OFFSET) { \ 733 - LOAD_OFFSET) { \
712 VMLINUX_SYMBOL(__per_cpu_start) = .; \ 734 PERCPU_INPUT(cacheline) \
713 *(.data..percpu..first) \
714 . = ALIGN(PAGE_SIZE); \
715 *(.data..percpu..page_aligned) \
716 . = ALIGN(cacheline); \
717 *(.data..percpu..readmostly) \
718 . = ALIGN(cacheline); \
719 *(.data..percpu) \
720 *(.data..percpu..shared_aligned) \
721 VMLINUX_SYMBOL(__per_cpu_end) = .; \
722 } phdr \ 735 } phdr \
723 . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu); 736 . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
724 737
725/** 738/**
726 * PERCPU - define output section for percpu area, simple version 739 * PERCPU_SECTION - define output section for percpu area, simple version
727 * @cacheline: cacheline size 740 * @cacheline: cacheline size
728 * @align: required alignment
729 * 741 *
730 * Align to @align and outputs output section for percpu area. This macro 742 * Align to PAGE_SIZE and outputs output section for percpu area. This
731 * doesn't manipulate @vaddr or @phdr and __per_cpu_load and 743 * macro doesn't manipulate @vaddr or @phdr and __per_cpu_load and
732 * __per_cpu_start will be identical. 744 * __per_cpu_start will be identical.
733 * 745 *
734 * This macro is equivalent to ALIGN(@align); PERCPU_VADDR(@cacheline,,) 746 * This macro is equivalent to ALIGN(PAGE_SIZE); PERCPU_VADDR(@cacheline,,)
735 * except that __per_cpu_load is defined as a relative symbol against 747 * except that __per_cpu_load is defined as a relative symbol against
736 * .data..percpu which is required for relocatable x86_32 configuration. 748 * .data..percpu which is required for relocatable x86_32 configuration.
737 */ 749 */
738#define PERCPU(cacheline, align) \ 750#define PERCPU_SECTION(cacheline) \
739 . = ALIGN(align); \ 751 . = ALIGN(PAGE_SIZE); \
740 .data..percpu : AT(ADDR(.data..percpu) - LOAD_OFFSET) { \ 752 .data..percpu : AT(ADDR(.data..percpu) - LOAD_OFFSET) { \
741 VMLINUX_SYMBOL(__per_cpu_load) = .; \ 753 VMLINUX_SYMBOL(__per_cpu_load) = .; \
742 VMLINUX_SYMBOL(__per_cpu_start) = .; \ 754 PERCPU_INPUT(cacheline) \
743 *(.data..percpu..first) \
744 . = ALIGN(PAGE_SIZE); \
745 *(.data..percpu..page_aligned) \
746 . = ALIGN(cacheline); \
747 *(.data..percpu..readmostly) \
748 . = ALIGN(cacheline); \
749 *(.data..percpu) \
750 *(.data..percpu..shared_aligned) \
751 VMLINUX_SYMBOL(__per_cpu_end) = .; \
752 } 755 }
753 756
754 757
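PERCPU_INPUT() now carries the single authoritative list of percpu input sections, shared by PERCPU_VADDR() and the renamed PERCPU_SECTION(), which always aligns to PAGE_SIZE instead of taking an @align argument. The conversion in an architecture linker script is mechanical; a sketch, with an illustrative cacheline argument:

	/* arch/<arch>/kernel/vmlinux.lds.S */
	PERCPU(L1_CACHE_BYTES, PAGE_SIZE)	/* old form: cacheline, align */
	PERCPU_SECTION(L1_CACHE_BYTES)		/* new form: always page aligned */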
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 202424d17ed7..738b3a5faa12 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -122,10 +122,14 @@ struct drm_device;
122 * using the DRM_DEBUG_KMS and DRM_DEBUG. 122 * using the DRM_DEBUG_KMS and DRM_DEBUG.
123 */ 123 */
124 124
125extern void drm_ut_debug_printk(unsigned int request_level, 125extern __attribute__((format (printf, 4, 5)))
126void drm_ut_debug_printk(unsigned int request_level,
126 const char *prefix, 127 const char *prefix,
127 const char *function_name, 128 const char *function_name,
128 const char *format, ...); 129 const char *format, ...);
130extern __attribute__((format (printf, 2, 3)))
131int drm_err(const char *func, const char *format, ...);
132
129/***********************************************************************/ 133/***********************************************************************/
130/** \name DRM template customization defaults */ 134/** \name DRM template customization defaults */
131/*@{*/ 135/*@{*/
@@ -181,21 +185,11 @@ extern void drm_ut_debug_printk(unsigned int request_level,
181 * \param fmt printf() like format string. 185 * \param fmt printf() like format string.
182 * \param arg arguments 186 * \param arg arguments
183 */ 187 */
184#define DRM_ERROR(fmt, arg...) \ 188#define DRM_ERROR(fmt, ...) \
185 printk(KERN_ERR "[" DRM_NAME ":%s] *ERROR* " fmt , __func__ , ##arg) 189 drm_err(__func__, fmt, ##__VA_ARGS__)
186
187/**
188 * Memory error output.
189 *
190 * \param area memory area where the error occurred.
191 * \param fmt printf() like format string.
192 * \param arg arguments
193 */
194#define DRM_MEM_ERROR(area, fmt, arg...) \
195 printk(KERN_ERR "[" DRM_NAME ":%s:%s] *ERROR* " fmt , __func__, \
196 drm_mem_stats[area].name , ##arg)
197 190
198#define DRM_INFO(fmt, arg...) printk(KERN_INFO "[" DRM_NAME "] " fmt , ##arg) 191#define DRM_INFO(fmt, ...) \
192 printk(KERN_INFO "[" DRM_NAME "] " fmt, ##__VA_ARGS__)
199 193
200/** 194/**
201 * Debug output. 195 * Debug output.
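Annotating drm_ut_debug_printk() and the new drm_err() with __attribute__((format(printf, N, N+1))) tells the compiler which argument is the format string (the 4th and 2nd, respectively), so mismatched arguments in DRM_ERROR() users become compile-time warnings. A sketch of what the annotation now catches:

	DRM_ERROR("bad mode %dx%d\n", w, h);	/* fine */
	DRM_ERROR("bad mode %s\n", w);		/* now warns: int passed for %s */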
@@ -1000,6 +994,22 @@ struct drm_minor {
1000 struct drm_mode_group mode_group; 994 struct drm_mode_group mode_group;
1001}; 995};
1002 996
997/* mode specified on the command line */
998struct drm_cmdline_mode {
999 bool specified;
1000 bool refresh_specified;
1001 bool bpp_specified;
1002 int xres, yres;
1003 int bpp;
1004 int refresh;
1005 bool rb;
1006 bool interlace;
1007 bool cvt;
1008 bool margins;
1009 enum drm_connector_force force;
1010};
1011
1012
1003struct drm_pending_vblank_event { 1013struct drm_pending_vblank_event {
1004 struct drm_pending_event base; 1014 struct drm_pending_event base;
1005 int pipe; 1015 int pipe;
@@ -1395,6 +1405,15 @@ extern int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev,
1395 struct drm_crtc *refcrtc); 1405 struct drm_crtc *refcrtc);
1396extern void drm_calc_timestamping_constants(struct drm_crtc *crtc); 1406extern void drm_calc_timestamping_constants(struct drm_crtc *crtc);
1397 1407
1408extern bool
1409drm_mode_parse_command_line_for_connector(const char *mode_option,
1410 struct drm_connector *connector,
1411 struct drm_cmdline_mode *mode);
1412
1413extern struct drm_display_mode *
1414drm_mode_create_from_cmdline_mode(struct drm_device *dev,
1415 struct drm_cmdline_mode *cmd);
1416
1398/* Modesetting support */ 1417/* Modesetting support */
1399extern void drm_vblank_pre_modeset(struct drm_device *dev, int crtc); 1418extern void drm_vblank_pre_modeset(struct drm_device *dev, int crtc);
1400extern void drm_vblank_post_modeset(struct drm_device *dev, int crtc); 1419extern void drm_vblank_post_modeset(struct drm_device *dev, int crtc);
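struct drm_cmdline_mode moves out of the fbdev helper (see the drm_fb_helper.h hunk below) so that core code can parse per-connector video= strings. A hedged usage sketch of the two new helpers; the mode string is illustrative:

	struct drm_cmdline_mode cmdline;
	struct drm_display_mode *mode = NULL;

	/* mode_option would typically come from video=<connector>:<mode> */
	if (drm_mode_parse_command_line_for_connector("1024x768@60",
						      connector, &cmdline))
		mode = drm_mode_create_from_cmdline_mode(dev, &cmdline);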
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index d94684b7ba34..9573e0ce3120 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -183,7 +183,9 @@ enum subpixel_order {
183 SubPixelNone, 183 SubPixelNone,
184}; 184};
185 185
186 186#define DRM_COLOR_FORMAT_RGB444 (1<<0)
187#define DRM_COLOR_FORMAT_YCRCB444 (1<<1)
188#define DRM_COLOR_FORMAT_YCRCB422 (1<<2)
187/* 189/*
188 * Describes a given display (e.g. CRT or flat panel) and its limitations. 190 * Describes a given display (e.g. CRT or flat panel) and its limitations.
189 */ 191 */
@@ -198,8 +200,10 @@ struct drm_display_info {
198 unsigned int min_vfreq, max_vfreq; 200 unsigned int min_vfreq, max_vfreq;
199 unsigned int min_hfreq, max_hfreq; 201 unsigned int min_hfreq, max_hfreq;
200 unsigned int pixel_clock; 202 unsigned int pixel_clock;
203 unsigned int bpc;
201 204
202 enum subpixel_order subpixel_order; 205 enum subpixel_order subpixel_order;
206 u32 color_formats;
203 207
204 char *raw_edid; /* if any */ 208 char *raw_edid; /* if any */
205}; 209};
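struct drm_display_info gains @bpc (bits per color channel) and a @color_formats bitmask built from the new DRM_COLOR_FORMAT_* flags, so EDID-derived sink capabilities become queryable. A sketch:

	bool ycbcr422_ok = (info->color_formats & DRM_COLOR_FORMAT_YCRCB422) &&
			   info->bpc >= 8;	/* sink takes >= 8 bpc YCbCr 4:2:2 */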
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
index 83a389e44543..91567bbdb027 100644
--- a/include/drm/drm_dp_helper.h
+++ b/include/drm/drm_dp_helper.h
@@ -53,6 +53,7 @@
53 53
54#define DP_MAX_LANE_COUNT 0x002 54#define DP_MAX_LANE_COUNT 0x002
55# define DP_MAX_LANE_COUNT_MASK 0x1f 55# define DP_MAX_LANE_COUNT_MASK 0x1f
56# define DP_TPS3_SUPPORTED (1 << 6)
56# define DP_ENHANCED_FRAME_CAP (1 << 7) 57# define DP_ENHANCED_FRAME_CAP (1 << 7)
57 58
58#define DP_MAX_DOWNSPREAD 0x003 59#define DP_MAX_DOWNSPREAD 0x003
@@ -71,10 +72,13 @@
71 72
72#define DP_MAIN_LINK_CHANNEL_CODING 0x006 73#define DP_MAIN_LINK_CHANNEL_CODING 0x006
73 74
75#define DP_TRAINING_AUX_RD_INTERVAL 0x00e
76
74/* link configuration */ 77/* link configuration */
75#define DP_LINK_BW_SET 0x100 78#define DP_LINK_BW_SET 0x100
76# define DP_LINK_BW_1_62 0x06 79# define DP_LINK_BW_1_62 0x06
77# define DP_LINK_BW_2_7 0x0a 80# define DP_LINK_BW_2_7 0x0a
81# define DP_LINK_BW_5_4 0x14
78 82
79#define DP_LANE_COUNT_SET 0x101 83#define DP_LANE_COUNT_SET 0x101
80# define DP_LANE_COUNT_MASK 0x0f 84# define DP_LANE_COUNT_MASK 0x0f
@@ -84,6 +88,7 @@
84# define DP_TRAINING_PATTERN_DISABLE 0 88# define DP_TRAINING_PATTERN_DISABLE 0
85# define DP_TRAINING_PATTERN_1 1 89# define DP_TRAINING_PATTERN_1 1
86# define DP_TRAINING_PATTERN_2 2 90# define DP_TRAINING_PATTERN_2 2
91# define DP_TRAINING_PATTERN_3 3
87# define DP_TRAINING_PATTERN_MASK 0x3 92# define DP_TRAINING_PATTERN_MASK 0x3
88 93
89# define DP_LINK_QUAL_PATTERN_DISABLE (0 << 2) 94# define DP_LINK_QUAL_PATTERN_DISABLE (0 << 2)
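The new DP_LINK_BW_5_4 value follows the existing DPCD encoding: the register holds the per-lane link rate in units of 0.27 Gbps, so 0x06 = 6 x 0.27 = 1.62 Gbps, 0x0a = 10 x 0.27 = 2.7 Gbps, and 0x14 = 20 x 0.27 = 5.4 Gbps. A sketch of the decode:

	/* DPCD link-bw byte -> link clock in kHz */
	static int dp_link_bw_to_clock_khz(u8 link_bw)
	{
		/* 0x06 -> 162000, 0x0a -> 270000, 0x14 -> 540000 */
		return link_bw * 27000;
	}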
diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h
index 5881fad91faa..eacb415b309a 100644
--- a/include/drm/drm_edid.h
+++ b/include/drm/drm_edid.h
@@ -155,12 +155,35 @@ struct detailed_timing {
155#define DRM_EDID_INPUT_SEPARATE_SYNCS (1 << 3) 155#define DRM_EDID_INPUT_SEPARATE_SYNCS (1 << 3)
156#define DRM_EDID_INPUT_BLANK_TO_BLACK (1 << 4) 156#define DRM_EDID_INPUT_BLANK_TO_BLACK (1 << 4)
157#define DRM_EDID_INPUT_VIDEO_LEVEL (3 << 5) 157#define DRM_EDID_INPUT_VIDEO_LEVEL (3 << 5)
158#define DRM_EDID_INPUT_DIGITAL (1 << 7) /* bits below must be zero if set */ 158#define DRM_EDID_INPUT_DIGITAL (1 << 7)
159#define DRM_EDID_DIGITAL_DEPTH_MASK (7 << 4)
160#define DRM_EDID_DIGITAL_DEPTH_UNDEF (0 << 4)
161#define DRM_EDID_DIGITAL_DEPTH_6 (1 << 4)
162#define DRM_EDID_DIGITAL_DEPTH_8 (2 << 4)
163#define DRM_EDID_DIGITAL_DEPTH_10 (3 << 4)
164#define DRM_EDID_DIGITAL_DEPTH_12 (4 << 4)
165#define DRM_EDID_DIGITAL_DEPTH_14 (5 << 4)
166#define DRM_EDID_DIGITAL_DEPTH_16 (6 << 4)
167#define DRM_EDID_DIGITAL_DEPTH_RSVD (7 << 4)
168#define DRM_EDID_DIGITAL_TYPE_UNDEF (0)
169#define DRM_EDID_DIGITAL_TYPE_DVI (1)
170#define DRM_EDID_DIGITAL_TYPE_HDMI_A (2)
171#define DRM_EDID_DIGITAL_TYPE_HDMI_B (3)
172#define DRM_EDID_DIGITAL_TYPE_MDDI (4)
173#define DRM_EDID_DIGITAL_TYPE_DP (5)
159 174
160#define DRM_EDID_FEATURE_DEFAULT_GTF (1 << 0) 175#define DRM_EDID_FEATURE_DEFAULT_GTF (1 << 0)
161#define DRM_EDID_FEATURE_PREFERRED_TIMING (1 << 1) 176#define DRM_EDID_FEATURE_PREFERRED_TIMING (1 << 1)
162#define DRM_EDID_FEATURE_STANDARD_COLOR (1 << 2) 177#define DRM_EDID_FEATURE_STANDARD_COLOR (1 << 2)
178/* If analog */
163#define DRM_EDID_FEATURE_DISPLAY_TYPE (3 << 3) /* 00=mono, 01=rgb, 10=non-rgb, 11=unknown */ 179#define DRM_EDID_FEATURE_DISPLAY_TYPE (3 << 3) /* 00=mono, 01=rgb, 10=non-rgb, 11=unknown */
180/* If digital */
181#define DRM_EDID_FEATURE_COLOR_MASK (3 << 3)
182#define DRM_EDID_FEATURE_RGB (0 << 3)
183#define DRM_EDID_FEATURE_RGB_YCRCB444 (1 << 3)
184#define DRM_EDID_FEATURE_RGB_YCRCB422 (2 << 3)
185#define DRM_EDID_FEATURE_RGB_YCRCB (3 << 3) /* both 4:4:4 and 4:2:2 */
186
164#define DRM_EDID_FEATURE_PM_ACTIVE_OFF (1 << 5) 187#define DRM_EDID_FEATURE_PM_ACTIVE_OFF (1 << 5)
165#define DRM_EDID_FEATURE_PM_SUSPEND (1 << 6) 188#define DRM_EDID_FEATURE_PM_SUSPEND (1 << 6)
166#define DRM_EDID_FEATURE_PM_STANDBY (1 << 7) 189#define DRM_EDID_FEATURE_PM_STANDBY (1 << 7)
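DRM_EDID_INPUT_DIGITAL no longer implies the low bits are zero: in EDID 1.4 the input byte also encodes a color bit depth and a digital interface type, which the new defines name. A sketch of the decode these enable, with the depth mapping inferred from the define names:

	if (edid->input & DRM_EDID_INPUT_DIGITAL) {
		u8 depth = edid->input & DRM_EDID_DIGITAL_DEPTH_MASK;

		if (depth >= DRM_EDID_DIGITAL_DEPTH_6 &&
		    depth <= DRM_EDID_DIGITAL_DEPTH_16)
			info->bpc = 4 + 2 * (depth >> 4);  /* 1 -> 6 ... 6 -> 16 */
	}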
diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h
index c99c3d3e7811..6e3076ad646e 100644
--- a/include/drm/drm_fb_helper.h
+++ b/include/drm/drm_fb_helper.h
@@ -40,20 +40,6 @@ struct drm_fb_helper_crtc {
40 struct drm_display_mode *desired_mode; 40 struct drm_display_mode *desired_mode;
41}; 41};
42 42
43/* mode specified on the command line */
44struct drm_fb_helper_cmdline_mode {
45 bool specified;
46 bool refresh_specified;
47 bool bpp_specified;
48 int xres, yres;
49 int bpp;
50 int refresh;
51 bool rb;
52 bool interlace;
53 bool cvt;
54 bool margins;
55};
56
57struct drm_fb_helper_surface_size { 43struct drm_fb_helper_surface_size {
58 u32 fb_width; 44 u32 fb_width;
59 u32 fb_height; 45 u32 fb_height;
@@ -74,8 +60,8 @@ struct drm_fb_helper_funcs {
74}; 60};
75 61
76struct drm_fb_helper_connector { 62struct drm_fb_helper_connector {
77 struct drm_fb_helper_cmdline_mode cmdline_mode;
78 struct drm_connector *connector; 63 struct drm_connector *connector;
64 struct drm_cmdline_mode cmdline_mode;
79}; 65};
80 66
81struct drm_fb_helper { 67struct drm_fb_helper {
diff --git a/include/linux/capability.h b/include/linux/capability.h
index 4554db0cde86..c42112350003 100644
--- a/include/linux/capability.h
+++ b/include/linux/capability.h
@@ -417,7 +417,6 @@ extern const kernel_cap_t __cap_init_eff_set;
417 417
418# define CAP_EMPTY_SET ((kernel_cap_t){{ 0, 0 }}) 418# define CAP_EMPTY_SET ((kernel_cap_t){{ 0, 0 }})
419# define CAP_FULL_SET ((kernel_cap_t){{ ~0, ~0 }}) 419# define CAP_FULL_SET ((kernel_cap_t){{ ~0, ~0 }})
420# define CAP_INIT_EFF_SET ((kernel_cap_t){{ ~CAP_TO_MASK(CAP_SETPCAP), ~0 }})
421# define CAP_FS_SET ((kernel_cap_t){{ CAP_FS_MASK_B0 \ 420# define CAP_FS_SET ((kernel_cap_t){{ CAP_FS_MASK_B0 \
422 | CAP_TO_MASK(CAP_LINUX_IMMUTABLE), \ 421 | CAP_TO_MASK(CAP_LINUX_IMMUTABLE), \
423 CAP_FS_MASK_B1 } }) 422 CAP_FS_MASK_B1 } })
@@ -427,11 +426,7 @@ extern const kernel_cap_t __cap_init_eff_set;
427 426
428#endif /* _KERNEL_CAPABILITY_U32S != 2 */ 427#endif /* _KERNEL_CAPABILITY_U32S != 2 */
429 428
430#define CAP_INIT_INH_SET CAP_EMPTY_SET
431
432# define cap_clear(c) do { (c) = __cap_empty_set; } while (0) 429# define cap_clear(c) do { (c) = __cap_empty_set; } while (0)
433# define cap_set_full(c) do { (c) = __cap_full_set; } while (0)
434# define cap_set_init_eff(c) do { (c) = __cap_init_eff_set; } while (0)
435 430
436#define cap_raise(c, flag) ((c).cap[CAP_TO_INDEX(flag)] |= CAP_TO_MASK(flag)) 431#define cap_raise(c, flag) ((c).cap[CAP_TO_INDEX(flag)] |= CAP_TO_MASK(flag))
437#define cap_lower(c, flag) ((c).cap[CAP_TO_INDEX(flag)] &= ~CAP_TO_MASK(flag)) 432#define cap_lower(c, flag) ((c).cap[CAP_TO_INDEX(flag)] &= ~CAP_TO_MASK(flag))
diff --git a/include/linux/dlm_plock.h b/include/linux/dlm_plock.h
index 2dd21243104f..3b1cc1be419f 100644
--- a/include/linux/dlm_plock.h
+++ b/include/linux/dlm_plock.h
@@ -14,7 +14,7 @@
14#define DLM_PLOCK_MISC_NAME "dlm_plock" 14#define DLM_PLOCK_MISC_NAME "dlm_plock"
15 15
16#define DLM_PLOCK_VERSION_MAJOR 1 16#define DLM_PLOCK_VERSION_MAJOR 1
17#define DLM_PLOCK_VERSION_MINOR 1 17#define DLM_PLOCK_VERSION_MINOR 2
18#define DLM_PLOCK_VERSION_PATCH 0 18#define DLM_PLOCK_VERSION_PATCH 0
19 19
20enum { 20enum {
@@ -23,12 +23,14 @@ enum {
23 DLM_PLOCK_OP_GET, 23 DLM_PLOCK_OP_GET,
24}; 24};
25 25
26#define DLM_PLOCK_FL_CLOSE 1
27
26struct dlm_plock_info { 28struct dlm_plock_info {
27 __u32 version[3]; 29 __u32 version[3];
28 __u8 optype; 30 __u8 optype;
29 __u8 ex; 31 __u8 ex;
30 __u8 wait; 32 __u8 wait;
31 __u8 pad; 33 __u8 flags;
32 __u32 pid; 34 __u32 pid;
33 __s32 nodeid; 35 __s32 nodeid;
34 __s32 rv; 36 __s32 rv;
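The spare @pad byte becomes @flags, and DLM_PLOCK_FL_CLOSE lets the kernel tag unlock operations triggered by a final close so user space knows no reply is needed. A hedged sketch of the kernel side; the condition is hypothetical:

	/* illustrative: mark an unlock generated by the last close of the file */
	op->info.optype = DLM_PLOCK_OP_UNLOCK;
	if (unlocking_because_of_close)		/* hypothetical condition */
		op->info.flags |= DLM_PLOCK_FL_CLOSE;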
diff --git a/include/linux/gpio_keys.h b/include/linux/gpio_keys.h
index dd1a56fbe924..b5ca4b2c08ec 100644
--- a/include/linux/gpio_keys.h
+++ b/include/linux/gpio_keys.h
@@ -3,14 +3,15 @@
3 3
4struct gpio_keys_button { 4struct gpio_keys_button {
5 /* Configuration parameters */ 5 /* Configuration parameters */
6 int code; /* input event code (KEY_*, SW_*) */ 6 unsigned int code; /* input event code (KEY_*, SW_*) */
7 int gpio; 7 int gpio;
8 int active_low; 8 int active_low;
9 char *desc; 9 const char *desc;
10 int type; /* input event type (EV_KEY, EV_SW) */ 10 unsigned int type; /* input event type (EV_KEY, EV_SW, EV_ABS) */
11 int wakeup; /* configure the button as a wake-up source */ 11 int wakeup; /* configure the button as a wake-up source */
12 int debounce_interval; /* debounce ticks interval in msecs */ 12 int debounce_interval; /* debounce ticks interval in msecs */
13 bool can_disable; 13 bool can_disable;
14 int value; /* axis value for EV_ABS */
14}; 15};
15 16
16struct gpio_keys_platform_data { 17struct gpio_keys_platform_data {
@@ -21,6 +22,7 @@ struct gpio_keys_platform_data {
21 unsigned int rep:1; /* enable input subsystem auto repeat */ 22 unsigned int rep:1; /* enable input subsystem auto repeat */
22 int (*enable)(struct device *dev); 23 int (*enable)(struct device *dev);
23 void (*disable)(struct device *dev); 24 void (*disable)(struct device *dev);
25 const char *name; /* input device name */
24}; 26};
25 27
26#endif 28#endif
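gpio_keys buttons can now report EV_ABS events: @type selects the event type, @code the axis, and the new @value is the axis value sent while the GPIO is active. A board-code sketch; the GPIO number and axis are made up:

	#include <linux/gpio_keys.h>
	#include <linux/input.h>

	static struct gpio_keys_button board_buttons[] = {
		{
			.type	= EV_ABS,
			.code	= ABS_WHEEL,
			.value	= 1,	/* reported while the line is active */
			.gpio	= 42,	/* hypothetical */
			.desc	= "jog wheel",
			.debounce_interval = 10,
		},
	};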
diff --git a/include/linux/i2c/i2c-sh_mobile.h b/include/linux/i2c/i2c-sh_mobile.h
new file mode 100644
index 000000000000..beda7081aead
--- /dev/null
+++ b/include/linux/i2c/i2c-sh_mobile.h
@@ -0,0 +1,10 @@
1#ifndef __I2C_SH_MOBILE_H__
2#define __I2C_SH_MOBILE_H__
3
4#include <linux/platform_device.h>
5
6struct i2c_sh_mobile_platform_data {
7 unsigned long bus_speed;
8};
9
10#endif /* __I2C_SH_MOBILE_H__ */
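The new header only carries a bus-speed knob; a board file would pass it through platform data on the i2c-sh_mobile platform device. A sketch, with an illustrative speed:

	#include <linux/i2c/i2c-sh_mobile.h>

	static struct i2c_sh_mobile_platform_data i2c0_pdata = {
		.bus_speed = 400000,	/* request 400 kHz instead of the default */
	};

	static struct platform_device i2c0_device = {
		.name	= "i2c-sh_mobile",
		.id	= 0,
		.dev	= {
			.platform_data = &i2c0_pdata,
		},
	};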
diff --git a/include/linux/i2c/mpr121_touchkey.h b/include/linux/i2c/mpr121_touchkey.h
new file mode 100644
index 000000000000..f0bcc38bbb97
--- /dev/null
+++ b/include/linux/i2c/mpr121_touchkey.h
@@ -0,0 +1,20 @@
1/* Header file for Freescale MPR121 Capacitive Touch Sensor */
2
3#ifndef _MPR121_TOUCHKEY_H
4#define _MPR121_TOUCHKEY_H
5
6/**
7 * struct mpr121_platform_data - platform data for mpr121 sensor
8 * @keymap: pointer to array of KEY_* values representing keymap
9 * @keymap_size: size of the keymap
10 * @wakeup: configure the button as a wake-up source
11 * @vdd_uv: VDD voltage in uV
12 */
13struct mpr121_platform_data {
14 const unsigned short *keymap;
15 unsigned int keymap_size;
16 bool wakeup;
17 int vdd_uv;
18};
19
20#endif /* _MPR121_TOUCHKEY_H */
diff --git a/include/linux/i2c/tsc2007.h b/include/linux/i2c/tsc2007.h
index c6361fbb7bf9..591427a63b06 100644
--- a/include/linux/i2c/tsc2007.h
+++ b/include/linux/i2c/tsc2007.h
@@ -6,6 +6,13 @@
6struct tsc2007_platform_data { 6struct tsc2007_platform_data {
7 u16 model; /* 2007. */ 7 u16 model; /* 2007. */
8 u16 x_plate_ohms; 8 u16 x_plate_ohms;
9 u16 max_rt; /* max. resistance above which samples are ignored */
10 unsigned long poll_delay; /* delay (in ms) after pen-down event
11 before polling starts */
12 unsigned long poll_period; /* time (in ms) between samples */
13 int fuzzx; /* fuzz factor for X, Y and pressure axes */
14 int fuzzy;
15 int fuzzz;
9 16
10 int (*get_pendown_state)(void); 17 int (*get_pendown_state)(void);
11 void (*clear_penirq)(void); /* If needed, clear 2nd level 18 void (*clear_penirq)(void); /* If needed, clear 2nd level
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 689496bb6654..bafc58c00fc3 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -83,13 +83,6 @@ extern struct group_info init_groups;
83#define INIT_IDS 83#define INIT_IDS
84#endif 84#endif
85 85
86/*
87 * Because of the reduced scope of CAP_SETPCAP when filesystem
88 * capabilities are in effect, it is safe to allow CAP_SETPCAP to
89 * be available in the default configuration.
90 */
91# define CAP_INIT_BSET CAP_FULL_SET
92
93#ifdef CONFIG_RCU_BOOST 86#ifdef CONFIG_RCU_BOOST
94#define INIT_TASK_RCU_BOOST() \ 87#define INIT_TASK_RCU_BOOST() \
95 .rcu_boost_mutex = NULL, 88 .rcu_boost_mutex = NULL,
diff --git a/include/linux/input/ad714x.h b/include/linux/input/ad714x.h
index 0cbe5e81482e..d388d857bf14 100644
--- a/include/linux/input/ad714x.h
+++ b/include/linux/input/ad714x.h
@@ -6,7 +6,7 @@
6 * The platform_data for the device's "struct device" holds this 6 * The platform_data for the device's "struct device" holds this
7 * information. 7 * information.
8 * 8 *
9 * Copyright 2009 Analog Devices Inc. 9 * Copyright 2009-2011 Analog Devices Inc.
10 * 10 *
11 * Licensed under the GPL-2 or later. 11 * Licensed under the GPL-2 or later.
12 */ 12 */
@@ -58,6 +58,7 @@ struct ad714x_platform_data {
58 struct ad714x_button_plat *button; 58 struct ad714x_button_plat *button;
59 unsigned short stage_cfg_reg[STAGE_NUM][STAGE_CFGREG_NUM]; 59 unsigned short stage_cfg_reg[STAGE_NUM][STAGE_CFGREG_NUM];
60 unsigned short sys_cfg_reg[SYS_CFGREG_NUM]; 60 unsigned short sys_cfg_reg[SYS_CFGREG_NUM];
61 unsigned long irqflags;
61}; 62};
62 63
63#endif 64#endif
diff --git a/include/linux/input/adp5589.h b/include/linux/input/adp5589.h
new file mode 100644
index 000000000000..ef792ecfaabf
--- /dev/null
+++ b/include/linux/input/adp5589.h
@@ -0,0 +1,213 @@
1/*
2 * Analog Devices ADP5589 I/O Expander and QWERTY Keypad Controller
3 *
4 * Copyright 2010-2011 Analog Devices Inc.
5 *
6 * Licensed under the GPL-2.
7 */
8
9#ifndef _ADP5589_H
10#define _ADP5589_H
11
12#define ADP5589_ID 0x00
13#define ADP5589_INT_STATUS 0x01
14#define ADP5589_STATUS 0x02
15#define ADP5589_FIFO_1 0x03
16#define ADP5589_FIFO_2 0x04
17#define ADP5589_FIFO_3 0x05
18#define ADP5589_FIFO_4 0x06
19#define ADP5589_FIFO_5 0x07
20#define ADP5589_FIFO_6 0x08
21#define ADP5589_FIFO_7 0x09
22#define ADP5589_FIFO_8 0x0A
23#define ADP5589_FIFO_9 0x0B
24#define ADP5589_FIFO_10 0x0C
25#define ADP5589_FIFO_11 0x0D
26#define ADP5589_FIFO_12 0x0E
27#define ADP5589_FIFO_13 0x0F
28#define ADP5589_FIFO_14 0x10
29#define ADP5589_FIFO_15 0x11
30#define ADP5589_FIFO_16 0x12
31#define ADP5589_GPI_INT_STAT_A 0x13
32#define ADP5589_GPI_INT_STAT_B 0x14
33#define ADP5589_GPI_INT_STAT_C 0x15
34#define ADP5589_GPI_STATUS_A 0x16
35#define ADP5589_GPI_STATUS_B 0x17
36#define ADP5589_GPI_STATUS_C 0x18
37#define ADP5589_RPULL_CONFIG_A 0x19
38#define ADP5589_RPULL_CONFIG_B 0x1A
39#define ADP5589_RPULL_CONFIG_C 0x1B
40#define ADP5589_RPULL_CONFIG_D 0x1C
41#define ADP5589_RPULL_CONFIG_E 0x1D
42#define ADP5589_GPI_INT_LEVEL_A 0x1E
43#define ADP5589_GPI_INT_LEVEL_B 0x1F
44#define ADP5589_GPI_INT_LEVEL_C 0x20
45#define ADP5589_GPI_EVENT_EN_A 0x21
46#define ADP5589_GPI_EVENT_EN_B 0x22
47#define ADP5589_GPI_EVENT_EN_C 0x23
48#define ADP5589_GPI_INTERRUPT_EN_A 0x24
49#define ADP5589_GPI_INTERRUPT_EN_B 0x25
50#define ADP5589_GPI_INTERRUPT_EN_C 0x26
51#define ADP5589_DEBOUNCE_DIS_A 0x27
52#define ADP5589_DEBOUNCE_DIS_B 0x28
53#define ADP5589_DEBOUNCE_DIS_C 0x29
54#define ADP5589_GPO_DATA_OUT_A 0x2A
55#define ADP5589_GPO_DATA_OUT_B 0x2B
56#define ADP5589_GPO_DATA_OUT_C 0x2C
57#define ADP5589_GPO_OUT_MODE_A 0x2D
58#define ADP5589_GPO_OUT_MODE_B 0x2E
59#define ADP5589_GPO_OUT_MODE_C 0x2F
60#define ADP5589_GPIO_DIRECTION_A 0x30
61#define ADP5589_GPIO_DIRECTION_B 0x31
62#define ADP5589_GPIO_DIRECTION_C 0x32
63#define ADP5589_UNLOCK1 0x33
64#define ADP5589_UNLOCK2 0x34
65#define ADP5589_EXT_LOCK_EVENT 0x35
66#define ADP5589_UNLOCK_TIMERS 0x36
67#define ADP5589_LOCK_CFG 0x37
68#define ADP5589_RESET1_EVENT_A 0x38
69#define ADP5589_RESET1_EVENT_B 0x39
70#define ADP5589_RESET1_EVENT_C 0x3A
71#define ADP5589_RESET2_EVENT_A 0x3B
72#define ADP5589_RESET2_EVENT_B 0x3C
73#define ADP5589_RESET_CFG 0x3D
74#define ADP5589_PWM_OFFT_LOW 0x3E
75#define ADP5589_PWM_OFFT_HIGH 0x3F
76#define ADP5589_PWM_ONT_LOW 0x40
77#define ADP5589_PWM_ONT_HIGH 0x41
78#define ADP5589_PWM_CFG 0x42
79#define ADP5589_CLOCK_DIV_CFG 0x43
80#define ADP5589_LOGIC_1_CFG 0x44
81#define ADP5589_LOGIC_2_CFG 0x45
82#define ADP5589_LOGIC_FF_CFG 0x46
83#define ADP5589_LOGIC_INT_EVENT_EN 0x47
84#define ADP5589_POLL_PTIME_CFG 0x48
85#define ADP5589_PIN_CONFIG_A 0x49
86#define ADP5589_PIN_CONFIG_B 0x4A
87#define ADP5589_PIN_CONFIG_C 0x4B
88#define ADP5589_PIN_CONFIG_D 0x4C
89#define ADP5589_GENERAL_CFG 0x4D
90#define ADP5589_INT_EN 0x4E
91
92#define ADP5589_DEVICE_ID_MASK 0xF
93
94/* Put one of these structures in i2c_board_info platform_data */
95
96#define ADP5589_KEYMAPSIZE 88
97
98#define ADP5589_GPI_PIN_ROW0 97
99#define ADP5589_GPI_PIN_ROW1 98
100#define ADP5589_GPI_PIN_ROW2 99
101#define ADP5589_GPI_PIN_ROW3 100
102#define ADP5589_GPI_PIN_ROW4 101
103#define ADP5589_GPI_PIN_ROW5 102
104#define ADP5589_GPI_PIN_ROW6 103
105#define ADP5589_GPI_PIN_ROW7 104
106#define ADP5589_GPI_PIN_COL0 105
107#define ADP5589_GPI_PIN_COL1 106
108#define ADP5589_GPI_PIN_COL2 107
109#define ADP5589_GPI_PIN_COL3 108
110#define ADP5589_GPI_PIN_COL4 109
111#define ADP5589_GPI_PIN_COL5 110
112#define ADP5589_GPI_PIN_COL6 111
113#define ADP5589_GPI_PIN_COL7 112
114#define ADP5589_GPI_PIN_COL8 113
115#define ADP5589_GPI_PIN_COL9 114
116#define ADP5589_GPI_PIN_COL10 115
117#define GPI_LOGIC1 116
118#define GPI_LOGIC2 117
119
120#define ADP5589_GPI_PIN_ROW_BASE ADP5589_GPI_PIN_ROW0
121#define ADP5589_GPI_PIN_ROW_END ADP5589_GPI_PIN_ROW7
122#define ADP5589_GPI_PIN_COL_BASE ADP5589_GPI_PIN_COL0
123#define ADP5589_GPI_PIN_COL_END ADP5589_GPI_PIN_COL10
124
125#define ADP5589_GPI_PIN_BASE ADP5589_GPI_PIN_ROW_BASE
126#define ADP5589_GPI_PIN_END ADP5589_GPI_PIN_COL_END
127
128#define ADP5589_GPIMAPSIZE_MAX (ADP5589_GPI_PIN_END - ADP5589_GPI_PIN_BASE + 1)
129
130struct adp5589_gpi_map {
131 unsigned short pin;
132 unsigned short sw_evt;
133};
134
135/* scan_cycle_time */
136#define ADP5589_SCAN_CYCLE_10ms 0
137#define ADP5589_SCAN_CYCLE_20ms 1
138#define ADP5589_SCAN_CYCLE_30ms 2
139#define ADP5589_SCAN_CYCLE_40ms 3
140
141/* RESET_CFG */
142#define RESET_PULSE_WIDTH_500us 0
143#define RESET_PULSE_WIDTH_1ms 1
144#define RESET_PULSE_WIDTH_2ms 2
145#define RESET_PULSE_WIDTH_10ms 3
146
147#define RESET_TRIG_TIME_0ms (0 << 2)
148#define RESET_TRIG_TIME_1000ms (1 << 2)
149#define RESET_TRIG_TIME_1500ms (2 << 2)
150#define RESET_TRIG_TIME_2000ms (3 << 2)
151#define RESET_TRIG_TIME_2500ms (4 << 2)
152#define RESET_TRIG_TIME_3000ms (5 << 2)
153#define RESET_TRIG_TIME_3500ms (6 << 2)
154#define RESET_TRIG_TIME_4000ms (7 << 2)
155
156#define RESET_PASSTHRU_EN (1 << 5)
157#define RESET1_POL_HIGH (1 << 6)
158#define RESET1_POL_LOW (0 << 6)
159#define RESET2_POL_HIGH (1 << 7)
160#define RESET2_POL_LOW (0 << 7)
161
162/* Mask Bits:
163 * C C C C C C C C C C C | R R R R R R R R
164 * 1 9 8 7 6 5 4 3 2 1 0 | 7 6 5 4 3 2 1 0
165 * 0
166 * ---------------- BIT ------------------
167 * 1 1 1 1 1 1 1 1 1 0 0 | 0 0 0 0 0 0 0 0
168 * 8 7 6 5 4 3 2 1 0 9 8 | 7 6 5 4 3 2 1 0
169 */
170
171#define ADP_ROW(x) (1 << (x))
172#define ADP_COL(x) (1 << (x + 8))
173
174struct adp5589_kpad_platform_data {
175 unsigned keypad_en_mask; /* Keypad (Rows/Columns) enable mask */
176 const unsigned short *keymap; /* Pointer to keymap */
177 unsigned short keymapsize; /* Keymap size */
178 bool repeat; /* Enable key repeat */
179 bool en_keylock; /* Enable key lock feature */
180 unsigned char unlock_key1; /* Unlock Key 1 */
181 unsigned char unlock_key2; /* Unlock Key 2 */
182 unsigned char unlock_timer; /* Time in seconds [0..7] between the two unlock keys 0=disable */
183 unsigned char scan_cycle_time; /* Time between consecutive scan cycles */
184 unsigned char reset_cfg; /* Reset config */
185 unsigned short reset1_key_1; /* Reset Key 1 */
186 unsigned short reset1_key_2; /* Reset Key 2 */
187 unsigned short reset1_key_3; /* Reset Key 3 */
188 unsigned short reset2_key_1; /* Reset Key 1 */
189 unsigned short reset2_key_2; /* Reset Key 2 */
190 unsigned debounce_dis_mask; /* Disable debounce mask */
191 unsigned pull_dis_mask; /* Disable all pull resistors mask */
192 unsigned pullup_en_100k; /* Pull-Up 100k Enable Mask */
193 unsigned pullup_en_300k; /* Pull-Up 300k Enable Mask */
194 unsigned pulldown_en_300k; /* Pull-Down 300k Enable Mask */
195 const struct adp5589_gpi_map *gpimap;
196 unsigned short gpimapsize;
197 const struct adp5589_gpio_platform_data *gpio_data;
198};
199
200struct i2c_client; /* forward declaration */
201
202struct adp5589_gpio_platform_data {
203 int gpio_start; /* GPIO Chip base # */
204 int (*setup)(struct i2c_client *client,
205 int gpio, unsigned ngpio,
206 void *context);
207 int (*teardown)(struct i2c_client *client,
208 int gpio, unsigned ngpio,
209 void *context);
210 void *context;
211};
212
213#endif
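The ADP_ROW()/ADP_COL() helpers build the masks drawn in the comment above: rows occupy bits 0-7 and columns bits 8-18, so column n maps to bit n + 8. A sketch of enabling a 4x3 matrix; the keymap table is hypothetical:

	static const unsigned short adp5589_keymap[ADP5589_KEYMAPSIZE] = {
		/* hypothetical 88-entry KEY_* table, zeros mean unused */
	};

	static struct adp5589_kpad_platform_data kpad_pdata = {
		/* rows 0-3 (mask 0x0000000f) and columns 0-2 (mask 0x00000700) */
		.keypad_en_mask	= ADP_ROW(0) | ADP_ROW(1) | ADP_ROW(2) | ADP_ROW(3) |
				  ADP_COL(0) | ADP_COL(1) | ADP_COL(2),
		.keymap		= adp5589_keymap,
		.keymapsize	= ADP5589_KEYMAPSIZE,
	};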
diff --git a/include/linux/key.h b/include/linux/key.h
index b2bb01719561..ef19b99aff98 100644
--- a/include/linux/key.h
+++ b/include/linux/key.h
@@ -276,6 +276,19 @@ static inline key_serial_t key_serial(struct key *key)
276 return key ? key->serial : 0; 276 return key ? key->serial : 0;
277} 277}
278 278
279/**
280 * key_is_instantiated - Determine if a key has been positively instantiated
281 * @key: The key to check.
282 *
283 * Return true if the specified key has been positively instantiated, false
284 * otherwise.
285 */
286static inline bool key_is_instantiated(const struct key *key)
287{
288 return test_bit(KEY_FLAG_INSTANTIATED, &key->flags) &&
289 !test_bit(KEY_FLAG_NEGATIVE, &key->flags);
290}
291
279#define rcu_dereference_key(KEY) \ 292#define rcu_dereference_key(KEY) \
280 (rcu_dereference_protected((KEY)->payload.rcudata, \ 293 (rcu_dereference_protected((KEY)->payload.rcudata, \
281 rwsem_is_locked(&((struct key *)(KEY))->sem))) 294 rwsem_is_locked(&((struct key *)(KEY))->sem)))
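key_is_instantiated() distinguishes a positively instantiated key from a negatively instantiated (rejected) one, since both carry KEY_FLAG_INSTANTIATED once construction finishes. A sketch of the kind of ->describe() user this enables; the function is illustrative:

	static void example_describe(const struct key *key, struct seq_file *m)
	{
		seq_puts(m, key->description);
		/* don't print payload details for negative keys */
		if (key_is_instantiated(key))
			seq_printf(m, ": %u bytes", key->datalen);
	}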
diff --git a/include/linux/kmod.h b/include/linux/kmod.h
index 310231823852..d4a5c84c503d 100644
--- a/include/linux/kmod.h
+++ b/include/linux/kmod.h
@@ -24,6 +24,7 @@
24#include <linux/errno.h> 24#include <linux/errno.h>
25#include <linux/compiler.h> 25#include <linux/compiler.h>
26#include <linux/workqueue.h> 26#include <linux/workqueue.h>
27#include <linux/sysctl.h>
27 28
28#define KMOD_PATH_LEN 256 29#define KMOD_PATH_LEN 256
29 30
@@ -109,6 +110,8 @@ call_usermodehelper(char *path, char **argv, char **envp, enum umh_wait wait)
109 NULL, NULL, NULL); 110 NULL, NULL, NULL);
110} 111}
111 112
113extern struct ctl_table usermodehelper_table[];
114
112extern void usermodehelper_init(void); 115extern void usermodehelper_init(void);
113 116
114extern int usermodehelper_disable(void); 117extern int usermodehelper_disable(void);
diff --git a/include/linux/linkage.h b/include/linux/linkage.h
index 7135ebc8428c..3f46aedea42f 100644
--- a/include/linux/linkage.h
+++ b/include/linux/linkage.h
@@ -14,10 +14,6 @@
14#define asmlinkage CPP_ASMLINKAGE 14#define asmlinkage CPP_ASMLINKAGE
15#endif 15#endif
16 16
17#ifndef asmregparm
18# define asmregparm
19#endif
20
21#define __page_aligned_data __section(.data..page_aligned) __aligned(PAGE_SIZE) 17#define __page_aligned_data __section(.data..page_aligned) __aligned(PAGE_SIZE)
22#define __page_aligned_bss __section(.bss..page_aligned) __aligned(PAGE_SIZE) 18#define __page_aligned_bss __section(.bss..page_aligned) __aligned(PAGE_SIZE)
23 19
diff --git a/include/linux/lsm_audit.h b/include/linux/lsm_audit.h
index 112a55033352..88e78dedc2e8 100644
--- a/include/linux/lsm_audit.h
+++ b/include/linux/lsm_audit.h
@@ -27,7 +27,7 @@
27/* Auxiliary data to use in generating the audit record. */ 27/* Auxiliary data to use in generating the audit record. */
28struct common_audit_data { 28struct common_audit_data {
29 char type; 29 char type;
30#define LSM_AUDIT_DATA_FS 1 30#define LSM_AUDIT_DATA_PATH 1
31#define LSM_AUDIT_DATA_NET 2 31#define LSM_AUDIT_DATA_NET 2
32#define LSM_AUDIT_DATA_CAP 3 32#define LSM_AUDIT_DATA_CAP 3
33#define LSM_AUDIT_DATA_IPC 4 33#define LSM_AUDIT_DATA_IPC 4
@@ -35,12 +35,13 @@ struct common_audit_data {
35#define LSM_AUDIT_DATA_KEY 6 35#define LSM_AUDIT_DATA_KEY 6
36#define LSM_AUDIT_DATA_NONE 7 36#define LSM_AUDIT_DATA_NONE 7
37#define LSM_AUDIT_DATA_KMOD 8 37#define LSM_AUDIT_DATA_KMOD 8
38#define LSM_AUDIT_DATA_INODE 9
39#define LSM_AUDIT_DATA_DENTRY 10
38 struct task_struct *tsk; 40 struct task_struct *tsk;
39 union { 41 union {
40 struct { 42 struct path path;
41 struct path path; 43 struct dentry *dentry;
42 struct inode *inode; 44 struct inode *inode;
43 } fs;
44 struct { 45 struct {
45 int netif; 46 int netif;
46 struct sock *sk; 47 struct sock *sk;
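LSM_AUDIT_DATA_FS splits into PATH, INODE and DENTRY variants, and the members flatten out of the old fs struct, so a caller sets exactly the object it holds. A hedged sketch, assuming the union is still named u and the usual COMMON_AUDIT_DATA_INIT() initializer:

	struct common_audit_data ad;

	COMMON_AUDIT_DATA_INIT(&ad, DENTRY);	/* was: ..., FS */
	ad.u.dentry = dentry;			/* was: ad.u.fs.path / .inode */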
diff --git a/include/linux/mtd/ubi.h b/include/linux/mtd/ubi.h
index 84854edf4436..15da0e99f48a 100644
--- a/include/linux/mtd/ubi.h
+++ b/include/linux/mtd/ubi.h
@@ -21,7 +21,7 @@
21#ifndef __LINUX_UBI_H__ 21#ifndef __LINUX_UBI_H__
22#define __LINUX_UBI_H__ 22#define __LINUX_UBI_H__
23 23
24#include <asm/ioctl.h> 24#include <linux/ioctl.h>
25#include <linux/types.h> 25#include <linux/types.h>
26#include <mtd/ubi-user.h> 26#include <mtd/ubi-user.h>
27 27
@@ -87,7 +87,7 @@ enum {
87 * physical eraseblock size and on how much bytes UBI headers consume. But 87 * physical eraseblock size and on how much bytes UBI headers consume. But
88 * because of the volume alignment (@alignment), the usable size of logical 88 * because of the volume alignment (@alignment), the usable size of logical
89 * eraseblocks of a volume may be less. The following equation is true: 89 * eraseblocks of a volume may be less. The following equation is true:
90 * @usable_leb_size = LEB size - (LEB size mod @alignment), 90 * @usable_leb_size = LEB size - (LEB size mod @alignment),
91 * where LEB size is the logical eraseblock size defined by the UBI device. 91 * where LEB size is the logical eraseblock size defined by the UBI device.
92 * 92 *
93 * The alignment is multiple to the minimal flash input/output unit size or %1 93 * The alignment is multiple to the minimal flash input/output unit size or %1
diff --git a/include/linux/mxm-wmi.h b/include/linux/mxm-wmi.h
new file mode 100644
index 000000000000..617a2950523c
--- /dev/null
+++ b/include/linux/mxm-wmi.h
@@ -0,0 +1,33 @@
1/*
2 * MXM WMI driver
3 *
4 * Copyright(C) 2010 Red Hat.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21#ifndef MXM_WMI_H
22#define MXM_WMI_H
23
24/* discrete adapters */
25#define MXM_MXDS_ADAPTER_0 0x0
26#define MXM_MXDS_ADAPTER_1 0x0
27/* integrated adapter */
28#define MXM_MXDS_ADAPTER_IGD 0x10
29int mxm_wmi_call_mxds(int adapter);
30int mxm_wmi_call_mxmx(int adapter);
31bool mxm_wmi_supported(void);
32
33#endif
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 811183de1ef5..79a6700b7162 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -308,7 +308,7 @@ static inline void SetPageUptodate(struct page *page)
308{ 308{
309#ifdef CONFIG_S390 309#ifdef CONFIG_S390
310 if (!test_and_set_bit(PG_uptodate, &page->flags)) 310 if (!test_and_set_bit(PG_uptodate, &page->flags))
311 page_clear_dirty(page, 0); 311 page_set_storage_key(page_to_pfn(page), PAGE_DEFAULT_KEY, 0);
312#else 312#else
313 /* 313 /*
314 * Memory barrier must be issued before setting the PG_uptodate bit, 314 * Memory barrier must be issued before setting the PG_uptodate bit,
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 4604d1d5514d..c446b5ca2d38 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -941,8 +941,11 @@ int pci_cfg_space_size_ext(struct pci_dev *dev);
941int pci_cfg_space_size(struct pci_dev *dev); 941int pci_cfg_space_size(struct pci_dev *dev);
942unsigned char pci_bus_max_busnr(struct pci_bus *bus); 942unsigned char pci_bus_max_busnr(struct pci_bus *bus);
943 943
944#define PCI_VGA_STATE_CHANGE_BRIDGE (1 << 0)
945#define PCI_VGA_STATE_CHANGE_DECODES (1 << 1)
946
944int pci_set_vga_state(struct pci_dev *pdev, bool decode, 947int pci_set_vga_state(struct pci_dev *pdev, bool decode,
945 unsigned int command_bits, bool change_bridge); 948 unsigned int command_bits, u32 flags);
946/* kmem_cache style wrapper around pci_alloc_consistent() */ 949/* kmem_cache style wrapper around pci_alloc_consistent() */
947 950
948#include <linux/pci-dma.h> 951#include <linux/pci-dma.h>
@@ -1087,7 +1090,7 @@ static inline int pci_proc_domain(struct pci_bus *bus)
1087 1090
1088/* some architectures require additional setup to direct VGA traffic */ 1091/* some architectures require additional setup to direct VGA traffic */
1089typedef int (*arch_set_vga_state_t)(struct pci_dev *pdev, bool decode, 1092typedef int (*arch_set_vga_state_t)(struct pci_dev *pdev, bool decode,
1090 unsigned int command_bits, bool change_bridge); 1093 unsigned int command_bits, u32 flags);
1091extern void pci_register_set_vga_state(arch_set_vga_state_t func); 1094extern void pci_register_set_vga_state(arch_set_vga_state_t func);
1092 1095
1093#else /* CONFIG_PCI is not enabled */ 1096#else /* CONFIG_PCI is not enabled */
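The bool change_bridge argument of pci_set_vga_state() widens into a flags word, with the old behaviour split into two independently selectable bits. A plausible mapping of an old call, hedged since the callers are not in this hunk:

	/* old: pci_set_vga_state(pdev, true, cmd_bits, true); */
	pci_set_vga_state(pdev, true, cmd_bits,
			  PCI_VGA_STATE_CHANGE_BRIDGE | PCI_VGA_STATE_CHANGE_DECODES);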
diff --git a/include/linux/rotary_encoder.h b/include/linux/rotary_encoder.h
index 215278b8df2a..3f594dce5716 100644
--- a/include/linux/rotary_encoder.h
+++ b/include/linux/rotary_encoder.h
@@ -10,6 +10,7 @@ struct rotary_encoder_platform_data {
10 unsigned int inverted_b; 10 unsigned int inverted_b;
11 bool relative_axis; 11 bool relative_axis;
12 bool rollover; 12 bool rollover;
13 bool half_period;
13}; 14};
14 15
15#endif /* __ROTARY_ENCODER_H__ */ 16#endif /* __ROTARY_ENCODER_H__ */
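@half_period asks the driver to report one step per half quadrature period instead of per full period, doubling resolution for encoders with detents on both stable states. A board-code sketch; GPIO numbers are hypothetical:

	#include <linux/rotary_encoder.h>
	#include <linux/input.h>

	static struct rotary_encoder_platform_data enc_pdata = {
		.steps		= 48,		/* steps per full revolution */
		.axis		= REL_X,
		.relative_axis	= true,
		.gpio_a		= 19,		/* hypothetical */
		.gpio_b		= 20,		/* hypothetical */
		.half_period	= true,		/* report on both half periods */
	};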
diff --git a/include/linux/spi/ads7846.h b/include/linux/spi/ads7846.h
index 92bd0839d5b4..c64de9dd7631 100644
--- a/include/linux/spi/ads7846.h
+++ b/include/linux/spi/ads7846.h
@@ -14,7 +14,8 @@ enum ads7846_filter {
14struct ads7846_platform_data { 14struct ads7846_platform_data {
15 u16 model; /* 7843, 7845, 7846, 7873. */ 15 u16 model; /* 7843, 7845, 7846, 7873. */
16 u16 vref_delay_usecs; /* 0 for external vref; etc */ 16 u16 vref_delay_usecs; /* 0 for external vref; etc */
17 u16 vref_mv; /* external vref value, milliVolts */ 17 u16 vref_mv; /* external vref value, milliVolts
18 * ads7846: if 0, use internal vref */
18 bool keep_vref_on; /* set to keep vref on for differential 19 bool keep_vref_on; /* set to keep vref on for differential
19 * measurements as well */ 20 * measurements as well */
20 bool swap_xy; /* swap x and y axes */ 21 bool swap_xy; /* swap x and y axes */
diff --git a/include/mtd/ubi-user.h b/include/mtd/ubi-user.h
index c0d47ad4b103..3c4109777aff 100644
--- a/include/mtd/ubi-user.h
+++ b/include/mtd/ubi-user.h
@@ -131,7 +131,7 @@
131 * ~~~~~~~~~~~~~~~~~~~~~~~~~ 131 * ~~~~~~~~~~~~~~~~~~~~~~~~~
132 * 132 *
133 * To set an UBI volume property the %UBI_IOCSETPROP ioctl command should be 133 * To set an UBI volume property the %UBI_IOCSETPROP ioctl command should be
134 * used. A pointer to a &struct ubi_set_prop_req object is expected to be 134 * used. A pointer to a &struct ubi_set_vol_prop_req object is expected to be
135 * passed. The object describes which property should be set, and to which value 135 * passed. The object describes which property should be set, and to which value
136 * it should be set. 136 * it should be set.
137 */ 137 */
@@ -186,7 +186,8 @@
186/* Check if LEB is mapped command */ 186/* Check if LEB is mapped command */
187#define UBI_IOCEBISMAP _IOR(UBI_VOL_IOC_MAGIC, 5, __s32) 187#define UBI_IOCEBISMAP _IOR(UBI_VOL_IOC_MAGIC, 5, __s32)
188/* Set an UBI volume property */ 188/* Set an UBI volume property */
189#define UBI_IOCSETPROP _IOW(UBI_VOL_IOC_MAGIC, 6, struct ubi_set_prop_req) 189#define UBI_IOCSETVOLPROP _IOW(UBI_VOL_IOC_MAGIC, 6, \
190 struct ubi_set_vol_prop_req)
190 191
191/* Maximum MTD device name length supported by UBI */ 192/* Maximum MTD device name length supported by UBI */
192#define MAX_UBI_MTD_NAME_LEN 127 193#define MAX_UBI_MTD_NAME_LEN 127
@@ -223,13 +224,14 @@ enum {
223}; 224};
224 225
225/* 226/*
226 * UBI set property ioctl constants 227 * UBI set volume property ioctl constants.
227 * 228 *
228 * @UBI_PROP_DIRECT_WRITE: allow / disallow user to directly write and 229 * @UBI_VOL_PROP_DIRECT_WRITE: allow (any non-zero value) or disallow (value 0)
229 * erase individual eraseblocks on dynamic volumes 230 * user to directly write and erase individual
231 * eraseblocks on dynamic volumes
230 */ 232 */
231enum { 233enum {
232 UBI_PROP_DIRECT_WRITE = 1, 234 UBI_VOL_PROP_DIRECT_WRITE = 1,
233}; 235};
234 236
235/** 237/**
@@ -308,7 +310,7 @@ struct ubi_mkvol_req {
308 __s16 name_len; 310 __s16 name_len;
309 __s8 padding2[4]; 311 __s8 padding2[4];
310 char name[UBI_MAX_VOLUME_NAME + 1]; 312 char name[UBI_MAX_VOLUME_NAME + 1];
311} __attribute__ ((packed)); 313} __packed;
312 314
313/** 315/**
314 * struct ubi_rsvol_req - a data structure used in volume re-size requests. 316 * struct ubi_rsvol_req - a data structure used in volume re-size requests.
@@ -324,7 +326,7 @@ struct ubi_mkvol_req {
324struct ubi_rsvol_req { 326struct ubi_rsvol_req {
325 __s64 bytes; 327 __s64 bytes;
326 __s32 vol_id; 328 __s32 vol_id;
327} __attribute__ ((packed)); 329} __packed;
328 330
329/** 331/**
330 * struct ubi_rnvol_req - volumes re-name request. 332 * struct ubi_rnvol_req - volumes re-name request.
@@ -366,7 +368,7 @@ struct ubi_rnvol_req {
366 __s8 padding2[2]; 368 __s8 padding2[2];
367 char name[UBI_MAX_VOLUME_NAME + 1]; 369 char name[UBI_MAX_VOLUME_NAME + 1];
368 } ents[UBI_MAX_RNVOL]; 370 } ents[UBI_MAX_RNVOL];
369} __attribute__ ((packed)); 371} __packed;
370 372
371/** 373/**
372 * struct ubi_leb_change_req - a data structure used in atomic LEB change 374 * struct ubi_leb_change_req - a data structure used in atomic LEB change
@@ -381,7 +383,7 @@ struct ubi_leb_change_req {
381 __s32 bytes; 383 __s32 bytes;
382 __s8 dtype; 384 __s8 dtype;
383 __s8 padding[7]; 385 __s8 padding[7];
384} __attribute__ ((packed)); 386} __packed;
385 387
386/** 388/**
387 * struct ubi_map_req - a data structure used in map LEB requests. 389 * struct ubi_map_req - a data structure used in map LEB requests.
@@ -393,20 +395,20 @@ struct ubi_map_req {
393 __s32 lnum; 395 __s32 lnum;
394 __s8 dtype; 396 __s8 dtype;
395 __s8 padding[3]; 397 __s8 padding[3];
396} __attribute__ ((packed)); 398} __packed;
397 399
398 400
399/** 401/**
400 * struct ubi_set_prop_req - a data structure used to set an ubi volume 402 * struct ubi_set_vol_prop_req - a data structure used to set an UBI volume
401 * property. 403 * property.
402 * @property: property to set (%UBI_PROP_DIRECT_WRITE) 404 * @property: property to set (%UBI_VOL_PROP_DIRECT_WRITE)
403 * @padding: reserved for future, not used, has to be zeroed 405 * @padding: reserved for future, not used, has to be zeroed
404 * @value: value to set 406 * @value: value to set
405 */ 407 */
406struct ubi_set_prop_req { 408struct ubi_set_vol_prop_req {
407 __u8 property; 409 __u8 property;
408 __u8 padding[7]; 410 __u8 padding[7];
409 __u64 value; 411 __u64 value;
410} __attribute__ ((packed)); 412} __packed;
411 413
412#endif /* __UBI_USER_H__ */ 414#endif /* __UBI_USER_H__ */
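After the rename, a user-space tool enabling direct writes on a dynamic volume would issue the ioctl like this; a sketch, assuming vol_fd is an open /dev/ubiX_Y descriptor:

	#include <sys/ioctl.h>
	#include <stdio.h>
	#include <mtd/ubi-user.h>

	struct ubi_set_vol_prop_req req = {
		.property = UBI_VOL_PROP_DIRECT_WRITE,
		.value	  = 1,			/* any non-zero value enables it */
	};

	if (ioctl(vol_fd, UBI_IOCSETVOLPROP, &req) == -1)
		perror("UBI_IOCSETVOLPROP");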
diff --git a/include/pcmcia/ds.h b/include/pcmcia/ds.h
index 3fd5064dd43a..7b82080eb02c 100644
--- a/include/pcmcia/ds.h
+++ b/include/pcmcia/ds.h
@@ -56,7 +56,7 @@ struct pcmcia_driver {
56 int (*resume) (struct pcmcia_device *dev); 56 int (*resume) (struct pcmcia_device *dev);
57 57
58 struct module *owner; 58 struct module *owner;
59 struct pcmcia_device_id *id_table; 59 const struct pcmcia_device_id *id_table;
60 struct device_driver drv; 60 struct device_driver drv;
61 struct pcmcia_dynids dynids; 61 struct pcmcia_dynids dynids;
62}; 62};
diff --git a/init/Kconfig b/init/Kconfig
index c8b172efaa65..332aac649966 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -959,24 +959,18 @@ config KALLSYMS_ALL
959 bool "Include all symbols in kallsyms" 959 bool "Include all symbols in kallsyms"
960 depends on DEBUG_KERNEL && KALLSYMS 960 depends on DEBUG_KERNEL && KALLSYMS
961 help 961 help
962 Normally kallsyms only contains the symbols of functions, for nicer 962 Normally kallsyms only contains the symbols of functions for nicer
963 OOPS messages. Some debuggers can use kallsyms for other 963 OOPS messages and backtraces (i.e., symbols from the text and inittext
964 symbols too: say Y here to include all symbols, if you need them 964 sections). This is sufficient for most cases. And only in very rare
965 and you don't care about adding 300k to the size of your kernel. 965 cases (e.g., when a debugger is used) all symbols are required (e.g.,
966 966 names of variables from the data sections, etc).
967 Say N. 967
968 968 This option makes sure that all symbols are loaded into the kernel
969config KALLSYMS_EXTRA_PASS 969 image (i.e., symbols from all sections) in cost of increased kernel
970 bool "Do an extra kallsyms pass" 970 size (depending on the kernel configuration, it may be 300KiB or
971 depends on KALLSYMS 971 something like this).
972 help 972
973 If kallsyms is not working correctly, the build will fail with 973 Say N unless you really need all symbols.
974 inconsistent kallsyms data. If that occurs, log a bug report and
975 turn on KALLSYMS_EXTRA_PASS which should result in a stable build.
976 Always say N here unless you find a bug in kallsyms, which must be
977 reported. KALLSYMS_EXTRA_PASS is only a temporary workaround while
978 you wait for kallsyms to be fixed.
979
980 974
981config HOTPLUG 975config HOTPLUG
982 bool "Support for hot-pluggable devices" if EXPERT 976 bool "Support for hot-pluggable devices" if EXPERT
diff --git a/kernel/capability.c b/kernel/capability.c
index 32a80e08ff4b..283c529f8b1c 100644
--- a/kernel/capability.c
+++ b/kernel/capability.c
@@ -22,12 +22,8 @@
22 */ 22 */
23 23
24const kernel_cap_t __cap_empty_set = CAP_EMPTY_SET; 24const kernel_cap_t __cap_empty_set = CAP_EMPTY_SET;
25const kernel_cap_t __cap_full_set = CAP_FULL_SET;
26const kernel_cap_t __cap_init_eff_set = CAP_INIT_EFF_SET;
27 25
28EXPORT_SYMBOL(__cap_empty_set); 26EXPORT_SYMBOL(__cap_empty_set);
29EXPORT_SYMBOL(__cap_full_set);
30EXPORT_SYMBOL(__cap_init_eff_set);
31 27
32int file_caps_enabled = 1; 28int file_caps_enabled = 1;
33 29
diff --git a/kernel/cred.c b/kernel/cred.c
index 8093c16b84b1..e12c8af793f8 100644
--- a/kernel/cred.c
+++ b/kernel/cred.c
@@ -49,10 +49,10 @@ struct cred init_cred = {
49 .magic = CRED_MAGIC, 49 .magic = CRED_MAGIC,
50#endif 50#endif
51 .securebits = SECUREBITS_DEFAULT, 51 .securebits = SECUREBITS_DEFAULT,
52 .cap_inheritable = CAP_INIT_INH_SET, 52 .cap_inheritable = CAP_EMPTY_SET,
53 .cap_permitted = CAP_FULL_SET, 53 .cap_permitted = CAP_FULL_SET,
54 .cap_effective = CAP_INIT_EFF_SET, 54 .cap_effective = CAP_FULL_SET,
55 .cap_bset = CAP_INIT_BSET, 55 .cap_bset = CAP_FULL_SET,
56 .user = INIT_USER, 56 .user = INIT_USER,
57 .user_ns = &init_user_ns, 57 .user_ns = &init_user_ns,
58 .group_info = &init_groups, 58 .group_info = &init_groups,
diff --git a/kernel/kmod.c b/kernel/kmod.c
index 5ae0ff38425f..ad6a81c58b44 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -25,6 +25,7 @@
25#include <linux/kmod.h> 25#include <linux/kmod.h>
26#include <linux/slab.h> 26#include <linux/slab.h>
27#include <linux/completion.h> 27#include <linux/completion.h>
28#include <linux/cred.h>
28#include <linux/file.h> 29#include <linux/file.h>
29#include <linux/fdtable.h> 30#include <linux/fdtable.h>
30#include <linux/workqueue.h> 31#include <linux/workqueue.h>
@@ -43,6 +44,13 @@ extern int max_threads;
43 44
44static struct workqueue_struct *khelper_wq; 45static struct workqueue_struct *khelper_wq;
45 46
47#define CAP_BSET (void *)1
48#define CAP_PI (void *)2
49
50static kernel_cap_t usermodehelper_bset = CAP_FULL_SET;
51static kernel_cap_t usermodehelper_inheritable = CAP_FULL_SET;
52static DEFINE_SPINLOCK(umh_sysctl_lock);
53
46#ifdef CONFIG_MODULES 54#ifdef CONFIG_MODULES
47 55
48/* 56/*
@@ -132,6 +140,7 @@ EXPORT_SYMBOL(__request_module);
132static int ____call_usermodehelper(void *data) 140static int ____call_usermodehelper(void *data)
133{ 141{
134 struct subprocess_info *sub_info = data; 142 struct subprocess_info *sub_info = data;
143 struct cred *new;
135 int retval; 144 int retval;
136 145
137 spin_lock_irq(&current->sighand->siglock); 146 spin_lock_irq(&current->sighand->siglock);
@@ -153,6 +162,19 @@ static int ____call_usermodehelper(void *data)
153 goto fail; 162 goto fail;
154 } 163 }
155 164
165 retval = -ENOMEM;
166 new = prepare_kernel_cred(current);
167 if (!new)
168 goto fail;
169
170 spin_lock(&umh_sysctl_lock);
171 new->cap_bset = cap_intersect(usermodehelper_bset, new->cap_bset);
172 new->cap_inheritable = cap_intersect(usermodehelper_inheritable,
173 new->cap_inheritable);
174 spin_unlock(&umh_sysctl_lock);
175
176 commit_creds(new);
177
156 retval = kernel_execve(sub_info->path, 178 retval = kernel_execve(sub_info->path,
157 (const char *const *)sub_info->argv, 179 (const char *const *)sub_info->argv,
158 (const char *const *)sub_info->envp); 180 (const char *const *)sub_info->envp);
@@ -420,6 +442,84 @@ unlock:
420} 442}
421EXPORT_SYMBOL(call_usermodehelper_exec); 443EXPORT_SYMBOL(call_usermodehelper_exec);
422 444
445static int proc_cap_handler(struct ctl_table *table, int write,
446 void __user *buffer, size_t *lenp, loff_t *ppos)
447{
448 struct ctl_table t;
449 unsigned long cap_array[_KERNEL_CAPABILITY_U32S];
450 kernel_cap_t new_cap;
451 int err, i;
452
453 if (write && (!capable(CAP_SETPCAP) ||
454 !capable(CAP_SYS_MODULE)))
455 return -EPERM;
456
457 /*
458 * convert from the global kernel_cap_t to the ulong array to print to
459 * userspace if this is a read.
460 */
461 spin_lock(&umh_sysctl_lock);
462 for (i = 0; i < _KERNEL_CAPABILITY_U32S; i++) {
463 if (table->data == CAP_BSET)
464 cap_array[i] = usermodehelper_bset.cap[i];
465 else if (table->data == CAP_PI)
466 cap_array[i] = usermodehelper_inheritable.cap[i];
467 else
468 BUG();
469 }
470 spin_unlock(&umh_sysctl_lock);
471
472 t = *table;
473 t.data = &cap_array;
474
475 /*
476 * actually read or write an array of ulongs from userspace. Remember
477 * these are least significant 32 bits first
478 */
479 err = proc_doulongvec_minmax(&t, write, buffer, lenp, ppos);
480 if (err < 0)
481 return err;
482
483 /*
484 * convert from the sysctl array of ulongs to the kernel_cap_t
485 * internal representation
486 */
487 for (i = 0; i < _KERNEL_CAPABILITY_U32S; i++)
488 new_cap.cap[i] = cap_array[i];
489
490 /*
491 * Drop everything not in the new_cap (but don't add things)
492 */
493 spin_lock(&umh_sysctl_lock);
494 if (write) {
495 if (table->data == CAP_BSET)
496 usermodehelper_bset = cap_intersect(usermodehelper_bset, new_cap);
497 if (table->data == CAP_PI)
498 usermodehelper_inheritable = cap_intersect(usermodehelper_inheritable, new_cap);
499 }
500 spin_unlock(&umh_sysctl_lock);
501
502 return 0;
503}
504
505struct ctl_table usermodehelper_table[] = {
506 {
507 .procname = "bset",
508 .data = CAP_BSET,
509 .maxlen = _KERNEL_CAPABILITY_U32S * sizeof(unsigned long),
510 .mode = 0600,
511 .proc_handler = proc_cap_handler,
512 },
513 {
514 .procname = "inheritable",
515 .data = CAP_PI,
516 .maxlen = _KERNEL_CAPABILITY_U32S * sizeof(unsigned long),
517 .mode = 0600,
518 .proc_handler = proc_cap_handler,
519 },
520 { }
521};
522
423void __init usermodehelper_init(void) 523void __init usermodehelper_init(void)
424{ 524{
425 khelper_wq = create_singlethread_workqueue("khelper"); 525 khelper_wq = create_singlethread_workqueue("khelper");
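The two ctl_table entries above surface the masks as /proc/sys/kernel/usermodehelper/bset and .../inheritable (wired into kern_table in the next diff). A minimal userspace sketch of reading one of them — the two-ulong layout follows from maxlen and proc_doulongvec_minmax(); the program itself is illustrative, not part of the patch:

/*
 * Userspace sketch: dump the usermodehelper capability bounding set.
 * The file holds _KERNEL_CAPABILITY_U32S (i.e. 2) unsigned longs,
 * least significant 32 bits of the capability mask first.
 */
#include <stdio.h>

int main(void)
{
	unsigned long lo, hi;
	FILE *f = fopen("/proc/sys/kernel/usermodehelper/bset", "r");

	if (!f) {
		perror("usermodehelper/bset");
		return 1;
	}
	if (fscanf(f, "%lu %lu", &lo, &hi) != 2) {
		fclose(f);
		return 1;
	}
	fclose(f);
	printf("helper cap_bset: 0x%08lx%08lx\n", hi, lo);
	return 0;
}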
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 3dd0c46fa3bb..4bffd62c2f13 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -56,6 +56,7 @@
 #include <linux/kprobes.h>
 #include <linux/pipe_fs_i.h>
 #include <linux/oom.h>
+#include <linux/kmod.h>
 
 #include <asm/uaccess.h>
 #include <asm/processor.h>
@@ -616,6 +617,11 @@ static struct ctl_table kern_table[] = {
 		.child		= random_table,
 	},
 	{
+		.procname	= "usermodehelper",
+		.mode		= 0555,
+		.child		= usermodehelper_table,
+	},
+	{
 		.procname	= "overflowuid",
 		.data		= &overflowuid,
 		.maxlen		= sizeof(int),
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index e3378e8d3a5c..0400553f0d04 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -2866,9 +2866,7 @@ static int alloc_cwqs(struct workqueue_struct *wq)
 		}
 	}
 
-	/* just in case, make sure it's actually aligned
-	 * - this is affected by PERCPU() alignment in vmlinux.lds.S
-	 */
+	/* just in case, make sure it's actually aligned */
 	BUG_ON(!IS_ALIGNED(wq->cpu_wq.v, align));
 	return wq->cpu_wq.v ? 0 : -ENOMEM;
 }
diff --git a/lib/flex_array.c b/lib/flex_array.c
index 854b57bd7d9d..cab7621f98aa 100644
--- a/lib/flex_array.c
+++ b/lib/flex_array.c
@@ -88,8 +88,11 @@ struct flex_array *flex_array_alloc(int element_size, unsigned int total,
 			gfp_t flags)
 {
 	struct flex_array *ret;
-	int max_size = FLEX_ARRAY_NR_BASE_PTRS *
-				FLEX_ARRAY_ELEMENTS_PER_PART(element_size);
+	int max_size = 0;
+
+	if (element_size)
+		max_size = FLEX_ARRAY_NR_BASE_PTRS *
+			FLEX_ARRAY_ELEMENTS_PER_PART(element_size);
 
 	/* max_size will end up 0 if element_size > PAGE_SIZE */
 	if (total > max_size)
@@ -183,15 +186,18 @@ __fa_get_part(struct flex_array *fa, int part_nr, gfp_t flags)
 int flex_array_put(struct flex_array *fa, unsigned int element_nr, void *src,
 			gfp_t flags)
 {
-	int part_nr = fa_element_to_part_nr(fa, element_nr);
+	int part_nr;
 	struct flex_array_part *part;
 	void *dst;
 
 	if (element_nr >= fa->total_nr_elements)
 		return -ENOSPC;
+	if (!fa->element_size)
+		return 0;
 	if (elements_fit_in_base(fa))
 		part = (struct flex_array_part *)&fa->parts[0];
 	else {
+		part_nr = fa_element_to_part_nr(fa, element_nr);
 		part = __fa_get_part(fa, part_nr, flags);
 		if (!part)
 			return -ENOMEM;
@@ -211,15 +217,18 @@ EXPORT_SYMBOL(flex_array_put);
  */
 int flex_array_clear(struct flex_array *fa, unsigned int element_nr)
 {
-	int part_nr = fa_element_to_part_nr(fa, element_nr);
+	int part_nr;
 	struct flex_array_part *part;
 	void *dst;
 
 	if (element_nr >= fa->total_nr_elements)
 		return -ENOSPC;
+	if (!fa->element_size)
+		return 0;
 	if (elements_fit_in_base(fa))
 		part = (struct flex_array_part *)&fa->parts[0];
 	else {
+		part_nr = fa_element_to_part_nr(fa, element_nr);
 		part = fa->parts[part_nr];
 		if (!part)
 			return -EINVAL;
@@ -264,6 +273,8 @@ int flex_array_prealloc(struct flex_array *fa, unsigned int start,
 
 	if (end >= fa->total_nr_elements)
 		return -ENOSPC;
+	if (!fa->element_size)
+		return 0;
 	if (elements_fit_in_base(fa))
 		return 0;
 	start_part = fa_element_to_part_nr(fa, start);
@@ -291,14 +302,17 @@ EXPORT_SYMBOL(flex_array_prealloc);
  */
 void *flex_array_get(struct flex_array *fa, unsigned int element_nr)
 {
-	int part_nr = fa_element_to_part_nr(fa, element_nr);
+	int part_nr;
 	struct flex_array_part *part;
 
+	if (!fa->element_size)
+		return NULL;
 	if (element_nr >= fa->total_nr_elements)
 		return NULL;
 	if (elements_fit_in_base(fa))
 		part = (struct flex_array_part *)&fa->parts[0];
 	else {
+		part_nr = fa_element_to_part_nr(fa, element_nr);
 		part = fa->parts[part_nr];
 		if (!part)
 			return NULL;
@@ -353,7 +367,7 @@ int flex_array_shrink(struct flex_array *fa)
 	int part_nr;
 	int ret = 0;
 
-	if (!fa->total_nr_elements)
+	if (!fa->total_nr_elements || !fa->element_size)
 		return 0;
 	if (elements_fit_in_base(fa))
 		return ret;
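Taken together, the guards make a zero-size-element flex_array legal and inert. Previously even flex_array_alloc(0, 0, ...) divided by zero inside FLEX_ARRAY_ELEMENTS_PER_PART(); now allocation succeeds and the accessors short-circuit before any part-number arithmetic. An illustrative kernel-side sketch (mine, not from the patch):

/* Sketch: an empty, zero-size-element flex_array is now well defined. */
#include <linux/flex_array.h>

static int zero_size_demo(void)
{
	struct flex_array *fa = flex_array_alloc(0, 0, GFP_KERNEL);

	if (!fa)
		return -ENOMEM;
	WARN_ON(flex_array_get(fa, 0) != NULL);	/* !element_size -> NULL */
	WARN_ON(flex_array_shrink(fa) != 0);	/* nothing to free */
	flex_array_free(fa);
	return 0;
}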
diff --git a/mm/percpu.c b/mm/percpu.c
index a160db39b810..bf80e55dbed7 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -1215,8 +1215,10 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
 	PCPU_SETUP_BUG_ON(ai->nr_groups <= 0);
 #ifdef CONFIG_SMP
 	PCPU_SETUP_BUG_ON(!ai->static_size);
+	PCPU_SETUP_BUG_ON((unsigned long)__per_cpu_start & ~PAGE_MASK);
 #endif
 	PCPU_SETUP_BUG_ON(!base_addr);
+	PCPU_SETUP_BUG_ON((unsigned long)base_addr & ~PAGE_MASK);
 	PCPU_SETUP_BUG_ON(ai->unit_size < size_sum);
 	PCPU_SETUP_BUG_ON(ai->unit_size & ~PAGE_MASK);
 	PCPU_SETUP_BUG_ON(ai->unit_size < PCPU_MIN_UNIT_SIZE);
@@ -1645,8 +1647,8 @@ int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
 	/* warn if maximum distance is further than 75% of vmalloc space */
 	if (max_distance > (VMALLOC_END - VMALLOC_START) * 3 / 4) {
 		pr_warning("PERCPU: max_distance=0x%zx too large for vmalloc "
-			   "space 0x%lx\n",
-			   max_distance, VMALLOC_END - VMALLOC_START);
+			   "space 0x%lx\n", max_distance,
+			   (unsigned long)(VMALLOC_END - VMALLOC_START));
 #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
 		/* and fail if we have fallback */
 		rc = -EINVAL;
diff --git a/mm/rmap.c b/mm/rmap.c
index 8da044a1db0f..522e4a93cadd 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -719,7 +719,7 @@ int page_referenced(struct page *page,
 		unlock_page(page);
 	}
 out:
-	if (page_test_and_clear_young(page))
+	if (page_test_and_clear_young(page_to_pfn(page)))
 		referenced++;
 
 	return referenced;
@@ -785,10 +785,8 @@ int page_mkclean(struct page *page)
 		struct address_space *mapping = page_mapping(page);
 		if (mapping) {
 			ret = page_mkclean_file(mapping, page);
-			if (page_test_dirty(page)) {
-				page_clear_dirty(page, 1);
+			if (page_test_and_clear_dirty(page_to_pfn(page), 1))
 				ret = 1;
-			}
 		}
 	}
 
@@ -981,10 +979,9 @@ void page_remove_rmap(struct page *page)
 	 * not if it's in swapcache - there might be another pte slot
 	 * containing the swap entry, but page not yet written to swap.
 	 */
-	if ((!PageAnon(page) || PageSwapCache(page)) && page_test_dirty(page)) {
-		page_clear_dirty(page, 1);
+	if ((!PageAnon(page) || PageSwapCache(page)) &&
+	    page_test_and_clear_dirty(page_to_pfn(page), 1))
 		set_page_dirty(page);
-	}
 	/*
 	 * Hugepages are not counted in NR_ANON_PAGES nor NR_FILE_MAPPED
 	 * and not charged by memcg for now.
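The rmap changes fold the old page_test_dirty()/page_clear_dirty() pair into a single page_test_and_clear_dirty() keyed by pfn. The point of the combined primitive is atomicity: with a separate test and clear, a dirty bit set between the two calls is silently wiped. A hedged userspace model of the difference (C11 atomics standing in for s390 storage keys; not kernel code):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_uint storage_key;		/* stand-in for a per-page key */
#define KEY_DIRTY 0x1u

/* racy pair: test, then clear as two separate operations */
static bool test_then_clear_racy(void)
{
	bool was_dirty = atomic_load(&storage_key) & KEY_DIRTY;
	/* another CPU may set KEY_DIRTY right here; the clear loses it */
	atomic_fetch_and(&storage_key, ~KEY_DIRTY);
	return was_dirty;
}

/* what a combined test-and-clear provides: one atomic read-modify-write */
static bool test_and_clear(void)
{
	return atomic_fetch_and(&storage_key, ~KEY_DIRTY) & KEY_DIRTY;
}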
diff --git a/net/dns_resolver/dns_key.c b/net/dns_resolver/dns_key.c
index cfa7a5e1c5c9..fa000d26dc60 100644
--- a/net/dns_resolver/dns_key.c
+++ b/net/dns_resolver/dns_key.c
@@ -212,10 +212,12 @@ static void dns_resolver_describe(const struct key *key, struct seq_file *m)
 	int err = key->type_data.x[0];
 
 	seq_puts(m, key->description);
-	if (err)
-		seq_printf(m, ": %d", err);
-	else
-		seq_printf(m, ": %u", key->datalen);
+	if (key_is_instantiated(key)) {
+		if (err)
+			seq_printf(m, ": %d", err);
+		else
+			seq_printf(m, ": %u", key->datalen);
+	}
 }
 
 /*
diff --git a/scripts/.gitignore b/scripts/.gitignore
index e2741d23bab8..105b21f08185 100644
--- a/scripts/.gitignore
+++ b/scripts/.gitignore
@@ -8,3 +8,4 @@ bin2c
 unifdef
 ihex2fw
 recordmcount
+docproc
diff --git a/scripts/Kbuild.include b/scripts/Kbuild.include
index ed2773edfe71..be39cd1c74cf 100644
--- a/scripts/Kbuild.include
+++ b/scripts/Kbuild.include
@@ -118,6 +118,11 @@ cc-option-yn = $(call try-run,\
 cc-option-align = $(subst -functions=0,,\
 	$(call cc-option,-falign-functions=0,-malign-functions=0))
 
+# cc-disable-warning
+# Usage: cflags-y += $(call cc-disable-warning,unused-but-set-variable)
+cc-disable-warning = $(call try-run,\
+	$(CC) $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) -W$(strip $(1)) -c -xc /dev/null -o "$$TMP",-Wno-$(strip $(1)))
+
 # cc-version
 # Usage gcc-ver := $(call cc-version)
 cc-version = $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-version.sh $(CC))
@@ -141,6 +146,11 @@ cc-ldoption = $(call try-run,\
 ld-option = $(call try-run,\
 	$(CC) /dev/null -c -o "$$TMPO" ; $(LD) $(1) "$$TMPO" -o "$$TMP",$(1),$(2))
 
+# ar-option
+# Usage: KBUILD_ARFLAGS := $(call ar-option,D)
+# Important: no spaces around options
+ar-option = $(call try-run, $(AR) rc$(1) "$$TMP",$(1),$(2))
+
 ######
 
 ###
@@ -187,6 +197,8 @@ ifneq ($(KBUILD_NOCMDDEP),1)
 # User may override this check using make KBUILD_NOCMDDEP=1
 arg-check = $(strip $(filter-out $(cmd_$(1)), $(cmd_$@)) \
                     $(filter-out $(cmd_$@), $(cmd_$(1))) )
+else
+arg-check = $(if $(strip $(cmd_$@)),,1)
 endif
 
 # >'< substitution is for echo to work,
diff --git a/scripts/Makefile b/scripts/Makefile
index fcea26168bca..df7678febf27 100644
--- a/scripts/Makefile
+++ b/scripts/Makefile
@@ -6,6 +6,7 @@
 # pnmttologo:    Convert pnm files to logo files
 # conmakehash:   Create chartable
 # conmakehash:   Create arrays for initializing the kernel console tables
+# docproc:       Used in Documentation/DocBook
 
 hostprogs-$(CONFIG_KALLSYMS)     += kallsyms
 hostprogs-$(CONFIG_LOGO)         += pnmtologo
@@ -16,12 +17,14 @@ hostprogs-$(BUILD_C_RECORDMCOUNT) += recordmcount
 always		:= $(hostprogs-y) $(hostprogs-m)
 
 # The following hostprogs-y programs are only build on demand
-hostprogs-y += unifdef
+hostprogs-y += unifdef docproc
 
-# This target is used internally to avoid "is up to date" messages
+# These targets are used internally to avoid "is up to date" messages
 PHONY += build_unifdef
 build_unifdef: scripts/unifdef FORCE
 	@:
+build_docproc: scripts/docproc FORCE
+	@:
 
 subdir-$(CONFIG_MODVERSIONS) += genksyms
 subdir-y                     += mod
diff --git a/scripts/Makefile.asm-generic b/scripts/Makefile.asm-generic
new file mode 100644
index 000000000000..490122c3e2aa
--- /dev/null
+++ b/scripts/Makefile.asm-generic
@@ -0,0 +1,23 @@
+# include/asm-generic contains a lot of files that are used
+# verbatim by several architectures.
+#
+# This Makefile reads the file arch/$(SRCARCH)/include/asm/Kbuild
+# and for each file listed in this file with generic-y creates
+# a small wrapper file in $(obj) (arch/$(SRCARCH)/include/generated/asm)
+
+kbuild-file := $(srctree)/arch/$(SRCARCH)/include/asm/Kbuild
+-include $(kbuild-file)
+
+include scripts/Kbuild.include
+
+# Create output directory if not already present
+_dummy := $(shell [ -d $(obj) ] || mkdir -p $(obj))
+
+quiet_cmd_wrap = WRAP    $@
+cmd_wrap = echo "\#include <asm-generic/$*.h>" >$@
+
+all: $(patsubst %, $(obj)/%, $(generic-y))
+
+$(obj)/%.h:
+	$(call cmd,wrap)
+
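Each file generated by cmd_wrap above is a one-line C header. For a hypothetical architecture listing bug.h in its generic-y, arch/$(SRCARCH)/include/generated/asm/bug.h would contain exactly:

/* generated wrapper: arch/$(SRCARCH)/include/generated/asm/bug.h */
#include <asm-generic/bug.h>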
diff --git a/scripts/Makefile.build b/scripts/Makefile.build
index 6165622c3e29..a0fd5029cfe7 100644
--- a/scripts/Makefile.build
+++ b/scripts/Makefile.build
@@ -51,36 +51,52 @@ ifeq ($(KBUILD_NOPEDANTIC),)
 endif
 
 #
-# make W=1 settings
+# make W=... settings
 #
-# $(call cc-option... ) handles gcc -W.. options which
+# W=1 - warnings that may be relevant and do not occur too often
+# W=2 - warnings that occur quite often but may still be relevant
+# W=3 - the more obscure warnings, can most likely be ignored
+#
+# $(call cc-option, -W...) handles gcc -W.. options which
 # are not supported by all versions of the compiler
 ifdef KBUILD_ENABLE_EXTRA_GCC_CHECKS
-KBUILD_EXTRA_WARNINGS := -Wextra
-KBUILD_EXTRA_WARNINGS += -Wunused -Wno-unused-parameter
-KBUILD_EXTRA_WARNINGS += -Waggregate-return
-KBUILD_EXTRA_WARNINGS += -Wbad-function-cast
-KBUILD_EXTRA_WARNINGS += -Wcast-qual
-KBUILD_EXTRA_WARNINGS += -Wcast-align
-KBUILD_EXTRA_WARNINGS += -Wconversion
-KBUILD_EXTRA_WARNINGS += -Wdisabled-optimization
-KBUILD_EXTRA_WARNINGS += -Wlogical-op
-KBUILD_EXTRA_WARNINGS += -Wmissing-declarations
-KBUILD_EXTRA_WARNINGS += -Wmissing-format-attribute
-KBUILD_EXTRA_WARNINGS += $(call cc-option, -Wmissing-include-dirs,)
-KBUILD_EXTRA_WARNINGS += -Wmissing-prototypes
-KBUILD_EXTRA_WARNINGS += -Wnested-externs
-KBUILD_EXTRA_WARNINGS += -Wold-style-definition
-KBUILD_EXTRA_WARNINGS += $(call cc-option, -Woverlength-strings,)
-KBUILD_EXTRA_WARNINGS += -Wpacked
-KBUILD_EXTRA_WARNINGS += -Wpacked-bitfield-compat
-KBUILD_EXTRA_WARNINGS += -Wpadded
-KBUILD_EXTRA_WARNINGS += -Wpointer-arith
-KBUILD_EXTRA_WARNINGS += -Wredundant-decls
-KBUILD_EXTRA_WARNINGS += -Wshadow
-KBUILD_EXTRA_WARNINGS += -Wswitch-default
-KBUILD_EXTRA_WARNINGS += $(call cc-option, -Wvla,)
-KBUILD_CFLAGS += $(KBUILD_EXTRA_WARNINGS)
+warning-  := $(empty)
+
+warning-1 := -Wextra -Wunused -Wno-unused-parameter
+warning-1 += -Wmissing-declarations
+warning-1 += -Wmissing-format-attribute
+warning-1 += -Wmissing-prototypes
+warning-1 += -Wold-style-definition
+warning-1 += $(call cc-option, -Wmissing-include-dirs)
+warning-1 += $(call cc-option, -Wunused-but-set-variable)
+
+warning-2 := -Waggregate-return
+warning-2 += -Wcast-align
+warning-2 += -Wdisabled-optimization
+warning-2 += -Wnested-externs
+warning-2 += -Wshadow
+warning-2 += $(call cc-option, -Wlogical-op)
+
+warning-3 := -Wbad-function-cast
+warning-3 += -Wcast-qual
+warning-3 += -Wconversion
+warning-3 += -Wpacked
+warning-3 += -Wpadded
+warning-3 += -Wpointer-arith
+warning-3 += -Wredundant-decls
+warning-3 += -Wswitch-default
+warning-3 += $(call cc-option, -Wpacked-bitfield-compat)
+warning-3 += $(call cc-option, -Wvla)
+
+warning := $(warning-$(findstring 1, $(KBUILD_ENABLE_EXTRA_GCC_CHECKS)))
+warning += $(warning-$(findstring 2, $(KBUILD_ENABLE_EXTRA_GCC_CHECKS)))
+warning += $(warning-$(findstring 3, $(KBUILD_ENABLE_EXTRA_GCC_CHECKS)))
+
+ifeq ("$(strip $(warning))","")
+        $(error W=$(KBUILD_ENABLE_EXTRA_GCC_CHECKS) is unknown)
+endif
+
+KBUILD_CFLAGS += $(warning)
 endif
 
 include scripts/Makefile.lib
@@ -351,7 +367,7 @@ quiet_cmd_link_o_target = LD      $@
 cmd_link_o_target = $(if $(strip $(obj-y)),\
 		      $(LD) $(ld_flags) -r -o $@ $(filter $(obj-y), $^) \
 		      $(cmd_secanalysis),\
-		      rm -f $@; $(AR) rcs $@)
+		      rm -f $@; $(AR) rcs$(KBUILD_ARFLAGS) $@)
 
 $(builtin-target): $(obj-y) FORCE
 	$(call if_changed,link_o_target)
@@ -377,7 +393,7 @@ $(modorder-target): $(subdir-ym) FORCE
 #
 ifdef lib-target
 quiet_cmd_link_l_target = AR      $@
-cmd_link_l_target = rm -f $@; $(AR) rcs $@ $(lib-y)
+cmd_link_l_target = rm -f $@; $(AR) rcs$(KBUILD_ARFLAGS) $@ $(lib-y)
 
 $(lib-target): $(lib-y) FORCE
 	$(call if_changed,link_l_target)
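W=1 also newly probes -Wunused-but-set-variable via cc-option. An illustrative C fragment (mine) of what that warning, and no other level-1 flag, catches:

/* Illustrative only: what -Wunused-but-set-variable (now in W=1) flags. */
int frob(void)
{
	int ret = 0;	/* set here ... */

	ret = 42;	/* ... and here, but its value is never read */
	return 0;	/* gcc: warning: variable 'ret' set but not used */
}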
diff --git a/scripts/Makefile.headersinst b/scripts/Makefile.headersinst
index f89cb87f5c01..a57f5bd5a13d 100644
--- a/scripts/Makefile.headersinst
+++ b/scripts/Makefile.headersinst
@@ -27,8 +27,13 @@ header-y      := $(filter-out %/, $(header-y))
 install-file  := $(install)/.install
 check-file    := $(install)/.check
 
+# generic-y lists all files an architecture uses from asm-generic
+# Use this to build a list of headers which require a wrapper
+wrapper-files := $(filter $(header-y), $(generic-y))
+
 # all headers files for this dir
-all-files     := $(header-y) $(objhdr-y)
+header-y      := $(filter-out $(generic-y), $(header-y))
+all-files     := $(header-y) $(objhdr-y) $(wrapper-files)
 input-files   := $(addprefix $(srctree)/$(obj)/,$(header-y)) \
                  $(addprefix $(objtree)/$(obj)/,$(objhdr-y))
 output-files  := $(addprefix $(install)/, $(all-files))
@@ -47,6 +52,9 @@ quiet_cmd_install = INSTALL $(printdir) ($(words $(all-files))\
 cmd_install = \
         $(PERL) $< $(srctree)/$(obj) $(install) $(SRCARCH) $(header-y); \
         $(PERL) $< $(objtree)/$(obj) $(install) $(SRCARCH) $(objhdr-y); \
+        for F in $(wrapper-files); do                                   \
+                echo "\#include <asm-generic/$$F>" > $(install)/$$F;    \
+        done;                                                           \
         touch $@
 
 quiet_cmd_remove = REMOVE  $(unwanted)
diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib
index 1c702ca8aac8..93b2b5938a2e 100644
--- a/scripts/Makefile.lib
+++ b/scripts/Makefile.lib
@@ -197,7 +197,7 @@ cmd_objcopy = $(OBJCOPY) $(OBJCOPYFLAGS) $(OBJCOPYFLAGS_$(@F)) $< $@
 # ---------------------------------------------------------------------------
 
 quiet_cmd_gzip = GZIP    $@
-cmd_gzip = (cat $(filter-out FORCE,$^) | gzip -f -9 > $@) || \
+cmd_gzip = (cat $(filter-out FORCE,$^) | gzip -n -f -9 > $@) || \
 	(rm -f $@ ; false)
 
 # DTC
diff --git a/scripts/basic/.gitignore b/scripts/basic/.gitignore
index bf8b199ec598..a776371a3502 100644
--- a/scripts/basic/.gitignore
+++ b/scripts/basic/.gitignore
@@ -1,3 +1 @@
-hash
 fixdep
-docproc
diff --git a/scripts/basic/Makefile b/scripts/basic/Makefile
index 4c324a1f1e0e..4fcef87bb875 100644
--- a/scripts/basic/Makefile
+++ b/scripts/basic/Makefile
@@ -7,9 +7,8 @@
 # .config is included by main Makefile.
 # ---------------------------------------------------------------------------
 # fixdep:        Used to generate dependency information during build process
-# docproc:       Used in Documentation/DocBook
 
-hostprogs-y	:= fixdep docproc
+hostprogs-y	:= fixdep
 always		:= $(hostprogs-y)
 
 # fixdep is needed to compile other host programs
diff --git a/scripts/basic/docproc.c b/scripts/docproc.c
index 98dec87974d0..98dec87974d0 100644
--- a/scripts/basic/docproc.c
+++ b/scripts/docproc.c
diff --git a/scripts/gen_initramfs_list.sh b/scripts/gen_initramfs_list.sh
index e12b1a7525cf..b482f162a18a 100644
--- a/scripts/gen_initramfs_list.sh
+++ b/scripts/gen_initramfs_list.sh
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/bin/sh
 # Copyright (C) Martin Schlemmer <azarah@nosferatu.za.org>
 # Copyright (C) 2006 Sam Ravnborg <sam@ravnborg.org>
 #
@@ -105,9 +105,9 @@ list_parse() {
 # for links, devices etc the format differs. See gen_init_cpio for details
 parse() {
 	local location="$1"
-	local name="${location/${srcdir}//}"
+	local name="/${location#${srcdir}}"
 	# change '//' into '/'
-	name="${name//\/\///}"
+	name=$(echo "$name" | sed -e 's://*:/:g')
 	local mode="$2"
 	local uid="$3"
 	local gid="$4"
@@ -117,8 +117,8 @@ parse() {
 	[ "$root_gid" = "squash" ] && gid=0 || [ "$gid" -eq "$root_gid" ] && gid=0
 	local str="${mode} ${uid} ${gid}"
 
-	[ "${ftype}" == "invalid" ] && return 0
-	[ "${location}" == "${srcdir}" ] && return 0
+	[ "${ftype}" = "invalid" ] && return 0
+	[ "${location}" = "${srcdir}" ] && return 0
 
 	case "${ftype}" in
 		"file")
@@ -192,7 +192,7 @@ input_file() {
 	if [ -f "$1" ]; then
 		${dep_list}header "$1"
 		is_cpio="$(echo "$1" | sed 's/^.*\.cpio\(\..*\)\?/cpio/')"
-		if [ $2 -eq 0 -a ${is_cpio} == "cpio" ]; then
+		if [ $2 -eq 0 -a ${is_cpio} = "cpio" ]; then
 			cpio_file=$1
 			echo "$1" | grep -q '^.*\.cpio\..*' && is_cpio_compressed="compressed"
 			[ ! -z ${dep_list} ] && echo "$1"
@@ -204,7 +204,7 @@ input_file() {
 	else
 		echo "$1 \\"
 		cat "$1" | while read type dir file perm ; do
-			if [ "$type" == "file" ]; then
+			if [ "$type" = "file" ]; then
 				echo "$file \\";
 			fi
 		done
@@ -226,7 +226,7 @@ cpio_list=
 output="/dev/stdout"
 output_file=""
 is_cpio_compressed=
-compr="gzip -9 -f"
+compr="gzip -n -9 -f"
 
 arg="$1"
 case "$arg" in
@@ -240,7 +240,7 @@ case "$arg" in
 		output_file="$1"
 		cpio_list="$(mktemp ${TMPDIR:-/tmp}/cpiolist.XXXXXX)"
 		output=${cpio_list}
-		echo "$output_file" | grep -q "\.gz$" && compr="gzip -9 -f"
+		echo "$output_file" | grep -q "\.gz$" && compr="gzip -n -9 -f"
 		echo "$output_file" | grep -q "\.bz2$" && compr="bzip2 -9 -f"
 		echo "$output_file" | grep -q "\.lzma$" && compr="lzma -9 -f"
 		echo "$output_file" | grep -q "\.xz$" && \
@@ -287,8 +287,15 @@ done
 # we are careful to delete tmp files
 if [ ! -z ${output_file} ]; then
 	if [ -z ${cpio_file} ]; then
+		timestamp=
+		if test -n "$KBUILD_BUILD_TIMESTAMP"; then
+			timestamp="$(date -d"$KBUILD_BUILD_TIMESTAMP" +%s || :)"
+			if test -n "$timestamp"; then
+				timestamp="-t $timestamp"
+			fi
+		fi
 		cpio_tfile="$(mktemp ${TMPDIR:-/tmp}/cpiofile.XXXXXX)"
-		usr/gen_init_cpio ${cpio_list} > ${cpio_tfile}
+		usr/gen_init_cpio $timestamp ${cpio_list} > ${cpio_tfile}
 	else
 		cpio_tfile=${cpio_file}
 	fi
diff --git a/scripts/kallsyms.c b/scripts/kallsyms.c
index 60dd3eb9366e..487ac6f37ca2 100644
--- a/scripts/kallsyms.c
+++ b/scripts/kallsyms.c
@@ -500,6 +500,8 @@ static void optimize_result(void)
 
 		/* find the token with the greatest profit value */
 		best = find_best_token();
+		if (token_profit[best] == 0)
+			break;
 
 		/* place it in the "best" table */
 		best_table_len[i] = 2;
diff --git a/scripts/mkcompile_h b/scripts/mkcompile_h
index 50ad317a4bf9..f221ddf69080 100755
--- a/scripts/mkcompile_h
+++ b/scripts/mkcompile_h
@@ -42,6 +42,16 @@ if [ -z "$KBUILD_BUILD_TIMESTAMP" ]; then
 else
 	TIMESTAMP=$KBUILD_BUILD_TIMESTAMP
 fi
+if test -z "$KBUILD_BUILD_USER"; then
+	LINUX_COMPILE_BY=$(whoami | sed 's/\\/\\\\/')
+else
+	LINUX_COMPILE_BY=$KBUILD_BUILD_USER
+fi
+if test -z "$KBUILD_BUILD_HOST"; then
+	LINUX_COMPILE_HOST=`hostname`
+else
+	LINUX_COMPILE_HOST=$KBUILD_BUILD_HOST
+fi
 
 UTS_VERSION="#$VERSION"
 CONFIG_FLAGS=""
@@ -63,20 +73,8 @@ UTS_TRUNCATE="cut -b -$UTS_LEN"
 
   echo \#define UTS_VERSION \"`echo $UTS_VERSION | $UTS_TRUNCATE`\"
 
-  echo \#define LINUX_COMPILE_TIME \"`date +%T`\"
-  echo \#define LINUX_COMPILE_BY \"`whoami`\"
-  echo \#define LINUX_COMPILE_HOST \"`hostname | $UTS_TRUNCATE`\"
-
-  domain=`dnsdomainname 2> /dev/null`
-  if [ -z "$domain" ]; then
-    domain=`domainname 2> /dev/null`
-  fi
-
-  if [ -n "$domain" ]; then
-    echo \#define LINUX_COMPILE_DOMAIN \"`echo $domain | $UTS_TRUNCATE`\"
-  else
-    echo \#define LINUX_COMPILE_DOMAIN
-  fi
+  echo \#define LINUX_COMPILE_BY \"`echo $LINUX_COMPILE_BY | $UTS_TRUNCATE`\"
+  echo \#define LINUX_COMPILE_HOST \"`echo $LINUX_COMPILE_HOST | $UTS_TRUNCATE`\"
 
   echo \#define LINUX_COMPILER \"`$CC -v 2>&1 | tail -n 1`\"
 ) > .tmpcompile
@@ -91,8 +89,8 @@ UTS_TRUNCATE="cut -b -$UTS_LEN"
 # first line.
 
 if [ -r $TARGET ] && \
-      grep -v 'UTS_VERSION\|LINUX_COMPILE_TIME' $TARGET > .tmpver.1 && \
-      grep -v 'UTS_VERSION\|LINUX_COMPILE_TIME' .tmpcompile > .tmpver.2 && \
+      grep -v 'UTS_VERSION' $TARGET > .tmpver.1 && \
+      grep -v 'UTS_VERSION' .tmpcompile > .tmpver.2 && \
      cmp -s .tmpver.1 .tmpver.2; then
    rm -f .tmpcompile
 else
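With LINUX_COMPILE_TIME and LINUX_COMPILE_DOMAIN gone and the BY/HOST values overridable, a build run with KBUILD_BUILD_USER=builder and KBUILD_BUILD_HOST=buildhost yields a compile.h along these lines (values illustrative, not from the patch):

/* include/generated/compile.h (excerpt, illustrative values) */
#define UTS_VERSION "#1 SMP Mon Jun 6 12:00:00 UTC 2011"
#define LINUX_COMPILE_BY "builder"
#define LINUX_COMPILE_HOST "buildhost"
#define LINUX_COMPILER "gcc version 4.6.0"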
diff --git a/security/Kconfig b/security/Kconfig
index 95accd442d55..e0f08b52e4ab 100644
--- a/security/Kconfig
+++ b/security/Kconfig
@@ -167,6 +167,7 @@ config INTEL_TXT
 config LSM_MMAP_MIN_ADDR
 	int "Low address space for LSM to protect from user allocation"
 	depends on SECURITY && SECURITY_SELINUX
+	default 32768 if ARM
 	default 65536
 	help
 	  This is the portion of low virtual memory which should be protected
diff --git a/security/commoncap.c b/security/commoncap.c
index f20e984ccfb4..a93b3b733079 100644
--- a/security/commoncap.c
+++ b/security/commoncap.c
@@ -529,15 +529,10 @@ skip:
 	new->suid = new->fsuid = new->euid;
 	new->sgid = new->fsgid = new->egid;
 
-	/* For init, we want to retain the capabilities set in the initial
-	 * task.  Thus we skip the usual capability rules
-	 */
-	if (!is_global_init(current)) {
-		if (effective)
-			new->cap_effective = new->cap_permitted;
-		else
-			cap_clear(new->cap_effective);
-	}
+	if (effective)
+		new->cap_effective = new->cap_permitted;
+	else
+		cap_clear(new->cap_effective);
 	bprm->cap_effective = effective;
 
 	/*
diff --git a/security/keys/internal.h b/security/keys/internal.h
index 07a025f81902..f375152a2500 100644
--- a/security/keys/internal.h
+++ b/security/keys/internal.h
@@ -109,11 +109,13 @@ extern key_ref_t keyring_search_aux(key_ref_t keyring_ref,
 				    const struct cred *cred,
 				    struct key_type *type,
 				    const void *description,
-				    key_match_func_t match);
+				    key_match_func_t match,
+				    bool no_state_check);
 
 extern key_ref_t search_my_process_keyrings(struct key_type *type,
 					    const void *description,
 					    key_match_func_t match,
+					    bool no_state_check,
 					    const struct cred *cred);
 extern key_ref_t search_process_keyrings(struct key_type *type,
 					 const void *description,
diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
index 427fddcaeb19..eca51918c951 100644
--- a/security/keys/keyctl.c
+++ b/security/keys/keyctl.c
@@ -206,8 +206,14 @@ SYSCALL_DEFINE4(request_key, const char __user *, _type,
 		goto error5;
 	}
 
+	/* wait for the key to finish being constructed */
+	ret = wait_for_key_construction(key, 1);
+	if (ret < 0)
+		goto error6;
+
 	ret = key->serial;
 
+error6:
 	key_put(key);
 error5:
 	key_type_put(ktype);
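With wait_for_key_construction() in the syscall path, request_key(2) no longer returns the serial of a key that is still being instantiated by the upcall. A userspace sketch (mine; raw syscall, and the "user" type plus description are placeholders):

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

int main(void)
{
	/* request_key(type, description, callout_info, dest_keyring) */
	long serial = syscall(SYS_request_key, "user", "demo:desc", NULL, 0);

	if (serial < 0) {
		perror("request_key");
		return 1;
	}
	/* by the time we get here, construction has completed */
	printf("instantiated key serial: %ld\n", serial);
	return 0;
}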
diff --git a/security/keys/keyring.c b/security/keys/keyring.c
index cdd2f3f88c88..a06ffab38568 100644
--- a/security/keys/keyring.c
+++ b/security/keys/keyring.c
@@ -176,13 +176,15 @@ static void keyring_describe(const struct key *keyring, struct seq_file *m)
 	else
 		seq_puts(m, "[anon]");
 
-	rcu_read_lock();
-	klist = rcu_dereference(keyring->payload.subscriptions);
-	if (klist)
-		seq_printf(m, ": %u/%u", klist->nkeys, klist->maxkeys);
-	else
-		seq_puts(m, ": empty");
-	rcu_read_unlock();
+	if (key_is_instantiated(keyring)) {
+		rcu_read_lock();
+		klist = rcu_dereference(keyring->payload.subscriptions);
+		if (klist)
+			seq_printf(m, ": %u/%u", klist->nkeys, klist->maxkeys);
+		else
+			seq_puts(m, ": empty");
+		rcu_read_unlock();
+	}
 }
 
 /*
@@ -271,6 +273,7 @@ struct key *keyring_alloc(const char *description, uid_t uid, gid_t gid,
  * @type: The type of key to search for.
  * @description: Parameter for @match.
  * @match: Function to rule on whether or not a key is the one required.
+ * @no_state_check: Don't check if a matching key is bad
  *
  * Search the supplied keyring tree for a key that matches the criteria given.
  * The root keyring and any linked keyrings must grant Search permission to the
@@ -303,7 +306,8 @@ key_ref_t keyring_search_aux(key_ref_t keyring_ref,
 			     const struct cred *cred,
 			     struct key_type *type,
 			     const void *description,
-			     key_match_func_t match)
+			     key_match_func_t match,
+			     bool no_state_check)
 {
 	struct {
 		struct keyring_list *keylist;
@@ -345,6 +349,8 @@ key_ref_t keyring_search_aux(key_ref_t keyring_ref,
 	kflags = keyring->flags;
 	if (keyring->type == type && match(keyring, description)) {
 		key = keyring;
+		if (no_state_check)
+			goto found;
 
 		/* check it isn't negative and hasn't expired or been
 		 * revoked */
@@ -384,11 +390,13 @@ descend:
 			continue;
 
 		/* skip revoked keys and expired keys */
-		if (kflags & (1 << KEY_FLAG_REVOKED))
-			continue;
+		if (!no_state_check) {
+			if (kflags & (1 << KEY_FLAG_REVOKED))
+				continue;
 
-		if (key->expiry && now.tv_sec >= key->expiry)
-			continue;
+			if (key->expiry && now.tv_sec >= key->expiry)
+				continue;
+		}
 
 		/* keys that don't match */
 		if (!match(key, description))
@@ -399,6 +407,9 @@ descend:
 			    cred, KEY_SEARCH) < 0)
 			continue;
 
+		if (no_state_check)
+			goto found;
+
 		/* we set a different error code if we pass a negative key */
 		if (kflags & (1 << KEY_FLAG_NEGATIVE)) {
 			err = key->type_data.reject_error;
@@ -478,7 +489,7 @@ key_ref_t keyring_search(key_ref_t keyring,
 		return ERR_PTR(-ENOKEY);
 
 	return keyring_search_aux(keyring, current->cred,
-				  type, description, type->match);
+				  type, description, type->match, false);
 }
 EXPORT_SYMBOL(keyring_search);
 
diff --git a/security/keys/proc.c b/security/keys/proc.c
index 525cf8a29cdd..49bbc97943ad 100644
--- a/security/keys/proc.c
+++ b/security/keys/proc.c
@@ -199,7 +199,7 @@ static int proc_keys_show(struct seq_file *m, void *v)
 	if (key->perm & KEY_POS_VIEW) {
 		skey_ref = search_my_process_keyrings(key->type, key,
 						      lookup_user_key_possessed,
-						      cred);
+						      true, cred);
 		if (!IS_ERR(skey_ref)) {
 			key_ref_put(skey_ref);
 			key_ref = make_key_ref(key, 1);
diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c
index 930634e45149..6c0480db8885 100644
--- a/security/keys/process_keys.c
+++ b/security/keys/process_keys.c
@@ -331,6 +331,7 @@ void key_fsgid_changed(struct task_struct *tsk)
 key_ref_t search_my_process_keyrings(struct key_type *type,
 				     const void *description,
 				     key_match_func_t match,
+				     bool no_state_check,
 				     const struct cred *cred)
 {
 	key_ref_t key_ref, ret, err;
@@ -350,7 +351,7 @@ key_ref_t search_my_process_keyrings(struct key_type *type,
 	if (cred->thread_keyring) {
 		key_ref = keyring_search_aux(
 			make_key_ref(cred->thread_keyring, 1),
-			cred, type, description, match);
+			cred, type, description, match, no_state_check);
 		if (!IS_ERR(key_ref))
 			goto found;
 
@@ -371,7 +372,7 @@ key_ref_t search_my_process_keyrings(struct key_type *type,
 	if (cred->tgcred->process_keyring) {
 		key_ref = keyring_search_aux(
 			make_key_ref(cred->tgcred->process_keyring, 1),
-			cred, type, description, match);
+			cred, type, description, match, no_state_check);
 		if (!IS_ERR(key_ref))
 			goto found;
 
@@ -395,7 +396,7 @@ key_ref_t search_my_process_keyrings(struct key_type *type,
 			make_key_ref(rcu_dereference(
 					     cred->tgcred->session_keyring),
 				     1),
-			cred, type, description, match);
+			cred, type, description, match, no_state_check);
 		rcu_read_unlock();
 
 		if (!IS_ERR(key_ref))
@@ -417,7 +418,7 @@ key_ref_t search_my_process_keyrings(struct key_type *type,
 	else if (cred->user->session_keyring) {
 		key_ref = keyring_search_aux(
 			make_key_ref(cred->user->session_keyring, 1),
-			cred, type, description, match);
+			cred, type, description, match, no_state_check);
 		if (!IS_ERR(key_ref))
 			goto found;
 
@@ -459,7 +460,8 @@ key_ref_t search_process_keyrings(struct key_type *type,
 
 	might_sleep();
 
-	key_ref = search_my_process_keyrings(type, description, match, cred);
+	key_ref = search_my_process_keyrings(type, description, match,
+					     false, cred);
 	if (!IS_ERR(key_ref))
 		goto found;
 	err = key_ref;
diff --git a/security/keys/request_key.c b/security/keys/request_key.c
index df3c0417ee40..b18a71745901 100644
--- a/security/keys/request_key.c
+++ b/security/keys/request_key.c
@@ -530,8 +530,7 @@ struct key *request_key_and_link(struct key_type *type,
 			       dest_keyring, flags);
 
 	/* search all the process keyrings for a key */
-	key_ref = search_process_keyrings(type, description, type->match,
-					  cred);
+	key_ref = search_process_keyrings(type, description, type->match, cred);
 
 	if (!IS_ERR(key_ref)) {
 		key = key_ref_to_ptr(key_ref);
diff --git a/security/keys/request_key_auth.c b/security/keys/request_key_auth.c
index 68164031a74e..f6337c9082eb 100644
--- a/security/keys/request_key_auth.c
+++ b/security/keys/request_key_auth.c
@@ -59,7 +59,8 @@ static void request_key_auth_describe(const struct key *key,
 
 	seq_puts(m, "key:");
 	seq_puts(m, key->description);
-	seq_printf(m, " pid:%d ci:%zu", rka->pid, rka->callout_len);
+	if (key_is_instantiated(key))
+		seq_printf(m, " pid:%d ci:%zu", rka->pid, rka->callout_len);
 }
 
 /*
diff --git a/security/keys/user_defined.c b/security/keys/user_defined.c
index f66baf44f32d..5b366d7af3c4 100644
--- a/security/keys/user_defined.c
+++ b/security/keys/user_defined.c
@@ -157,8 +157,8 @@ EXPORT_SYMBOL_GPL(user_destroy);
 void user_describe(const struct key *key, struct seq_file *m)
 {
 	seq_puts(m, key->description);
-
-	seq_printf(m, ": %u", key->datalen);
+	if (key_is_instantiated(key))
+		seq_printf(m, ": %u", key->datalen);
 }
 
 EXPORT_SYMBOL_GPL(user_describe);
diff --git a/security/lsm_audit.c b/security/lsm_audit.c
index 908aa712816a..893af8a2fa1e 100644
--- a/security/lsm_audit.c
+++ b/security/lsm_audit.c
@@ -210,7 +210,6 @@ static inline void print_ipv4_addr(struct audit_buffer *ab, __be32 addr,
 static void dump_common_audit_data(struct audit_buffer *ab,
 				   struct common_audit_data *a)
 {
-	struct inode *inode = NULL;
 	struct task_struct *tsk = current;
 
 	if (a->tsk)
@@ -229,33 +228,47 @@ static void dump_common_audit_data(struct audit_buffer *ab,
 	case LSM_AUDIT_DATA_CAP:
 		audit_log_format(ab, " capability=%d ", a->u.cap);
 		break;
-	case LSM_AUDIT_DATA_FS:
-		if (a->u.fs.path.dentry) {
-			struct dentry *dentry = a->u.fs.path.dentry;
-			if (a->u.fs.path.mnt) {
-				audit_log_d_path(ab, "path=", &a->u.fs.path);
-			} else {
-				audit_log_format(ab, " name=");
-				audit_log_untrustedstring(ab,
-						 dentry->d_name.name);
-			}
-			inode = dentry->d_inode;
-		} else if (a->u.fs.inode) {
-			struct dentry *dentry;
-			inode = a->u.fs.inode;
-			dentry = d_find_alias(inode);
-			if (dentry) {
-				audit_log_format(ab, " name=");
-				audit_log_untrustedstring(ab,
-						 dentry->d_name.name);
-				dput(dentry);
-			}
-		}
+	case LSM_AUDIT_DATA_PATH: {
+		struct inode *inode;
+
+		audit_log_d_path(ab, "path=", &a->u.path);
+
+		inode = a->u.path.dentry->d_inode;
 		if (inode)
 			audit_log_format(ab, " dev=%s ino=%lu",
 					inode->i_sb->s_id,
 					inode->i_ino);
 		break;
+	}
+	case LSM_AUDIT_DATA_DENTRY: {
+		struct inode *inode;
+
+		audit_log_format(ab, " name=");
+		audit_log_untrustedstring(ab, a->u.dentry->d_name.name);
+
+		inode = a->u.dentry->d_inode;
+		if (inode)
+			audit_log_format(ab, " dev=%s ino=%lu",
+					inode->i_sb->s_id,
+					inode->i_ino);
+		break;
+	}
+	case LSM_AUDIT_DATA_INODE: {
+		struct dentry *dentry;
+		struct inode *inode;
+
+		inode = a->u.inode;
+		dentry = d_find_alias(inode);
+		if (dentry) {
+			audit_log_format(ab, " name=");
+			audit_log_untrustedstring(ab,
+					dentry->d_name.name);
+			dput(dentry);
+		}
+		audit_log_format(ab, " dev=%s ino=%lu", inode->i_sb->s_id,
+				inode->i_ino);
+		break;
+	}
 	case LSM_AUDIT_DATA_TASK:
 		tsk = a->u.tsk;
 		if (tsk && tsk->pid) {
diff --git a/security/selinux/avc.c b/security/selinux/avc.c
index 3d2715fd35ea..fcb89cb0f223 100644
--- a/security/selinux/avc.c
+++ b/security/selinux/avc.c
@@ -526,7 +526,7 @@ int avc_audit(u32 ssid, u32 tsid,
 	 * during retry. However this is logically just as if the operation
 	 * happened a little later.
 	 */
-	if ((a->type == LSM_AUDIT_DATA_FS) &&
+	if ((a->type == LSM_AUDIT_DATA_INODE) &&
 	    (flags & IPERM_FLAG_RCU))
 		return -ECHILD;
 
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 8fb248843009..a0d38459d650 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -990,6 +990,7 @@ static void selinux_write_opts(struct seq_file *m,
990 continue; 990 continue;
991 default: 991 default:
992 BUG(); 992 BUG();
993 return;
993 }; 994 };
994 /* we need a comma before each option */ 995 /* we need a comma before each option */
995 seq_putc(m, ','); 996 seq_putc(m, ',');
@@ -1443,6 +1444,7 @@ static int task_has_capability(struct task_struct *tsk,
1443 printk(KERN_ERR 1444 printk(KERN_ERR
1444 "SELinux: out of range capability %d\n", cap); 1445 "SELinux: out of range capability %d\n", cap);
1445 BUG(); 1446 BUG();
1447 return -EINVAL;
1446 } 1448 }
1447 1449
1448 rc = avc_has_perm_noaudit(sid, sid, sclass, av, 0, &avd); 1450 rc = avc_has_perm_noaudit(sid, sid, sclass, av, 0, &avd);
@@ -1487,8 +1489,8 @@ static int inode_has_perm(const struct cred *cred,
1487 1489
1488 if (!adp) { 1490 if (!adp) {
1489 adp = &ad; 1491 adp = &ad;
1490 COMMON_AUDIT_DATA_INIT(&ad, FS); 1492 COMMON_AUDIT_DATA_INIT(&ad, INODE);
1491 ad.u.fs.inode = inode; 1493 ad.u.inode = inode;
1492 } 1494 }
1493 1495
1494 return avc_has_perm_flags(sid, isec->sid, isec->sclass, perms, adp, flags); 1496 return avc_has_perm_flags(sid, isec->sid, isec->sclass, perms, adp, flags);
@@ -1498,16 +1500,29 @@ static int inode_has_perm(const struct cred *cred,
1498 the dentry to help the auditing code to more easily generate the 1500 the dentry to help the auditing code to more easily generate the
1499 pathname if needed. */ 1501 pathname if needed. */
1500static inline int dentry_has_perm(const struct cred *cred, 1502static inline int dentry_has_perm(const struct cred *cred,
1501 struct vfsmount *mnt,
1502 struct dentry *dentry, 1503 struct dentry *dentry,
1503 u32 av) 1504 u32 av)
1504{ 1505{
1505 struct inode *inode = dentry->d_inode; 1506 struct inode *inode = dentry->d_inode;
1506 struct common_audit_data ad; 1507 struct common_audit_data ad;
1507 1508
1508 COMMON_AUDIT_DATA_INIT(&ad, FS); 1509 COMMON_AUDIT_DATA_INIT(&ad, DENTRY);
1509 ad.u.fs.path.mnt = mnt; 1510 ad.u.dentry = dentry;
1510 ad.u.fs.path.dentry = dentry; 1511 return inode_has_perm(cred, inode, av, &ad, 0);
1512}
1513
1514/* Same as inode_has_perm, but pass explicit audit data containing
1515 the path to help the auditing code to more easily generate the
1516 pathname if needed. */
1517static inline int path_has_perm(const struct cred *cred,
1518 struct path *path,
1519 u32 av)
1520{
1521 struct inode *inode = path->dentry->d_inode;
1522 struct common_audit_data ad;
1523
1524 COMMON_AUDIT_DATA_INIT(&ad, PATH);
1525 ad.u.path = *path;
1511 return inode_has_perm(cred, inode, av, &ad, 0); 1526 return inode_has_perm(cred, inode, av, &ad, 0);
1512} 1527}
1513 1528
@@ -1529,8 +1544,8 @@ static int file_has_perm(const struct cred *cred,
1529 u32 sid = cred_sid(cred); 1544 u32 sid = cred_sid(cred);
1530 int rc; 1545 int rc;
1531 1546
1532 COMMON_AUDIT_DATA_INIT(&ad, FS); 1547 COMMON_AUDIT_DATA_INIT(&ad, PATH);
1533 ad.u.fs.path = file->f_path; 1548 ad.u.path = file->f_path;
1534 1549
1535 if (sid != fsec->sid) { 1550 if (sid != fsec->sid) {
1536 rc = avc_has_perm(sid, fsec->sid, 1551 rc = avc_has_perm(sid, fsec->sid,
@@ -1568,8 +1583,8 @@ static int may_create(struct inode *dir,
1568 sid = tsec->sid; 1583 sid = tsec->sid;
1569 newsid = tsec->create_sid; 1584 newsid = tsec->create_sid;
1570 1585
1571 COMMON_AUDIT_DATA_INIT(&ad, FS); 1586 COMMON_AUDIT_DATA_INIT(&ad, DENTRY);
1572 ad.u.fs.path.dentry = dentry; 1587 ad.u.dentry = dentry;
1573 1588
1574 rc = avc_has_perm(sid, dsec->sid, SECCLASS_DIR, 1589 rc = avc_has_perm(sid, dsec->sid, SECCLASS_DIR,
1575 DIR__ADD_NAME | DIR__SEARCH, 1590 DIR__ADD_NAME | DIR__SEARCH,
@@ -1621,8 +1636,8 @@ static int may_link(struct inode *dir,
1621 dsec = dir->i_security; 1636 dsec = dir->i_security;
1622 isec = dentry->d_inode->i_security; 1637 isec = dentry->d_inode->i_security;
1623 1638
1624 COMMON_AUDIT_DATA_INIT(&ad, FS); 1639 COMMON_AUDIT_DATA_INIT(&ad, DENTRY);
1625 ad.u.fs.path.dentry = dentry; 1640 ad.u.dentry = dentry;
1626 1641
1627 av = DIR__SEARCH; 1642 av = DIR__SEARCH;
1628 av |= (kind ? DIR__REMOVE_NAME : DIR__ADD_NAME); 1643 av |= (kind ? DIR__REMOVE_NAME : DIR__ADD_NAME);
@@ -1667,9 +1682,9 @@ static inline int may_rename(struct inode *old_dir,
1667 old_is_dir = S_ISDIR(old_dentry->d_inode->i_mode); 1682 old_is_dir = S_ISDIR(old_dentry->d_inode->i_mode);
1668 new_dsec = new_dir->i_security; 1683 new_dsec = new_dir->i_security;
1669 1684
1670 COMMON_AUDIT_DATA_INIT(&ad, FS); 1685 COMMON_AUDIT_DATA_INIT(&ad, DENTRY);
1671 1686
1672 ad.u.fs.path.dentry = old_dentry; 1687 ad.u.dentry = old_dentry;
1673 rc = avc_has_perm(sid, old_dsec->sid, SECCLASS_DIR, 1688 rc = avc_has_perm(sid, old_dsec->sid, SECCLASS_DIR,
1674 DIR__REMOVE_NAME | DIR__SEARCH, &ad); 1689 DIR__REMOVE_NAME | DIR__SEARCH, &ad);
1675 if (rc) 1690 if (rc)
@@ -1685,7 +1700,7 @@ static inline int may_rename(struct inode *old_dir,
1685 return rc; 1700 return rc;
1686 } 1701 }
1687 1702
1688 ad.u.fs.path.dentry = new_dentry; 1703 ad.u.dentry = new_dentry;
1689 av = DIR__ADD_NAME | DIR__SEARCH; 1704 av = DIR__ADD_NAME | DIR__SEARCH;
1690 if (new_dentry->d_inode) 1705 if (new_dentry->d_inode)
1691 av |= DIR__REMOVE_NAME; 1706 av |= DIR__REMOVE_NAME;
@@ -1895,7 +1910,7 @@ static int selinux_quota_on(struct dentry *dentry)
1895{ 1910{
1896 const struct cred *cred = current_cred(); 1911 const struct cred *cred = current_cred();
1897 1912
1898 return dentry_has_perm(cred, NULL, dentry, FILE__QUOTAON); 1913 return dentry_has_perm(cred, dentry, FILE__QUOTAON);
1899} 1914}
1900 1915
1901static int selinux_syslog(int type) 1916static int selinux_syslog(int type)
@@ -1992,8 +2007,8 @@ static int selinux_bprm_set_creds(struct linux_binprm *bprm)
1992 return rc; 2007 return rc;
1993 } 2008 }
1994 2009
1995 COMMON_AUDIT_DATA_INIT(&ad, FS); 2010 COMMON_AUDIT_DATA_INIT(&ad, PATH);
1996 ad.u.fs.path = bprm->file->f_path; 2011 ad.u.path = bprm->file->f_path;
1997 2012
1998 if (bprm->file->f_path.mnt->mnt_flags & MNT_NOSUID) 2013 if (bprm->file->f_path.mnt->mnt_flags & MNT_NOSUID)
1999 new_tsec->sid = old_tsec->sid; 2014 new_tsec->sid = old_tsec->sid;
@@ -2121,7 +2136,7 @@ static inline void flush_unauthorized_files(const struct cred *cred,
2121 2136
2122 /* Revalidate access to inherited open files. */ 2137 /* Revalidate access to inherited open files. */
2123 2138
2124 COMMON_AUDIT_DATA_INIT(&ad, FS); 2139 COMMON_AUDIT_DATA_INIT(&ad, INODE);
2125 2140
2126 spin_lock(&files->file_lock); 2141 spin_lock(&files->file_lock);
2127 for (;;) { 2142 for (;;) {
@@ -2469,8 +2484,8 @@ static int selinux_sb_kern_mount(struct super_block *sb, int flags, void *data)
2469 if (flags & MS_KERNMOUNT) 2484 if (flags & MS_KERNMOUNT)
2470 return 0; 2485 return 0;
2471 2486
2472 COMMON_AUDIT_DATA_INIT(&ad, FS); 2487 COMMON_AUDIT_DATA_INIT(&ad, DENTRY);
2473 ad.u.fs.path.dentry = sb->s_root; 2488 ad.u.dentry = sb->s_root;
2474 return superblock_has_perm(cred, sb, FILESYSTEM__MOUNT, &ad); 2489 return superblock_has_perm(cred, sb, FILESYSTEM__MOUNT, &ad);
2475} 2490}
2476 2491
@@ -2479,8 +2494,8 @@ static int selinux_sb_statfs(struct dentry *dentry)
 	const struct cred *cred = current_cred();
 	struct common_audit_data ad;
 
-	COMMON_AUDIT_DATA_INIT(&ad, FS);
-	ad.u.fs.path.dentry = dentry->d_sb->s_root;
+	COMMON_AUDIT_DATA_INIT(&ad, DENTRY);
+	ad.u.dentry = dentry->d_sb->s_root;
 	return superblock_has_perm(cred, dentry->d_sb, FILESYSTEM__GETATTR, &ad);
 }
 
@@ -2496,8 +2511,7 @@ static int selinux_mount(char *dev_name,
 		return superblock_has_perm(cred, path->mnt->mnt_sb,
 					   FILESYSTEM__REMOUNT, NULL);
 	else
-		return dentry_has_perm(cred, path->mnt, path->dentry,
-				       FILE__MOUNTON);
+		return path_has_perm(cred, path, FILE__MOUNTON);
 }
 
 static int selinux_umount(struct vfsmount *mnt, int flags)
@@ -2630,14 +2644,14 @@ static int selinux_inode_readlink(struct dentry *dentry)
 {
 	const struct cred *cred = current_cred();
 
-	return dentry_has_perm(cred, NULL, dentry, FILE__READ);
+	return dentry_has_perm(cred, dentry, FILE__READ);
 }
 
 static int selinux_inode_follow_link(struct dentry *dentry, struct nameidata *nameidata)
 {
 	const struct cred *cred = current_cred();
 
-	return dentry_has_perm(cred, NULL, dentry, FILE__READ);
+	return dentry_has_perm(cred, dentry, FILE__READ);
 }
 
 static int selinux_inode_permission(struct inode *inode, int mask, unsigned flags)
@@ -2654,8 +2668,8 @@ static int selinux_inode_permission(struct inode *inode, int mask, unsigned flag
 	if (!mask)
 		return 0;
 
-	COMMON_AUDIT_DATA_INIT(&ad, FS);
-	ad.u.fs.inode = inode;
+	COMMON_AUDIT_DATA_INIT(&ad, INODE);
+	ad.u.inode = inode;
 
 	if (from_access)
 		ad.selinux_audit_data.auditdeny |= FILE__AUDIT_ACCESS;
@@ -2680,16 +2694,20 @@ static int selinux_inode_setattr(struct dentry *dentry, struct iattr *iattr)
 
 	if (ia_valid & (ATTR_MODE | ATTR_UID | ATTR_GID |
 			ATTR_ATIME_SET | ATTR_MTIME_SET | ATTR_TIMES_SET))
-		return dentry_has_perm(cred, NULL, dentry, FILE__SETATTR);
+		return dentry_has_perm(cred, dentry, FILE__SETATTR);
 
-	return dentry_has_perm(cred, NULL, dentry, FILE__WRITE);
+	return dentry_has_perm(cred, dentry, FILE__WRITE);
 }
 
 static int selinux_inode_getattr(struct vfsmount *mnt, struct dentry *dentry)
 {
 	const struct cred *cred = current_cred();
+	struct path path;
+
+	path.dentry = dentry;
+	path.mnt = mnt;
 
-	return dentry_has_perm(cred, mnt, dentry, FILE__GETATTR);
+	return path_has_perm(cred, &path, FILE__GETATTR);
 }
 
 static int selinux_inode_setotherxattr(struct dentry *dentry, const char *name)
@@ -2710,7 +2728,7 @@ static int selinux_inode_setotherxattr(struct dentry *dentry, const char *name)
 
 	/* Not an attribute we recognize, so just check the
 	   ordinary setattr permission. */
-	return dentry_has_perm(cred, NULL, dentry, FILE__SETATTR);
+	return dentry_has_perm(cred, dentry, FILE__SETATTR);
 }
 
 static int selinux_inode_setxattr(struct dentry *dentry, const char *name,
@@ -2733,8 +2751,8 @@ static int selinux_inode_setxattr(struct dentry *dentry, const char *name,
 	if (!inode_owner_or_capable(inode))
 		return -EPERM;
 
-	COMMON_AUDIT_DATA_INIT(&ad, FS);
-	ad.u.fs.path.dentry = dentry;
+	COMMON_AUDIT_DATA_INIT(&ad, DENTRY);
+	ad.u.dentry = dentry;
 
 	rc = avc_has_perm(sid, isec->sid, isec->sclass,
 			  FILE__RELABELFROM, &ad);
@@ -2797,14 +2815,14 @@ static int selinux_inode_getxattr(struct dentry *dentry, const char *name)
 {
 	const struct cred *cred = current_cred();
 
-	return dentry_has_perm(cred, NULL, dentry, FILE__GETATTR);
+	return dentry_has_perm(cred, dentry, FILE__GETATTR);
 }
 
 static int selinux_inode_listxattr(struct dentry *dentry)
 {
 	const struct cred *cred = current_cred();
 
-	return dentry_has_perm(cred, NULL, dentry, FILE__GETATTR);
+	return dentry_has_perm(cred, dentry, FILE__GETATTR);
 }
 
 static int selinux_inode_removexattr(struct dentry *dentry, const char *name)
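The two kinds of call-site change above reflect a helper split earlier in hooks.c, outside this excerpt: dentry_has_perm() loses its vfsmount argument and audits by dentry alone, while callers that hold both a dentry and a vfsmount build a struct path and use the new path_has_perm(). A minimal sketch of what the reworked helpers presumably look like — the inode_has_perm() call and its flags argument are assumptions, not patch text:

static inline int dentry_has_perm(const struct cred *cred,
				  struct dentry *dentry,
				  u32 av)
{
	struct common_audit_data ad;

	/* only a dentry is known; audit by dentry */
	COMMON_AUDIT_DATA_INIT(&ad, DENTRY);
	ad.u.dentry = dentry;
	return inode_has_perm(cred, dentry->d_inode, av, &ad, 0);
}

static inline int path_has_perm(const struct cred *cred,
				struct path *path,
				u32 av)
{
	struct common_audit_data ad;

	/* dentry + vfsmount known; audit by full path */
	COMMON_AUDIT_DATA_INIT(&ad, PATH);
	ad.u.path = *path;
	return inode_has_perm(cred, path->dentry->d_inode, av, &ad, 0);
}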
diff --git a/security/selinux/include/security.h b/security/selinux/include/security.h
index 348eb00cb668..3ba4feba048a 100644
--- a/security/selinux/include/security.h
+++ b/security/selinux/include/security.h
@@ -30,13 +30,14 @@
 #define POLICYDB_VERSION_PERMISSIVE	23
 #define POLICYDB_VERSION_BOUNDARY	24
 #define POLICYDB_VERSION_FILENAME_TRANS	25
+#define POLICYDB_VERSION_ROLETRANS	26
 
 /* Range of policy versions we understand*/
 #define POLICYDB_VERSION_MIN	POLICYDB_VERSION_BASE
 #ifdef CONFIG_SECURITY_SELINUX_POLICYDB_VERSION_MAX
 #define POLICYDB_VERSION_MAX	CONFIG_SECURITY_SELINUX_POLICYDB_VERSION_MAX_VALUE
 #else
-#define POLICYDB_VERSION_MAX	POLICYDB_VERSION_FILENAME_TRANS
+#define POLICYDB_VERSION_MAX	POLICYDB_VERSION_ROLETRANS
 #endif
 
 /* Mask for just the mount related flags */
@@ -85,7 +86,7 @@ extern int selinux_policycap_openperm;
 int security_mls_enabled(void);
 
 int security_load_policy(void *data, size_t len);
-int security_read_policy(void **data, ssize_t *len);
+int security_read_policy(void **data, size_t *len);
 size_t security_policydb_len(void);
 
 int security_policycap_supported(unsigned int req_cap);
@@ -111,8 +112,8 @@ void security_compute_av_user(u32 ssid, u32 tsid,
 int security_transition_sid(u32 ssid, u32 tsid, u16 tclass,
 			    const struct qstr *qstr, u32 *out_sid);
 
-int security_transition_sid_user(u32 ssid, u32 tsid,
-				 u16 tclass, u32 *out_sid);
+int security_transition_sid_user(u32 ssid, u32 tsid, u16 tclass,
+				 const char *objname, u32 *out_sid);
 
 int security_member_sid(u32 ssid, u32 tsid,
 			u16 tclass, u32 *out_sid);
diff --git a/security/selinux/netnode.c b/security/selinux/netnode.c
index 65ebfe954f85..3618251d0fdb 100644
--- a/security/selinux/netnode.c
+++ b/security/selinux/netnode.c
@@ -141,6 +141,7 @@ static struct sel_netnode *sel_netnode_find(const void *addr, u16 family)
 		break;
 	default:
 		BUG();
+		return NULL;
 	}
 
 	list_for_each_entry_rcu(node, &sel_netnode_hash[idx].list, list)
diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c
index 2d3373b2e256..77d44138864f 100644
--- a/security/selinux/selinuxfs.c
+++ b/security/selinux/selinuxfs.c
@@ -28,6 +28,7 @@
 #include <linux/percpu.h>
 #include <linux/audit.h>
 #include <linux/uaccess.h>
+#include <linux/kobject.h>
 
 /* selinuxfs pseudo filesystem for exporting the security policy API.
    Based on the proc code and the fs/nfsd/nfsctl.c code. */
@@ -753,11 +754,13 @@ out:
 static ssize_t sel_write_create(struct file *file, char *buf, size_t size)
 {
 	char *scon = NULL, *tcon = NULL;
+	char *namebuf = NULL, *objname = NULL;
 	u32 ssid, tsid, newsid;
 	u16 tclass;
 	ssize_t length;
 	char *newcon = NULL;
 	u32 len;
+	int nargs;
 
 	length = task_has_security(current, SECURITY__COMPUTE_CREATE);
 	if (length)
@@ -773,9 +776,17 @@ static ssize_t sel_write_create(struct file *file, char *buf, size_t size)
 	if (!tcon)
 		goto out;
 
+	length = -ENOMEM;
+	namebuf = kzalloc(size + 1, GFP_KERNEL);
+	if (!namebuf)
+		goto out;
+
 	length = -EINVAL;
-	if (sscanf(buf, "%s %s %hu", scon, tcon, &tclass) != 3)
+	nargs = sscanf(buf, "%s %s %hu %s", scon, tcon, &tclass, namebuf);
+	if (nargs < 3 || nargs > 4)
 		goto out;
+	if (nargs == 4)
+		objname = namebuf;
 
 	length = security_context_to_sid(scon, strlen(scon) + 1, &ssid);
 	if (length)
@@ -785,7 +796,8 @@ static ssize_t sel_write_create(struct file *file, char *buf, size_t size)
 	if (length)
 		goto out;
 
-	length = security_transition_sid_user(ssid, tsid, tclass, &newsid);
+	length = security_transition_sid_user(ssid, tsid, tclass,
+					      objname, &newsid);
 	if (length)
 		goto out;
 
@@ -804,6 +816,7 @@ static ssize_t sel_write_create(struct file *file, char *buf, size_t size)
 	length = len;
 out:
 	kfree(newcon);
+	kfree(namebuf);
 	kfree(tcon);
 	kfree(scon);
 	return length;
@@ -1901,6 +1914,7 @@ static struct file_system_type sel_fs_type = {
 };
 
 struct vfsmount *selinuxfs_mount;
+static struct kobject *selinuxfs_kobj;
 
 static int __init init_sel_fs(void)
 {
@@ -1908,9 +1922,16 @@ static int __init init_sel_fs(void)
 
 	if (!selinux_enabled)
 		return 0;
+
+	selinuxfs_kobj = kobject_create_and_add("selinux", fs_kobj);
+	if (!selinuxfs_kobj)
+		return -ENOMEM;
+
 	err = register_filesystem(&sel_fs_type);
-	if (err)
+	if (err) {
+		kobject_put(selinuxfs_kobj);
 		return err;
+	}
 
 	selinuxfs_mount = kern_mount(&sel_fs_type);
 	if (IS_ERR(selinuxfs_mount)) {
@@ -1927,6 +1948,7 @@ __initcall(init_sel_fs);
 #ifdef CONFIG_SECURITY_SELINUX_DISABLE
 void exit_sel_fs(void)
 {
+	kobject_put(selinuxfs_kobj);
 	unregister_filesystem(&sel_fs_type);
 }
 #endif
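Taken together, the sel_write_create() hunks extend the /selinux/create transaction from three fields to an optional four: "scon tcon tclass objname", where the fourth field is the last path component of the object being created. A hedged userspace sketch of the extended protocol — the mount point, the numeric class value, and the contexts are illustrative assumptions; real programs would go through libselinux rather than poking the node directly:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char req[512], newcon[4096];
	int fd, n;

	/* selinuxfs is assumed mounted at /selinux here */
	fd = open("/selinux/create", O_RDWR);
	if (fd < 0)
		return 1;
	n = snprintf(req, sizeof(req), "%s %s %hu %s",
		     "system_u:system_r:init_t:s0",	/* scon (example) */
		     "system_u:object_r:etc_t:s0",	/* tcon (example) */
		     (unsigned short)6,			/* tclass (example value) */
		     "resolv.conf");			/* objname, the new 4th field */
	if (write(fd, req, n) != n)
		return 1;
	/* read back the computed context on the same fd */
	n = read(fd, newcon, sizeof(newcon) - 1);
	if (n > 0) {
		newcon[n] = '\0';
		printf("computed context: %s\n", newcon);
	}
	close(fd);
	return 0;
}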
diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c
index 7102457661d6..102e9ec1b77a 100644
--- a/security/selinux/ss/policydb.c
+++ b/security/selinux/ss/policydb.c
@@ -128,6 +128,11 @@ static struct policydb_compat_info policydb_compat[] = {
 		.sym_num	= SYM_NUM,
 		.ocon_num	= OCON_NUM,
 	},
+	{
+		.version	= POLICYDB_VERSION_ROLETRANS,
+		.sym_num	= SYM_NUM,
+		.ocon_num	= OCON_NUM,
+	},
 };
 
 static struct policydb_compat_info *policydb_lookup_compat(int version)
@@ -179,6 +184,43 @@ out:
 	return rc;
 }
 
+static u32 filenametr_hash(struct hashtab *h, const void *k)
+{
+	const struct filename_trans *ft = k;
+	unsigned long hash;
+	unsigned int byte_num;
+	unsigned char focus;
+
+	hash = ft->stype ^ ft->ttype ^ ft->tclass;
+
+	byte_num = 0;
+	while ((focus = ft->name[byte_num++]))
+		hash = partial_name_hash(focus, hash);
+	return hash & (h->size - 1);
+}
+
+static int filenametr_cmp(struct hashtab *h, const void *k1, const void *k2)
+{
+	const struct filename_trans *ft1 = k1;
+	const struct filename_trans *ft2 = k2;
+	int v;
+
+	v = ft1->stype - ft2->stype;
+	if (v)
+		return v;
+
+	v = ft1->ttype - ft2->ttype;
+	if (v)
+		return v;
+
+	v = ft1->tclass - ft2->tclass;
+	if (v)
+		return v;
+
+	return strcmp(ft1->name, ft2->name);
+
+}
+
 static u32 rangetr_hash(struct hashtab *h, const void *k)
 {
 	const struct range_trans *key = k;
@@ -231,15 +273,22 @@ static int policydb_init(struct policydb *p)
 	if (rc)
 		goto out;
 
+	p->filename_trans = hashtab_create(filenametr_hash, filenametr_cmp, (1 << 10));
+	if (!p->filename_trans)
+		goto out;
+
 	p->range_tr = hashtab_create(rangetr_hash, rangetr_cmp, 256);
 	if (!p->range_tr)
 		goto out;
 
+	ebitmap_init(&p->filename_trans_ttypes);
 	ebitmap_init(&p->policycaps);
 	ebitmap_init(&p->permissive_map);
 
 	return 0;
 out:
+	hashtab_destroy(p->filename_trans);
+	hashtab_destroy(p->range_tr);
 	for (i = 0; i < SYM_NUM; i++)
 		hashtab_destroy(p->symtab[i].table);
 	return rc;
@@ -417,32 +466,26 @@ static int (*index_f[SYM_NUM]) (void *key, void *datum, void *datap) =
 };
 
 #ifdef DEBUG_HASHES
-static void symtab_hash_eval(struct symtab *s)
+static void hash_eval(struct hashtab *h, const char *hash_name)
 {
-	int i;
-
-	for (i = 0; i < SYM_NUM; i++) {
-		struct hashtab *h = s[i].table;
-		struct hashtab_info info;
+	struct hashtab_info info;
 
 	hashtab_stat(h, &info);
 	printk(KERN_DEBUG "SELinux: %s: %d entries and %d/%d buckets used, "
-	       "longest chain length %d\n", symtab_name[i], h->nel,
+	       "longest chain length %d\n", hash_name, h->nel,
 	       info.slots_used, h->size, info.max_chain_len);
-	}
 }
 
-static void rangetr_hash_eval(struct hashtab *h)
+static void symtab_hash_eval(struct symtab *s)
 {
-	struct hashtab_info info;
+	int i;
 
-	hashtab_stat(h, &info);
-	printk(KERN_DEBUG "SELinux: rangetr: %d entries and %d/%d buckets used, "
-	       "longest chain length %d\n", h->nel,
-	       info.slots_used, h->size, info.max_chain_len);
+	for (i = 0; i < SYM_NUM; i++)
+		hash_eval(s[i].table, symtab_name[i]);
 }
+
 #else
-static inline void rangetr_hash_eval(struct hashtab *h)
+static inline void hash_eval(struct hashtab *h, char *hash_name)
 {
 }
 #endif
@@ -675,6 +718,16 @@ static int (*destroy_f[SYM_NUM]) (void *key, void *datum, void *datap) =
 	cat_destroy,
 };
 
+static int filenametr_destroy(void *key, void *datum, void *p)
+{
+	struct filename_trans *ft = key;
+	kfree(ft->name);
+	kfree(key);
+	kfree(datum);
+	cond_resched();
+	return 0;
+}
+
 static int range_tr_destroy(void *key, void *datum, void *p)
 {
 	struct mls_range *rt = datum;
@@ -709,7 +762,6 @@ void policydb_destroy(struct policydb *p)
 	int i;
 	struct role_allow *ra, *lra = NULL;
 	struct role_trans *tr, *ltr = NULL;
-	struct filename_trans *ft, *nft;
 
 	for (i = 0; i < SYM_NUM; i++) {
 		cond_resched();
@@ -773,6 +825,9 @@ void policydb_destroy(struct policydb *p)
 	}
 	kfree(lra);
 
+	hashtab_map(p->filename_trans, filenametr_destroy, NULL);
+	hashtab_destroy(p->filename_trans);
+
 	hashtab_map(p->range_tr, range_tr_destroy, NULL);
 	hashtab_destroy(p->range_tr);
 
@@ -788,14 +843,7 @@ void policydb_destroy(struct policydb *p)
 		flex_array_free(p->type_attr_map_array);
 	}
 
-	ft = p->filename_trans;
-	while (ft) {
-		nft = ft->next;
-		kfree(ft->name);
-		kfree(ft);
-		ft = nft;
-	}
-
+	ebitmap_destroy(&p->filename_trans_ttypes);
 	ebitmap_destroy(&p->policycaps);
 	ebitmap_destroy(&p->permissive_map);
 
@@ -1795,7 +1843,7 @@ static int range_read(struct policydb *p, void *fp)
 		rt = NULL;
 		r = NULL;
 	}
-	rangetr_hash_eval(p->range_tr);
+	hash_eval(p->range_tr, "rangetr");
 	rc = 0;
 out:
 	kfree(rt);
@@ -1805,9 +1853,10 @@ out:
 
 static int filename_trans_read(struct policydb *p, void *fp)
 {
-	struct filename_trans *ft, *last;
-	u32 nel, len;
+	struct filename_trans *ft;
+	struct filename_trans_datum *otype;
 	char *name;
+	u32 nel, len;
 	__le32 buf[4];
 	int rc, i;
 
@@ -1816,25 +1865,23 @@ static int filename_trans_read(struct policydb *p, void *fp)
 
 	rc = next_entry(buf, fp, sizeof(u32));
 	if (rc)
-		goto out;
+		return rc;
 	nel = le32_to_cpu(buf[0]);
 
-	last = p->filename_trans;
-	while (last && last->next)
-		last = last->next;
-
 	for (i = 0; i < nel; i++) {
+		ft = NULL;
+		otype = NULL;
+		name = NULL;
+
 		rc = -ENOMEM;
 		ft = kzalloc(sizeof(*ft), GFP_KERNEL);
 		if (!ft)
 			goto out;
 
-		/* add it to the tail of the list */
-		if (!last)
-			p->filename_trans = ft;
-		else
-			last->next = ft;
-		last = ft;
+		rc = -ENOMEM;
+		otype = kmalloc(sizeof(*otype), GFP_KERNEL);
+		if (!otype)
+			goto out;
 
 		/* length of the path component string */
 		rc = next_entry(buf, fp, sizeof(u32));
@@ -1862,10 +1909,22 @@ static int filename_trans_read(struct policydb *p, void *fp)
 		ft->stype = le32_to_cpu(buf[0]);
 		ft->ttype = le32_to_cpu(buf[1]);
 		ft->tclass = le32_to_cpu(buf[2]);
-		ft->otype = le32_to_cpu(buf[3]);
+
+		otype->otype = le32_to_cpu(buf[3]);
+
+		rc = ebitmap_set_bit(&p->filename_trans_ttypes, ft->ttype, 1);
+		if (rc)
+			goto out;
+
+		hashtab_insert(p->filename_trans, ft, otype);
 	}
-	rc = 0;
+	hash_eval(p->filename_trans, "filenametr");
+	return 0;
 out:
+	kfree(ft);
+	kfree(name);
+	kfree(otype);
+
 	return rc;
 }
 
@@ -2266,6 +2325,11 @@ int policydb_read(struct policydb *p, void *fp)
 		p->symtab[i].nprim = nprim;
 	}
 
+	rc = -EINVAL;
+	p->process_class = string_to_security_class(p, "process");
+	if (!p->process_class)
+		goto bad;
+
 	rc = avtab_read(&p->te_avtab, fp, p);
 	if (rc)
 		goto bad;
@@ -2298,8 +2362,17 @@ int policydb_read(struct policydb *p, void *fp)
 		tr->role = le32_to_cpu(buf[0]);
 		tr->type = le32_to_cpu(buf[1]);
 		tr->new_role = le32_to_cpu(buf[2]);
+		if (p->policyvers >= POLICYDB_VERSION_ROLETRANS) {
+			rc = next_entry(buf, fp, sizeof(u32));
+			if (rc)
+				goto bad;
+			tr->tclass = le32_to_cpu(buf[0]);
+		} else
+			tr->tclass = p->process_class;
+
 		if (!policydb_role_isvalid(p, tr->role) ||
 		    !policydb_type_isvalid(p, tr->type) ||
+		    !policydb_class_isvalid(p, tr->tclass) ||
 		    !policydb_role_isvalid(p, tr->new_role))
 			goto bad;
 		ltr = tr;
@@ -2341,11 +2414,6 @@ int policydb_read(struct policydb *p, void *fp)
 		goto bad;
 
 	rc = -EINVAL;
-	p->process_class = string_to_security_class(p, "process");
-	if (!p->process_class)
-		goto bad;
-
-	rc = -EINVAL;
 	p->process_trans_perms = string_to_av_perm(p, p->process_class, "transition");
 	p->process_trans_perms |= string_to_av_perm(p, p->process_class, "dyntransition");
 	if (!p->process_trans_perms)
@@ -2517,8 +2585,9 @@ static int cat_write(void *vkey, void *datum, void *ptr)
 	return 0;
 }
 
-static int role_trans_write(struct role_trans *r, void *fp)
+static int role_trans_write(struct policydb *p, void *fp)
 {
+	struct role_trans *r = p->role_tr;
 	struct role_trans *tr;
 	u32 buf[3];
 	size_t nel;
@@ -2538,6 +2607,12 @@ static int role_trans_write(struct role_trans *r, void *fp)
 		rc = put_entry(buf, sizeof(u32), 3, fp);
 		if (rc)
 			return rc;
+		if (p->policyvers >= POLICYDB_VERSION_ROLETRANS) {
+			buf[0] = cpu_to_le32(tr->tclass);
+			rc = put_entry(buf, sizeof(u32), 1, fp);
+			if (rc)
+				return rc;
+		}
 	}
 
 	return 0;
@@ -3045,7 +3120,7 @@ static int genfs_write(struct policydb *p, void *fp)
 	return 0;
 }
 
-static int range_count(void *key, void *data, void *ptr)
+static int hashtab_cnt(void *key, void *data, void *ptr)
 {
 	int *cnt = ptr;
 	*cnt = *cnt + 1;
@@ -3093,7 +3168,7 @@ static int range_write(struct policydb *p, void *fp)
 
 	/* count the number of entries in the hashtab */
 	nel = 0;
-	rc = hashtab_map(p->range_tr, range_count, &nel);
+	rc = hashtab_map(p->range_tr, hashtab_cnt, &nel);
 	if (rc)
 		return rc;
 
@@ -3110,43 +3185,60 @@ static int range_write(struct policydb *p, void *fp)
 	return 0;
 }
 
-static int filename_trans_write(struct policydb *p, void *fp)
+static int filename_write_helper(void *key, void *data, void *ptr)
 {
-	struct filename_trans *ft;
-	u32 len, nel = 0;
 	__le32 buf[4];
+	struct filename_trans *ft = key;
+	struct filename_trans_datum *otype = data;
+	void *fp = ptr;
 	int rc;
+	u32 len;
 
-	for (ft = p->filename_trans; ft; ft = ft->next)
-		nel++;
-
-	buf[0] = cpu_to_le32(nel);
+	len = strlen(ft->name);
+	buf[0] = cpu_to_le32(len);
 	rc = put_entry(buf, sizeof(u32), 1, fp);
 	if (rc)
 		return rc;
 
-	for (ft = p->filename_trans; ft; ft = ft->next) {
-		len = strlen(ft->name);
-		buf[0] = cpu_to_le32(len);
-		rc = put_entry(buf, sizeof(u32), 1, fp);
-		if (rc)
-			return rc;
+	rc = put_entry(ft->name, sizeof(char), len, fp);
+	if (rc)
+		return rc;
 
-		rc = put_entry(ft->name, sizeof(char), len, fp);
-		if (rc)
-			return rc;
+	buf[0] = ft->stype;
+	buf[1] = ft->ttype;
+	buf[2] = ft->tclass;
+	buf[3] = otype->otype;
 
-		buf[0] = ft->stype;
-		buf[1] = ft->ttype;
-		buf[2] = ft->tclass;
-		buf[3] = ft->otype;
+	rc = put_entry(buf, sizeof(u32), 4, fp);
+	if (rc)
+		return rc;
 
-		rc = put_entry(buf, sizeof(u32), 4, fp);
-		if (rc)
-			return rc;
-	}
 	return 0;
 }
+
+static int filename_trans_write(struct policydb *p, void *fp)
+{
+	u32 nel;
+	__le32 buf[1];
+	int rc;
+
+	nel = 0;
+	rc = hashtab_map(p->filename_trans, hashtab_cnt, &nel);
+	if (rc)
+		return rc;
+
+	buf[0] = cpu_to_le32(nel);
+	rc = put_entry(buf, sizeof(u32), 1, fp);
+	if (rc)
+		return rc;
+
+	rc = hashtab_map(p->filename_trans, filename_write_helper, fp);
+	if (rc)
+		return rc;
+
+	return 0;
+}
+
 /*
  * Write the configuration data in a policy database
  * structure to a policy database binary representation
@@ -3249,7 +3341,7 @@ int policydb_write(struct policydb *p, void *fp)
 	if (rc)
 		return rc;
 
-	rc = role_trans_write(p->role_tr, fp);
+	rc = role_trans_write(p, fp);
 	if (rc)
 		return rc;
 
diff --git a/security/selinux/ss/policydb.h b/security/selinux/ss/policydb.h
index 732ea4a68682..b846c0387180 100644
--- a/security/selinux/ss/policydb.h
+++ b/security/selinux/ss/policydb.h
@@ -72,17 +72,20 @@ struct role_datum {
72 72
73struct role_trans { 73struct role_trans {
74 u32 role; /* current role */ 74 u32 role; /* current role */
75 u32 type; /* program executable type */ 75 u32 type; /* program executable type, or new object type */
76 u32 tclass; /* process class, or new object class */
76 u32 new_role; /* new role */ 77 u32 new_role; /* new role */
77 struct role_trans *next; 78 struct role_trans *next;
78}; 79};
79 80
80struct filename_trans { 81struct filename_trans {
81 struct filename_trans *next;
82 u32 stype; /* current process */ 82 u32 stype; /* current process */
83 u32 ttype; /* parent dir context */ 83 u32 ttype; /* parent dir context */
84 u16 tclass; /* class of new object */ 84 u16 tclass; /* class of new object */
85 const char *name; /* last path component */ 85 const char *name; /* last path component */
86};
87
88struct filename_trans_datum {
86 u32 otype; /* expected of new object */ 89 u32 otype; /* expected of new object */
87}; 90};
88 91
@@ -227,7 +230,10 @@ struct policydb {
227 struct role_trans *role_tr; 230 struct role_trans *role_tr;
228 231
229 /* file transitions with the last path component */ 232 /* file transitions with the last path component */
230 struct filename_trans *filename_trans; 233 /* quickly exclude lookups when parent ttype has no rules */
234 struct ebitmap filename_trans_ttypes;
235 /* actual set of filename_trans rules */
236 struct hashtab *filename_trans;
231 237
232 /* bools indexed by (value - 1) */ 238 /* bools indexed by (value - 1) */
233 struct cond_bool_datum **bool_val_to_struct; 239 struct cond_bool_datum **bool_val_to_struct;
diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
index 6ef4af47dac4..c3e4b52699f4 100644
--- a/security/selinux/ss/services.c
+++ b/security/selinux/ss/services.c
@@ -1359,26 +1359,35 @@ out:
 }
 
 static void filename_compute_type(struct policydb *p, struct context *newcontext,
-				  u32 scon, u32 tcon, u16 tclass,
-				  const struct qstr *qstr)
+				  u32 stype, u32 ttype, u16 tclass,
+				  const char *objname)
 {
-	struct filename_trans *ft;
-	for (ft = p->filename_trans; ft; ft = ft->next) {
-		if (ft->stype == scon &&
-		    ft->ttype == tcon &&
-		    ft->tclass == tclass &&
-		    !strcmp(ft->name, qstr->name)) {
-			newcontext->type = ft->otype;
-			return;
-		}
-	}
+	struct filename_trans ft;
+	struct filename_trans_datum *otype;
+
+	/*
+	 * Most filename trans rules are going to live in specific directories
+	 * like /dev or /var/run. This bitmap will quickly skip rule searches
+	 * if the ttype does not contain any rules.
+	 */
+	if (!ebitmap_get_bit(&p->filename_trans_ttypes, ttype))
+		return;
+
+	ft.stype = stype;
+	ft.ttype = ttype;
+	ft.tclass = tclass;
+	ft.name = objname;
+
+	otype = hashtab_search(p->filename_trans, &ft);
+	if (otype)
+		newcontext->type = otype->otype;
 }
 
 static int security_compute_sid(u32 ssid,
 				u32 tsid,
 				u16 orig_tclass,
 				u32 specified,
-				const struct qstr *qstr,
+				const char *objname,
 				u32 *out_sid,
 				bool kern)
 {
@@ -1478,23 +1487,21 @@ static int security_compute_sid(u32 ssid,
 		newcontext.type = avdatum->data;
 	}
 
-	/* if we have a qstr this is a file trans check so check those rules */
-	if (qstr)
+	/* if we have a objname this is a file trans check so check those rules */
+	if (objname)
 		filename_compute_type(&policydb, &newcontext, scontext->type,
-				      tcontext->type, tclass, qstr);
+				      tcontext->type, tclass, objname);
 
 	/* Check for class-specific changes. */
-	if (tclass == policydb.process_class) {
-		if (specified & AVTAB_TRANSITION) {
-			/* Look for a role transition rule. */
-			for (roletr = policydb.role_tr; roletr;
-			     roletr = roletr->next) {
-				if (roletr->role == scontext->role &&
-				    roletr->type == tcontext->type) {
-					/* Use the role transition rule. */
-					newcontext.role = roletr->new_role;
-					break;
-				}
+	if (specified & AVTAB_TRANSITION) {
+		/* Look for a role transition rule. */
+		for (roletr = policydb.role_tr; roletr; roletr = roletr->next) {
+			if ((roletr->role == scontext->role) &&
+			    (roletr->type == tcontext->type) &&
+			    (roletr->tclass == tclass)) {
+				/* Use the role transition rule. */
+				newcontext.role = roletr->new_role;
+				break;
 			}
 		}
 	}
@@ -1541,13 +1548,14 @@ int security_transition_sid(u32 ssid, u32 tsid, u16 tclass,
 			    const struct qstr *qstr, u32 *out_sid)
 {
 	return security_compute_sid(ssid, tsid, tclass, AVTAB_TRANSITION,
-				    qstr, out_sid, true);
+				    qstr ? qstr->name : NULL, out_sid, true);
 }
 
-int security_transition_sid_user(u32 ssid, u32 tsid, u16 tclass, u32 *out_sid)
+int security_transition_sid_user(u32 ssid, u32 tsid, u16 tclass,
+				 const char *objname, u32 *out_sid)
 {
 	return security_compute_sid(ssid, tsid, tclass, AVTAB_TRANSITION,
-				    NULL, out_sid, false);
+				    objname, out_sid, false);
 }
 
 /**
@@ -3190,7 +3198,7 @@ out:
  * @len: length of data in bytes
  *
  */
-int security_read_policy(void **data, ssize_t *len)
+int security_read_policy(void **data, size_t *len)
 {
 	int rc;
 	struct policy_file fp;
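With the object name plumbed through security_compute_sid(), an in-kernel caller that knows the last path component hands it over via a qstr and lets filename_compute_type() consult the name-based rules. An illustrative caller sketch — the helper name and the literal values are hypothetical; SECCLASS_FILE comes from flask.h and ssid/tsid are assumed to be valid SIDs:

static int example_new_file_sid(u32 ssid, u32 tsid, u32 *out_sid)
{
	/* compute the SID a new "resolv.conf" would receive */
	struct qstr q = {
		.name = (const unsigned char *)"resolv.conf",
		.len = 11,
	};

	return security_transition_sid(ssid, tsid, SECCLASS_FILE, &q, out_sid);
}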
diff --git a/security/smack/smack.h b/security/smack/smack.h
index b449cfdad21c..2b6c6a516123 100644
--- a/security/smack/smack.h
+++ b/security/smack/smack.h
 static inline void smk_ad_setfield_u_fs_path_dentry(struct smk_audit_info *a,
 						    struct dentry *d)
 {
-	a->a.u.fs.path.dentry = d;
-}
-static inline void smk_ad_setfield_u_fs_path_mnt(struct smk_audit_info *a,
-						 struct vfsmount *m)
-{
-	a->a.u.fs.path.mnt = m;
+	a->a.u.dentry = d;
 }
 static inline void smk_ad_setfield_u_fs_inode(struct smk_audit_info *a,
 					      struct inode *i)
 {
-	a->a.u.fs.inode = i;
+	a->a.u.inode = i;
 }
 static inline void smk_ad_setfield_u_fs_path(struct smk_audit_info *a,
 					     struct path p)
 {
-	a->a.u.fs.path = p;
+	a->a.u.path = p;
 }
 static inline void smk_ad_setfield_u_net_sk(struct smk_audit_info *a,
 					    struct sock *sk)
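These Smack helpers track a matching reorganization of struct common_audit_data in include/linux/lsm_audit.h, which this diff does not show: the composite u.fs member (a path plus an inode) is replaced by dedicated union members selected by new LSM_AUDIT_DATA_* type constants. Roughly, and abbreviated — the member order and the omitted fields here are assumptions about the header's final shape, not patch text:

struct common_audit_data {
	char type;	/* one of the LSM_AUDIT_DATA_* constants */
	union {
		struct path path;	/* LSM_AUDIT_DATA_PATH: dentry + mnt */
		struct dentry *dentry;	/* LSM_AUDIT_DATA_DENTRY */
		struct inode *inode;	/* LSM_AUDIT_DATA_INODE */
		/* ... net, ipc, task, key, ... */
	} u;
	/* ... */
};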
diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
index 400a5d5cde61..9831a39c11f6 100644
--- a/security/smack/smack_lsm.c
+++ b/security/smack/smack_lsm.c
@@ -383,7 +383,7 @@ static int smack_sb_statfs(struct dentry *dentry)
 	int rc;
 	struct smk_audit_info ad;
 
-	smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_FS);
+	smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_DENTRY);
 	smk_ad_setfield_u_fs_path_dentry(&ad, dentry);
 
 	rc = smk_curacc(sbp->smk_floor, MAY_READ, &ad);
@@ -407,7 +407,7 @@ static int smack_sb_mount(char *dev_name, struct path *path,
 	struct superblock_smack *sbp = path->mnt->mnt_sb->s_security;
 	struct smk_audit_info ad;
 
-	smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_FS);
+	smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_PATH);
 	smk_ad_setfield_u_fs_path(&ad, *path);
 
 	return smk_curacc(sbp->smk_floor, MAY_WRITE, &ad);
@@ -425,10 +425,13 @@ static int smack_sb_umount(struct vfsmount *mnt, int flags)
 {
 	struct superblock_smack *sbp;
 	struct smk_audit_info ad;
+	struct path path;
 
-	smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_FS);
-	smk_ad_setfield_u_fs_path_dentry(&ad, mnt->mnt_root);
-	smk_ad_setfield_u_fs_path_mnt(&ad, mnt);
+	path.dentry = mnt->mnt_root;
+	path.mnt = mnt;
+
+	smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_PATH);
+	smk_ad_setfield_u_fs_path(&ad, path);
 
 	sbp = mnt->mnt_sb->s_security;
 	return smk_curacc(sbp->smk_floor, MAY_WRITE, &ad);
@@ -563,7 +566,7 @@ static int smack_inode_link(struct dentry *old_dentry, struct inode *dir,
 	struct smk_audit_info ad;
 	int rc;
 
-	smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_FS);
+	smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_DENTRY);
 	smk_ad_setfield_u_fs_path_dentry(&ad, old_dentry);
 
 	isp = smk_of_inode(old_dentry->d_inode);
@@ -592,7 +595,7 @@ static int smack_inode_unlink(struct inode *dir, struct dentry *dentry)
 	struct smk_audit_info ad;
 	int rc;
 
-	smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_FS);
+	smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_DENTRY);
 	smk_ad_setfield_u_fs_path_dentry(&ad, dentry);
 
 	/*
@@ -623,7 +626,7 @@ static int smack_inode_rmdir(struct inode *dir, struct dentry *dentry)
 	struct smk_audit_info ad;
 	int rc;
 
-	smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_FS);
+	smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_DENTRY);
 	smk_ad_setfield_u_fs_path_dentry(&ad, dentry);
 
 	/*
@@ -663,7 +666,7 @@ static int smack_inode_rename(struct inode *old_inode,
 	char *isp;
 	struct smk_audit_info ad;
 
-	smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_FS);
+	smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_DENTRY);
 	smk_ad_setfield_u_fs_path_dentry(&ad, old_dentry);
 
 	isp = smk_of_inode(old_dentry->d_inode);
@@ -700,7 +703,7 @@ static int smack_inode_permission(struct inode *inode, int mask, unsigned flags)
 	/* May be droppable after audit */
 	if (flags & IPERM_FLAG_RCU)
 		return -ECHILD;
-	smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_FS);
+	smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_INODE);
 	smk_ad_setfield_u_fs_inode(&ad, inode);
 	return smk_curacc(smk_of_inode(inode), mask, &ad);
 }
@@ -720,7 +723,7 @@ static int smack_inode_setattr(struct dentry *dentry, struct iattr *iattr)
 	 */
 	if (iattr->ia_valid & ATTR_FORCE)
 		return 0;
-	smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_FS);
+	smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_DENTRY);
 	smk_ad_setfield_u_fs_path_dentry(&ad, dentry);
 
 	return smk_curacc(smk_of_inode(dentry->d_inode), MAY_WRITE, &ad);
@@ -736,10 +739,13 @@ static int smack_inode_setattr(struct dentry *dentry, struct iattr *iattr)
 static int smack_inode_getattr(struct vfsmount *mnt, struct dentry *dentry)
 {
 	struct smk_audit_info ad;
+	struct path path;
 
-	smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_FS);
-	smk_ad_setfield_u_fs_path_dentry(&ad, dentry);
-	smk_ad_setfield_u_fs_path_mnt(&ad, mnt);
+	path.dentry = dentry;
+	path.mnt = mnt;
+
+	smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_PATH);
+	smk_ad_setfield_u_fs_path(&ad, path);
 	return smk_curacc(smk_of_inode(dentry->d_inode), MAY_READ, &ad);
 }
 
@@ -784,7 +790,7 @@ static int smack_inode_setxattr(struct dentry *dentry, const char *name,
 	} else
 		rc = cap_inode_setxattr(dentry, name, value, size, flags);
 
-	smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_FS);
+	smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_DENTRY);
 	smk_ad_setfield_u_fs_path_dentry(&ad, dentry);
 
 	if (rc == 0)
@@ -845,7 +851,7 @@ static int smack_inode_getxattr(struct dentry *dentry, const char *name)
 {
 	struct smk_audit_info ad;
 
-	smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_FS);
+	smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_DENTRY);
 	smk_ad_setfield_u_fs_path_dentry(&ad, dentry);
 
 	return smk_curacc(smk_of_inode(dentry->d_inode), MAY_READ, &ad);
@@ -877,7 +883,7 @@ static int smack_inode_removexattr(struct dentry *dentry, const char *name)
 	} else
 		rc = cap_inode_removexattr(dentry, name);
 
-	smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_FS);
+	smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_DENTRY);
 	smk_ad_setfield_u_fs_path_dentry(&ad, dentry);
 	if (rc == 0)
 		rc = smk_curacc(smk_of_inode(dentry->d_inode), MAY_WRITE, &ad);
@@ -1047,7 +1053,7 @@ static int smack_file_ioctl(struct file *file, unsigned int cmd,
 	int rc = 0;
 	struct smk_audit_info ad;
 
-	smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_FS);
+	smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_PATH);
 	smk_ad_setfield_u_fs_path(&ad, file->f_path);
 
 	if (_IOC_DIR(cmd) & _IOC_WRITE)
@@ -1070,8 +1076,8 @@ static int smack_file_lock(struct file *file, unsigned int cmd)
 {
 	struct smk_audit_info ad;
 
-	smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_FS);
-	smk_ad_setfield_u_fs_path_dentry(&ad, file->f_path.dentry);
+	smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_PATH);
+	smk_ad_setfield_u_fs_path(&ad, file->f_path);
 	return smk_curacc(file->f_security, MAY_WRITE, &ad);
 }
 
@@ -1089,7 +1095,7 @@ static int smack_file_fcntl(struct file *file, unsigned int cmd,
 	struct smk_audit_info ad;
 	int rc;
 
-	smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_FS);
+	smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_PATH);
 	smk_ad_setfield_u_fs_path(&ad, file->f_path);
 
 	switch (cmd) {
diff --git a/security/tomoyo/common.c b/security/tomoyo/common.c
index 7556315c1978..a0d09e56874b 100644
--- a/security/tomoyo/common.c
+++ b/security/tomoyo/common.c
@@ -108,10 +108,9 @@ static bool tomoyo_flush(struct tomoyo_io_buffer *head)
 		head->read_user_buf += len;
 		w += len;
 	}
-	if (*w) {
-		head->r.w[0] = w;
+	head->r.w[0] = w;
+	if (*w)
 		return false;
-	}
 	/* Add '\0' for query. */
 	if (head->poll) {
 		if (!head->read_user_buf_avail ||
@@ -459,8 +458,16 @@ static int tomoyo_write_profile(struct tomoyo_io_buffer *head)
 	if (profile == &tomoyo_default_profile)
 		return -EINVAL;
 	if (!strcmp(data, "COMMENT")) {
-		const struct tomoyo_path_info *old_comment = profile->comment;
-		profile->comment = tomoyo_get_name(cp);
+		static DEFINE_SPINLOCK(lock);
+		const struct tomoyo_path_info *new_comment
+			= tomoyo_get_name(cp);
+		const struct tomoyo_path_info *old_comment;
+		if (!new_comment)
+			return -ENOMEM;
+		spin_lock(&lock);
+		old_comment = profile->comment;
+		profile->comment = new_comment;
+		spin_unlock(&lock);
 		tomoyo_put_name(old_comment);
 		return 0;
 	}
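The COMMENT hunk is a reference-counting fix, not a cosmetic one: tomoyo_get_name() hands out refcounted names, and the old read-then-replace of profile->comment was not atomic. A hypothetical interleaving (illustration, not from the patch) shows the bug the spinlock closes:

/*
 * Without the lock, two writers may observe the same old pointer:
 *
 *   writer A                        writer B
 *   old = profile->comment;
 *                                   old = profile->comment;  (same pointer)
 *   profile->comment = newA;
 *                                   profile->comment = newB; (newA leaked)
 *   tomoyo_put_name(old);
 *                                   tomoyo_put_name(old);    (double put)
 *
 * The spinlock makes the read-and-replace of profile->comment atomic,
 * so each old comment is put exactly once.
 */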
diff --git a/security/tomoyo/file.c b/security/tomoyo/file.c
index cb09f1fce910..d64e8ecb6fb3 100644
--- a/security/tomoyo/file.c
+++ b/security/tomoyo/file.c
@@ -1011,7 +1011,6 @@ int tomoyo_path_perm(const u8 operation, struct path *path)
 		break;
 	case TOMOYO_TYPE_RMDIR:
 	case TOMOYO_TYPE_CHROOT:
-	case TOMOYO_TYPE_UMOUNT:
 		tomoyo_add_slash(&buf);
 		break;
 	}
diff --git a/security/tomoyo/memory.c b/security/tomoyo/memory.c
index 297612669c74..42a7b1ba8cbf 100644
--- a/security/tomoyo/memory.c
+++ b/security/tomoyo/memory.c
@@ -75,6 +75,7 @@ void *tomoyo_commit_ok(void *data, const unsigned int size)
 		memset(data, 0, size);
 		return ptr;
 	}
+	kfree(ptr);
 	return NULL;
 }
 
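The added kfree() plugs a leak on the failure path of tomoyo_commit_ok(): when tomoyo_memory_ok() rejects the allocation against the memory quota, the freshly allocated buffer was previously dropped on the floor. Reconstructed in context — only the lines in the hunk above are certain; the allocation call and flags are a sketch:

void *tomoyo_commit_ok(void *data, const unsigned int size)
{
	void *ptr = kzalloc(size, GFP_NOFS);	/* flags assumed */

	if (tomoyo_memory_ok(ptr)) {
		memcpy(ptr, data, size);
		memset(data, 0, size);
		return ptr;
	}
	kfree(ptr);	/* previously leaked when the quota check failed */
	return NULL;
}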
diff --git a/security/tomoyo/mount.c b/security/tomoyo/mount.c
index 82bf8c2390bc..162a864dba24 100644
--- a/security/tomoyo/mount.c
+++ b/security/tomoyo/mount.c
@@ -143,6 +143,7 @@ static int tomoyo_mount_acl(struct tomoyo_request_info *r, char *dev_name,
 		goto out;
 	}
 	requested_dev_name = tomoyo_realpath_from_path(&path);
+	path_put(&path);
 	if (!requested_dev_name) {
 		error = -ENOENT;
 		goto out;
diff --git a/security/tomoyo/util.c b/security/tomoyo/util.c
index 9bfc1ee8222d..6d5393204d95 100644
--- a/security/tomoyo/util.c
+++ b/security/tomoyo/util.c
@@ -390,7 +390,7 @@ bool tomoyo_correct_domain(const unsigned char *domainname)
 		if (!cp)
 			break;
 		if (*domainname != '/' ||
-		    !tomoyo_correct_word2(domainname, cp - domainname - 1))
+		    !tomoyo_correct_word2(domainname, cp - domainname))
 			goto out;
 		domainname = cp + 1;
 	}
diff --git a/sound/pcmcia/pdaudiocf/pdaudiocf.c b/sound/pcmcia/pdaudiocf/pdaudiocf.c
index 8cc4733698a0..ce33be0e4e98 100644
--- a/sound/pcmcia/pdaudiocf/pdaudiocf.c
+++ b/sound/pcmcia/pdaudiocf/pdaudiocf.c
@@ -278,7 +278,7 @@ static int pdacf_resume(struct pcmcia_device *link)
 /*
  * Module entry points
  */
-static struct pcmcia_device_id snd_pdacf_ids[] = {
+static const struct pcmcia_device_id snd_pdacf_ids[] = {
 	/* this is too general PCMCIA_DEVICE_MANF_CARD(0x015d, 0x4c45), */
 	PCMCIA_DEVICE_PROD_ID12("Core Sound","PDAudio-CF",0x396d19d2,0x71717b49),
 	PCMCIA_DEVICE_NULL
diff --git a/sound/pcmcia/vx/vxpocket.c b/sound/pcmcia/vx/vxpocket.c
index 80000d631f88..d9ef21d8fa73 100644
--- a/sound/pcmcia/vx/vxpocket.c
+++ b/sound/pcmcia/vx/vxpocket.c
@@ -350,7 +350,7 @@ static void vxpocket_detach(struct pcmcia_device *link)
  * Module entry points
  */
 
-static struct pcmcia_device_id vxp_ids[] = {
+static const struct pcmcia_device_id vxp_ids[] = {
 	PCMCIA_DEVICE_MANF_CARD(0x01f1, 0x0100),
 	PCMCIA_DEVICE_NULL
 };
diff --git a/usr/gen_init_cpio.c b/usr/gen_init_cpio.c
index 7f06884ecd41..af0f22fb1ef7 100644
--- a/usr/gen_init_cpio.c
+++ b/usr/gen_init_cpio.c
@@ -22,6 +22,7 @@
 
 static unsigned int offset;
 static unsigned int ino = 721;
+static time_t default_mtime;
 
 struct file_handler {
 	const char *type;
@@ -102,7 +103,6 @@ static int cpio_mkslink(const char *name, const char *target,
 		unsigned int mode, uid_t uid, gid_t gid)
 {
 	char s[256];
-	time_t mtime = time(NULL);
 
 	if (name[0] == '/')
 		name++;
@@ -114,7 +114,7 @@ static int cpio_mkslink(const char *name, const char *target,
 		(long) uid,		/* uid */
 		(long) gid,		/* gid */
 		1,			/* nlink */
-		(long) mtime,		/* mtime */
+		(long) default_mtime,	/* mtime */
 		(unsigned)strlen(target)+1, /* filesize */
 		3,			/* major */
 		1,			/* minor */
@@ -152,7 +152,6 @@ static int cpio_mkgeneric(const char *name, unsigned int mode,
 			  uid_t uid, gid_t gid)
 {
 	char s[256];
-	time_t mtime = time(NULL);
 
 	if (name[0] == '/')
 		name++;
@@ -164,7 +163,7 @@ static int cpio_mkgeneric(const char *name, unsigned int mode,
 		(long) uid,		/* uid */
 		(long) gid,		/* gid */
 		2,			/* nlink */
-		(long) mtime,		/* mtime */
+		(long) default_mtime,	/* mtime */
 		0,			/* filesize */
 		3,			/* major */
 		1,			/* minor */
@@ -242,7 +241,6 @@ static int cpio_mknod(const char *name, unsigned int mode,
 		      unsigned int maj, unsigned int min)
 {
 	char s[256];
-	time_t mtime = time(NULL);
 
 	if (dev_type == 'b')
 		mode |= S_IFBLK;
@@ -259,7 +257,7 @@ static int cpio_mknod(const char *name, unsigned int mode,
 		(long) uid,		/* uid */
 		(long) gid,		/* gid */
 		1,			/* nlink */
-		(long) mtime,		/* mtime */
+		(long) default_mtime,	/* mtime */
 		0,			/* filesize */
 		3,			/* major */
 		1,			/* minor */
@@ -460,7 +458,7 @@ static int cpio_mkfile_line(const char *line)
 static void usage(const char *prog)
 {
 	fprintf(stderr, "Usage:\n"
-		"\t%s <cpio_list>\n"
+		"\t%s [-t <timestamp>] <cpio_list>\n"
 		"\n"
 		"<cpio_list> is a file containing newline separated entries that\n"
 		"describe the files to be included in the initramfs archive:\n"
@@ -491,7 +489,11 @@ static void usage(const char *prog)
 		"nod /dev/console 0600 0 0 c 5 1\n"
 		"dir /root 0700 0 0\n"
 		"dir /sbin 0755 0 0\n"
-		"file /sbin/kinit /usr/src/klibc/kinit/kinit 0755 0 0\n",
+		"file /sbin/kinit /usr/src/klibc/kinit/kinit 0755 0 0\n"
+		"\n"
+		"<timestamp> is time in seconds since Epoch that will be used\n"
+		"as mtime for symlinks, special files and directories. The default\n"
+		"is to use the current time for these entries.\n",
 		prog);
 }
 
@@ -529,17 +531,42 @@ int main (int argc, char *argv[])
 	char *args, *type;
 	int ec = 0;
 	int line_nr = 0;
+	const char *filename;
+
+	default_mtime = time(NULL);
+	while (1) {
+		int opt = getopt(argc, argv, "t:h");
+		char *invalid;
 
-	if (2 != argc) {
+		if (opt == -1)
+			break;
+		switch (opt) {
+		case 't':
+			default_mtime = strtol(optarg, &invalid, 10);
+			if (!*optarg || *invalid) {
+				fprintf(stderr, "Invalid timestamp: %s\n",
+					optarg);
+				usage(argv[0]);
+				exit(1);
+			}
+			break;
+		case 'h':
+		case '?':
+			usage(argv[0]);
+			exit(opt == 'h' ? 0 : 1);
+		}
+	}
+
+	if (argc - optind != 1) {
 		usage(argv[0]);
 		exit(1);
 	}
-
-	if (!strcmp(argv[1], "-"))
+	filename = argv[optind];
+	if (!strcmp(filename, "-"))
		cpio_list = stdin;
-	else if (! (cpio_list = fopen(argv[1], "r"))) {
+	else if (!(cpio_list = fopen(filename, "r"))) {
 		fprintf(stderr, "ERROR: unable to open '%s': %s\n\n",
-			argv[1], strerror(errno));
+			filename, strerror(errno));
 		usage(argv[0]);
 		exit(1);
 	}
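Net effect of the gen_init_cpio changes: symlinks, device nodes, and directories are stamped with a single default_mtime captured once at startup (or supplied via the new -t option) instead of calling time(NULL) per entry, so an invocation along the lines of "gen_init_cpio -t 946684800 initramfs.list > initramfs.cpio" (timestamp and file names illustrative) yields byte-identical initramfs archives across repeated builds.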