-rw-r--r--Documentation/Intel-IOMMU.txt115
-rw-r--r--Documentation/feature-removal-schedule.txt24
-rw-r--r--Documentation/filesystems/Exporting115
-rw-r--r--Documentation/i386/boot.txt34
-rw-r--r--Documentation/kbuild/makefiles.txt10
-rw-r--r--Documentation/kernel-parameters.txt17
-rw-r--r--Documentation/memory-hotplug.txt58
-rw-r--r--Documentation/powerpc/mpc52xx-device-tree-bindings.txt4
-rw-r--r--MAINTAINERS2
-rw-r--r--Makefile7
-rw-r--r--arch/alpha/kernel/pci_iommu.c3
-rw-r--r--arch/arm/common/dmabounce.c3
-rw-r--r--arch/avr32/boards/atstk1000/atstk1002.c58
-rw-r--r--arch/avr32/mach-at32ap/at32ap7000.c344
-rw-r--r--arch/avr32/mach-at32ap/extint.c2
-rw-r--r--arch/avr32/mach-at32ap/pm.h4
-rw-r--r--arch/avr32/mach-at32ap/time-tc.c2
-rw-r--r--arch/blackfin/Makefile4
-rw-r--r--arch/blackfin/kernel/dma-mapping.c3
-rw-r--r--arch/blackfin/kernel/setup.c4
-rw-r--r--arch/ia64/hp/common/sba_iommu.c2
-rw-r--r--arch/ia64/hp/sim/simscsi.c4
-rw-r--r--arch/ia64/kernel/efi.c4
-rw-r--r--arch/ia64/kernel/setup.c14
-rw-r--r--arch/ia64/sn/pci/pci_dma.c2
-rw-r--r--arch/m68k/kernel/dma.c2
-rw-r--r--arch/m68knommu/Kconfig13
-rw-r--r--arch/m68knommu/Makefile3
-rw-r--r--arch/m68knommu/defconfig325
-rw-r--r--arch/m68knommu/kernel/setup.c27
-rw-r--r--arch/m68knommu/kernel/signal.c10
-rw-r--r--arch/m68knommu/kernel/time.c22
-rw-r--r--arch/m68knommu/platform/5206/config.c9
-rw-r--r--arch/m68knommu/platform/5206e/config.c10
-rw-r--r--arch/m68knommu/platform/520x/config.c8
-rw-r--r--arch/m68knommu/platform/523x/config.c8
-rw-r--r--arch/m68knommu/platform/5249/config.c10
-rw-r--r--arch/m68knommu/platform/5272/config.c10
-rw-r--r--arch/m68knommu/platform/527x/config.c8
-rw-r--r--arch/m68knommu/platform/528x/config.c8
-rw-r--r--arch/m68knommu/platform/5307/config.c10
-rw-r--r--arch/m68knommu/platform/5307/entry.S5
-rw-r--r--arch/m68knommu/platform/5307/pit.c15
-rw-r--r--arch/m68knommu/platform/5307/timers.c19
-rw-r--r--arch/m68knommu/platform/532x/config.c10
-rw-r--r--arch/m68knommu/platform/5407/config.c10
-rw-r--r--arch/mips/Kconfig4
-rw-r--r--arch/mips/Kconfig.debug12
-rw-r--r--arch/mips/Makefile18
-rw-r--r--arch/mips/cobalt/Makefile2
-rw-r--r--arch/mips/cobalt/setup.c24
-rw-r--r--arch/mips/cobalt/time.c35
-rw-r--r--arch/mips/kernel/Makefile1
-rw-r--r--arch/mips/kernel/cevt-gt641xx.c144
-rw-r--r--arch/mips/kernel/cevt-r4k.c4
-rw-r--r--arch/mips/kernel/time.c4
-rw-r--r--arch/mips/mips-boards/generic/time.c13
-rw-r--r--arch/mips/mm/dma-default.c17
-rw-r--r--arch/mips/sgi-ip27/ip27-init.c2
-rw-r--r--arch/mips/sgi-ip27/ip27-timer.c134
-rw-r--r--arch/mips/sibyte/bcm1480/irq.c75
-rw-r--r--arch/mips/sibyte/bcm1480/smp.c4
-rw-r--r--arch/mips/sibyte/bcm1480/time.c117
-rw-r--r--arch/mips/sibyte/sb1250/irq.c35
-rw-r--r--arch/mips/sibyte/sb1250/smp.c4
-rw-r--r--arch/mips/sibyte/sb1250/time.c88
-rw-r--r--arch/parisc/kernel/pci-dma.c1
-rw-r--r--arch/powerpc/Kconfig.debug1
-rw-r--r--arch/powerpc/boot/dts/bamboo.dts10
-rw-r--r--arch/powerpc/boot/dts/lite5200.dts26
-rw-r--r--arch/powerpc/boot/dts/lite5200b.dts26
-rw-r--r--arch/powerpc/boot/dts/sequoia.dts14
-rw-r--r--arch/powerpc/boot/dts/walnut.dts12
-rw-r--r--arch/powerpc/boot/treeboot-walnut.c6
-rw-r--r--arch/powerpc/configs/bamboo_defconfig114
-rw-r--r--arch/powerpc/configs/ebony_defconfig115
-rw-r--r--arch/powerpc/configs/walnut_defconfig94
-rw-r--r--arch/powerpc/kernel/dma_64.c3
-rw-r--r--arch/powerpc/kernel/ibmebus.c3
-rw-r--r--arch/powerpc/kernel/iommu.c2
-rw-r--r--arch/powerpc/platforms/40x/Kconfig1
-rw-r--r--arch/powerpc/platforms/44x/Kconfig8
-rw-r--r--arch/powerpc/platforms/52xx/lite5200.c4
-rw-r--r--arch/powerpc/platforms/52xx/mpc52xx_common.c71
-rw-r--r--arch/powerpc/platforms/Kconfig.cputype1
-rw-r--r--arch/powerpc/platforms/ps3/system-bus.c5
-rw-r--r--arch/powerpc/sysdev/bestcomm/bestcomm.c9
-rw-r--r--arch/s390/defconfig131
-rw-r--r--arch/s390/kernel/ipl.c2
-rw-r--r--arch/s390/kernel/process.c18
-rw-r--r--arch/s390/kernel/smp.c65
-rw-r--r--arch/s390/lib/uaccess_pt.c90
-rw-r--r--arch/s390/mm/Makefile2
-rw-r--r--arch/s390/mm/init.c32
-rw-r--r--arch/s390/mm/pgtable.c94
-rw-r--r--arch/s390/mm/vmem.c53
-rw-r--r--arch/sparc/kernel/ioport.c17
-rw-r--r--arch/sparc/mm/io-unit.c2
-rw-r--r--arch/sparc/mm/iommu.c8
-rw-r--r--arch/sparc/mm/sun4c.c2
-rw-r--r--arch/sparc64/Kconfig4
-rw-r--r--arch/sparc64/Makefile4
-rw-r--r--arch/sparc64/defconfig91
-rw-r--r--arch/sparc64/kernel/Makefile9
-rw-r--r--arch/sparc64/kernel/iommu.c7
-rw-r--r--arch/sparc64/kernel/iommu_common.c18
-rw-r--r--arch/sparc64/kernel/irq.c85
-rw-r--r--arch/sparc64/kernel/ldc.c2
-rw-r--r--arch/sparc64/kernel/pci.c3
-rw-r--r--arch/sparc64/kernel/pci_msi.c14
-rw-r--r--arch/sparc64/kernel/pci_sun4v.c7
-rw-r--r--arch/sparc64/math-emu/Makefile2
-rw-r--r--arch/um/drivers/ubd_kern.c2
-rw-r--r--arch/x86/boot/compressed/head_32.S15
-rw-r--r--arch/x86/boot/compressed/misc_32.c3
-rw-r--r--arch/x86/boot/header.S7
-rw-r--r--arch/x86/kernel/asm-offsets_32.c7
-rw-r--r--arch/x86/kernel/e820_32.c18
-rw-r--r--arch/x86/kernel/e820_64.c22
-rw-r--r--arch/x86/kernel/efi_32.c4
-rw-r--r--arch/x86/kernel/head_32.S44
-rw-r--r--arch/x86/kernel/io_apic_64.c59
-rw-r--r--arch/x86/kernel/pci-calgary_64.c10
-rw-r--r--arch/x86/kernel/pci-dma_64.c5
-rw-r--r--arch/x86/kernel/pci-gart_64.c4
-rw-r--r--arch/x86/kernel/pci-nommu_64.c4
-rw-r--r--arch/x86/kernel/setup_32.c4
-rw-r--r--arch/x86/kernel/setup_64.c9
-rw-r--r--arch/x86/mm/pageattr_64.c6
-rw-r--r--arch/x86_64/Kconfig32
-rw-r--r--block/ll_rw_blk.c20
-rw-r--r--crypto/digest.c2
-rw-r--r--crypto/hmac.c3
-rw-r--r--crypto/scatterwalk.c2
-rw-r--r--crypto/scatterwalk.h6
-rw-r--r--crypto/tcrypt.c4
-rw-r--r--crypto/xcbc.c2
-rw-r--r--drivers/ata/libata-core.c10
-rw-r--r--drivers/ata/libata-scsi.c2
-rw-r--r--drivers/base/memory.c9
-rw-r--r--drivers/block/DAC960.c2
-rw-r--r--drivers/block/cciss.c4
-rw-r--r--drivers/block/cpqarray.c3
-rw-r--r--drivers/block/cryptoloop.c12
-rw-r--r--drivers/block/sunvdc.c1
-rw-r--r--drivers/block/sx8.c1
-rw-r--r--drivers/block/ub.c11
-rw-r--r--drivers/block/viodasd.c2
-rw-r--r--drivers/bluetooth/Kconfig35
-rw-r--r--drivers/bluetooth/Makefile4
-rw-r--r--drivers/bluetooth/bluecard_cs.c5
-rw-r--r--drivers/bluetooth/bpa10x.c624
-rw-r--r--drivers/bluetooth/bt3c_cs.c5
-rw-r--r--drivers/bluetooth/btsdio.c406
-rw-r--r--drivers/bluetooth/btuart_cs.c5
-rw-r--r--drivers/bluetooth/btusb.c564
-rw-r--r--drivers/bluetooth/dtl1_cs.c5
-rw-r--r--drivers/bluetooth/hci_bcsp.c3
-rw-r--r--drivers/bluetooth/hci_ldisc.c8
-rw-r--r--drivers/bluetooth/hci_ll.c531
-rw-r--r--drivers/bluetooth/hci_uart.h8
-rw-r--r--drivers/char/cyclades.c2
-rw-r--r--drivers/firewire/fw-ohci.c13
-rw-r--r--drivers/ide/cris/ide-cris.c4
-rw-r--r--drivers/ide/ide-probe.c5
-rw-r--r--drivers/ide/ide-taskfile.c2
-rw-r--r--drivers/ide/mips/au1xxx-ide.c6
-rw-r--r--drivers/ieee1394/dma.c2
-rw-r--r--drivers/ieee1394/sbp2.c2
-rw-r--r--drivers/infiniband/core/umem.c11
-rw-r--r--drivers/infiniband/hw/ehca/ehca_mrmw.c6
-rw-r--r--drivers/infiniband/hw/ipath/ipath_dma.c4
-rw-r--r--drivers/infiniband/hw/ipath/ipath_mr.c2
-rw-r--r--drivers/infiniband/hw/mthca/mthca_memfree.c24
-rw-r--r--drivers/infiniband/ulp/iser/iser_memory.c8
-rw-r--r--drivers/input/keyboard/bf54x-keys.c1
-rw-r--r--drivers/input/mouse/appletouch.c25
-rw-r--r--drivers/input/serio/i8042.c4
-rw-r--r--drivers/input/serio/i8042.h22
-rw-r--r--drivers/input/touchscreen/Kconfig6
-rw-r--r--drivers/input/touchscreen/usbtouchscreen.c36
-rw-r--r--drivers/kvm/kvm_main.c37
-rw-r--r--drivers/kvm/lapic.c38
-rw-r--r--drivers/kvm/mmu.c3
-rw-r--r--drivers/kvm/vmx.c16
-rw-r--r--drivers/kvm/x86_emulate.c77
-rw-r--r--drivers/md/bitmap.c2
-rw-r--r--drivers/md/dm-crypt.c21
-rw-r--r--drivers/md/raid5.c17
-rw-r--r--drivers/media/common/ir-keymaps.c70
-rw-r--r--drivers/media/common/saa7146_core.c3
-rw-r--r--drivers/media/dvb/cinergyT2/cinergyT2.c42
-rw-r--r--drivers/media/dvb/dvb-core/dvb_ca_en50221.c2
-rw-r--r--drivers/media/dvb/dvb-usb/dib0700_devices.c2
-rw-r--r--drivers/media/radio/miropcm20-radio.c1
-rw-r--r--drivers/media/radio/radio-gemtek.c1
-rw-r--r--drivers/media/video/arv.c1
-rw-r--r--drivers/media/video/bt8xx/bttv-driver.c3
-rw-r--r--drivers/media/video/bw-qcam.c1
-rw-r--r--drivers/media/video/c-qcam.c1
-rw-r--r--drivers/media/video/cpia.c5
-rw-r--r--drivers/media/video/cpia2/cpia2_v4l.c5
-rw-r--r--drivers/media/video/cx23885/cx23885-core.c6
-rw-r--r--drivers/media/video/cx88/cx88-alsa.c86
-rw-r--r--drivers/media/video/cx88/cx88-blackbird.c57
-rw-r--r--drivers/media/video/cx88/cx88-dvb.c3
-rw-r--r--drivers/media/video/cx88/cx88-mpeg.c133
-rw-r--r--drivers/media/video/cx88/cx88-video.c1
-rw-r--r--drivers/media/video/cx88/cx88-vp3054-i2c.c16
-rw-r--r--drivers/media/video/cx88/cx88.h24
-rw-r--r--drivers/media/video/em28xx/em28xx-core.c3
-rw-r--r--drivers/media/video/em28xx/em28xx-video.c2
-rw-r--r--drivers/media/video/et61x251/et61x251_core.c1
-rw-r--r--drivers/media/video/ir-kbd-i2c.c1
-rw-r--r--drivers/media/video/ivtv/ivtv-driver.c11
-rw-r--r--drivers/media/video/ivtv/ivtv-fileops.c8
-rw-r--r--drivers/media/video/ivtv/ivtv-ioctl.c13
-rw-r--r--drivers/media/video/ivtv/ivtv-streams.c116
-rw-r--r--drivers/media/video/ivtv/ivtv-streams.h1
-rw-r--r--drivers/media/video/ivtv/ivtv-udma.c4
-rw-r--r--drivers/media/video/ivtv/ivtv-yuv.c160
-rw-r--r--drivers/media/video/ivtv/ivtv-yuv.h1
-rw-r--r--drivers/media/video/ivtv/ivtvfb.c92
-rw-r--r--drivers/media/video/meye.c1
-rw-r--r--drivers/media/video/ov511.c1
-rw-r--r--drivers/media/video/planb.c1
-rw-r--r--drivers/media/video/pms.c1
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-encoder.c6
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h11
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-hdw.c3
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-v4l2.c3
-rw-r--r--drivers/media/video/pwc/pwc-if.c1
-rw-r--r--drivers/media/video/saa7134/saa6752hs.c111
-rw-r--r--drivers/media/video/saa7134/saa7134-core.c44
-rw-r--r--drivers/media/video/saa7134/saa7134-empress.c12
-rw-r--r--drivers/media/video/saa7134/saa7134-input.c29
-rw-r--r--drivers/media/video/saa7134/saa7134-tvaudio.c32
-rw-r--r--drivers/media/video/saa7134/saa7134-video.c28
-rw-r--r--drivers/media/video/saa7134/saa7134.h7
-rw-r--r--drivers/media/video/se401.c1
-rw-r--r--drivers/media/video/sn9c102/sn9c102_core.c1
-rw-r--r--drivers/media/video/stradis.c1
-rw-r--r--drivers/media/video/stv680.c1
-rw-r--r--drivers/media/video/tuner-core.c2
-rw-r--r--drivers/media/video/usbvideo/usbvideo.c1
-rw-r--r--drivers/media/video/usbvideo/vicam.c1
-rw-r--r--drivers/media/video/usbvision/usbvision-video.c3
-rw-r--r--drivers/media/video/v4l2-common.c2
-rw-r--r--drivers/media/video/videobuf-core.c2
-rw-r--r--drivers/media/video/videobuf-dma-sg.c8
-rw-r--r--drivers/media/video/videocodec.c4
-rw-r--r--drivers/media/video/videodev.c42
-rw-r--r--drivers/media/video/vivi.c1
-rw-r--r--drivers/media/video/w9966.c1
-rw-r--r--drivers/media/video/w9968cf.c1
-rw-r--r--drivers/media/video/zc0301/zc0301_core.c1
-rw-r--r--drivers/media/video/zoran_card.c10
-rw-r--r--drivers/media/video/zoran_driver.c2
-rw-r--r--drivers/mmc/card/queue.c15
-rw-r--r--drivers/mmc/host/at91_mci.c8
-rw-r--r--drivers/mmc/host/au1xmmc.c11
-rw-r--r--drivers/mmc/host/imxmmc.c2
-rw-r--r--drivers/mmc/host/mmc_spi.c8
-rw-r--r--drivers/mmc/host/omap.c4
-rw-r--r--drivers/mmc/host/sdhci.c2
-rw-r--r--drivers/mmc/host/tifm_sd.c8
-rw-r--r--drivers/mmc/host/wbsd.c6
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0001.c146
-rw-r--r--drivers/mtd/nand/Kconfig2
-rw-r--r--drivers/mtd/nand/diskonchip.c4
-rw-r--r--drivers/mtd/nand/nand_base.c6
-rw-r--r--drivers/mtd/nand/nand_ecc.c2
-rw-r--r--drivers/mtd/nand/nandsim.c2
-rw-r--r--drivers/mtd/nand/s3c2410.c14
-rw-r--r--drivers/mtd/onenand/onenand_sim.c50
-rw-r--r--drivers/net/cpmac.c2
-rw-r--r--drivers/net/fec.c24
-rw-r--r--drivers/net/mlx4/icm.c14
-rw-r--r--drivers/net/niu.c34
-rw-r--r--drivers/net/ppp_mppe.c6
-rw-r--r--drivers/net/tg3.c95
-rw-r--r--drivers/net/tg3.h11
-rw-r--r--drivers/parisc/ccio-dma.c1
-rw-r--r--drivers/parisc/sba_iommu.c1
-rw-r--r--drivers/pci/Makefile3
-rw-r--r--drivers/pci/dmar.c329
-rw-r--r--drivers/pci/intel-iommu.c2271
-rw-r--r--drivers/pci/intel-iommu.h325
-rw-r--r--drivers/pci/iova.c394
-rw-r--r--drivers/pci/iova.h63
-rw-r--r--drivers/pci/pci.h1
-rw-r--r--drivers/pci/probe.c14
-rw-r--r--drivers/pci/search.c34
-rw-r--r--drivers/power/apm_power.c141
-rw-r--r--drivers/s390/char/raw3270.c26
-rw-r--r--drivers/s390/char/tape_class.c19
-rw-r--r--drivers/s390/char/tape_class.h4
-rw-r--r--drivers/s390/char/vmlogrdr.c15
-rw-r--r--drivers/s390/cio/chp.c12
-rw-r--r--drivers/s390/cio/css.c9
-rw-r--r--drivers/s390/scsi/zfcp_aux.c1
-rw-r--r--drivers/s390/scsi/zfcp_def.h4
-rw-r--r--drivers/s390/scsi/zfcp_erp.c10
-rw-r--r--drivers/scsi/3w-9xxx.c4
-rw-r--r--drivers/scsi/3w-xxxx.c2
-rw-r--r--drivers/scsi/NCR5380.c6
-rw-r--r--drivers/scsi/NCR53C9x.c4
-rw-r--r--drivers/scsi/NCR53c406a.c6
-rw-r--r--drivers/scsi/aacraid/aachba.c2
-rw-r--r--drivers/scsi/aha152x.c2
-rw-r--r--drivers/scsi/aha1542.c8
-rw-r--r--drivers/scsi/arcmsr/arcmsr_hba.c4
-rw-r--r--drivers/scsi/atari_NCR5380.c6
-rw-r--r--drivers/scsi/eata_pio.c4
-rw-r--r--drivers/scsi/fd_mcs.c6
-rw-r--r--drivers/scsi/fdomain.c7
-rw-r--r--drivers/scsi/gdth.c6
-rw-r--r--drivers/scsi/ibmmca.c2
-rw-r--r--drivers/scsi/ide-scsi.c12
-rw-r--r--drivers/scsi/imm.c8
-rw-r--r--drivers/scsi/in2000.c4
-rw-r--r--drivers/scsi/ipr.c19
-rw-r--r--drivers/scsi/ips.c6
-rw-r--r--drivers/scsi/iscsi_tcp.c15
-rw-r--r--drivers/scsi/megaraid.c8
-rw-r--r--drivers/scsi/megaraid/megaraid_mbox.c12
-rw-r--r--drivers/scsi/oktagon_esp.c6
-rw-r--r--drivers/scsi/osst.c32
-rw-r--r--drivers/scsi/pcmcia/nsp_cs.h2
-rw-r--r--drivers/scsi/pcmcia/sym53c500_cs.c6
-rw-r--r--drivers/scsi/ppa.c7
-rw-r--r--drivers/scsi/ps3rom.c6
-rw-r--r--drivers/scsi/qlogicfas408.c2
-rw-r--r--drivers/scsi/scsi_debug.c4
-rw-r--r--drivers/scsi/scsi_lib.c13
-rw-r--r--drivers/scsi/seagate.c8
-rw-r--r--drivers/scsi/sg.c30
-rw-r--r--drivers/scsi/st.c8
-rw-r--r--drivers/scsi/sun3_NCR5380.c3
-rw-r--r--drivers/scsi/sym53c416.c2
-rw-r--r--drivers/scsi/tmscsim.c5
-rw-r--r--drivers/scsi/ultrastor.c2
-rw-r--r--drivers/scsi/wd33c93.c6
-rw-r--r--drivers/scsi/wd7000.c2
-rw-r--r--drivers/serial/mcf.c653
-rw-r--r--drivers/usb/core/message.c6
-rw-r--r--drivers/usb/image/microtek.c5
-rw-r--r--drivers/usb/misc/usbtest.c4
-rw-r--r--drivers/usb/storage/protocol.c2
-rw-r--r--drivers/watchdog/mpc5200_wdt.c3
-rw-r--r--fs/cifs/cifsfs.h2
-rw-r--r--fs/cifs/export.c2
-rw-r--r--fs/dcache.c2
-rw-r--r--fs/ecryptfs/crypto.c16
-rw-r--r--fs/ecryptfs/keystore.c3
-rw-r--r--fs/efs/namei.c36
-rw-r--r--fs/efs/super.c5
-rw-r--r--fs/exportfs/expfs.c360
-rw-r--r--fs/ext2/dir.c44
-rw-r--r--fs/ext2/super.c36
-rw-r--r--fs/ext3/super.c37
-rw-r--r--fs/ext4/super.c37
-rw-r--r--fs/fat/inode.c26
-rw-r--r--fs/gfs2/ops_export.c83
-rw-r--r--fs/gfs2/ops_fstype.h2
-rw-r--r--fs/isofs/export.c69
-rw-r--r--fs/isofs/isofs.h2
-rw-r--r--fs/jffs2/acl.c101
-rw-r--r--fs/jffs2/acl.h12
-rw-r--r--fs/jffs2/dir.c35
-rw-r--r--fs/jffs2/file.c11
-rw-r--r--fs/jffs2/fs.c21
-rw-r--r--fs/jffs2/os-linux.h4
-rw-r--r--fs/jffs2/write.c8
-rw-r--r--fs/jfs/jfs_inode.h7
-rw-r--r--fs/jfs/namei.c35
-rw-r--r--fs/jfs/super.c7
-rw-r--r--fs/libfs.c88
-rw-r--r--fs/nfsd/export.c8
-rw-r--r--fs/nfsd/nfs4recover.c8
-rw-r--r--fs/nfsd/nfsfh.c67
-rw-r--r--fs/ntfs/namei.c77
-rw-r--r--fs/ntfs/ntfs.h2
-rw-r--r--fs/ocfs2/export.c67
-rw-r--r--fs/ocfs2/export.h2
-rw-r--r--fs/proc/base.c46
-rw-r--r--fs/reiserfs/inode.c62
-rw-r--r--fs/reiserfs/super.c6
-rw-r--r--fs/xfs/linux-2.6/xfs_export.c206
-rw-r--r--fs/xfs/linux-2.6/xfs_export.h50
-rw-r--r--fs/xfs/linux-2.6/xfs_super.h2
-rw-r--r--include/acpi/actbl1.h27
-rw-r--r--include/asm-alpha/scatterlist.h5
-rw-r--r--include/asm-arm/dma-mapping.h10
-rw-r--r--include/asm-arm/scatterlist.h5
-rw-r--r--include/asm-avr32/arch-at32ap/board.h23
-rw-r--r--include/asm-avr32/dma-mapping.h7
-rw-r--r--include/asm-avr32/scatterlist.h5
-rw-r--r--include/asm-blackfin/scatterlist.h6
-rw-r--r--include/asm-cris/scatterlist.h5
-rw-r--r--include/asm-frv/scatterlist.h13
-rw-r--r--include/asm-h8300/scatterlist.h5
-rw-r--r--include/asm-ia64/scatterlist.h5
-rw-r--r--include/asm-m32r/scatterlist.h5
-rw-r--r--include/asm-m68k/scatterlist.h5
-rw-r--r--include/asm-m68knommu/module.h12
-rw-r--r--include/asm-m68knommu/scatterlist.h6
-rw-r--r--include/asm-m68knommu/uaccess.h4
-rw-r--r--include/asm-mips/gt64120.h5
-rw-r--r--include/asm-mips/i8253.h6
-rw-r--r--include/asm-mips/scatterlist.h5
-rw-r--r--include/asm-mips/sibyte/sb1250.h2
-rw-r--r--include/asm-parisc/scatterlist.h7
-rw-r--r--include/asm-powerpc/dma-mapping.h10
-rw-r--r--include/asm-powerpc/mpc52xx.h9
-rw-r--r--include/asm-powerpc/scatterlist.h5
-rw-r--r--include/asm-ppc/system.h1
-rw-r--r--include/asm-s390/cpu.h25
-rw-r--r--include/asm-s390/mmu_context.h50
-rw-r--r--include/asm-s390/page.h4
-rw-r--r--include/asm-s390/pgalloc.h250
-rw-r--r--include/asm-s390/pgtable.h429
-rw-r--r--include/asm-s390/processor.h20
-rw-r--r--include/asm-s390/scatterlist.h5
-rw-r--r--include/asm-s390/tlb.h129
-rw-r--r--include/asm-s390/tlbflush.h152
-rw-r--r--include/asm-sh/dma-mapping.h12
-rw-r--r--include/asm-sh/scatterlist.h5
-rw-r--r--include/asm-sh64/dma-mapping.h12
-rw-r--r--include/asm-sh64/scatterlist.h5
-rw-r--r--include/asm-sparc/scatterlist.h5
-rw-r--r--include/asm-sparc64/scatterlist.h5
-rw-r--r--include/asm-v850/scatterlist.h5
-rw-r--r--include/asm-x86/bootparam.h9
-rw-r--r--include/asm-x86/cacheflush.h1
-rw-r--r--include/asm-x86/device.h3
-rw-r--r--include/asm-x86/dma-mapping_32.h4
-rw-r--r--include/asm-x86/scatterlist_32.h5
-rw-r--r--include/asm-x86/scatterlist_64.h5
-rw-r--r--include/asm-xtensa/scatterlist.h5
-rw-r--r--include/linux/capability.h6
-rw-r--r--include/linux/dmar.h86
-rw-r--r--include/linux/efi.h2
-rw-r--r--include/linux/efs_fs.h6
-rw-r--r--include/linux/exportfs.h141
-rw-r--r--include/linux/ext2_fs.h1
-rw-r--r--include/linux/fs.h2
-rw-r--r--include/linux/i8042.h35
-rw-r--r--include/linux/ide.h2
-rw-r--r--include/linux/linkage.h6
-rw-r--r--include/linux/memory.h31
-rw-r--r--include/linux/net.h4
-rw-r--r--include/linux/netdevice.h7
-rw-r--r--include/linux/pci.h2
-rw-r--r--include/linux/pci_ids.h1
-rw-r--r--include/linux/reiserfs_fs.h12
-rw-r--r--include/linux/scatterlist.h202
-rw-r--r--include/linux/skbuff.h15
-rw-r--r--include/linux/socket.h1
-rw-r--r--include/linux/videodev.h42
-rw-r--r--include/linux/videodev2.h92
-rw-r--r--include/media/v4l2-dev.h5
-rw-r--r--include/net/bluetooth/hci.h604
-rw-r--r--include/net/bluetooth/hci_core.h13
-rw-r--r--include/net/bluetooth/l2cap.h37
-rw-r--r--include/sound/version.h2
-rw-r--r--kernel/auditsc.c4
-rw-r--r--kernel/sched.c1
-rw-r--r--kernel/sysctl_check.c2
-rw-r--r--lib/Kconfig.debug10
-rw-r--r--lib/reed_solomon/decode_rs.c5
-rw-r--r--lib/reed_solomon/reed_solomon.c2
-rw-r--r--lib/swiotlb.c2
-rw-r--r--mm/memory_hotplug.c48
-rw-r--r--mm/mmap.c3
-rw-r--r--mm/mprotect.c2
-rw-r--r--mm/shmem.c37
-rw-r--r--mm/slub.c118
-rw-r--r--net/bluetooth/hci_conn.c82
-rw-r--r--net/bluetooth/hci_core.c70
-rw-r--r--net/bluetooth/hci_event.c1651
-rw-r--r--net/bluetooth/hci_sock.c2
-rw-r--r--net/bluetooth/hci_sysfs.c37
-rw-r--r--net/bluetooth/hidp/core.c2
-rw-r--r--net/bluetooth/l2cap.c306
-rw-r--r--net/bluetooth/rfcomm/core.c60
-rw-r--r--net/bluetooth/rfcomm/tty.c25
-rw-r--r--net/bluetooth/sco.c12
-rw-r--r--net/core/dev.c6
-rw-r--r--net/core/neighbour.c3
-rw-r--r--net/core/netpoll.c4
-rw-r--r--net/core/pktgen.c12
-rw-r--r--net/core/skbuff.c4
-rw-r--r--net/dccp/diag.c1
-rw-r--r--net/dccp/ipv4.c4
-rw-r--r--net/dccp/ipv6.c4
-rw-r--r--net/ieee80211/ieee80211_crypt_tkip.c13
-rw-r--r--net/ieee80211/ieee80211_crypt_wep.c8
-rw-r--r--net/ipv4/inet_diag.c7
-rw-r--r--net/ipv4/tcp_diag.c1
-rw-r--r--net/ipv6/ah6.c1
-rw-r--r--net/ipv6/esp6.c1
-rw-r--r--net/mac80211/wep.c8
-rw-r--r--net/sched/sch_teql.c6
-rw-r--r--net/sctp/auth.c3
-rw-r--r--net/sctp/sm_make_chunk.c8
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_crypto.c10
-rw-r--r--net/sunrpc/xdr.c2
-rw-r--r--net/xfrm/xfrm_algo.c5
-rw-r--r--security/commoncap.c23
-rw-r--r--sound/core/control.c5
-rw-r--r--sound/i2c/other/tea575x-tuner.c1
-rw-r--r--sound/pci/bt87x.c4
-rw-r--r--sound/pci/hda/hda_codec.c9
-rw-r--r--sound/pci/hda/hda_local.h18
-rw-r--r--sound/pci/hda/patch_analog.c5
-rw-r--r--sound/pci/hda/patch_cmedia.c3
-rw-r--r--sound/pci/hda/patch_conexant.c35
-rw-r--r--sound/pci/hda/patch_realtek.c2
-rw-r--r--sound/pci/hda/patch_sigmatel.c6
-rw-r--r--sound/pci/hda/patch_via.c2
-rw-r--r--sound/sh/aica.c31
-rw-r--r--sound/sparc/cs4231.c59
-rw-r--r--sound/usb/usbquirks.h9
524 files changed, 13825 insertions, 6104 deletions
diff --git a/Documentation/Intel-IOMMU.txt b/Documentation/Intel-IOMMU.txt
new file mode 100644
index 000000000000..c2321903aa09
--- /dev/null
+++ b/Documentation/Intel-IOMMU.txt
@@ -0,0 +1,115 @@
1Linux IOMMU Support
2===================
3
4The architecture spec can be obtained from the location below.
5
6http://www.intel.com/technology/virtualization/
7
8This guide is a quick cheat sheet covering the basics.
9
10Some Keywords
11
12DMAR - DMA remapping
13DRHD - DMA Remapping Hardware Unit Definition
14RMRR - Reserved Memory Region Reporting structure
15ZLR - Zero Length Reads from PCI devices
16IOVA - I/O Virtual Address
17
18Basic stuff
19-----------
20
21The ACPI DMAR table enumerates the DMA remapping engines present in the
22platform and the device scope relationships that describe which PCI
23devices each DMA engine controls.
24
25What is RMRR?
26-------------
27
28Some devices are controlled by the BIOS, e.g. USB devices used to perform
29PS/2 emulation. The regions of memory used by these devices are marked
30reserved in the e820 map. When DMA translation is turned on, DMA to those
31regions will fail. Hence the BIOS uses RMRR structures to specify these
32regions, along with the devices that need to access them. The OS is expected
33to set up unity (1:1) mappings for these regions so these devices can access them.
34
35How is IOVA generated?
36---------------------
37
38Well-behaved drivers call the pci_map_*() functions before issuing a command
39to a device that needs to perform DMA. Once the DMA is completed and the
40mapping is no longer required, the driver calls pci_unmap_*() to unmap the region.
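
For illustration, a minimal, hypothetical driver sequence using this API
could look like the following (pdev, buf and len are assumed to come from
the driver's probe/IO paths):

#include <linux/pci.h>

/* Illustrative only: map a buffer for device-to-memory DMA, start the
 * (hypothetical) transfer, then unmap once the DMA has completed. */
static int example_do_dma(struct pci_dev *pdev, void *buf, size_t len)
{
	dma_addr_t handle;

	handle = pci_map_single(pdev, buf, len, PCI_DMA_FROMDEVICE);

	/* ... program the device with 'handle' and wait for completion ... */

	pci_unmap_single(pdev, handle, len, PCI_DMA_FROMDEVICE);
	return 0;
}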
41
42The Intel IOMMU driver allocates a virtual address space per domain. Each
43PCIe device gets its own domain (hence protection). Devices under PCI-to-PCI
44bridges share the virtual address space with all devices behind the same
45bridge, due to transaction ID aliasing for such bridges.
46
47IOVA generation is fairly generic. We use the same technique as vmalloc(),
48but these are not global address spaces; each domain has its own.
49Different DMA engines may support different numbers of domains.
50
51We also allocate guard pages with each mapping, so we can attempt to catch
52any overflow that might happen.
53
54
55Graphics Problems?
56------------------
57If you encounter issues with graphics devices, you can try adding the
58option intel_iommu=igfx_off to turn off DMA remapping for the integrated graphics engine.
59
60If the graphics device happens to be covered by the INCLUDE_ALL engine,
61then try enabling CONFIG_DMAR_GFX_WA to set up a 1:1 map for it. Graphics
62drivers are expected to start using the DMA API in the near future, at
63which point this workaround can be removed.
64
65Some exceptions to IOVA
66-----------------------
67The interrupt address range (0xfee00000 - 0xfeefffff) is not translated.
68The same is true for peer-to-peer transactions. Hence we reserve the
69PCI MMIO ranges so they are never allocated as IOVA addresses.
70
71
72Fault reporting
73---------------
74When errors are reported, the DMA engine signals via an interrupt. The fault
75reason and the device that caused the fault are printed on the console.
76
77See below for a sample.
78
79
80Boot Message Sample
81-------------------
82
83Something like the following is printed, indicating the presence of DMAR tables
84in ACPI.
85
86ACPI: DMAR (v001 A M I OEMDMAR 0x00000001 MSFT 0x00000097) @ 0x000000007f5b5ef0
87
88When the DMAR table is parsed and the hardware initialized, the kernel prints
89the DRHD locations and any RMRRs processed.
90
91ACPI DMAR:Host address width 36
92ACPI DMAR:DRHD (flags: 0x00000000)base: 0x00000000fed90000
93ACPI DMAR:DRHD (flags: 0x00000000)base: 0x00000000fed91000
94ACPI DMAR:DRHD (flags: 0x00000001)base: 0x00000000fed93000
95ACPI DMAR:RMRR base: 0x00000000000ed000 end: 0x00000000000effff
96ACPI DMAR:RMRR base: 0x000000007f600000 end: 0x000000007fffffff
97
98When DMAR is enabled for use, you will notice:
99
100PCI-DMA: Using DMAR IOMMU
101
102Fault reporting
103---------------
104
105DMAR:[DMA Write] Request device [00:02.0] fault addr 6df084000
106DMAR:[fault reason 05] PTE Write access is not set
107DMAR:[DMA Write] Request device [00:02.0] fault addr 6df084000
108DMAR:[fault reason 05] PTE Write access is not set
109
110TBD
111----
112
113- For compatibility testing, we could use a unity-map domain for all devices,
114 i.e. provide a 1:1 mapping of all usable memory under a single domain.
115- API for paravirt ops for abstracting functionality for VMM folks.
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index 6b0f963f5379..6bb9be54ab76 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -14,18 +14,6 @@ Who: Jiri Slaby <jirislaby@gmail.com>
14 14
15--------------------------- 15---------------------------
16 16
17What: V4L2 VIDIOC_G_MPEGCOMP and VIDIOC_S_MPEGCOMP
18When: October 2007
19Why: Broken attempt to set MPEG compression parameters. These ioctls are
20 not able to implement the wide variety of parameters that can be set
21 by hardware MPEG encoders. A new MPEG control mechanism was created
22 in kernel 2.6.18 that replaces these ioctls. See the V4L2 specification
23 (section 1.9: Extended controls) for more information on this topic.
24Who: Hans Verkuil <hverkuil@xs4all.nl> and
25 Mauro Carvalho Chehab <mchehab@infradead.org>
26
27---------------------------
28
29What: dev->power.power_state 17What: dev->power.power_state
30When: July 2007 18When: July 2007
31Why: Broken design for runtime control over driver power states, confusing 19Why: Broken design for runtime control over driver power states, confusing
@@ -49,10 +37,10 @@ Who: David Miller <davem@davemloft.net>
49--------------------------- 37---------------------------
50 38
51What: Video4Linux API 1 ioctls and video_decoder.h from Video devices. 39What: Video4Linux API 1 ioctls and video_decoder.h from Video devices.
52When: December 2006 40When: December 2008
53Files: include/linux/video_decoder.h 41Files: include/linux/video_decoder.h include/linux/videodev.h
54Check: include/linux/video_decoder.h 42Check: include/linux/video_decoder.h include/linux/videodev.h
55Why: V4L1 AP1 was replaced by V4L2 API. during migration from 2.4 to 2.6 43Why: V4L1 AP1 was replaced by V4L2 API during migration from 2.4 to 2.6
56 series. The old API have lots of drawbacks and don't provide enough 44 series. The old API have lots of drawbacks and don't provide enough
57 means to work with all video and audio standards. The newer API is 45 means to work with all video and audio standards. The newer API is
58 already available on the main drivers and should be used instead. 46 already available on the main drivers and should be used instead.
@@ -61,7 +49,9 @@ Why: V4L1 AP1 was replaced by V4L2 API. during migration from 2.4 to 2.6
61 Decoder iocts are using internally to allow video drivers to 49 Decoder iocts are using internally to allow video drivers to
62 communicate with video decoders. This should also be improved to allow 50 communicate with video decoders. This should also be improved to allow
63 V4L2 calls being translated into compatible internal ioctls. 51 V4L2 calls being translated into compatible internal ioctls.
64Who: Mauro Carvalho Chehab <mchehab@brturbo.com.br> 52 Compatibility ioctls will be provided, for a while, via
53 v4l1-compat module.
54Who: Mauro Carvalho Chehab <mchehab@infradead.org>
65 55
66--------------------------- 56---------------------------
67 57
diff --git a/Documentation/filesystems/Exporting b/Documentation/filesystems/Exporting
index 31047e0fe14b..87019d2b5981 100644
--- a/Documentation/filesystems/Exporting
+++ b/Documentation/filesystems/Exporting
@@ -2,9 +2,12 @@
2Making Filesystems Exportable 2Making Filesystems Exportable
3============================= 3=============================
4 4
5Most filesystem operations require a dentry (or two) as a starting 5Overview
6--------
7
8All filesystem operations require a dentry (or two) as a starting
6point. Local applications have a reference-counted hold on suitable 9point. Local applications have a reference-counted hold on suitable
7dentrys via open file descriptors or cwd/root. However remote 10dentries via open file descriptors or cwd/root. However remote
8applications that access a filesystem via a remote filesystem protocol 11applications that access a filesystem via a remote filesystem protocol
9such as NFS may not be able to hold such a reference, and so need a 12such as NFS may not be able to hold such a reference, and so need a
10different way to refer to a particular dentry. As the alternative 13different way to refer to a particular dentry. As the alternative
@@ -13,14 +16,14 @@ server-reboot (among other things, though these tend to be the most
13problematic), there is no simple answer like 'filename'. 16problematic), there is no simple answer like 'filename'.
14 17
15The mechanism discussed here allows each filesystem implementation to 18The mechanism discussed here allows each filesystem implementation to
16specify how to generate an opaque (out side of the filesystem) byte 19specify how to generate an opaque (outside of the filesystem) byte
17string for any dentry, and how to find an appropriate dentry for any 20string for any dentry, and how to find an appropriate dentry for any
18given opaque byte string. 21given opaque byte string.
19This byte string will be called a "filehandle fragment" as it 22This byte string will be called a "filehandle fragment" as it
20corresponds to part of an NFS filehandle. 23corresponds to part of an NFS filehandle.
21 24
22A filesystem which supports the mapping between filehandle fragments 25A filesystem which supports the mapping between filehandle fragments
23and dentrys will be termed "exportable". 26and dentries will be termed "exportable".
24 27
25 28
26 29
@@ -89,11 +92,9 @@ For a filesystem to be exportable it must:
89 1/ provide the filehandle fragment routines described below. 92 1/ provide the filehandle fragment routines described below.
90 2/ make sure that d_splice_alias is used rather than d_add 93 2/ make sure that d_splice_alias is used rather than d_add
91 when ->lookup finds an inode for a given parent and name. 94 when ->lookup finds an inode for a given parent and name.
92 Typically the ->lookup routine will end: 95 Typically the ->lookup routine will end with a:
93 if (inode) 96
94 return d_splice(inode, dentry); 97 return d_splice_alias(inode, dentry);
95 d_add(dentry, inode);
96 return NULL;
97 } 98 }
98 99
99 100
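
As an illustration (hypothetical filesystem, assuming the usual 2.6 ->lookup()
prototype), such a lookup routine could be structured as:

#include <linux/fs.h>
#include <linux/dcache.h>
#include <linux/namei.h>

/* hypothetical helper: search 'dir' for 'name', return the inode or NULL */
extern struct inode *examplefs_find_inode(struct inode *dir, struct qstr *name);

static struct dentry *examplefs_lookup(struct inode *dir,
				       struct dentry *dentry,
				       struct nameidata *nd)
{
	struct inode *inode;

	inode = examplefs_find_inode(dir, &dentry->d_name);

	/* d_splice_alias() handles both the found and the not-found case
	 * (a NULL inode behaves like d_add(dentry, NULL)), and it reuses
	 * any anonymous dentry created earlier by filehandle decoding. */
	return d_splice_alias(inode, dentry);
}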
@@ -101,67 +102,39 @@ For a filesystem to be exportable it must:
101 A file system implementation declares that instances of the filesystem 102 A file system implementation declares that instances of the filesystem
102are exportable by setting the s_export_op field in the struct 103are exportable by setting the s_export_op field in the struct
103super_block. This field must point to a "struct export_operations" 104super_block. This field must point to a "struct export_operations"
104struct which could potentially be full of NULLs, though normally at 105struct which has the following members:
105least get_parent will be set. 106
106 107 encode_fh (optional)
107 The primary operations are decode_fh and encode_fh. 108 Takes a dentry and creates a filehandle fragment which can later be used
108decode_fh takes a filehandle fragment and tries to find or create a 109 to find or create a dentry for the same object. The default
109dentry for the object referred to by the filehandle. 110 implementation creates a filehandle fragment that encodes a 32bit inode
110encode_fh takes a dentry and creates a filehandle fragment which can 111 and generation number for the inode encoded, and if necessary the
111later be used to find/create a dentry for the same object. 112 same information for the parent.
112 113
113decode_fh will probably make use of "find_exported_dentry". 114 fh_to_dentry (mandatory)
114This function lives in the "exportfs" module which a filesystem does 115 Given a filehandle fragment, this should find the implied object and
115not need unless it is being exported. So rather that calling 116 create a dentry for it (possibly with d_alloc_anon).
116find_exported_dentry directly, each filesystem should call it through 117
117the find_exported_dentry pointer in it's export_operations table. 118 fh_to_parent (optional but strongly recommended)
118This field is set correctly by the exporting agent (e.g. nfsd) when a 119 Given a filehandle fragment, this should find the parent of the
119filesystem is exported, and before any export operations are called. 120 implied object and create a dentry for it (possibly with d_alloc_anon).
120 121 May fail if the filehandle fragment is too small.
121find_exported_dentry needs three support functions from the 122
122filesystem: 123 get_parent (optional but strongly recommended)
123 get_name. When given a parent dentry and a child dentry, this 124 When given a dentry for a directory, this should return a dentry for
124 should find a name in the directory identified by the parent 125 the parent. Quite possibly the parent dentry will have been allocated
125 dentry, which leads to the object identified by the child dentry. 126 by d_alloc_anon. The default get_parent function just returns an error
126 If no get_name function is supplied, a default implementation is 127 so any filehandle lookup that requires finding a parent will fail.
127 provided which uses vfs_readdir to find potential names, and 128 ->lookup("..") is *not* used as a default as it can leave ".." entries
128 matches inode numbers to find the correct match. 129 in the dcache which are too messy to work with.
129 130
130 get_parent. When given a dentry for a directory, this should return 131 get_name (optional)
131 a dentry for the parent. Quite possibly the parent dentry will 132 When given a parent dentry and a child dentry, this should find a name
132 have been allocated by d_alloc_anon. 133 in the directory identified by the parent dentry, which leads to the
133 The default get_parent function just returns an error so any 134 object identified by the child dentry. If no get_name function is
134 filehandle lookup that requires finding a parent will fail. 135 supplied, a default implementation is provided which uses vfs_readdir
135 ->lookup("..") is *not* used as a default as it can leave ".." 136 to find potential names, and matches inode numbers to find the correct
136 entries in the dcache which are too messy to work with. 137 match.
137
138 get_dentry. When given an opaque datum, this should find the
139 implied object and create a dentry for it (possibly with
140 d_alloc_anon).
141 The opaque datum is whatever is passed down by the decode_fh
142 function, and is often simply a fragment of the filehandle
143 fragment.
144 decode_fh passes two datums through find_exported_dentry. One that
145 should be used to identify the target object, and one that can be
146 used to identify the object's parent, should that be necessary.
147 The default get_dentry function assumes that the datum contains an
148 inode number and a generation number, and it attempts to get the
149 inode using "iget" and check it's validity by matching the
150 generation number. A filesystem should only depend on the default
151 if iget can safely be used this way.
152
153If decode_fh and/or encode_fh are left as NULL, then default
154implementations are used. These defaults are suitable for ext2 and
155extremely similar filesystems (like ext3).
156
157The default encode_fh creates a filehandle fragment from the inode
158number and generation number of the target together with the inode
159number and generation number of the parent (if the parent is
160required).
161
162The default decode_fh extract the target and parent datums from the
163filehandle assuming the format used by the default encode_fh and
164passed them to find_exported_dentry.
165 138
166 139
167A filehandle fragment consists of an array of 1 or more 4byte words, 140A filehandle fragment consists of an array of 1 or more 4byte words,
@@ -172,5 +145,3 @@ generated by encode_fh, in which case it will have been padded with
172nuls. Rather, the encode_fh routine should choose a "type" which 145nuls. Rather, the encode_fh routine should choose a "type" which
173indicates the decode_fh how much of the filehandle is valid, and how 146indicates the decode_fh how much of the filehandle is valid, and how
174it should be interpreted. 147it should be interpreted.
175
176
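
A sketch only (hypothetical filesystem; the members follow the
export_operations description above, using the struct fid based prototypes
from include/linux/exportfs.h, and examplefs_iget() is an assumed helper
that looks an inode up by number and verifies its generation):

#include <linux/fs.h>
#include <linux/exportfs.h>

/* hypothetical helper */
extern struct inode *examplefs_iget(struct super_block *sb,
				    unsigned long ino, u32 generation);

static struct dentry *examplefs_fh_to_dentry(struct super_block *sb,
					     struct fid *fid,
					     int fh_len, int fh_type)
{
	struct inode *inode;
	struct dentry *dentry;

	if (fh_len < 2)
		return NULL;

	inode = examplefs_iget(sb, fid->i32.ino, fid->i32.gen);
	if (!inode)
		return ERR_PTR(-ESTALE);

	dentry = d_alloc_anon(inode);
	if (!dentry) {
		iput(inode);
		return ERR_PTR(-ENOMEM);
	}
	return dentry;
}

static struct export_operations examplefs_export_ops = {
	.fh_to_dentry	= examplefs_fh_to_dentry,
	/* .fh_to_parent, .get_parent and .get_name would be filled in
	 * the same way; see the descriptions above */
};

/* in the filesystem's fill_super(): sb->s_export_op = &examplefs_export_ops; */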
diff --git a/Documentation/i386/boot.txt b/Documentation/i386/boot.txt
index 35985b34d5a6..2f75e750e4f5 100644
--- a/Documentation/i386/boot.txt
+++ b/Documentation/i386/boot.txt
@@ -168,6 +168,8 @@ Offset Proto Name Meaning
1680234/1 2.05+ relocatable_kernel Whether kernel is relocatable or not 1680234/1 2.05+ relocatable_kernel Whether kernel is relocatable or not
1690235/3 N/A pad2 Unused 1690235/3 N/A pad2 Unused
1700238/4 2.06+ cmdline_size Maximum size of the kernel command line 1700238/4 2.06+ cmdline_size Maximum size of the kernel command line
171023C/4 2.07+ hardware_subarch Hardware subarchitecture
1720240/8 2.07+ hardware_subarch_data Subarchitecture-specific data
171 173
172(1) For backwards compatibility, if the setup_sects field contains 0, the 174(1) For backwards compatibility, if the setup_sects field contains 0, the
173 real value is 4. 175 real value is 4.
@@ -204,7 +206,7 @@ boot loaders can ignore those fields.
204 206
205The byte order of all fields is littleendian (this is x86, after all.) 207The byte order of all fields is littleendian (this is x86, after all.)
206 208
207Field name: setup_secs 209Field name: setup_sects
208Type: read 210Type: read
209Offset/size: 0x1f1/1 211Offset/size: 0x1f1/1
210Protocol: ALL 212Protocol: ALL
@@ -356,6 +358,13 @@ Protocol: 2.00+
356 - If 0, the protected-mode code is loaded at 0x10000. 358 - If 0, the protected-mode code is loaded at 0x10000.
357 - If 1, the protected-mode code is loaded at 0x100000. 359 - If 1, the protected-mode code is loaded at 0x100000.
358 360
361 Bit 6 (write): KEEP_SEGMENTS
362 Protocol: 2.07+
363 - if 0, reload the segment registers in the 32bit entry point.
364 - if 1, do not reload the segment registers in the 32bit entry point.
365 Assume that %cs %ds %ss %es are all set to flat segments with
366 a base of 0 (or the equivalent for their environment).
367
359 Bit 7 (write): CAN_USE_HEAP 368 Bit 7 (write): CAN_USE_HEAP
360 Set this bit to 1 to indicate that the value entered in the 369 Set this bit to 1 to indicate that the value entered in the
361 heap_end_ptr is valid. If this field is clear, some setup code 370 heap_end_ptr is valid. If this field is clear, some setup code
@@ -480,6 +489,29 @@ Protocol: 2.06+
480 cmdline_size characters. With protocol version 2.05 and earlier, the 489 cmdline_size characters. With protocol version 2.05 and earlier, the
481 maximum size was 255. 490 maximum size was 255.
482 491
492Field name: hardware_subarch
493Type: write
494Offset/size: 0x23c/4
495Protocol: 2.07+
496
497 In a paravirtualized environment the low-level architectural pieces of
498 the hardware, such as interrupt handling, page table handling, and
499 accessing processor control registers, need to be done differently.
500
501 This field allows the boot loader to inform the kernel that we are in
502 one of those environments.
503
504 0x00000000 The default x86/PC environment
505 0x00000001 lguest
506 0x00000002 Xen
507
508Field name: hardware_subarch_data
509Type: write
510Offset/size: 0x240/8
511Protocol: 2.07+
512
513 A pointer to data that is specific to the hardware subarchitecture.
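
As an illustration of how a boot loader might fill in these two write
fields before jumping to the kernel (a sketch only; the offsets are the
ones listed above, the loader-side buffer name is hypothetical, and all
header fields are little-endian, so a little-endian loader is assumed):

#include <stdint.h>
#include <string.h>

/* 'setup' points at the loader's in-memory copy of the kernel's
 * real-mode setup header (boot protocol 2.07+ assumed). */
static void set_hardware_subarch(uint8_t *setup, uint32_t subarch,
				 uint64_t subarch_data)
{
	/* 0x00000000 = default PC, 0x00000001 = lguest, 0x00000002 = Xen */
	memcpy(setup + 0x23c, &subarch, 4);       /* hardware_subarch      */
	memcpy(setup + 0x240, &subarch_data, 8);  /* hardware_subarch_data */
}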
514
483 515
484**** THE KERNEL COMMAND LINE 516**** THE KERNEL COMMAND LINE
485 517
diff --git a/Documentation/kbuild/makefiles.txt b/Documentation/kbuild/makefiles.txt
index 6166e2d7da76..7a7753321a26 100644
--- a/Documentation/kbuild/makefiles.txt
+++ b/Documentation/kbuild/makefiles.txt
@@ -519,17 +519,17 @@ more details, with real examples.
519 to the user why it stops. 519 to the user why it stops.
520 520
521 cc-cross-prefix 521 cc-cross-prefix
522 cc-cross-prefix is used to check if there exist a $(CC) in path with 522 cc-cross-prefix is used to check if there exists a $(CC) in path with
523 one of the listed prefixes. The first prefix where there exist a 523 one of the listed prefixes. The first prefix where there exist a
524 prefix$(CC) in the PATH is returned - and if no prefix$(CC) is found 524 prefix$(CC) in the PATH is returned - and if no prefix$(CC) is found
525 then nothing is returned. 525 then nothing is returned.
526 Additional prefixes are separated by a single space in the 526 Additional prefixes are separated by a single space in the
527 call of cc-cross-prefix. 527 call of cc-cross-prefix.
528 This functionality is usefull for architecture Makefile that try 528 This functionality is useful for architecture Makefiles that try
529 to set CROSS_COMPILE to well know values but may have several 529 to set CROSS_COMPILE to well-known values but may have several
530 values to select between. 530 values to select between.
531 It is recommended only to try to set CROSS_COMPILE is it is a cross 531 It is recommended only to try to set CROSS_COMPILE if it is a cross
532 build (host arch is different from target arch). And is CROSS_COMPILE 532 build (host arch is different from target arch). And if CROSS_COMPILE
533 is already set then leave it with the old value. 533 is already set then leave it with the old value.
534 534
535 Example: 535 Example:
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 6accd360da73..b2361667839f 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -772,6 +772,23 @@ and is between 256 and 4096 characters. It is defined in the file
772 772
773 inttest= [IA64] 773 inttest= [IA64]
774 774
775 intel_iommu= [DMAR] Intel IOMMU driver (DMAR) option
776 off
777 Disable the Intel IOMMU (DMAR) driver.
778 igfx_off [Default Off]
779 By default, the graphics device is mapped as a normal device.
780 If a graphics device has a dedicated DMAR unit, this option
781 leaves that DMAR unit disabled (i.e. DMAR is not enabled for
782 it). In this case the graphics device will use physical
783 addresses for DMA.
784 forcedac [x86_64]
785 With this option the IOMMU will not try to allocate I/O
786 virtual addresses below 32 bits, forcing dual address cycles
787 on the PCI bus for cards supporting 64-bit addressing. The
788 default is to allocate translations below 32 bits and, if
789 that is not possible, to fall back to allocating from the
790 higher range.
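
	For example (illustrative boot entries; kernel path and root
	device are hypothetical):

		kernel /boot/vmlinuz root=/dev/sda1 ro intel_iommu=igfx_off
		kernel /boot/vmlinuz root=/dev/sda1 ro intel_iommu=off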
791
775 io7= [HW] IO7 for Marvel based alpha systems 792 io7= [HW] IO7 for Marvel based alpha systems
776 See comment before marvel_specify_io7 in 793 See comment before marvel_specify_io7 in
777 arch/alpha/kernel/core_marvel.c. 794 arch/alpha/kernel/core_marvel.c.
diff --git a/Documentation/memory-hotplug.txt b/Documentation/memory-hotplug.txt
index 5fbcc22c98e9..168117bd6ee8 100644
--- a/Documentation/memory-hotplug.txt
+++ b/Documentation/memory-hotplug.txt
@@ -2,7 +2,8 @@
2Memory Hotplug 2Memory Hotplug
3============== 3==============
4 4
5Last Updated: Jul 28 2007 5Created: Jul 28 2007
6Added description of the memory hotplug event notifier: Oct 11 2007
6 7
7This document is about memory hotplug including how-to-use and current status. 8This document is about memory hotplug including how-to-use and current status.
8Because Memory Hotplug is still under development, contents of this text will 9Because Memory Hotplug is still under development, contents of this text will
@@ -24,7 +25,8 @@ be changed often.
24 6.1 Memory offline and ZONE_MOVABLE 25 6.1 Memory offline and ZONE_MOVABLE
25 6.2. How to offline memory 26 6.2. How to offline memory
267. Physical memory remove 277. Physical memory remove
278. Future Work List 288. Memory hotplug event notifier
299. Future Work List
28 30
29Note(1): x86_64's has special implementation for memory hotplug. 31Note(1): x86_64's has special implementation for memory hotplug.
30 This text does not describe it. 32 This text does not describe it.
@@ -307,8 +309,58 @@ Need more implementation yet....
307 - Notification completion of remove works by OS to firmware. 309 - Notification completion of remove works by OS to firmware.
308 - Guard from remove if not yet. 310 - Guard from remove if not yet.
309 311
312--------------------------------
3138. Memory hotplug event notifier
314--------------------------------
315Memory hotplug has an event notifier. There are six types of notification:
316
317MEMORY_GOING_ONLINE
318 Generated before new memory becomes available in order to be able to
319 prepare subsystems to handle memory. The page allocator is still unable
320 to allocate from the new memory.
321
322MEMORY_CANCEL_ONLINE
323 Generated if MEMORY_GOING_ONLINE fails.
324
325MEMORY_ONLINE
326 Generated when memory has been successfully brought online. The callback may
327 allocate pages from the new memory.
328
329MEMORY_GOING_OFFLINE
330 Generated to begin the process of offlining memory. Allocations are no
331 longer possible from the memory but some of the memory to be offlined
332 is still in use. The callback can be used to free memory known to a
333 subsystem from the indicated memory section.
334
335MEMORY_CANCEL_OFFLINE
336 Generated if MEMORY_GOING_OFFLINE fails. Memory is available again from
337 the section that we attempted to offline.
338
339MEMORY_OFFLINE
340 Generated after offlining memory is complete.
341
342A callback routine can be registered by
343 hotplug_memory_notifier(callback_func, priority)
344
345The second argument of callback function (action) is event types of above.
346The third argument is passed by pointer of struct memory_notify.
347
348struct memory_notify {
349 unsigned long start_pfn;
350 unsigned long nr_pages;
351 int status_change_nid;
352};
353
354start_pfn is the first pfn of the memory being onlined/offlined.
355nr_pages is the number of pages of the memory being onlined/offlined.
356status_change_nid is the node id whose N_HIGH_MEMORY nodemask bit is (or will
357be) set or cleared: a new (memoryless) node gains memory by onlining, or a
358node loses all of its memory. If this is -1, the nodemask status is not changed.
359If status_change_nid >= 0, the callback should create/discard structures for the
360node if necessary.
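
A minimal sketch of such a callback, registered from a (hypothetical)
subsystem's init code, could look like this; the MEM_* action constants
are assumed to be the ones defined in include/linux/memory.h for the
events listed above:

#include <linux/memory.h>
#include <linux/notifier.h>

static int example_mem_callback(struct notifier_block *self,
				unsigned long action, void *arg)
{
	struct memory_notify *mn = arg;

	switch (action) {
	case MEM_GOING_ONLINE:
		/* prepare for pages in mn->start_pfn .. start_pfn + nr_pages;
		 * mn->status_change_nid >= 0 means a node is about to gain
		 * its first memory */
		break;
	case MEM_OFFLINE:
		/* the memory is gone; drop anything cached for that range */
		break;
	}
	return NOTIFY_OK;
}

static int __init example_init(void)
{
	hotplug_memory_notifier(example_mem_callback, 0);
	return 0;
}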
361
310-------------- 362--------------
3118. Future Work 3639. Future Work
312-------------- 364--------------
313 - allowing memory hot-add to ZONE_MOVABLE. maybe we need some switch like 365 - allowing memory hot-add to ZONE_MOVABLE. maybe we need some switch like
314 sysctl or new control file. 366 sysctl or new control file.
diff --git a/Documentation/powerpc/mpc52xx-device-tree-bindings.txt b/Documentation/powerpc/mpc52xx-device-tree-bindings.txt
index 5f7d536cb0c6..5e03610e186f 100644
--- a/Documentation/powerpc/mpc52xx-device-tree-bindings.txt
+++ b/Documentation/powerpc/mpc52xx-device-tree-bindings.txt
@@ -185,7 +185,7 @@ bestcomm@<addr> dma-controller mpc5200-bestcomm 5200 pic also requires
185Recommended soc5200 child nodes; populate as needed for your board 185Recommended soc5200 child nodes; populate as needed for your board
186name device_type compatible Description 186name device_type compatible Description
187---- ----------- ---------- ----------- 187---- ----------- ---------- -----------
188gpt@<addr> gpt mpc5200-gpt General purpose timers 188gpt@<addr> gpt fsl,mpc5200-gpt General purpose timers
189rtc@<addr> rtc mpc5200-rtc Real time clock 189rtc@<addr> rtc mpc5200-rtc Real time clock
190mscan@<addr> mscan mpc5200-mscan CAN bus controller 190mscan@<addr> mscan mpc5200-mscan CAN bus controller
191pci@<addr> pci mpc5200-pci PCI bridge 191pci@<addr> pci mpc5200-pci PCI bridge
@@ -213,7 +213,7 @@ cell-index int When multiple devices are present, is the
2135) General Purpose Timer nodes (child of soc5200 node) 2135) General Purpose Timer nodes (child of soc5200 node)
214On the mpc5200 and 5200b, GPT0 has a watchdog timer function. If the board 214On the mpc5200 and 5200b, GPT0 has a watchdog timer function. If the board
215design supports the internal wdt, then the device node for GPT0 should 215design supports the internal wdt, then the device node for GPT0 should
216include the empty property 'has-wdt'. 216include the empty property 'fsl,has-wdt'.
217 217
2186) PSC nodes (child of soc5200 node) 2186) PSC nodes (child of soc5200 node)
219PSC nodes can define the optional 'port-number' property to force assignment 219PSC nodes can define the optional 'port-number' property to force assignment
diff --git a/MAINTAINERS b/MAINTAINERS
index 1fd6d02a79b8..40245af2d0e3 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2338,6 +2338,8 @@ L: linuxppc-dev@ozlabs.org
2338S: Maintained 2338S: Maintained
2339 2339
2340LINUX FOR POWERPC EMBEDDED PPC8XX 2340LINUX FOR POWERPC EMBEDDED PPC8XX
2341P: Vitaly Bordug
2342M: vitb@kernel.crashing.org
2341P: Marcelo Tosatti 2343P: Marcelo Tosatti
2342M: marcelo@kvack.org 2344M: marcelo@kvack.org
2343W: http://www.penguinppc.org/ 2345W: http://www.penguinppc.org/
diff --git a/Makefile b/Makefile
index 50bb50defe18..264f37b8b263 100644
--- a/Makefile
+++ b/Makefile
@@ -1505,15 +1505,16 @@ quiet_cmd_rmfiles = $(if $(wildcard $(rm-files)),CLEAN $(wildcard $(rm-files))
1505# and we build for the host arch 1505# and we build for the host arch
1506quiet_cmd_depmod = DEPMOD $(KERNELRELEASE) 1506quiet_cmd_depmod = DEPMOD $(KERNELRELEASE)
1507 cmd_depmod = \ 1507 cmd_depmod = \
1508 if [ -r System.map -a -x $(DEPMOD) -a "$(SUBARCH)" = "$(ARCH)" ]; then \ 1508 if [ -r System.map -a -x $(DEPMOD) ]; then \
1509 $(DEPMOD) -ae -F System.map \ 1509 $(DEPMOD) -ae -F System.map \
1510 $(if $(strip $(INSTALL_MOD_PATH)), -b $(INSTALL_MOD_PATH) -r) \ 1510 $(if $(strip $(INSTALL_MOD_PATH)), -b $(INSTALL_MOD_PATH) -r) \
1511 $(KERNELRELEASE); \ 1511 $(KERNELRELEASE); \
1512 fi 1512 fi
1513 1513
1514# Create temporary dir for module support files 1514# Create temporary dir for module support files
1515cmd_crmodverdir = $(Q)mkdir -p $(MODVERDIR); rm -f $(MODVERDIR)/* 1515# clean it up only when building all modules
1516 1516cmd_crmodverdir = $(Q)mkdir -p $(MODVERDIR) \
1517 $(if $(KBUILD_MODULES),; rm -f $(MODVERDIR)/*)
1517 1518
1518a_flags = -Wp,-MD,$(depfile) $(KBUILD_AFLAGS) $(AFLAGS_KERNEL) \ 1519a_flags = -Wp,-MD,$(depfile) $(KBUILD_AFLAGS) $(AFLAGS_KERNEL) \
1519 $(NOSTDINC_FLAGS) $(KBUILD_CPPFLAGS) \ 1520 $(NOSTDINC_FLAGS) $(KBUILD_CPPFLAGS) \
diff --git a/arch/alpha/kernel/pci_iommu.c b/arch/alpha/kernel/pci_iommu.c
index e1c470752ebc..2d00a08d3f08 100644
--- a/arch/alpha/kernel/pci_iommu.c
+++ b/arch/alpha/kernel/pci_iommu.c
@@ -7,6 +7,7 @@
7#include <linux/pci.h> 7#include <linux/pci.h>
8#include <linux/slab.h> 8#include <linux/slab.h>
9#include <linux/bootmem.h> 9#include <linux/bootmem.h>
10#include <linux/scatterlist.h>
10#include <linux/log2.h> 11#include <linux/log2.h>
11 12
12#include <asm/io.h> 13#include <asm/io.h>
@@ -465,7 +466,7 @@ EXPORT_SYMBOL(pci_free_consistent);
465 Write dma_length of each leader with the combined lengths of 466 Write dma_length of each leader with the combined lengths of
466 the mergable followers. */ 467 the mergable followers. */
467 468
468#define SG_ENT_VIRT_ADDRESS(SG) (page_address((SG)->page) + (SG)->offset) 469#define SG_ENT_VIRT_ADDRESS(SG) (sg_virt((SG)))
469#define SG_ENT_PHYS_ADDRESS(SG) __pa(SG_ENT_VIRT_ADDRESS(SG)) 470#define SG_ENT_PHYS_ADDRESS(SG) __pa(SG_ENT_VIRT_ADDRESS(SG))
470 471
471static void 472static void
diff --git a/arch/arm/common/dmabounce.c b/arch/arm/common/dmabounce.c
index 44ab0dad4035..52fc6a883281 100644
--- a/arch/arm/common/dmabounce.c
+++ b/arch/arm/common/dmabounce.c
@@ -29,6 +29,7 @@
29#include <linux/dma-mapping.h> 29#include <linux/dma-mapping.h>
30#include <linux/dmapool.h> 30#include <linux/dmapool.h>
31#include <linux/list.h> 31#include <linux/list.h>
32#include <linux/scatterlist.h>
32 33
33#include <asm/cacheflush.h> 34#include <asm/cacheflush.h>
34 35
@@ -442,7 +443,7 @@ dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
442 BUG_ON(dir == DMA_NONE); 443 BUG_ON(dir == DMA_NONE);
443 444
444 for (i = 0; i < nents; i++, sg++) { 445 for (i = 0; i < nents; i++, sg++) {
445 struct page *page = sg->page; 446 struct page *page = sg_page(sg);
446 unsigned int offset = sg->offset; 447 unsigned int offset = sg->offset;
447 unsigned int length = sg->length; 448 unsigned int length = sg->length;
448 void *ptr = page_address(page) + offset; 449 void *ptr = page_address(page) + offset;
diff --git a/arch/avr32/boards/atstk1000/atstk1002.c b/arch/avr32/boards/atstk1000/atstk1002.c
index 6b9e466104ad..5be0d13f4b03 100644
--- a/arch/avr32/boards/atstk1000/atstk1002.c
+++ b/arch/avr32/boards/atstk1000/atstk1002.c
@@ -16,6 +16,7 @@
16#include <linux/string.h> 16#include <linux/string.h>
17#include <linux/types.h> 17#include <linux/types.h>
18#include <linux/spi/spi.h> 18#include <linux/spi/spi.h>
19#include <linux/spi/at73c213.h>
19 20
20#include <video/atmel_lcdc.h> 21#include <video/atmel_lcdc.h>
21 22
@@ -49,7 +50,26 @@ static struct eth_platform_data __initdata eth_data[2] = {
49}; 50};
50 51
51#ifndef CONFIG_BOARD_ATSTK1002_SW1_CUSTOM 52#ifndef CONFIG_BOARD_ATSTK1002_SW1_CUSTOM
53#ifndef CONFIG_BOARD_ATSTK1002_SW3_CUSTOM
54static struct at73c213_board_info at73c213_data = {
55 .ssc_id = 0,
56 .shortname = "AVR32 STK1000 external DAC",
57};
58#endif
59#endif
60
61#ifndef CONFIG_BOARD_ATSTK1002_SW1_CUSTOM
52static struct spi_board_info spi0_board_info[] __initdata = { 62static struct spi_board_info spi0_board_info[] __initdata = {
63#ifndef CONFIG_BOARD_ATSTK1002_SW3_CUSTOM
64 {
65 /* AT73C213 */
66 .modalias = "at73c213",
67 .max_speed_hz = 200000,
68 .chip_select = 0,
69 .mode = SPI_MODE_1,
70 .platform_data = &at73c213_data,
71 },
72#endif
53 { 73 {
54 /* QVGA display */ 74 /* QVGA display */
55 .modalias = "ltv350qv", 75 .modalias = "ltv350qv",
@@ -180,6 +200,38 @@ static void setup_j2_leds(void)
180} 200}
181#endif 201#endif
182 202
203#ifndef CONFIG_BOARD_ATSTK1002_SW1_CUSTOM
204#ifndef CONFIG_BOARD_ATSTK1002_SW3_CUSTOM
205static void __init at73c213_set_clk(struct at73c213_board_info *info)
206{
207 struct clk *gclk;
208 struct clk *pll;
209
210 gclk = clk_get(NULL, "gclk0");
211 if (IS_ERR(gclk))
212 goto err_gclk;
213 pll = clk_get(NULL, "pll0");
214 if (IS_ERR(pll))
215 goto err_pll;
216
217 if (clk_set_parent(gclk, pll)) {
218 pr_debug("STK1000: failed to set pll0 as parent for DAC clock\n");
219 goto err_set_clk;
220 }
221
222 at32_select_periph(GPIO_PIN_PA(30), GPIO_PERIPH_A, 0);
223 info->dac_clk = gclk;
224
225err_set_clk:
226 clk_put(pll);
227err_pll:
228 clk_put(gclk);
229err_gclk:
230 return;
231}
232#endif
233#endif
234
183void __init setup_board(void) 235void __init setup_board(void)
184{ 236{
185#ifdef CONFIG_BOARD_ATSTK1002_SW2_CUSTOM 237#ifdef CONFIG_BOARD_ATSTK1002_SW2_CUSTOM
@@ -248,6 +300,12 @@ static int __init atstk1002_init(void)
248 300
249 setup_j2_leds(); 301 setup_j2_leds();
250 302
303#ifndef CONFIG_BOARD_ATSTK1002_SW3_CUSTOM
304#ifndef CONFIG_BOARD_ATSTK1002_SW1_CUSTOM
305 at73c213_set_clk(&at73c213_data);
306#endif
307#endif
308
251 return 0; 309 return 0;
252} 310}
253postcore_initcall(atstk1002_init); 311postcore_initcall(atstk1002_init);
diff --git a/arch/avr32/mach-at32ap/at32ap7000.c b/arch/avr32/mach-at32ap/at32ap7000.c
index f6d154ca4d24..a9d9ec081e3d 100644
--- a/arch/avr32/mach-at32ap/at32ap7000.c
+++ b/arch/avr32/mach-at32ap/at32ap7000.c
@@ -556,6 +556,17 @@ static struct clk pico_clk = {
556 .users = 1, 556 .users = 1,
557}; 557};
558 558
559static struct resource dmaca0_resource[] = {
560 {
561 .start = 0xff200000,
562 .end = 0xff20ffff,
563 .flags = IORESOURCE_MEM,
564 },
565 IRQ(2),
566};
567DEFINE_DEV(dmaca, 0);
568DEV_CLK(hclk, dmaca0, hsb, 10);
569
559/* -------------------------------------------------------------------- 570/* --------------------------------------------------------------------
560 * HMATRIX 571 * HMATRIX
561 * -------------------------------------------------------------------- */ 572 * -------------------------------------------------------------------- */
@@ -655,6 +666,7 @@ void __init at32_add_system_devices(void)
655 platform_device_register(&at32_eic0_device); 666 platform_device_register(&at32_eic0_device);
656 platform_device_register(&smc0_device); 667 platform_device_register(&smc0_device);
657 platform_device_register(&pdc_device); 668 platform_device_register(&pdc_device);
669 platform_device_register(&dmaca0_device);
658 670
659 platform_device_register(&at32_systc0_device); 671 platform_device_register(&at32_systc0_device);
660 672
@@ -960,6 +972,96 @@ at32_add_device_spi(unsigned int id, struct spi_board_info *b, unsigned int n)
960} 972}
961 973
962/* -------------------------------------------------------------------- 974/* --------------------------------------------------------------------
975 * TWI
976 * -------------------------------------------------------------------- */
977static struct resource atmel_twi0_resource[] __initdata = {
978 PBMEM(0xffe00800),
979 IRQ(5),
980};
981static struct clk atmel_twi0_pclk = {
982 .name = "twi_pclk",
983 .parent = &pba_clk,
984 .mode = pba_clk_mode,
985 .get_rate = pba_clk_get_rate,
986 .index = 2,
987};
988
989struct platform_device *__init at32_add_device_twi(unsigned int id)
990{
991 struct platform_device *pdev;
992
993 if (id != 0)
994 return NULL;
995
996 pdev = platform_device_alloc("atmel_twi", id);
997 if (!pdev)
998 return NULL;
999
1000 if (platform_device_add_resources(pdev, atmel_twi0_resource,
1001 ARRAY_SIZE(atmel_twi0_resource)))
1002 goto err_add_resources;
1003
1004 select_peripheral(PA(6), PERIPH_A, 0); /* SDA */
1005 select_peripheral(PA(7), PERIPH_A, 0); /* SCL */
1006
1007 atmel_twi0_pclk.dev = &pdev->dev;
1008
1009 platform_device_add(pdev);
1010 return pdev;
1011
1012err_add_resources:
1013 platform_device_put(pdev);
1014 return NULL;
1015}
1016
1017/* --------------------------------------------------------------------
1018 * MMC
1019 * -------------------------------------------------------------------- */
1020static struct resource atmel_mci0_resource[] __initdata = {
1021 PBMEM(0xfff02400),
1022 IRQ(28),
1023};
1024static struct clk atmel_mci0_pclk = {
1025 .name = "mci_clk",
1026 .parent = &pbb_clk,
1027 .mode = pbb_clk_mode,
1028 .get_rate = pbb_clk_get_rate,
1029 .index = 9,
1030};
1031
1032struct platform_device *__init at32_add_device_mci(unsigned int id)
1033{
1034 struct platform_device *pdev;
1035
1036 if (id != 0)
1037 return NULL;
1038
1039 pdev = platform_device_alloc("atmel_mci", id);
1040 if (!pdev)
1041 return NULL;
1042
1043 if (platform_device_add_resources(pdev, atmel_mci0_resource,
1044 ARRAY_SIZE(atmel_mci0_resource)))
1045 goto err_add_resources;
1046
1047 select_peripheral(PA(10), PERIPH_A, 0); /* CLK */
1048 select_peripheral(PA(11), PERIPH_A, 0); /* CMD */
1049 select_peripheral(PA(12), PERIPH_A, 0); /* DATA0 */
1050 select_peripheral(PA(13), PERIPH_A, 0); /* DATA1 */
1051 select_peripheral(PA(14), PERIPH_A, 0); /* DATA2 */
1052 select_peripheral(PA(15), PERIPH_A, 0); /* DATA3 */
1053
1054 atmel_mci0_pclk.dev = &pdev->dev;
1055
1056 platform_device_add(pdev);
1057 return pdev;
1058
1059err_add_resources:
1060 platform_device_put(pdev);
1061 return NULL;
1062}
1063
1064/* --------------------------------------------------------------------
963 * LCDC 1065 * LCDC
964 * -------------------------------------------------------------------- */ 1066 * -------------------------------------------------------------------- */
965static struct atmel_lcdfb_info atmel_lcdfb0_data; 1067static struct atmel_lcdfb_info atmel_lcdfb0_data;
@@ -1228,6 +1330,241 @@ out_free_pdev:
1228} 1330}
1229 1331
1230/* -------------------------------------------------------------------- 1332/* --------------------------------------------------------------------
1333 * IDE / CompactFlash
1334 * -------------------------------------------------------------------- */
1335static struct resource at32_smc_cs4_resource[] __initdata = {
1336 {
1337 .start = 0x04000000,
1338 .end = 0x07ffffff,
1339 .flags = IORESOURCE_MEM,
1340 },
1341 IRQ(~0UL), /* Magic IRQ will be overridden */
1342};
1343static struct resource at32_smc_cs5_resource[] __initdata = {
1344 {
1345 .start = 0x20000000,
1346 .end = 0x23ffffff,
1347 .flags = IORESOURCE_MEM,
1348 },
1349 IRQ(~0UL), /* Magic IRQ will be overridden */
1350};
1351
1352static int __init at32_init_ide_or_cf(struct platform_device *pdev,
1353 unsigned int cs, unsigned int extint)
1354{
1355 static unsigned int extint_pin_map[4] __initdata = {
1356 GPIO_PIN_PB(25),
1357 GPIO_PIN_PB(26),
1358 GPIO_PIN_PB(27),
1359 GPIO_PIN_PB(28),
1360 };
1361 static bool common_pins_initialized __initdata = false;
1362 unsigned int extint_pin;
1363 int ret;
1364
1365 if (extint >= ARRAY_SIZE(extint_pin_map))
1366 return -EINVAL;
1367 extint_pin = extint_pin_map[extint];
1368
1369 switch (cs) {
1370 case 4:
1371 ret = platform_device_add_resources(pdev,
1372 at32_smc_cs4_resource,
1373 ARRAY_SIZE(at32_smc_cs4_resource));
1374 if (ret)
1375 return ret;
1376
1377 select_peripheral(PE(21), PERIPH_A, 0); /* NCS4 -> OE_N */
1378 set_ebi_sfr_bits(HMATRIX_BIT(CS4A));
1379 break;
1380 case 5:
1381 ret = platform_device_add_resources(pdev,
1382 at32_smc_cs5_resource,
1383 ARRAY_SIZE(at32_smc_cs5_resource));
1384 if (ret)
1385 return ret;
1386
1387 select_peripheral(PE(22), PERIPH_A, 0); /* NCS5 -> OE_N */
1388 set_ebi_sfr_bits(HMATRIX_BIT(CS5A));
1389 break;
1390 default:
1391 return -EINVAL;
1392 }
1393
1394 if (!common_pins_initialized) {
1395 select_peripheral(PE(19), PERIPH_A, 0); /* CFCE1 -> CS0_N */
1396 select_peripheral(PE(20), PERIPH_A, 0); /* CFCE2 -> CS1_N */
1397 select_peripheral(PE(23), PERIPH_A, 0); /* CFRNW -> DIR */
1398 select_peripheral(PE(24), PERIPH_A, 0); /* NWAIT <- IORDY */
1399 common_pins_initialized = true;
1400 }
1401
1402 at32_select_periph(extint_pin, GPIO_PERIPH_A, AT32_GPIOF_DEGLITCH);
1403
1404 pdev->resource[1].start = EIM_IRQ_BASE + extint;
1405 pdev->resource[1].end = pdev->resource[1].start;
1406
1407 return 0;
1408}
1409
1410struct platform_device *__init
1411at32_add_device_ide(unsigned int id, unsigned int extint,
1412 struct ide_platform_data *data)
1413{
1414 struct platform_device *pdev;
1415
1416 pdev = platform_device_alloc("at32_ide", id);
1417 if (!pdev)
1418 goto fail;
1419
1420 if (platform_device_add_data(pdev, data,
1421 sizeof(struct ide_platform_data)))
1422 goto fail;
1423
1424 if (at32_init_ide_or_cf(pdev, data->cs, extint))
1425 goto fail;
1426
1427 platform_device_add(pdev);
1428 return pdev;
1429
1430fail:
1431 platform_device_put(pdev);
1432 return NULL;
1433}
1434
1435struct platform_device *__init
1436at32_add_device_cf(unsigned int id, unsigned int extint,
1437 struct cf_platform_data *data)
1438{
1439 struct platform_device *pdev;
1440
1441 pdev = platform_device_alloc("at32_cf", id);
1442 if (!pdev)
1443 goto fail;
1444
1445 if (platform_device_add_data(pdev, data,
1446 sizeof(struct cf_platform_data)))
1447 goto fail;
1448
1449 if (at32_init_ide_or_cf(pdev, data->cs, extint))
1450 goto fail;
1451
1452 if (data->detect_pin != GPIO_PIN_NONE)
1453 at32_select_gpio(data->detect_pin, AT32_GPIOF_DEGLITCH);
1454 if (data->reset_pin != GPIO_PIN_NONE)
1455 at32_select_gpio(data->reset_pin, 0);
1456 if (data->vcc_pin != GPIO_PIN_NONE)
1457 at32_select_gpio(data->vcc_pin, 0);
1458 /* READY is used as extint, so we can't select it as gpio */
1459
1460 platform_device_add(pdev);
1461 return pdev;
1462
1463fail:
1464 platform_device_put(pdev);
1465 return NULL;
1466}
1467
1468/* --------------------------------------------------------------------
1469 * AC97C
1470 * -------------------------------------------------------------------- */
1471static struct resource atmel_ac97c0_resource[] __initdata = {
1472 PBMEM(0xfff02800),
1473 IRQ(29),
1474};
1475static struct clk atmel_ac97c0_pclk = {
1476 .name = "pclk",
1477 .parent = &pbb_clk,
1478 .mode = pbb_clk_mode,
1479 .get_rate = pbb_clk_get_rate,
1480 .index = 10,
1481};
1482
1483struct platform_device *__init at32_add_device_ac97c(unsigned int id)
1484{
1485 struct platform_device *pdev;
1486
1487 if (id != 0)
1488 return NULL;
1489
1490 pdev = platform_device_alloc("atmel_ac97c", id);
1491 if (!pdev)
1492 return NULL;
1493
1494 if (platform_device_add_resources(pdev, atmel_ac97c0_resource,
1495 ARRAY_SIZE(atmel_ac97c0_resource)))
1496 goto err_add_resources;
1497
1498 select_peripheral(PB(20), PERIPH_B, 0); /* SYNC */
1499 select_peripheral(PB(21), PERIPH_B, 0); /* SDO */
1500 select_peripheral(PB(22), PERIPH_B, 0); /* SDI */
1501 select_peripheral(PB(23), PERIPH_B, 0); /* SCLK */
1502
1503 atmel_ac97c0_pclk.dev = &pdev->dev;
1504
1505 platform_device_add(pdev);
1506 return pdev;
1507
1508err_add_resources:
1509 platform_device_put(pdev);
1510 return NULL;
1511}
1512
1513/* --------------------------------------------------------------------
1514 * ABDAC
1515 * -------------------------------------------------------------------- */
1516static struct resource abdac0_resource[] __initdata = {
1517 PBMEM(0xfff02000),
1518 IRQ(27),
1519};
1520static struct clk abdac0_pclk = {
1521 .name = "pclk",
1522 .parent = &pbb_clk,
1523 .mode = pbb_clk_mode,
1524 .get_rate = pbb_clk_get_rate,
1525 .index = 8,
1526};
1527static struct clk abdac0_sample_clk = {
1528 .name = "sample_clk",
1529 .mode = genclk_mode,
1530 .get_rate = genclk_get_rate,
1531 .set_rate = genclk_set_rate,
1532 .set_parent = genclk_set_parent,
1533 .index = 6,
1534};
1535
1536struct platform_device *__init at32_add_device_abdac(unsigned int id)
1537{
1538 struct platform_device *pdev;
1539
1540 if (id != 0)
1541 return NULL;
1542
1543 pdev = platform_device_alloc("abdac", id);
1544 if (!pdev)
1545 return NULL;
1546
1547 if (platform_device_add_resources(pdev, abdac0_resource,
1548 ARRAY_SIZE(abdac0_resource)))
1549 goto err_add_resources;
1550
1551 select_peripheral(PB(20), PERIPH_A, 0); /* DATA1 */
1552 select_peripheral(PB(21), PERIPH_A, 0); /* DATA0 */
1553 select_peripheral(PB(22), PERIPH_A, 0); /* DATAN1 */
1554 select_peripheral(PB(23), PERIPH_A, 0); /* DATAN0 */
1555
1556 abdac0_pclk.dev = &pdev->dev;
1557 abdac0_sample_clk.dev = &pdev->dev;
1558
1559 platform_device_add(pdev);
1560 return pdev;
1561
1562err_add_resources:
1563 platform_device_put(pdev);
1564 return NULL;
1565}
1566
1567/* --------------------------------------------------------------------
1231 * GCLK 1568 * GCLK
1232 * -------------------------------------------------------------------- */ 1569 * -------------------------------------------------------------------- */
1233static struct clk gclk0 = { 1570static struct clk gclk0 = {
@@ -1290,6 +1627,7 @@ struct clk *at32_clock_list[] = {
1290 &smc0_mck, 1627 &smc0_mck,
1291 &pdc_hclk, 1628 &pdc_hclk,
1292 &pdc_pclk, 1629 &pdc_pclk,
1630 &dmaca0_hclk,
1293 &pico_clk, 1631 &pico_clk,
1294 &pio0_mck, 1632 &pio0_mck,
1295 &pio1_mck, 1633 &pio1_mck,
@@ -1307,6 +1645,8 @@ struct clk *at32_clock_list[] = {
1307 &macb1_pclk, 1645 &macb1_pclk,
1308 &atmel_spi0_spi_clk, 1646 &atmel_spi0_spi_clk,
1309 &atmel_spi1_spi_clk, 1647 &atmel_spi1_spi_clk,
1648 &atmel_twi0_pclk,
1649 &atmel_mci0_pclk,
1310 &atmel_lcdfb0_hck1, 1650 &atmel_lcdfb0_hck1,
1311 &atmel_lcdfb0_pixclk, 1651 &atmel_lcdfb0_pixclk,
1312 &ssc0_pclk, 1652 &ssc0_pclk,
@@ -1314,6 +1654,9 @@ struct clk *at32_clock_list[] = {
1314 &ssc2_pclk, 1654 &ssc2_pclk,
1315 &usba0_hclk, 1655 &usba0_hclk,
1316 &usba0_pclk, 1656 &usba0_pclk,
1657 &atmel_ac97c0_pclk,
1658 &abdac0_pclk,
1659 &abdac0_sample_clk,
1317 &gclk0, 1660 &gclk0,
1318 &gclk1, 1661 &gclk1,
1319 &gclk2, 1662 &gclk2,
@@ -1355,6 +1698,7 @@ void __init at32_clock_init(void)
1355 genclk_init_parent(&gclk3); 1698 genclk_init_parent(&gclk3);
1356 genclk_init_parent(&gclk4); 1699 genclk_init_parent(&gclk4);
1357 genclk_init_parent(&atmel_lcdfb0_pixclk); 1700 genclk_init_parent(&atmel_lcdfb0_pixclk);
1701 genclk_init_parent(&abdac0_sample_clk);
1358 1702
1359 /* 1703 /*
1360 * Turn on all clocks that have at least one user already, and 1704 * Turn on all clocks that have at least one user already, and
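
The TWI, MMC, AC97C, ABDAC and IDE/CF helpers added above all follow the established at32_add_device_*() pattern: allocate the platform device, attach resources and any platform data, mux the pins, then register. A hypothetical board-setup fragment using them might look as follows; the device IDs, EXTINT number and chip-select value are purely illustrative, and struct ide_platform_data is assumed to be declared in the board header. Note that AC97C and ABDAC both claim PB20..PB23 (PERIPH_B vs. PERIPH_A), so a real board enables only one of the two.

    static struct ide_platform_data ide_data __initdata = {
            .cs = 4,                        /* CF/IDE socket wired to NCS4 */
    };

    static int __init example_board_init(void)
    {
            at32_add_device_twi(0);         /* TWI (I2C) bus */
            at32_add_device_mci(0);         /* MMC/SD slot */
            at32_add_device_abdac(0);       /* audio bitstream DAC */
            /* at32_add_device_ac97c(0) would conflict with the ABDAC pins */
            at32_add_device_ide(0, 0, &ide_data);   /* IDE on NCS4, EXTINT0 */

            return 0;
    }
    postcore_initcall(example_board_init);
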
diff --git a/arch/avr32/mach-at32ap/extint.c b/arch/avr32/mach-at32ap/extint.c
index 8acd01090031..f5bfd4c81fe7 100644
--- a/arch/avr32/mach-at32ap/extint.c
+++ b/arch/avr32/mach-at32ap/extint.c
@@ -142,7 +142,7 @@ static int eic_set_irq_type(unsigned int irq, unsigned int flow_type)
142 return ret; 142 return ret;
143} 143}
144 144
145struct irq_chip eic_chip = { 145static struct irq_chip eic_chip = {
146 .name = "eic", 146 .name = "eic",
147 .ack = eic_ack_irq, 147 .ack = eic_ack_irq,
148 .mask = eic_mask_irq, 148 .mask = eic_mask_irq,
diff --git a/arch/avr32/mach-at32ap/pm.h b/arch/avr32/mach-at32ap/pm.h
index 47efd0d1951f..694d521edc2f 100644
--- a/arch/avr32/mach-at32ap/pm.h
+++ b/arch/avr32/mach-at32ap/pm.h
@@ -113,8 +113,8 @@
113 113
114/* Register access macros */ 114/* Register access macros */
115#define pm_readl(reg) \ 115#define pm_readl(reg) \
116 __raw_readl((void __iomem *)AT32_PM_BASE + PM_##reg) 116 __raw_readl((void __iomem __force *)AT32_PM_BASE + PM_##reg)
117#define pm_writel(reg,value) \ 117#define pm_writel(reg,value) \
118 __raw_writel((value), (void __iomem *)AT32_PM_BASE + PM_##reg) 118 __raw_writel((value), (void __iomem __force *)AT32_PM_BASE + PM_##reg)
119 119
120#endif /* __ARCH_AVR32_MACH_AT32AP_PM_H__ */ 120#endif /* __ARCH_AVR32_MACH_AT32AP_PM_H__ */
diff --git a/arch/avr32/mach-at32ap/time-tc.c b/arch/avr32/mach-at32ap/time-tc.c
index e3070bdd4bb9..10265863c982 100644
--- a/arch/avr32/mach-at32ap/time-tc.c
+++ b/arch/avr32/mach-at32ap/time-tc.c
@@ -79,7 +79,7 @@ static int avr32_timer_calc_div_and_set_jiffies(struct clk *pclk)
79{ 79{
80 unsigned int cycles_max = (clocksource_avr32.mask + 1) / 2; 80 unsigned int cycles_max = (clocksource_avr32.mask + 1) / 2;
81 unsigned int divs[] = { 4, 8, 16, 32 }; 81 unsigned int divs[] = { 4, 8, 16, 32 };
82 int divs_size = sizeof(divs) / sizeof(*divs); 82 int divs_size = ARRAY_SIZE(divs);
83 int i = 0; 83 int i = 0;
84 unsigned long count_hz; 84 unsigned long count_hz;
85 unsigned long shift; 85 unsigned long shift;
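
The ARRAY_SIZE() change above is purely cosmetic; ignoring the kernel's extra compile-time check that the argument really is an array, the macro expands to the same expression that was open-coded before:

    /* simplified -- the kernel version also rejects non-array arguments */
    #define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
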
diff --git a/arch/blackfin/Makefile b/arch/blackfin/Makefile
index 3c87291bcdab..f7cac7c51e7e 100644
--- a/arch/blackfin/Makefile
+++ b/arch/blackfin/Makefile
@@ -12,8 +12,8 @@ LDFLAGS_vmlinux := -X
12OBJCOPYFLAGS := -O binary -R .note -R .comment -S 12OBJCOPYFLAGS := -O binary -R .note -R .comment -S
13GZFLAGS := -9 13GZFLAGS := -9
14 14
15CFLAGS += $(call cc-option,-mno-fdpic) 15KBUILD_CFLAGS += $(call cc-option,-mno-fdpic)
16AFLAGS += $(call cc-option,-mno-fdpic) 16KBUILD_AFLAGS += $(call cc-option,-mno-fdpic)
17CFLAGS_MODULE += -mlong-calls 17CFLAGS_MODULE += -mlong-calls
18KALLSYMS += --symbol-prefix=_ 18KALLSYMS += --symbol-prefix=_
19 19
diff --git a/arch/blackfin/kernel/dma-mapping.c b/arch/blackfin/kernel/dma-mapping.c
index 94d7b119b71e..a16cb03c5291 100644
--- a/arch/blackfin/kernel/dma-mapping.c
+++ b/arch/blackfin/kernel/dma-mapping.c
@@ -160,8 +160,7 @@ dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
160 BUG_ON(direction == DMA_NONE); 160 BUG_ON(direction == DMA_NONE);
161 161
162 for (i = 0; i < nents; i++, sg++) { 162 for (i = 0; i < nents; i++, sg++) {
163 sg->dma_address = (dma_addr_t)(page_address(sg->page) + 163 sg->dma_address = (dma_addr_t) sg_virt(sg);
164 sg->offset);
165 164
166 invalidate_dcache_range(sg_dma_address(sg), 165 invalidate_dcache_range(sg_dma_address(sg),
167 sg_dma_address(sg) + 166 sg_dma_address(sg) +
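
This and the similar scatterlist conversions below (sba_iommu, simscsi, sn pci_dma, m68k dma) replace the open-coded page_address() + offset computation with the sg_virt() helper; sg_phys() plays the same role for physical addresses. Roughly, the helper is just the old expression wrapped in an inline function:

    /* approximate definition, for reference */
    static inline void *sg_virt(struct scatterlist *sg)
    {
            return page_address(sg_page(sg)) + sg->offset;
    }
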
diff --git a/arch/blackfin/kernel/setup.c b/arch/blackfin/kernel/setup.c
index 0e746449c29b..f1b059e5a06c 100644
--- a/arch/blackfin/kernel/setup.c
+++ b/arch/blackfin/kernel/setup.c
@@ -501,7 +501,7 @@ EXPORT_SYMBOL(sclk_to_usecs);
501 501
502unsigned long usecs_to_sclk(unsigned long usecs) 502unsigned long usecs_to_sclk(unsigned long usecs)
503{ 503{
504 return get_sclk() / (USEC_PER_SEC * (u64)usecs); 504 return (get_sclk() * (u64)usecs) / USEC_PER_SEC;
505} 505}
506EXPORT_SYMBOL(usecs_to_sclk); 506EXPORT_SYMBOL(usecs_to_sclk);
507 507
@@ -589,7 +589,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
589#elif defined CONFIG_BFIN_WT 589#elif defined CONFIG_BFIN_WT
590 "wt" 590 "wt"
591#endif 591#endif
592 , 0); 592 "", 0);
593 593
594 seq_printf(m, "%s\n", cache); 594 seq_printf(m, "%s\n", cache);
595 595
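
The usecs_to_sclk() fix above corrects an inverted conversion: turning microseconds into system-clock cycles means multiplying by the clock rate and dividing by USEC_PER_SEC, not the other way around. A quick check with a 100 MHz SCLK:

    /* sclk = 100,000,000 Hz, usecs = 10                                   */
    /* old: 100000000 / (1000000 * 10)  =   10 cycles  (0.1 us -- wrong)   */
    /* new: (100000000 * 10) / 1000000  = 1000 cycles  (10 us  -- correct) */
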
diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c
index 3c95f4184b99..bc859a311eaf 100644
--- a/arch/ia64/hp/common/sba_iommu.c
+++ b/arch/ia64/hp/common/sba_iommu.c
@@ -246,7 +246,7 @@ static int reserve_sba_gart = 1;
246static SBA_INLINE void sba_mark_invalid(struct ioc *, dma_addr_t, size_t); 246static SBA_INLINE void sba_mark_invalid(struct ioc *, dma_addr_t, size_t);
247static SBA_INLINE void sba_free_range(struct ioc *, dma_addr_t, size_t); 247static SBA_INLINE void sba_free_range(struct ioc *, dma_addr_t, size_t);
248 248
249#define sba_sg_address(sg) (page_address((sg)->page) + (sg)->offset) 249#define sba_sg_address(sg) sg_virt((sg))
250 250
251#ifdef FULL_VALID_PDIR 251#ifdef FULL_VALID_PDIR
252static u64 prefetch_spill_page; 252static u64 prefetch_spill_page;
diff --git a/arch/ia64/hp/sim/simscsi.c b/arch/ia64/hp/sim/simscsi.c
index a3a558a06757..6ef9b5219930 100644
--- a/arch/ia64/hp/sim/simscsi.c
+++ b/arch/ia64/hp/sim/simscsi.c
@@ -131,7 +131,7 @@ simscsi_sg_readwrite (struct scsi_cmnd *sc, int mode, unsigned long offset)
131 stat.fd = desc[sc->device->id]; 131 stat.fd = desc[sc->device->id];
132 132
133 scsi_for_each_sg(sc, sl, scsi_sg_count(sc), i) { 133 scsi_for_each_sg(sc, sl, scsi_sg_count(sc), i) {
134 req.addr = __pa(page_address(sl->page) + sl->offset); 134 req.addr = __pa(sg_virt(sl));
135 req.len = sl->length; 135 req.len = sl->length;
136 if (DBG) 136 if (DBG)
137 printk("simscsi_sg_%s @ %lx (off %lx) use_sg=%d len=%d\n", 137 printk("simscsi_sg_%s @ %lx (off %lx) use_sg=%d len=%d\n",
@@ -212,7 +212,7 @@ static void simscsi_fillresult(struct scsi_cmnd *sc, char *buf, unsigned len)
212 if (!len) 212 if (!len)
213 break; 213 break;
214 thislen = min(len, slp->length); 214 thislen = min(len, slp->length);
215 memcpy(page_address(slp->page) + slp->offset, buf, thislen); 215 memcpy(sg_virt(slp), buf, thislen);
216 len -= thislen; 216 len -= thislen;
217 } 217 }
218} 218}
diff --git a/arch/ia64/kernel/efi.c b/arch/ia64/kernel/efi.c
index 8e4894b205e2..3f7ea13358e9 100644
--- a/arch/ia64/kernel/efi.c
+++ b/arch/ia64/kernel/efi.c
@@ -1090,7 +1090,8 @@ efi_memmap_init(unsigned long *s, unsigned long *e)
1090 1090
1091void 1091void
1092efi_initialize_iomem_resources(struct resource *code_resource, 1092efi_initialize_iomem_resources(struct resource *code_resource,
1093 struct resource *data_resource) 1093 struct resource *data_resource,
1094 struct resource *bss_resource)
1094{ 1095{
1095 struct resource *res; 1096 struct resource *res;
1096 void *efi_map_start, *efi_map_end, *p; 1097 void *efi_map_start, *efi_map_end, *p;
@@ -1171,6 +1172,7 @@ efi_initialize_iomem_resources(struct resource *code_resource,
1171 */ 1172 */
1172 insert_resource(res, code_resource); 1173 insert_resource(res, code_resource);
1173 insert_resource(res, data_resource); 1174 insert_resource(res, data_resource);
1175 insert_resource(res, bss_resource);
1174#ifdef CONFIG_KEXEC 1176#ifdef CONFIG_KEXEC
1175 insert_resource(res, &efi_memmap_res); 1177 insert_resource(res, &efi_memmap_res);
1176 insert_resource(res, &boot_param_res); 1178 insert_resource(res, &boot_param_res);
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index cbf67f1aa291..ae6c3c02e117 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -90,7 +90,12 @@ static struct resource code_resource = {
90 .name = "Kernel code", 90 .name = "Kernel code",
91 .flags = IORESOURCE_BUSY | IORESOURCE_MEM 91 .flags = IORESOURCE_BUSY | IORESOURCE_MEM
92}; 92};
93extern char _text[], _end[], _etext[]; 93
94static struct resource bss_resource = {
95 .name = "Kernel bss",
96 .flags = IORESOURCE_BUSY | IORESOURCE_MEM
97};
98extern char _text[], _end[], _etext[], _edata[], _bss[];
94 99
95unsigned long ia64_max_cacheline_size; 100unsigned long ia64_max_cacheline_size;
96 101
@@ -200,8 +205,11 @@ static int __init register_memory(void)
200 code_resource.start = ia64_tpa(_text); 205 code_resource.start = ia64_tpa(_text);
201 code_resource.end = ia64_tpa(_etext) - 1; 206 code_resource.end = ia64_tpa(_etext) - 1;
202 data_resource.start = ia64_tpa(_etext); 207 data_resource.start = ia64_tpa(_etext);
203 data_resource.end = ia64_tpa(_end) - 1; 208 data_resource.end = ia64_tpa(_edata) - 1;
204 efi_initialize_iomem_resources(&code_resource, &data_resource); 209 bss_resource.start = ia64_tpa(_bss);
210 bss_resource.end = ia64_tpa(_end) - 1;
211 efi_initialize_iomem_resources(&code_resource, &data_resource,
212 &bss_resource);
205 213
206 return 0; 214 return 0;
207} 215}
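
Taken together, the two ia64 changes above split the kernel image into three separately reported resources instead of folding bss into "Kernel data". Schematically, the resulting ranges are:

    /*
     * Kernel code : ia64_tpa(_text)  .. ia64_tpa(_etext) - 1
     * Kernel data : ia64_tpa(_etext) .. ia64_tpa(_edata) - 1
     * Kernel bss  : ia64_tpa(_bss)   .. ia64_tpa(_end)   - 1
     */
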
diff --git a/arch/ia64/sn/pci/pci_dma.c b/arch/ia64/sn/pci/pci_dma.c
index ecd8a52b9b9e..511db2fd7bff 100644
--- a/arch/ia64/sn/pci/pci_dma.c
+++ b/arch/ia64/sn/pci/pci_dma.c
@@ -16,7 +16,7 @@
16#include <asm/sn/pcidev.h> 16#include <asm/sn/pcidev.h>
17#include <asm/sn/sn_sal.h> 17#include <asm/sn/sn_sal.h>
18 18
19#define SG_ENT_VIRT_ADDRESS(sg) (page_address((sg)->page) + (sg)->offset) 19#define SG_ENT_VIRT_ADDRESS(sg) (sg_virt((sg)))
20#define SG_ENT_PHYS_ADDRESS(SG) virt_to_phys(SG_ENT_VIRT_ADDRESS(SG)) 20#define SG_ENT_PHYS_ADDRESS(SG) virt_to_phys(SG_ENT_VIRT_ADDRESS(SG))
21 21
22/** 22/**
diff --git a/arch/m68k/kernel/dma.c b/arch/m68k/kernel/dma.c
index 9d4e4b5b6bd8..ef490e1ce600 100644
--- a/arch/m68k/kernel/dma.c
+++ b/arch/m68k/kernel/dma.c
@@ -121,7 +121,7 @@ int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
121 int i; 121 int i;
122 122
123 for (i = 0; i < nents; sg++, i++) { 123 for (i = 0; i < nents; sg++, i++) {
124 sg->dma_address = page_to_phys(sg->page) + sg->offset; 124 sg->dma_address = sg_phys(sg);
125 dma_sync_single_for_device(dev, sg->dma_address, sg->length, dir); 125 dma_sync_single_for_device(dev, sg->dma_address, sg->length, dir);
126 } 126 }
127 return nents; 127 return nents;
diff --git a/arch/m68knommu/Kconfig b/arch/m68knommu/Kconfig
index f52c627bdadd..f4b582cbb567 100644
--- a/arch/m68knommu/Kconfig
+++ b/arch/m68knommu/Kconfig
@@ -451,6 +451,12 @@ config MOD5272
451 help 451 help
452 Support for the Netburner MOD-5272 board. 452 Support for the Netburner MOD-5272 board.
453 453
454config SAVANTrosie1
455 bool "Savant Rosie1 board support"
456 depends on M523x
457 help
458 Support for the Savant Rosie1 board.
459
454config ROMFS_FROM_ROM 460config ROMFS_FROM_ROM
455 bool "ROMFS image not RAM resident" 461 bool "ROMFS image not RAM resident"
456 depends on (NETtel || SNAPGEAR) 462 depends on (NETtel || SNAPGEAR)
@@ -492,7 +498,12 @@ config SNEHA
492 bool 498 bool
493 default y 499 default y
494 depends on CPU16B 500 depends on CPU16B
495 501
502config SAVANT
503 bool
504 default y
505 depends on SAVANTrosie1
506
496config AVNET 507config AVNET
497 bool 508 bool
498 default y 509 default y
diff --git a/arch/m68knommu/Makefile b/arch/m68knommu/Makefile
index 92227aaaa26e..30aa2553693d 100644
--- a/arch/m68knommu/Makefile
+++ b/arch/m68knommu/Makefile
@@ -48,6 +48,7 @@ board-$(CONFIG_SNEHA) := SNEHA
48board-$(CONFIG_M5208EVB) := M5208EVB 48board-$(CONFIG_M5208EVB) := M5208EVB
49board-$(CONFIG_MOD5272) := MOD5272 49board-$(CONFIG_MOD5272) := MOD5272
50board-$(CONFIG_AVNET) := AVNET 50board-$(CONFIG_AVNET) := AVNET
51board-$(CONFIG_SAVANT) := SAVANT
51BOARD := $(board-y) 52BOARD := $(board-y)
52 53
53model-$(CONFIG_RAMKERNEL) := ram 54model-$(CONFIG_RAMKERNEL) := ram
@@ -117,4 +118,4 @@ core-y += arch/m68knommu/kernel/ \
117libs-y += arch/m68knommu/lib/ 118libs-y += arch/m68knommu/lib/
118 119
119archclean: 120archclean:
120 $(Q)$(MAKE) $(clean)=arch/m68knommu/boot 121
diff --git a/arch/m68knommu/defconfig b/arch/m68knommu/defconfig
index 3891de09ac23..5a0ecaaee3b0 100644
--- a/arch/m68knommu/defconfig
+++ b/arch/m68knommu/defconfig
@@ -1,41 +1,48 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.17 3# Linux kernel version: 2.6.23
4# Tue Jun 27 12:57:06 2006 4# Thu Oct 18 13:17:38 2007
5# 5#
6CONFIG_M68K=y 6CONFIG_M68K=y
7# CONFIG_MMU is not set 7# CONFIG_MMU is not set
8# CONFIG_FPU is not set 8# CONFIG_FPU is not set
9CONFIG_ZONE_DMA=y
9CONFIG_RWSEM_GENERIC_SPINLOCK=y 10CONFIG_RWSEM_GENERIC_SPINLOCK=y
10# CONFIG_RWSEM_XCHGADD_ALGORITHM is not set 11# CONFIG_RWSEM_XCHGADD_ALGORITHM is not set
12# CONFIG_ARCH_HAS_ILOG2_U32 is not set
13# CONFIG_ARCH_HAS_ILOG2_U64 is not set
11CONFIG_GENERIC_FIND_NEXT_BIT=y 14CONFIG_GENERIC_FIND_NEXT_BIT=y
12CONFIG_GENERIC_HWEIGHT=y 15CONFIG_GENERIC_HWEIGHT=y
16CONFIG_GENERIC_HARDIRQS=y
13CONFIG_GENERIC_CALIBRATE_DELAY=y 17CONFIG_GENERIC_CALIBRATE_DELAY=y
14CONFIG_TIME_LOW_RES=y 18CONFIG_TIME_LOW_RES=y
19CONFIG_NO_IOPORT=y
20CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
15 21
16# 22#
17# Code maturity level options 23# General setup
18# 24#
19CONFIG_EXPERIMENTAL=y 25CONFIG_EXPERIMENTAL=y
20CONFIG_BROKEN_ON_SMP=y 26CONFIG_BROKEN_ON_SMP=y
21CONFIG_INIT_ENV_ARG_LIMIT=32 27CONFIG_INIT_ENV_ARG_LIMIT=32
22
23#
24# General setup
25#
26CONFIG_LOCALVERSION="" 28CONFIG_LOCALVERSION=""
27CONFIG_LOCALVERSION_AUTO=y 29CONFIG_LOCALVERSION_AUTO=y
28# CONFIG_SYSVIPC is not set 30# CONFIG_SYSVIPC is not set
29# CONFIG_POSIX_MQUEUE is not set 31# CONFIG_POSIX_MQUEUE is not set
30# CONFIG_BSD_PROCESS_ACCT is not set 32# CONFIG_BSD_PROCESS_ACCT is not set
31# CONFIG_SYSCTL is not set 33# CONFIG_TASKSTATS is not set
34# CONFIG_USER_NS is not set
32# CONFIG_AUDIT is not set 35# CONFIG_AUDIT is not set
33# CONFIG_IKCONFIG is not set 36# CONFIG_IKCONFIG is not set
37CONFIG_LOG_BUF_SHIFT=14
38# CONFIG_SYSFS_DEPRECATED is not set
34# CONFIG_RELAY is not set 39# CONFIG_RELAY is not set
35CONFIG_INITRAMFS_SOURCE="" 40# CONFIG_BLK_DEV_INITRD is not set
36CONFIG_UID16=y
37# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set 41# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
42CONFIG_SYSCTL=y
38CONFIG_EMBEDDED=y 43CONFIG_EMBEDDED=y
44CONFIG_UID16=y
45CONFIG_SYSCTL_SYSCALL=y
39# CONFIG_KALLSYMS is not set 46# CONFIG_KALLSYMS is not set
40# CONFIG_HOTPLUG is not set 47# CONFIG_HOTPLUG is not set
41CONFIG_PRINTK=y 48CONFIG_PRINTK=y
@@ -44,20 +51,25 @@ CONFIG_ELF_CORE=y
44CONFIG_BASE_FULL=y 51CONFIG_BASE_FULL=y
45# CONFIG_FUTEX is not set 52# CONFIG_FUTEX is not set
46# CONFIG_EPOLL is not set 53# CONFIG_EPOLL is not set
54# CONFIG_SIGNALFD is not set
55# CONFIG_EVENTFD is not set
56# CONFIG_VM_EVENT_COUNTERS is not set
47CONFIG_SLAB=y 57CONFIG_SLAB=y
58# CONFIG_SLUB is not set
59# CONFIG_SLOB is not set
48CONFIG_TINY_SHMEM=y 60CONFIG_TINY_SHMEM=y
49CONFIG_BASE_SMALL=0 61CONFIG_BASE_SMALL=0
50# CONFIG_SLOB is not set 62CONFIG_MODULES=y
51 63CONFIG_MODULE_UNLOAD=y
52# 64# CONFIG_MODULE_FORCE_UNLOAD is not set
53# Loadable module support 65# CONFIG_MODVERSIONS is not set
54# 66# CONFIG_MODULE_SRCVERSION_ALL is not set
55# CONFIG_MODULES is not set 67# CONFIG_KMOD is not set
56 68CONFIG_BLOCK=y
57# 69# CONFIG_LBD is not set
58# Block layer
59#
60# CONFIG_BLK_DEV_IO_TRACE is not set 70# CONFIG_BLK_DEV_IO_TRACE is not set
71# CONFIG_LSF is not set
72# CONFIG_BLK_DEV_BSG is not set
61 73
62# 74#
63# IO Schedulers 75# IO Schedulers
@@ -99,6 +111,7 @@ CONFIG_CLOCK_DIV=1
99# 111#
100# Platform 112# Platform
101# 113#
114# CONFIG_UC5272 is not set
102CONFIG_M5272C3=y 115CONFIG_M5272C3=y
103# CONFIG_COBRA5272 is not set 116# CONFIG_COBRA5272 is not set
104# CONFIG_CANCam is not set 117# CONFIG_CANCam is not set
@@ -107,7 +120,6 @@ CONFIG_M5272C3=y
107# CONFIG_CPU16B is not set 120# CONFIG_CPU16B is not set
108# CONFIG_MOD5272 is not set 121# CONFIG_MOD5272 is not set
109CONFIG_FREESCALE=y 122CONFIG_FREESCALE=y
110# CONFIG_LARGE_ALLOCS is not set
111CONFIG_4KSTACKS=y 123CONFIG_4KSTACKS=y
112 124
113# 125#
@@ -121,6 +133,11 @@ CONFIG_RAMAUTOBIT=y
121# CONFIG_RAM8BIT is not set 133# CONFIG_RAM8BIT is not set
122# CONFIG_RAM16BIT is not set 134# CONFIG_RAM16BIT is not set
123# CONFIG_RAM32BIT is not set 135# CONFIG_RAM32BIT is not set
136
137#
138# ROM configuration
139#
140# CONFIG_ROM is not set
124CONFIG_RAMKERNEL=y 141CONFIG_RAMKERNEL=y
125# CONFIG_ROMKERNEL is not set 142# CONFIG_ROMKERNEL is not set
126CONFIG_SELECT_MEMORY_MODEL=y 143CONFIG_SELECT_MEMORY_MODEL=y
@@ -131,20 +148,19 @@ CONFIG_FLATMEM=y
131CONFIG_FLAT_NODE_MEM_MAP=y 148CONFIG_FLAT_NODE_MEM_MAP=y
132# CONFIG_SPARSEMEM_STATIC is not set 149# CONFIG_SPARSEMEM_STATIC is not set
133CONFIG_SPLIT_PTLOCK_CPUS=4 150CONFIG_SPLIT_PTLOCK_CPUS=4
151# CONFIG_RESOURCES_64BIT is not set
152CONFIG_ZONE_DMA_FLAG=1
153CONFIG_VIRT_TO_BUS=y
134 154
135# 155#
136# Bus options (PCI, PCMCIA, EISA, MCA, ISA) 156# Bus options (PCI, PCMCIA, EISA, MCA, ISA)
137# 157#
138# CONFIG_PCI is not set 158# CONFIG_PCI is not set
159# CONFIG_ARCH_SUPPORTS_MSI is not set
139 160
140# 161#
141# PCCARD (PCMCIA/CardBus) support 162# PCCARD (PCMCIA/CardBus) support
142# 163#
143# CONFIG_PCCARD is not set
144
145#
146# PCI Hotplug Support
147#
148 164
149# 165#
150# Executable file formats 166# Executable file formats
@@ -168,7 +184,6 @@ CONFIG_NET=y
168# 184#
169# Networking options 185# Networking options
170# 186#
171# CONFIG_NETDEBUG is not set
172CONFIG_PACKET=y 187CONFIG_PACKET=y
173# CONFIG_PACKET_MMAP is not set 188# CONFIG_PACKET_MMAP is not set
174CONFIG_UNIX=y 189CONFIG_UNIX=y
@@ -187,27 +202,21 @@ CONFIG_IP_FIB_HASH=y
187# CONFIG_INET_IPCOMP is not set 202# CONFIG_INET_IPCOMP is not set
188# CONFIG_INET_XFRM_TUNNEL is not set 203# CONFIG_INET_XFRM_TUNNEL is not set
189# CONFIG_INET_TUNNEL is not set 204# CONFIG_INET_TUNNEL is not set
205# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
206# CONFIG_INET_XFRM_MODE_TUNNEL is not set
207# CONFIG_INET_XFRM_MODE_BEET is not set
190# CONFIG_INET_DIAG is not set 208# CONFIG_INET_DIAG is not set
191# CONFIG_TCP_CONG_ADVANCED is not set 209# CONFIG_TCP_CONG_ADVANCED is not set
192CONFIG_TCP_CONG_BIC=y 210CONFIG_TCP_CONG_CUBIC=y
211CONFIG_DEFAULT_TCP_CONG="cubic"
212# CONFIG_TCP_MD5SIG is not set
193# CONFIG_IPV6 is not set 213# CONFIG_IPV6 is not set
194# CONFIG_INET6_XFRM_TUNNEL is not set 214# CONFIG_INET6_XFRM_TUNNEL is not set
195# CONFIG_INET6_TUNNEL is not set 215# CONFIG_INET6_TUNNEL is not set
216# CONFIG_NETWORK_SECMARK is not set
196# CONFIG_NETFILTER is not set 217# CONFIG_NETFILTER is not set
197
198#
199# DCCP Configuration (EXPERIMENTAL)
200#
201# CONFIG_IP_DCCP is not set 218# CONFIG_IP_DCCP is not set
202
203#
204# SCTP Configuration (EXPERIMENTAL)
205#
206# CONFIG_IP_SCTP is not set 219# CONFIG_IP_SCTP is not set
207
208#
209# TIPC Configuration (EXPERIMENTAL)
210#
211# CONFIG_TIPC is not set 220# CONFIG_TIPC is not set
212# CONFIG_ATM is not set 221# CONFIG_ATM is not set
213# CONFIG_BRIDGE is not set 222# CONFIG_BRIDGE is not set
@@ -218,7 +227,6 @@ CONFIG_TCP_CONG_BIC=y
218# CONFIG_ATALK is not set 227# CONFIG_ATALK is not set
219# CONFIG_X25 is not set 228# CONFIG_X25 is not set
220# CONFIG_LAPB is not set 229# CONFIG_LAPB is not set
221# CONFIG_NET_DIVERT is not set
222# CONFIG_ECONET is not set 230# CONFIG_ECONET is not set
223# CONFIG_WAN_ROUTER is not set 231# CONFIG_WAN_ROUTER is not set
224 232
@@ -234,7 +242,17 @@ CONFIG_TCP_CONG_BIC=y
234# CONFIG_HAMRADIO is not set 242# CONFIG_HAMRADIO is not set
235# CONFIG_IRDA is not set 243# CONFIG_IRDA is not set
236# CONFIG_BT is not set 244# CONFIG_BT is not set
245# CONFIG_AF_RXRPC is not set
246
247#
248# Wireless
249#
250# CONFIG_CFG80211 is not set
251# CONFIG_WIRELESS_EXT is not set
252# CONFIG_MAC80211 is not set
237# CONFIG_IEEE80211 is not set 253# CONFIG_IEEE80211 is not set
254# CONFIG_RFKILL is not set
255# CONFIG_NET_9P is not set
238 256
239# 257#
240# Device Drivers 258# Device Drivers
@@ -245,16 +263,8 @@ CONFIG_TCP_CONG_BIC=y
245# 263#
246CONFIG_STANDALONE=y 264CONFIG_STANDALONE=y
247CONFIG_PREVENT_FIRMWARE_BUILD=y 265CONFIG_PREVENT_FIRMWARE_BUILD=y
248# CONFIG_FW_LOADER is not set 266# CONFIG_SYS_HYPERVISOR is not set
249
250#
251# Connector - unified userspace <-> kernelspace linker
252#
253# CONFIG_CONNECTOR is not set 267# CONFIG_CONNECTOR is not set
254
255#
256# Memory Technology Devices (MTD)
257#
258CONFIG_MTD=y 268CONFIG_MTD=y
259# CONFIG_MTD_DEBUG is not set 269# CONFIG_MTD_DEBUG is not set
260# CONFIG_MTD_CONCAT is not set 270# CONFIG_MTD_CONCAT is not set
@@ -266,11 +276,13 @@ CONFIG_MTD_PARTITIONS=y
266# User Modules And Translation Layers 276# User Modules And Translation Layers
267# 277#
268CONFIG_MTD_CHAR=y 278CONFIG_MTD_CHAR=y
279CONFIG_MTD_BLKDEVS=y
269CONFIG_MTD_BLOCK=y 280CONFIG_MTD_BLOCK=y
270# CONFIG_FTL is not set 281# CONFIG_FTL is not set
271# CONFIG_NFTL is not set 282# CONFIG_NFTL is not set
272# CONFIG_INFTL is not set 283# CONFIG_INFTL is not set
273# CONFIG_RFD_FTL is not set 284# CONFIG_RFD_FTL is not set
285# CONFIG_SSFDC is not set
274 286
275# 287#
276# RAM/ROM/Flash chip drivers 288# RAM/ROM/Flash chip drivers
@@ -290,7 +302,6 @@ CONFIG_MTD_CFI_I2=y
290CONFIG_MTD_RAM=y 302CONFIG_MTD_RAM=y
291# CONFIG_MTD_ROM is not set 303# CONFIG_MTD_ROM is not set
292# CONFIG_MTD_ABSENT is not set 304# CONFIG_MTD_ABSENT is not set
293# CONFIG_MTD_OBSOLETE_CHIPS is not set
294 305
295# 306#
296# Mapping drivers for chip access 307# Mapping drivers for chip access
@@ -313,42 +324,25 @@ CONFIG_MTD_UCLINUX=y
313# CONFIG_MTD_DOC2000 is not set 324# CONFIG_MTD_DOC2000 is not set
314# CONFIG_MTD_DOC2001 is not set 325# CONFIG_MTD_DOC2001 is not set
315# CONFIG_MTD_DOC2001PLUS is not set 326# CONFIG_MTD_DOC2001PLUS is not set
316
317#
318# NAND Flash Device Drivers
319#
320# CONFIG_MTD_NAND is not set 327# CONFIG_MTD_NAND is not set
321
322#
323# OneNAND Flash Device Drivers
324#
325# CONFIG_MTD_ONENAND is not set 328# CONFIG_MTD_ONENAND is not set
326 329
327# 330#
328# Parallel port support 331# UBI - Unsorted block images
329# 332#
333# CONFIG_MTD_UBI is not set
330# CONFIG_PARPORT is not set 334# CONFIG_PARPORT is not set
331 335CONFIG_BLK_DEV=y
332#
333# Plug and Play support
334#
335
336#
337# Block devices
338#
339# CONFIG_BLK_DEV_COW_COMMON is not set 336# CONFIG_BLK_DEV_COW_COMMON is not set
340# CONFIG_BLK_DEV_LOOP is not set 337# CONFIG_BLK_DEV_LOOP is not set
341# CONFIG_BLK_DEV_NBD is not set 338# CONFIG_BLK_DEV_NBD is not set
342CONFIG_BLK_DEV_RAM=y 339CONFIG_BLK_DEV_RAM=y
343CONFIG_BLK_DEV_RAM_COUNT=16 340CONFIG_BLK_DEV_RAM_COUNT=16
344CONFIG_BLK_DEV_RAM_SIZE=4096 341CONFIG_BLK_DEV_RAM_SIZE=4096
345# CONFIG_BLK_DEV_INITRD is not set 342CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024
346# CONFIG_CDROM_PKTCDVD is not set 343# CONFIG_CDROM_PKTCDVD is not set
347# CONFIG_ATA_OVER_ETH is not set 344# CONFIG_ATA_OVER_ETH is not set
348 345# CONFIG_MISC_DEVICES is not set
349#
350# ATA/ATAPI/MFM/RLL support
351#
352# CONFIG_IDE is not set 346# CONFIG_IDE is not set
353 347
354# 348#
@@ -356,67 +350,29 @@ CONFIG_BLK_DEV_RAM_SIZE=4096
356# 350#
357# CONFIG_RAID_ATTRS is not set 351# CONFIG_RAID_ATTRS is not set
358# CONFIG_SCSI is not set 352# CONFIG_SCSI is not set
359 353# CONFIG_SCSI_DMA is not set
360# 354# CONFIG_SCSI_NETLINK is not set
361# Multi-device support (RAID and LVM)
362#
363# CONFIG_MD is not set 355# CONFIG_MD is not set
364
365#
366# Fusion MPT device support
367#
368# CONFIG_FUSION is not set
369
370#
371# IEEE 1394 (FireWire) support
372#
373
374#
375# I2O device support
376#
377
378#
379# Network device support
380#
381CONFIG_NETDEVICES=y 356CONFIG_NETDEVICES=y
357# CONFIG_NETDEVICES_MULTIQUEUE is not set
382# CONFIG_DUMMY is not set 358# CONFIG_DUMMY is not set
383# CONFIG_BONDING is not set 359# CONFIG_BONDING is not set
360# CONFIG_MACVLAN is not set
384# CONFIG_EQUALIZER is not set 361# CONFIG_EQUALIZER is not set
385# CONFIG_TUN is not set 362# CONFIG_TUN is not set
386
387#
388# PHY device support
389#
390# CONFIG_PHYLIB is not set 363# CONFIG_PHYLIB is not set
391
392#
393# Ethernet (10 or 100Mbit)
394#
395CONFIG_NET_ETHERNET=y 364CONFIG_NET_ETHERNET=y
396# CONFIG_MII is not set 365# CONFIG_MII is not set
397CONFIG_FEC=y 366CONFIG_FEC=y
398# CONFIG_FEC2 is not set 367# CONFIG_FEC2 is not set
368# CONFIG_NETDEV_1000 is not set
369# CONFIG_NETDEV_10000 is not set
399 370
400# 371#
401# Ethernet (1000 Mbit) 372# Wireless LAN
402#
403
404#
405# Ethernet (10000 Mbit)
406#
407
408#
409# Token Ring devices
410#
411
412#
413# Wireless LAN (non-hamradio)
414#
415# CONFIG_NET_RADIO is not set
416
417#
418# Wan interfaces
419# 373#
374# CONFIG_WLAN_PRE80211 is not set
375# CONFIG_WLAN_80211 is not set
420# CONFIG_WAN is not set 376# CONFIG_WAN is not set
421CONFIG_PPP=y 377CONFIG_PPP=y
422# CONFIG_PPP_MULTILINK is not set 378# CONFIG_PPP_MULTILINK is not set
@@ -427,20 +383,14 @@ CONFIG_PPP=y
427# CONFIG_PPP_BSDCOMP is not set 383# CONFIG_PPP_BSDCOMP is not set
428# CONFIG_PPP_MPPE is not set 384# CONFIG_PPP_MPPE is not set
429# CONFIG_PPPOE is not set 385# CONFIG_PPPOE is not set
386# CONFIG_PPPOL2TP is not set
430# CONFIG_SLIP is not set 387# CONFIG_SLIP is not set
388CONFIG_SLHC=y
431# CONFIG_SHAPER is not set 389# CONFIG_SHAPER is not set
432# CONFIG_NETCONSOLE is not set 390# CONFIG_NETCONSOLE is not set
433# CONFIG_NETPOLL is not set 391# CONFIG_NETPOLL is not set
434# CONFIG_NET_POLL_CONTROLLER is not set 392# CONFIG_NET_POLL_CONTROLLER is not set
435
436#
437# ISDN subsystem
438#
439# CONFIG_ISDN is not set 393# CONFIG_ISDN is not set
440
441#
442# Telephony Support
443#
444# CONFIG_PHONE is not set 394# CONFIG_PHONE is not set
445 395
446# 396#
@@ -472,34 +422,13 @@ CONFIG_SERIAL_COLDFIRE=y
472# CONFIG_UNIX98_PTYS is not set 422# CONFIG_UNIX98_PTYS is not set
473CONFIG_LEGACY_PTYS=y 423CONFIG_LEGACY_PTYS=y
474CONFIG_LEGACY_PTY_COUNT=256 424CONFIG_LEGACY_PTY_COUNT=256
475
476#
477# IPMI
478#
479# CONFIG_IPMI_HANDLER is not set 425# CONFIG_IPMI_HANDLER is not set
480
481#
482# Watchdog Cards
483#
484# CONFIG_WATCHDOG is not set 426# CONFIG_WATCHDOG is not set
427# CONFIG_HW_RANDOM is not set
485# CONFIG_GEN_RTC is not set 428# CONFIG_GEN_RTC is not set
486# CONFIG_DTLK is not set
487# CONFIG_R3964 is not set 429# CONFIG_R3964 is not set
488
489#
490# Ftape, the floppy tape device driver
491#
492# CONFIG_RAW_DRIVER is not set 430# CONFIG_RAW_DRIVER is not set
493
494#
495# TPM devices
496#
497# CONFIG_TCG_TPM is not set 431# CONFIG_TCG_TPM is not set
498# CONFIG_TELCLOCK is not set
499
500#
501# I2C support
502#
503# CONFIG_I2C is not set 432# CONFIG_I2C is not set
504 433
505# 434#
@@ -507,101 +436,74 @@ CONFIG_LEGACY_PTY_COUNT=256
507# 436#
508# CONFIG_SPI is not set 437# CONFIG_SPI is not set
509# CONFIG_SPI_MASTER is not set 438# CONFIG_SPI_MASTER is not set
510
511#
512# Dallas's 1-wire bus
513#
514# CONFIG_W1 is not set 439# CONFIG_W1 is not set
515 440# CONFIG_POWER_SUPPLY is not set
516#
517# Hardware Monitoring support
518#
519# CONFIG_HWMON is not set 441# CONFIG_HWMON is not set
520# CONFIG_HWMON_VID is not set
521 442
522# 443#
523# Misc devices 444# Multifunction device drivers
524# 445#
446# CONFIG_MFD_SM501 is not set
525 447
526# 448#
527# Multimedia devices 449# Multimedia devices
528# 450#
529# CONFIG_VIDEO_DEV is not set 451# CONFIG_VIDEO_DEV is not set
530CONFIG_VIDEO_V4L2=y 452# CONFIG_DVB_CORE is not set
453CONFIG_DAB=y
531 454
532# 455#
533# Digital Video Broadcasting Devices 456# Graphics support
534# 457#
535# CONFIG_DVB is not set 458# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
536 459
537# 460#
538# Graphics support 461# Display device support
539# 462#
463# CONFIG_DISPLAY_SUPPORT is not set
464# CONFIG_VGASTATE is not set
465CONFIG_VIDEO_OUTPUT_CONTROL=y
540# CONFIG_FB is not set 466# CONFIG_FB is not set
541 467
542# 468#
543# Sound 469# Sound
544# 470#
545# CONFIG_SOUND is not set 471# CONFIG_SOUND is not set
546 472# CONFIG_USB_SUPPORT is not set
547#
548# USB support
549#
550# CONFIG_USB_ARCH_HAS_HCD is not set
551# CONFIG_USB_ARCH_HAS_OHCI is not set
552# CONFIG_USB_ARCH_HAS_EHCI is not set
553
554#
555# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
556#
557
558#
559# USB Gadget Support
560#
561# CONFIG_USB_GADGET is not set
562
563#
564# MMC/SD Card support
565#
566# CONFIG_MMC is not set 473# CONFIG_MMC is not set
567
568#
569# LED devices
570#
571# CONFIG_NEW_LEDS is not set 474# CONFIG_NEW_LEDS is not set
475# CONFIG_RTC_CLASS is not set
572 476
573# 477#
574# LED drivers 478# DMA Engine support
575# 479#
480# CONFIG_DMA_ENGINE is not set
576 481
577# 482#
578# LED Triggers 483# DMA Clients
579# 484#
580 485
581# 486#
582# InfiniBand support 487# DMA Devices
583# 488#
584 489
585# 490#
586# EDAC - error detection and reporting (RAS) (EXPERIMENTAL) 491# Userspace I/O
587# 492#
588 493# CONFIG_UIO is not set
589#
590# Real Time Clock
591#
592# CONFIG_RTC_CLASS is not set
593 494
594# 495#
595# File systems 496# File systems
596# 497#
597CONFIG_EXT2_FS=y 498CONFIG_EXT2_FS=y
598# CONFIG_EXT2_FS_XATTR is not set 499# CONFIG_EXT2_FS_XATTR is not set
599# CONFIG_EXT2_FS_XIP is not set
600# CONFIG_EXT3_FS is not set 500# CONFIG_EXT3_FS is not set
501# CONFIG_EXT4DEV_FS is not set
601# CONFIG_REISERFS_FS is not set 502# CONFIG_REISERFS_FS is not set
602# CONFIG_JFS_FS is not set 503# CONFIG_JFS_FS is not set
603# CONFIG_FS_POSIX_ACL is not set 504# CONFIG_FS_POSIX_ACL is not set
604# CONFIG_XFS_FS is not set 505# CONFIG_XFS_FS is not set
506# CONFIG_GFS2_FS is not set
605# CONFIG_OCFS2_FS is not set 507# CONFIG_OCFS2_FS is not set
606# CONFIG_MINIX_FS is not set 508# CONFIG_MINIX_FS is not set
607CONFIG_ROMFS_FS=y 509CONFIG_ROMFS_FS=y
@@ -629,6 +531,7 @@ CONFIG_ROMFS_FS=y
629# Pseudo filesystems 531# Pseudo filesystems
630# 532#
631CONFIG_PROC_FS=y 533CONFIG_PROC_FS=y
534CONFIG_PROC_SYSCTL=y
632CONFIG_SYSFS=y 535CONFIG_SYSFS=y
633# CONFIG_TMPFS is not set 536# CONFIG_TMPFS is not set
634# CONFIG_HUGETLB_PAGE is not set 537# CONFIG_HUGETLB_PAGE is not set
@@ -645,7 +548,6 @@ CONFIG_RAMFS=y
645# CONFIG_BEFS_FS is not set 548# CONFIG_BEFS_FS is not set
646# CONFIG_BFS_FS is not set 549# CONFIG_BFS_FS is not set
647# CONFIG_EFS_FS is not set 550# CONFIG_EFS_FS is not set
648# CONFIG_JFFS_FS is not set
649# CONFIG_JFFS2_FS is not set 551# CONFIG_JFFS2_FS is not set
650# CONFIG_CRAMFS is not set 552# CONFIG_CRAMFS is not set
651# CONFIG_VXFS_FS is not set 553# CONFIG_VXFS_FS is not set
@@ -664,7 +566,6 @@ CONFIG_RAMFS=y
664# CONFIG_NCP_FS is not set 566# CONFIG_NCP_FS is not set
665# CONFIG_CODA_FS is not set 567# CONFIG_CODA_FS is not set
666# CONFIG_AFS_FS is not set 568# CONFIG_AFS_FS is not set
667# CONFIG_9P_FS is not set
668 569
669# 570#
670# Partition Types 571# Partition Types
@@ -678,15 +579,21 @@ CONFIG_MSDOS_PARTITION=y
678# CONFIG_NLS is not set 579# CONFIG_NLS is not set
679 580
680# 581#
582# Distributed Lock Manager
583#
584# CONFIG_DLM is not set
585
586#
681# Kernel hacking 587# Kernel hacking
682# 588#
683# CONFIG_PRINTK_TIME is not set 589# CONFIG_PRINTK_TIME is not set
590# CONFIG_ENABLE_MUST_CHECK is not set
684# CONFIG_MAGIC_SYSRQ is not set 591# CONFIG_MAGIC_SYSRQ is not set
592# CONFIG_UNUSED_SYMBOLS is not set
593# CONFIG_DEBUG_FS is not set
594# CONFIG_HEADERS_CHECK is not set
685# CONFIG_DEBUG_KERNEL is not set 595# CONFIG_DEBUG_KERNEL is not set
686CONFIG_LOG_BUF_SHIFT=14
687# CONFIG_DEBUG_BUGVERBOSE is not set 596# CONFIG_DEBUG_BUGVERBOSE is not set
688# CONFIG_DEBUG_FS is not set
689# CONFIG_UNWIND_INFO is not set
690# CONFIG_FULLDEBUG is not set 597# CONFIG_FULLDEBUG is not set
691# CONFIG_HIGHPROFILE is not set 598# CONFIG_HIGHPROFILE is not set
692# CONFIG_BOOTPARAM is not set 599# CONFIG_BOOTPARAM is not set
@@ -699,20 +606,16 @@ CONFIG_LOG_BUF_SHIFT=14
699# 606#
700# CONFIG_KEYS is not set 607# CONFIG_KEYS is not set
701# CONFIG_SECURITY is not set 608# CONFIG_SECURITY is not set
702
703#
704# Cryptographic options
705#
706# CONFIG_CRYPTO is not set 609# CONFIG_CRYPTO is not set
707 610
708# 611#
709# Hardware crypto devices
710#
711
712#
713# Library routines 612# Library routines
714# 613#
715# CONFIG_CRC_CCITT is not set 614# CONFIG_CRC_CCITT is not set
716# CONFIG_CRC16 is not set 615# CONFIG_CRC16 is not set
616# CONFIG_CRC_ITU_T is not set
717# CONFIG_CRC32 is not set 617# CONFIG_CRC32 is not set
618# CONFIG_CRC7 is not set
718# CONFIG_LIBCRC32C is not set 619# CONFIG_LIBCRC32C is not set
620CONFIG_HAS_IOMEM=y
621CONFIG_HAS_DMA=y
diff --git a/arch/m68knommu/kernel/setup.c b/arch/m68knommu/kernel/setup.c
index 3f86ade3a22a..74bf94948ec2 100644
--- a/arch/m68knommu/kernel/setup.c
+++ b/arch/m68knommu/kernel/setup.c
@@ -151,27 +151,15 @@ void setup_arch(char **cmdline_p)
151#ifdef CONFIG_ELITE 151#ifdef CONFIG_ELITE
152 printk(KERN_INFO "Modified for M5206eLITE by Rob Scott, rscott@mtrob.fdns.net\n"); 152 printk(KERN_INFO "Modified for M5206eLITE by Rob Scott, rscott@mtrob.fdns.net\n");
153#endif 153#endif
154#ifdef CONFIG_TELOS
155 printk(KERN_INFO "Modified for Omnia ToolVox by James D. Schettine, james@telos-systems.com\n");
156#endif
157#endif 154#endif
158 printk(KERN_INFO "Flat model support (C) 1998,1999 Kenneth Albanowski, D. Jeff Dionne\n"); 155 printk(KERN_INFO "Flat model support (C) 1998,1999 Kenneth Albanowski, D. Jeff Dionne\n");
159 156
160#if defined( CONFIG_PILOT ) && defined( CONFIG_M68328 ) 157#if defined( CONFIG_PILOT ) && defined( CONFIG_M68328 )
161 printk(KERN_INFO "TRG SuperPilot FLASH card support <info@trgnet.com>\n"); 158 printk(KERN_INFO "TRG SuperPilot FLASH card support <info@trgnet.com>\n");
162#endif 159#endif
163
164#if defined( CONFIG_PILOT ) && defined( CONFIG_M68EZ328 ) 160#if defined( CONFIG_PILOT ) && defined( CONFIG_M68EZ328 )
165 printk(KERN_INFO "PalmV support by Lineo Inc. <jeff@uclinux.com>\n"); 161 printk(KERN_INFO "PalmV support by Lineo Inc. <jeff@uclinux.com>\n");
166#endif 162#endif
167
168#ifdef CONFIG_M68EZ328ADS
169 printk(KERN_INFO "M68EZ328ADS board support (C) 1999 Vladimir Gurevich <vgurevic@cisco.com>\n");
170#endif
171
172#ifdef CONFIG_ALMA_ANS
173 printk(KERN_INFO "Alma Electronics board support (C) 1999 Vladimir Gurevich <vgurevic@cisco.com>\n");
174#endif
175#if defined (CONFIG_M68360) 163#if defined (CONFIG_M68360)
176 printk(KERN_INFO "QUICC port done by SED Systems <hamilton@sedsystems.ca>,\n"); 164 printk(KERN_INFO "QUICC port done by SED Systems <hamilton@sedsystems.ca>,\n");
177 printk(KERN_INFO "based on 2.0.38 port by Lineo Inc. <mleslie@lineo.com>.\n"); 165 printk(KERN_INFO "based on 2.0.38 port by Lineo Inc. <mleslie@lineo.com>.\n");
@@ -188,11 +176,9 @@ void setup_arch(char **cmdline_p)
188 "BSS=0x%06x-0x%06x\n", (int) &_stext, (int) &_etext, 176 "BSS=0x%06x-0x%06x\n", (int) &_stext, (int) &_etext,
189 (int) &_sdata, (int) &_edata, 177 (int) &_sdata, (int) &_edata,
190 (int) &_sbss, (int) &_ebss); 178 (int) &_sbss, (int) &_ebss);
191 printk(KERN_DEBUG "KERNEL -> ROMFS=0x%06x-0x%06x MEM=0x%06x-0x%06x " 179 printk(KERN_DEBUG "MEMORY -> ROMFS=0x%06x-0x%06x MEM=0x%06x-0x%06x\n ",
192 "STACK=0x%06x-0x%06x\n",
193 (int) &_ebss, (int) memory_start, 180 (int) &_ebss, (int) memory_start,
194 (int) memory_start, (int) memory_end, 181 (int) memory_start, (int) memory_end);
195 (int) memory_end, (int) _ramend);
196#endif 182#endif
197 183
198 /* Keep a copy of command line */ 184 /* Keep a copy of command line */
@@ -287,12 +273,3 @@ struct seq_operations cpuinfo_op = {
287 .show = show_cpuinfo, 273 .show = show_cpuinfo,
288}; 274};
289 275
290void arch_gettod(int *year, int *mon, int *day, int *hour,
291 int *min, int *sec)
292{
293 if (mach_gettod)
294 mach_gettod(year, mon, day, hour, min, sec);
295 else
296 *year = *mon = *day = *hour = *min = *sec = 0;
297}
298
diff --git a/arch/m68knommu/kernel/signal.c b/arch/m68knommu/kernel/signal.c
index 437f8c6c14a0..70371378db86 100644
--- a/arch/m68knommu/kernel/signal.c
+++ b/arch/m68knommu/kernel/signal.c
@@ -781,15 +781,7 @@ asmlinkage int do_signal(sigset_t *oldset, struct pt_regs *regs)
781 /* Did we come from a system call? */ 781 /* Did we come from a system call? */
782 if (regs->orig_d0 >= 0) { 782 if (regs->orig_d0 >= 0) {
783 /* Restart the system call - no handlers present */ 783 /* Restart the system call - no handlers present */
784 if (regs->d0 == -ERESTARTNOHAND 784 handle_restart(regs, NULL, 0);
785 || regs->d0 == -ERESTARTSYS
786 || regs->d0 == -ERESTARTNOINTR) {
787 regs->d0 = regs->orig_d0;
788 regs->pc -= 2;
789 } else if (regs->d0 == -ERESTART_RESTARTBLOCK) {
790 regs->d0 = __NR_restart_syscall;
791 regs->pc -= 2;
792 }
793 } 785 }
794 return 0; 786 return 0;
795} 787}
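
The branch removed here is the standard m68k syscall-restart sequence, now centralized in handle_restart(). Based on the deleted code, the no-handler case it replaces is equivalent to:

    /* equivalent of the removed open-coded logic (no handler installed) */
    switch (regs->d0) {
    case -ERESTARTNOHAND:
    case -ERESTARTSYS:
    case -ERESTARTNOINTR:
            regs->d0 = regs->orig_d0;
            regs->pc -= 2;          /* re-execute the trap instruction */
            break;
    case -ERESTART_RESTARTBLOCK:
            regs->d0 = __NR_restart_syscall;
            regs->pc -= 2;
            break;
    }
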
diff --git a/arch/m68knommu/kernel/time.c b/arch/m68knommu/kernel/time.c
index 467053da2d08..77e5375a2dd5 100644
--- a/arch/m68knommu/kernel/time.c
+++ b/arch/m68knommu/kernel/time.c
@@ -27,7 +27,6 @@
27 27
28#define TICK_SIZE (tick_nsec / 1000) 28#define TICK_SIZE (tick_nsec / 1000)
29 29
30
31static inline int set_rtc_mmss(unsigned long nowtime) 30static inline int set_rtc_mmss(unsigned long nowtime)
32{ 31{
33 if (mach_set_clock_mmss) 32 if (mach_set_clock_mmss)
@@ -39,15 +38,11 @@ static inline int set_rtc_mmss(unsigned long nowtime)
39 * timer_interrupt() needs to keep up the real-time clock, 38 * timer_interrupt() needs to keep up the real-time clock,
40 * as well as call the "do_timer()" routine every clocktick 39 * as well as call the "do_timer()" routine every clocktick
41 */ 40 */
42static irqreturn_t timer_interrupt(int irq, void *dummy) 41irqreturn_t arch_timer_interrupt(int irq, void *dummy)
43{ 42{
44 /* last time the cmos clock got updated */ 43 /* last time the cmos clock got updated */
45 static long last_rtc_update=0; 44 static long last_rtc_update=0;
46 45
47 /* may need to kick the hardware timer */
48 if (mach_tick)
49 mach_tick();
50
51 write_seqlock(&xtime_lock); 46 write_seqlock(&xtime_lock);
52 47
53 do_timer(1); 48 do_timer(1);
@@ -103,10 +98,10 @@ void time_init(void)
103{ 98{
104 unsigned int year, mon, day, hour, min, sec; 99 unsigned int year, mon, day, hour, min, sec;
105 100
106 extern void arch_gettod(int *year, int *mon, int *day, int *hour, 101 if (mach_gettod)
107 int *min, int *sec); 102 mach_gettod(&year, &mon, &day, &hour, &min, &sec);
108 103 else
109 arch_gettod(&year, &mon, &day, &hour, &min, &sec); 104 year = mon = day = hour = min = sec = 0;
110 105
111 if ((year += 1900) < 1970) 106 if ((year += 1900) < 1970)
112 year += 100; 107 year += 100;
@@ -114,7 +109,7 @@ void time_init(void)
114 xtime.tv_nsec = 0; 109 xtime.tv_nsec = 0;
115 wall_to_monotonic.tv_sec = -xtime.tv_sec; 110 wall_to_monotonic.tv_sec = -xtime.tv_sec;
116 111
117 mach_sched_init(timer_interrupt); 112 hw_timer_init();
118} 113}
119 114
120/* 115/*
@@ -128,7 +123,7 @@ void do_gettimeofday(struct timeval *tv)
128 123
129 do { 124 do {
130 seq = read_seqbegin_irqsave(&xtime_lock, flags); 125 seq = read_seqbegin_irqsave(&xtime_lock, flags);
131 usec = mach_gettimeoffset ? mach_gettimeoffset() : 0; 126 usec = hw_timer_offset();
132 sec = xtime.tv_sec; 127 sec = xtime.tv_sec;
133 usec += (xtime.tv_nsec / 1000); 128 usec += (xtime.tv_nsec / 1000);
134 } while (read_seqretry_irqrestore(&xtime_lock, seq, flags)); 129 } while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
@@ -160,8 +155,7 @@ int do_settimeofday(struct timespec *tv)
160 * Discover what correction gettimeofday 155 * Discover what correction gettimeofday
161 * would have done, and then undo it! 156 * would have done, and then undo it!
162 */ 157 */
163 if (mach_gettimeoffset) 158 nsec -= (hw_timer_offset() * 1000);
164 nsec -= (mach_gettimeoffset() * 1000);
165 159
166 wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec); 160 wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
167 wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec); 161 wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);
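
With these changes the generic m68knommu time code no longer goes through mach_sched_init/mach_tick/mach_gettimeoffset; instead each ColdFire platform supplies hw_timer_init() and hw_timer_offset() directly (see the per-platform config.c cleanups below). A rough, purely illustrative sketch of what a platform-side timer file is expected to provide:

    /* illustrative only -- a real implementation programs the ColdFire timer/PIT */
    void hw_timer_init(void)
    {
            /* set the hardware timer up for HZ ticks and hook the interrupt
             * that ultimately calls arch_timer_interrupt() */
    }

    unsigned long hw_timer_offset(void)
    {
            /* microseconds elapsed since the last tick, read from the
             * free-running counter */
            return 0;
    }
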
diff --git a/arch/m68knommu/platform/5206/config.c b/arch/m68knommu/platform/5206/config.c
index d0f2dc5cb5a1..b3c4dd4cc135 100644
--- a/arch/m68knommu/platform/5206/config.c
+++ b/arch/m68knommu/platform/5206/config.c
@@ -10,13 +10,10 @@
10/***************************************************************************/ 10/***************************************************************************/
11 11
12#include <linux/kernel.h> 12#include <linux/kernel.h>
13#include <linux/sched.h>
14#include <linux/param.h> 13#include <linux/param.h>
15#include <linux/init.h> 14#include <linux/init.h>
16#include <linux/interrupt.h> 15#include <linux/interrupt.h>
17#include <asm/irq.h>
18#include <asm/dma.h> 16#include <asm/dma.h>
19#include <asm/traps.h>
20#include <asm/machdep.h> 17#include <asm/machdep.h>
21#include <asm/coldfire.h> 18#include <asm/coldfire.h>
22#include <asm/mcftimer.h> 19#include <asm/mcftimer.h>
@@ -25,9 +22,6 @@
25 22
26/***************************************************************************/ 23/***************************************************************************/
27 24
28void coldfire_tick(void);
29void coldfire_timer_init(irq_handler_t handler);
30unsigned long coldfire_timer_offset(void);
31void coldfire_reset(void); 25void coldfire_reset(void);
32 26
33/***************************************************************************/ 27/***************************************************************************/
@@ -97,9 +91,6 @@ int mcf_timerirqpending(int timer)
97void config_BSP(char *commandp, int size) 91void config_BSP(char *commandp, int size)
98{ 92{
99 mcf_setimr(MCFSIM_IMR_MASKALL); 93 mcf_setimr(MCFSIM_IMR_MASKALL);
100 mach_sched_init = coldfire_timer_init;
101 mach_tick = coldfire_tick;
102 mach_gettimeoffset = coldfire_timer_offset;
103 mach_reset = coldfire_reset; 94 mach_reset = coldfire_reset;
104} 95}
105 96
diff --git a/arch/m68knommu/platform/5206e/config.c b/arch/m68knommu/platform/5206e/config.c
index 425703fb6cee..f84a4aea8cb6 100644
--- a/arch/m68knommu/platform/5206e/config.c
+++ b/arch/m68knommu/platform/5206e/config.c
@@ -9,23 +9,16 @@
9/***************************************************************************/ 9/***************************************************************************/
10 10
11#include <linux/kernel.h> 11#include <linux/kernel.h>
12#include <linux/sched.h>
13#include <linux/param.h> 12#include <linux/param.h>
14#include <linux/interrupt.h> 13#include <linux/interrupt.h>
15#include <asm/irq.h>
16#include <asm/dma.h> 14#include <asm/dma.h>
17#include <asm/traps.h>
18#include <asm/machdep.h> 15#include <asm/machdep.h>
19#include <asm/coldfire.h> 16#include <asm/coldfire.h>
20#include <asm/mcftimer.h>
21#include <asm/mcfsim.h> 17#include <asm/mcfsim.h>
22#include <asm/mcfdma.h> 18#include <asm/mcfdma.h>
23 19
24/***************************************************************************/ 20/***************************************************************************/
25 21
26void coldfire_tick(void);
27void coldfire_timer_init(irq_handler_t handler);
28unsigned long coldfire_timer_offset(void);
29void coldfire_reset(void); 22void coldfire_reset(void);
30 23
31/***************************************************************************/ 24/***************************************************************************/
@@ -102,9 +95,6 @@ void config_BSP(char *commandp, int size)
102 commandp[size-1] = 0; 95 commandp[size-1] = 0;
103#endif /* CONFIG_NETtel */ 96#endif /* CONFIG_NETtel */
104 97
105 mach_sched_init = coldfire_timer_init;
106 mach_tick = coldfire_tick;
107 mach_gettimeoffset = coldfire_timer_offset;
108 mach_reset = coldfire_reset; 98 mach_reset = coldfire_reset;
109} 99}
110 100
diff --git a/arch/m68knommu/platform/520x/config.c b/arch/m68knommu/platform/520x/config.c
index a2c95bebd004..6edbd41261cc 100644
--- a/arch/m68knommu/platform/520x/config.c
+++ b/arch/m68knommu/platform/520x/config.c
@@ -27,9 +27,6 @@ unsigned int dma_device_address[MAX_M68K_DMA_CHANNELS];
27 27
28/***************************************************************************/ 28/***************************************************************************/
29 29
30void coldfire_pit_tick(void);
31void coldfire_pit_init(irq_handler_t handler);
32unsigned long coldfire_pit_offset(void);
33void coldfire_reset(void); 30void coldfire_reset(void);
34 31
35/***************************************************************************/ 32/***************************************************************************/
@@ -47,10 +44,7 @@ void mcf_autovector(unsigned int vec)
47 44
48void config_BSP(char *commandp, int size) 45void config_BSP(char *commandp, int size)
49{ 46{
50 mach_sched_init = coldfire_pit_init; 47 mach_reset = coldfire_reset;
51 mach_tick = coldfire_pit_tick;
52 mach_gettimeoffset = coldfire_pit_offset;
53 mach_reset = coldfire_reset;
54} 48}
55 49
56/***************************************************************************/ 50/***************************************************************************/
diff --git a/arch/m68knommu/platform/523x/config.c b/arch/m68knommu/platform/523x/config.c
index 0a3af05a434b..e7f80c8e8636 100644
--- a/arch/m68knommu/platform/523x/config.c
+++ b/arch/m68knommu/platform/523x/config.c
@@ -13,12 +13,10 @@
13/***************************************************************************/ 13/***************************************************************************/
14 14
15#include <linux/kernel.h> 15#include <linux/kernel.h>
16#include <linux/sched.h>
17#include <linux/param.h> 16#include <linux/param.h>
18#include <linux/init.h> 17#include <linux/init.h>
19#include <linux/interrupt.h> 18#include <linux/interrupt.h>
20#include <asm/dma.h> 19#include <asm/dma.h>
21#include <asm/traps.h>
22#include <asm/machdep.h> 20#include <asm/machdep.h>
23#include <asm/coldfire.h> 21#include <asm/coldfire.h>
24#include <asm/mcfsim.h> 22#include <asm/mcfsim.h>
@@ -26,9 +24,6 @@
26 24
27/***************************************************************************/ 25/***************************************************************************/
28 26
29void coldfire_pit_tick(void);
30void coldfire_pit_init(irq_handler_t handler);
31unsigned long coldfire_pit_offset(void);
32void coldfire_reset(void); 27void coldfire_reset(void);
33 28
34/***************************************************************************/ 29/***************************************************************************/
@@ -62,9 +57,6 @@ void mcf_autovector(unsigned int vec)
62void config_BSP(char *commandp, int size) 57void config_BSP(char *commandp, int size)
63{ 58{
64 mcf_disableall(); 59 mcf_disableall();
65 mach_sched_init = coldfire_pit_init;
66 mach_tick = coldfire_pit_tick;
67 mach_gettimeoffset = coldfire_pit_offset;
68 mach_reset = coldfire_reset; 60 mach_reset = coldfire_reset;
69} 61}
70 62
diff --git a/arch/m68knommu/platform/5249/config.c b/arch/m68knommu/platform/5249/config.c
index dc2c362590c2..d4d39435cb15 100644
--- a/arch/m68knommu/platform/5249/config.c
+++ b/arch/m68knommu/platform/5249/config.c
@@ -9,24 +9,17 @@
9/***************************************************************************/ 9/***************************************************************************/
10 10
11#include <linux/kernel.h> 11#include <linux/kernel.h>
12#include <linux/sched.h>
13#include <linux/param.h> 12#include <linux/param.h>
14#include <linux/init.h> 13#include <linux/init.h>
15#include <linux/interrupt.h> 14#include <linux/interrupt.h>
16#include <asm/irq.h>
17#include <asm/dma.h> 15#include <asm/dma.h>
18#include <asm/traps.h>
19#include <asm/machdep.h> 16#include <asm/machdep.h>
20#include <asm/coldfire.h> 17#include <asm/coldfire.h>
21#include <asm/mcftimer.h>
22#include <asm/mcfsim.h> 18#include <asm/mcfsim.h>
23#include <asm/mcfdma.h> 19#include <asm/mcfdma.h>
24 20
25/***************************************************************************/ 21/***************************************************************************/
26 22
27void coldfire_tick(void);
28void coldfire_timer_init(irq_handler_t handler);
29unsigned long coldfire_timer_offset(void);
30void coldfire_reset(void); 23void coldfire_reset(void);
31 24
32/***************************************************************************/ 25/***************************************************************************/
@@ -95,9 +88,6 @@ int mcf_timerirqpending(int timer)
95void config_BSP(char *commandp, int size) 88void config_BSP(char *commandp, int size)
96{ 89{
97 mcf_setimr(MCFSIM_IMR_MASKALL); 90 mcf_setimr(MCFSIM_IMR_MASKALL);
98 mach_sched_init = coldfire_timer_init;
99 mach_tick = coldfire_tick;
100 mach_gettimeoffset = coldfire_timer_offset;
101 mach_reset = coldfire_reset; 91 mach_reset = coldfire_reset;
102} 92}
103 93
diff --git a/arch/m68knommu/platform/5272/config.c b/arch/m68knommu/platform/5272/config.c
index 1365a8300d5d..634a6375e4a5 100644
--- a/arch/m68knommu/platform/5272/config.c
+++ b/arch/m68knommu/platform/5272/config.c
@@ -10,24 +10,17 @@
10/***************************************************************************/ 10/***************************************************************************/
11 11
12#include <linux/kernel.h> 12#include <linux/kernel.h>
13#include <linux/sched.h>
14#include <linux/param.h> 13#include <linux/param.h>
15#include <linux/init.h> 14#include <linux/init.h>
16#include <linux/interrupt.h> 15#include <linux/interrupt.h>
17#include <asm/irq.h>
18#include <asm/dma.h> 16#include <asm/dma.h>
19#include <asm/traps.h>
20#include <asm/machdep.h> 17#include <asm/machdep.h>
21#include <asm/coldfire.h> 18#include <asm/coldfire.h>
22#include <asm/mcftimer.h>
23#include <asm/mcfsim.h> 19#include <asm/mcfsim.h>
24#include <asm/mcfdma.h> 20#include <asm/mcfdma.h>
25 21
26/***************************************************************************/ 22/***************************************************************************/
27 23
28void coldfire_tick(void);
29void coldfire_timer_init(irq_handler_t handler);
30unsigned long coldfire_timer_offset(void);
31void coldfire_reset(void); 24void coldfire_reset(void);
32 25
33extern unsigned int mcf_timervector; 26extern unsigned int mcf_timervector;
@@ -128,9 +121,6 @@ void config_BSP(char *commandp, int size)
128 121
129 mcf_timervector = 69; 122 mcf_timervector = 69;
130 mcf_profilevector = 70; 123 mcf_profilevector = 70;
131 mach_sched_init = coldfire_timer_init;
132 mach_tick = coldfire_tick;
133 mach_gettimeoffset = coldfire_timer_offset;
134 mach_reset = coldfire_reset; 124 mach_reset = coldfire_reset;
135} 125}
136 126
diff --git a/arch/m68knommu/platform/527x/config.c b/arch/m68knommu/platform/527x/config.c
index 1b820441419a..9cbfbc68ae4f 100644
--- a/arch/m68knommu/platform/527x/config.c
+++ b/arch/m68knommu/platform/527x/config.c
@@ -13,12 +13,10 @@
13/***************************************************************************/ 13/***************************************************************************/
14 14
15#include <linux/kernel.h> 15#include <linux/kernel.h>
16#include <linux/sched.h>
17#include <linux/param.h> 16#include <linux/param.h>
18#include <linux/init.h> 17#include <linux/init.h>
19#include <linux/interrupt.h> 18#include <linux/interrupt.h>
20#include <asm/dma.h> 19#include <asm/dma.h>
21#include <asm/traps.h>
22#include <asm/machdep.h> 20#include <asm/machdep.h>
23#include <asm/coldfire.h> 21#include <asm/coldfire.h>
24#include <asm/mcfsim.h> 22#include <asm/mcfsim.h>
@@ -26,9 +24,6 @@
26 24
27/***************************************************************************/ 25/***************************************************************************/
28 26
29void coldfire_pit_tick(void);
30void coldfire_pit_init(irq_handler_t handler);
31unsigned long coldfire_pit_offset(void);
32void coldfire_reset(void); 27void coldfire_reset(void);
33 28
34/***************************************************************************/ 29/***************************************************************************/
@@ -62,9 +57,6 @@ void mcf_autovector(unsigned int vec)
62void config_BSP(char *commandp, int size) 57void config_BSP(char *commandp, int size)
63{ 58{
64 mcf_disableall(); 59 mcf_disableall();
65 mach_sched_init = coldfire_pit_init;
66 mach_tick = coldfire_pit_tick;
67 mach_gettimeoffset = coldfire_pit_offset;
68 mach_reset = coldfire_reset; 60 mach_reset = coldfire_reset;
69} 61}
70 62
diff --git a/arch/m68knommu/platform/528x/config.c b/arch/m68knommu/platform/528x/config.c
index a089e9513699..acbd43486d97 100644
--- a/arch/m68knommu/platform/528x/config.c
+++ b/arch/m68knommu/platform/528x/config.c
@@ -13,12 +13,10 @@
13/***************************************************************************/ 13/***************************************************************************/
14 14
15#include <linux/kernel.h> 15#include <linux/kernel.h>
16#include <linux/sched.h>
17#include <linux/param.h> 16#include <linux/param.h>
18#include <linux/init.h> 17#include <linux/init.h>
19#include <linux/interrupt.h> 18#include <linux/interrupt.h>
20#include <asm/dma.h> 19#include <asm/dma.h>
21#include <asm/traps.h>
22#include <asm/machdep.h> 20#include <asm/machdep.h>
23#include <asm/coldfire.h> 21#include <asm/coldfire.h>
24#include <asm/mcfsim.h> 22#include <asm/mcfsim.h>
@@ -26,9 +24,6 @@
26 24
27/***************************************************************************/ 25/***************************************************************************/
28 26
29void coldfire_pit_tick(void);
30void coldfire_pit_init(irq_handler_t handler);
31unsigned long coldfire_pit_offset(void);
32void coldfire_reset(void); 27void coldfire_reset(void);
33 28
34/***************************************************************************/ 29/***************************************************************************/
@@ -62,9 +57,6 @@ void mcf_autovector(unsigned int vec)
62void config_BSP(char *commandp, int size) 57void config_BSP(char *commandp, int size)
63{ 58{
64 mcf_disableall(); 59 mcf_disableall();
65 mach_sched_init = coldfire_pit_init;
66 mach_tick = coldfire_pit_tick;
67 mach_gettimeoffset = coldfire_pit_offset;
68 mach_reset = coldfire_reset; 60 mach_reset = coldfire_reset;
69} 61}
70 62
diff --git a/arch/m68knommu/platform/5307/config.c b/arch/m68knommu/platform/5307/config.c
index e3461619fd65..6040821e637d 100644
--- a/arch/m68knommu/platform/5307/config.c
+++ b/arch/m68knommu/platform/5307/config.c
@@ -10,25 +10,18 @@
10/***************************************************************************/ 10/***************************************************************************/
11 11
12#include <linux/kernel.h> 12#include <linux/kernel.h>
13#include <linux/sched.h>
14#include <linux/param.h> 13#include <linux/param.h>
15#include <linux/init.h> 14#include <linux/init.h>
16#include <linux/interrupt.h> 15#include <linux/interrupt.h>
17#include <asm/irq.h>
18#include <asm/dma.h> 16#include <asm/dma.h>
19#include <asm/traps.h>
20#include <asm/machdep.h> 17#include <asm/machdep.h>
21#include <asm/coldfire.h> 18#include <asm/coldfire.h>
22#include <asm/mcftimer.h>
23#include <asm/mcfsim.h> 19#include <asm/mcfsim.h>
24#include <asm/mcfdma.h> 20#include <asm/mcfdma.h>
25#include <asm/mcfwdebug.h> 21#include <asm/mcfwdebug.h>
26 22
27/***************************************************************************/ 23/***************************************************************************/
28 24
29void coldfire_tick(void);
30void coldfire_timer_init(irq_handler_t handler);
31unsigned long coldfire_timer_offset(void);
32void coldfire_reset(void); 25void coldfire_reset(void);
33 26
34extern unsigned int mcf_timervector; 27extern unsigned int mcf_timervector;
@@ -122,9 +115,6 @@ void config_BSP(char *commandp, int size)
122 mcf_timerlevel = 6; 115 mcf_timerlevel = 6;
123#endif 116#endif
124 117
125 mach_sched_init = coldfire_timer_init;
126 mach_tick = coldfire_tick;
127 mach_gettimeoffset = coldfire_timer_offset;
128 mach_reset = coldfire_reset; 118 mach_reset = coldfire_reset;
129 119
130#ifdef MCF_BDM_DISABLE 120#ifdef MCF_BDM_DISABLE
diff --git a/arch/m68knommu/platform/5307/entry.S b/arch/m68knommu/platform/5307/entry.S
index a8cd867805ca..b333731b875a 100644
--- a/arch/m68knommu/platform/5307/entry.S
+++ b/arch/m68knommu/platform/5307/entry.S
@@ -74,7 +74,8 @@ ENTRY(system_call)
74 movel %sp,%d2 /* get thread_info pointer */ 74 movel %sp,%d2 /* get thread_info pointer */
75 andl #-THREAD_SIZE,%d2 /* at start of kernel stack */ 75 andl #-THREAD_SIZE,%d2 /* at start of kernel stack */
76 movel %d2,%a0 76 movel %d2,%a0
77 movel %sp,%a0@(THREAD_ESP0) /* save top of frame */ 77 movel %a0@,%a1 /* save top of frame */
78 movel %sp,%a1@(TASK_THREAD+THREAD_ESP0)
78 btst #(TIF_SYSCALL_TRACE%8),%a0@(TI_FLAGS+(31-TIF_SYSCALL_TRACE)/8) 79 btst #(TIF_SYSCALL_TRACE%8),%a0@(TI_FLAGS+(31-TIF_SYSCALL_TRACE)/8)
79 bnes 1f 80 bnes 1f
80 81
@@ -83,6 +84,8 @@ ENTRY(system_call)
83 movel %d0,%sp@(PT_D0) /* save the return value */ 84 movel %d0,%sp@(PT_D0) /* save the return value */
84 jra ret_from_exception 85 jra ret_from_exception
851: 861:
87 movel #-ENOSYS,%d2 /* strace needs -ENOSYS in PT_D0 */
88 movel %d2,PT_D0(%sp) /* on syscall entry */
86 subql #4,%sp 89 subql #4,%sp
87 SAVE_SWITCH_STACK 90 SAVE_SWITCH_STACK
88 jbsr syscall_trace 91 jbsr syscall_trace
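
The two added instructions preload the saved D0 slot with -ENOSYS before the trace call, matching the added comment: a tracer that inspects the frame at syscall entry then sees -ENOSYS rather than whatever was left in that slot. A loose C rendering of that flow, with pt_regs reduced to the one field involved — an illustration of the idea, not the real entry path:

/* The return-value slot is primed with -ENOSYS at entry so the tracer's
 * first stop already shows a sane value.  fake_pt_regs is a stand-in. */
#include <errno.h>
#include <stdio.h>

struct fake_pt_regs { long d0; };             /* the PT_D0 slot */

static long do_syscall(int nr)
{
    return nr == 1 ? 42 : -ENOSYS;
}

static void syscall_trace(const struct fake_pt_regs *regs)
{
    printf("tracer sees d0=%ld\n", regs->d0); /* entry and exit stops */
}

static void traced_syscall(struct fake_pt_regs *regs, int nr)
{
    regs->d0 = -ENOSYS;                       /* what the added movel does */
    syscall_trace(regs);                      /* entry: tracer sees -ENOSYS */
    regs->d0 = do_syscall(nr);
    syscall_trace(regs);                      /* exit: tracer sees the result */
}

int main(void)
{
    struct fake_pt_regs regs = { 0 };
    traced_syscall(&regs, 1);                 /* valid syscall */
    traced_syscall(&regs, 99);                /* unknown: stays -ENOSYS */
    return 0;
}
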
diff --git a/arch/m68knommu/platform/5307/pit.c b/arch/m68knommu/platform/5307/pit.c
index f18352fa35a6..173b754d1cda 100644
--- a/arch/m68knommu/platform/5307/pit.c
+++ b/arch/m68knommu/platform/5307/pit.c
@@ -17,6 +17,7 @@
17#include <linux/init.h> 17#include <linux/init.h>
18#include <linux/interrupt.h> 18#include <linux/interrupt.h>
19#include <linux/irq.h> 19#include <linux/irq.h>
20#include <asm/machdep.h>
20#include <asm/io.h> 21#include <asm/io.h>
21#include <asm/coldfire.h> 22#include <asm/coldfire.h>
22#include <asm/mcfpit.h> 23#include <asm/mcfpit.h>
@@ -31,28 +32,30 @@
31 32
32/***************************************************************************/ 33/***************************************************************************/
33 34
34void coldfire_pit_tick(void) 35static irqreturn_t hw_tick(int irq, void *dummy)
35{ 36{
36 unsigned short pcsr; 37 unsigned short pcsr;
37 38
38 /* Reset the ColdFire timer */ 39 /* Reset the ColdFire timer */
39 pcsr = __raw_readw(TA(MCFPIT_PCSR)); 40 pcsr = __raw_readw(TA(MCFPIT_PCSR));
40 __raw_writew(pcsr | MCFPIT_PCSR_PIF, TA(MCFPIT_PCSR)); 41 __raw_writew(pcsr | MCFPIT_PCSR_PIF, TA(MCFPIT_PCSR));
42
43 return arch_timer_interrupt(irq, dummy);
41} 44}
42 45
43/***************************************************************************/ 46/***************************************************************************/
44 47
45static struct irqaction coldfire_pit_irq = { 48static struct irqaction coldfire_pit_irq = {
46 .name = "timer", 49 .name = "timer",
47 .flags = IRQF_DISABLED | IRQF_TIMER, 50 .flags = IRQF_DISABLED | IRQF_TIMER,
51 .handler = hw_tick,
48}; 52};
49 53
50void coldfire_pit_init(irq_handler_t handler) 54void hw_timer_init(void)
51{ 55{
52 volatile unsigned char *icrp; 56 volatile unsigned char *icrp;
53 volatile unsigned long *imrp; 57 volatile unsigned long *imrp;
54 58
55 coldfire_pit_irq.handler = handler;
56 setup_irq(MCFINT_VECBASE + MCFINT_PIT1, &coldfire_pit_irq); 59 setup_irq(MCFINT_VECBASE + MCFINT_PIT1, &coldfire_pit_irq);
57 60
58 icrp = (volatile unsigned char *) (MCF_IPSBAR + MCFICM_INTC0 + 61 icrp = (volatile unsigned char *) (MCF_IPSBAR + MCFICM_INTC0 +
@@ -71,7 +74,7 @@ void coldfire_pit_init(irq_handler_t handler)
71 74
72/***************************************************************************/ 75/***************************************************************************/
73 76
74unsigned long coldfire_pit_offset(void) 77unsigned long hw_timer_offset(void)
75{ 78{
76 volatile unsigned long *ipr; 79 volatile unsigned long *ipr;
77 unsigned long pmr, pcntr, offset; 80 unsigned long pmr, pcntr, offset;
diff --git a/arch/m68knommu/platform/5307/timers.c b/arch/m68knommu/platform/5307/timers.c
index 64bd0ff9029e..489dec85c859 100644
--- a/arch/m68knommu/platform/5307/timers.c
+++ b/arch/m68knommu/platform/5307/timers.c
@@ -9,10 +9,9 @@
9/***************************************************************************/ 9/***************************************************************************/
10 10
11#include <linux/kernel.h> 11#include <linux/kernel.h>
12#include <linux/init.h>
12#include <linux/sched.h> 13#include <linux/sched.h>
13#include <linux/param.h>
14#include <linux/interrupt.h> 14#include <linux/interrupt.h>
15#include <linux/init.h>
16#include <linux/irq.h> 15#include <linux/irq.h>
17#include <asm/io.h> 16#include <asm/io.h>
18#include <asm/traps.h> 17#include <asm/traps.h>
@@ -54,24 +53,28 @@ extern int mcf_timerirqpending(int timer);
54 53
55/***************************************************************************/ 54/***************************************************************************/
56 55
57void coldfire_tick(void) 56static irqreturn_t hw_tick(int irq, void *dummy)
58{ 57{
59 /* Reset the ColdFire timer */ 58 /* Reset the ColdFire timer */
60 __raw_writeb(MCFTIMER_TER_CAP | MCFTIMER_TER_REF, TA(MCFTIMER_TER)); 59 __raw_writeb(MCFTIMER_TER_CAP | MCFTIMER_TER_REF, TA(MCFTIMER_TER));
60
61 return arch_timer_interrupt(irq, dummy);
61} 62}
62 63
63/***************************************************************************/ 64/***************************************************************************/
64 65
65static struct irqaction coldfire_timer_irq = { 66static struct irqaction coldfire_timer_irq = {
66 .name = "timer", 67 .name = "timer",
67 .flags = IRQF_DISABLED | IRQF_TIMER, 68 .flags = IRQF_DISABLED | IRQF_TIMER,
69 .handler = hw_tick,
68}; 70};
69 71
72/***************************************************************************/
73
70static int ticks_per_intr; 74static int ticks_per_intr;
71 75
72void coldfire_timer_init(irq_handler_t handler) 76void hw_timer_init(void)
73{ 77{
74 coldfire_timer_irq.handler = handler;
75 setup_irq(mcf_timervector, &coldfire_timer_irq); 78 setup_irq(mcf_timervector, &coldfire_timer_irq);
76 79
77 __raw_writew(MCFTIMER_TMR_DISABLE, TA(MCFTIMER_TMR)); 80 __raw_writew(MCFTIMER_TMR_DISABLE, TA(MCFTIMER_TMR));
@@ -89,7 +92,7 @@ void coldfire_timer_init(irq_handler_t handler)
89 92
90/***************************************************************************/ 93/***************************************************************************/
91 94
92unsigned long coldfire_timer_offset(void) 95unsigned long hw_timer_offset(void)
93{ 96{
94 unsigned long tcn, offset; 97 unsigned long tcn, offset;
95 98
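
The reworked handler above is the other half of the config.c change: hw_tick() acks the timer hardware and then hands off to arch_timer_interrupt(), and the irqaction names hw_tick at build time instead of receiving a handler through coldfire_timer_init(). A small stand-alone sketch of that ack-then-delegate shape — irqreturn_t, the register write and setup_irq() are all mocked here:

/* hw_tick() clears the timer's event flag, then calls the common tick
 * handler; the irqaction's handler is fixed at compile time. */
#include <stdio.h>

typedef int irqreturn_t;
#define IRQ_HANDLED 1

static unsigned char fake_timer_event_reg;    /* stands in for TA(MCFTIMER_TER) */

static irqreturn_t arch_timer_interrupt(int irq, void *dev_id)
{
    (void)dev_id;
    printf("generic tick work on irq %d\n", irq);
    return IRQ_HANDLED;
}

static irqreturn_t hw_tick(int irq, void *dummy)
{
    fake_timer_event_reg = 0x03;              /* "reset the ColdFire timer" */
    return arch_timer_interrupt(irq, dummy);  /* then do the generic work */
}

struct fake_irqaction {
    const char *name;
    irqreturn_t (*handler)(int, void *);
};

static struct fake_irqaction coldfire_timer_irq = {
    .name    = "timer",
    .handler = hw_tick,                       /* no longer passed in at init */
};

int main(void)
{
    /* what setup_irq() plus one timer expiry amount to */
    return coldfire_timer_irq.handler(30, NULL) == IRQ_HANDLED ? 0 : 1;
}
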
diff --git a/arch/m68knommu/platform/532x/config.c b/arch/m68knommu/platform/532x/config.c
index b32c6425f821..f77328b7b6db 100644
--- a/arch/m68knommu/platform/532x/config.c
+++ b/arch/m68knommu/platform/532x/config.c
@@ -18,25 +18,18 @@
18/***************************************************************************/ 18/***************************************************************************/
19 19
20#include <linux/kernel.h> 20#include <linux/kernel.h>
21#include <linux/sched.h>
22#include <linux/param.h> 21#include <linux/param.h>
23#include <linux/init.h> 22#include <linux/init.h>
24#include <linux/interrupt.h> 23#include <linux/interrupt.h>
25#include <asm/irq.h>
26#include <asm/dma.h> 24#include <asm/dma.h>
27#include <asm/traps.h>
28#include <asm/machdep.h> 25#include <asm/machdep.h>
29#include <asm/coldfire.h> 26#include <asm/coldfire.h>
30#include <asm/mcftimer.h>
31#include <asm/mcfsim.h> 27#include <asm/mcfsim.h>
32#include <asm/mcfdma.h> 28#include <asm/mcfdma.h>
33#include <asm/mcfwdebug.h> 29#include <asm/mcfwdebug.h>
34 30
35/***************************************************************************/ 31/***************************************************************************/
36 32
37void coldfire_tick(void);
38void coldfire_timer_init(irq_handler_t handler);
39unsigned long coldfire_timer_offset(void);
40void coldfire_reset(void); 33void coldfire_reset(void);
41 34
42extern unsigned int mcf_timervector; 35extern unsigned int mcf_timervector;
@@ -104,9 +97,6 @@ void config_BSP(char *commandp, int size)
104 97
105 mcf_timervector = 64+32; 98 mcf_timervector = 64+32;
106 mcf_profilevector = 64+33; 99 mcf_profilevector = 64+33;
107 mach_sched_init = coldfire_timer_init;
108 mach_tick = coldfire_tick;
109 mach_gettimeoffset = coldfire_timer_offset;
110 mach_reset = coldfire_reset; 100 mach_reset = coldfire_reset;
111 101
112#ifdef MCF_BDM_DISABLE 102#ifdef MCF_BDM_DISABLE
diff --git a/arch/m68knommu/platform/5407/config.c b/arch/m68knommu/platform/5407/config.c
index e692536817d8..2d3b62eba7ca 100644
--- a/arch/m68knommu/platform/5407/config.c
+++ b/arch/m68knommu/platform/5407/config.c
@@ -10,24 +10,17 @@
10/***************************************************************************/ 10/***************************************************************************/
11 11
12#include <linux/kernel.h> 12#include <linux/kernel.h>
13#include <linux/sched.h>
14#include <linux/param.h> 13#include <linux/param.h>
15#include <linux/init.h> 14#include <linux/init.h>
16#include <linux/interrupt.h> 15#include <linux/interrupt.h>
17#include <asm/irq.h>
18#include <asm/dma.h> 16#include <asm/dma.h>
19#include <asm/traps.h>
20#include <asm/machdep.h> 17#include <asm/machdep.h>
21#include <asm/coldfire.h> 18#include <asm/coldfire.h>
22#include <asm/mcftimer.h>
23#include <asm/mcfsim.h> 19#include <asm/mcfsim.h>
24#include <asm/mcfdma.h> 20#include <asm/mcfdma.h>
25 21
26/***************************************************************************/ 22/***************************************************************************/
27 23
28void coldfire_tick(void);
29void coldfire_timer_init(irq_handler_t handler);
30unsigned long coldfire_timer_offset(void);
31void coldfire_reset(void); 24void coldfire_reset(void);
32 25
33extern unsigned int mcf_timervector; 26extern unsigned int mcf_timervector;
@@ -108,9 +101,6 @@ void config_BSP(char *commandp, int size)
108 mcf_timerlevel = 6; 101 mcf_timerlevel = 6;
109#endif 102#endif
110 103
111 mach_sched_init = coldfire_timer_init;
112 mach_tick = coldfire_tick;
113 mach_gettimeoffset = coldfire_timer_offset;
114 mach_reset = coldfire_reset; 104 mach_reset = coldfire_reset;
115} 105}
116 106
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 3ecff5e9e4f3..61262c5f9c62 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -66,6 +66,7 @@ config BCM47XX
66config MIPS_COBALT 66config MIPS_COBALT
67 bool "Cobalt Server" 67 bool "Cobalt Server"
68 select CEVT_R4K 68 select CEVT_R4K
69 select CEVT_GT641XX
69 select DMA_NONCOHERENT 70 select DMA_NONCOHERENT
70 select HW_HAS_PCI 71 select HW_HAS_PCI
71 select I8253 72 select I8253
@@ -729,6 +730,9 @@ config ARCH_MAY_HAVE_PC_FDC
729config BOOT_RAW 730config BOOT_RAW
730 bool 731 bool
731 732
733config CEVT_GT641XX
734 bool
735
732config CEVT_R4K 736config CEVT_R4K
733 bool 737 bool
734 738
diff --git a/arch/mips/Kconfig.debug b/arch/mips/Kconfig.debug
index 3efe117721aa..fd7124c1b75a 100644
--- a/arch/mips/Kconfig.debug
+++ b/arch/mips/Kconfig.debug
@@ -6,18 +6,6 @@ config TRACE_IRQFLAGS_SUPPORT
6 6
7source "lib/Kconfig.debug" 7source "lib/Kconfig.debug"
8 8
9config CROSSCOMPILE
10 bool "Are you using a crosscompiler"
11 help
12 Say Y here if you are compiling the kernel on a different
13 architecture than the one it is intended to run on. This is just a
14 convenience option which will select the appropriate value for
15 the CROSS_COMPILE make variable which otherwise has to be passed on
16 the command line from mips-linux-, mipsel-linux-, mips64-linux- and
17 mips64el-linux- as appropriate for a particular kernel configuration.
18 You will have to pass the value for CROSS_COMPILE manually if the
19 name prefix for your tools is different.
20
21config CMDLINE 9config CMDLINE
22 string "Default kernel command string" 10 string "Default kernel command string"
23 default "" 11 default ""
diff --git a/arch/mips/Makefile b/arch/mips/Makefile
index 14164c2b8791..23c17755eca0 100644
--- a/arch/mips/Makefile
+++ b/arch/mips/Makefile
@@ -18,15 +18,15 @@ cflags-y :=
18# Select the object file format to substitute into the linker script. 18# Select the object file format to substitute into the linker script.
19# 19#
20ifdef CONFIG_CPU_LITTLE_ENDIAN 20ifdef CONFIG_CPU_LITTLE_ENDIAN
2132bit-tool-prefix = mipsel-linux- 2132bit-tool-archpref = mipsel
2264bit-tool-prefix = mips64el-linux- 2264bit-tool-archpref = mips64el
2332bit-bfd = elf32-tradlittlemips 2332bit-bfd = elf32-tradlittlemips
2464bit-bfd = elf64-tradlittlemips 2464bit-bfd = elf64-tradlittlemips
2532bit-emul = elf32ltsmip 2532bit-emul = elf32ltsmip
2664bit-emul = elf64ltsmip 2664bit-emul = elf64ltsmip
27else 27else
2832bit-tool-prefix = mips-linux- 2832bit-tool-archpref = mips
2964bit-tool-prefix = mips64-linux- 2964bit-tool-archpref = mips64
3032bit-bfd = elf32-tradbigmips 3032bit-bfd = elf32-tradbigmips
3164bit-bfd = elf64-tradbigmips 3164bit-bfd = elf64-tradbigmips
3232bit-emul = elf32btsmip 3232bit-emul = elf32btsmip
@@ -34,16 +34,18 @@ else
34endif 34endif
35 35
36ifdef CONFIG_32BIT 36ifdef CONFIG_32BIT
37tool-prefix = $(32bit-tool-prefix) 37tool-archpref = $(32bit-tool-archpref)
38UTS_MACHINE := mips 38UTS_MACHINE := mips
39endif 39endif
40ifdef CONFIG_64BIT 40ifdef CONFIG_64BIT
41tool-prefix = $(64bit-tool-prefix) 41tool-archpref = $(64bit-tool-archpref)
42UTS_MACHINE := mips64 42UTS_MACHINE := mips64
43endif 43endif
44 44
45ifdef CONFIG_CROSSCOMPILE 45ifneq ($(SUBARCH),$(ARCH))
46CROSS_COMPILE := $(tool-prefix) 46 ifeq ($(CROSS_COMPILE),)
47 CROSS_COMPILE := $(call cc-cross-prefix, $(tool-archpref)-linux- $(tool-archpref)-gnu-linux- $(tool-archpref)-unknown-gnu-linux-)
48 endif
47endif 49endif
48 50
49ifdef CONFIG_32BIT 51ifdef CONFIG_32BIT
diff --git a/arch/mips/cobalt/Makefile b/arch/mips/cobalt/Makefile
index 6b83f4ddc8fc..d73833b7c781 100644
--- a/arch/mips/cobalt/Makefile
+++ b/arch/mips/cobalt/Makefile
@@ -2,7 +2,7 @@
2# Makefile for the Cobalt micro systems family specific parts of the kernel 2# Makefile for the Cobalt micro systems family specific parts of the kernel
3# 3#
4 4
5obj-y := buttons.o irq.o led.o reset.o rtc.o serial.o setup.o 5obj-y := buttons.o irq.o led.o reset.o rtc.o serial.o setup.o time.o
6 6
7obj-$(CONFIG_PCI) += pci.o 7obj-$(CONFIG_PCI) += pci.o
8obj-$(CONFIG_EARLY_PRINTK) += console.o 8obj-$(CONFIG_EARLY_PRINTK) += console.o
diff --git a/arch/mips/cobalt/setup.c b/arch/mips/cobalt/setup.c
index d11bb1bc7b6b..dd23beb8604f 100644
--- a/arch/mips/cobalt/setup.c
+++ b/arch/mips/cobalt/setup.c
@@ -9,19 +9,17 @@
9 * Copyright (C) 2001, 2002, 2003 by Liam Davies (ldavies@agile.tv) 9 * Copyright (C) 2001, 2002, 2003 by Liam Davies (ldavies@agile.tv)
10 * 10 *
11 */ 11 */
12#include <linux/interrupt.h>
13#include <linux/init.h> 12#include <linux/init.h>
13#include <linux/interrupt.h>
14#include <linux/io.h>
15#include <linux/ioport.h>
14#include <linux/pm.h> 16#include <linux/pm.h>
15 17
16#include <asm/bootinfo.h> 18#include <asm/bootinfo.h>
17#include <asm/time.h>
18#include <asm/i8253.h>
19#include <asm/io.h>
20#include <asm/reboot.h> 19#include <asm/reboot.h>
21#include <asm/gt64120.h> 20#include <asm/gt64120.h>
22 21
23#include <cobalt.h> 22#include <cobalt.h>
24#include <irq.h>
25 23
26extern void cobalt_machine_restart(char *command); 24extern void cobalt_machine_restart(char *command);
27extern void cobalt_machine_halt(void); 25extern void cobalt_machine_halt(void);
@@ -41,17 +39,6 @@ const char *get_system_type(void)
41 return "MIPS Cobalt"; 39 return "MIPS Cobalt";
42} 40}
43 41
44void __init plat_timer_setup(struct irqaction *irq)
45{
46 /* Load timer value for HZ (TCLK is 50MHz) */
47 GT_WRITE(GT_TC0_OFS, 50*1000*1000 / HZ);
48
49 /* Enable timer0 */
50 GT_WRITE(GT_TC_CONTROL_OFS, GT_TC_CONTROL_ENTC0_MSK | GT_TC_CONTROL_SELTC0_MSK);
51
52 setup_irq(GT641XX_TIMER0_IRQ, irq);
53}
54
55/* 42/*
56 * Cobalt doesn't have PS/2 keyboard/mouse interfaces, 43 * Cobalt doesn't have PS/2 keyboard/mouse interfaces,
57 * keyboard conntroller is never used. 44 * keyboard conntroller is never used.
@@ -84,11 +71,6 @@ static struct resource cobalt_reserved_resources[] = {
84 }, 71 },
85}; 72};
86 73
87void __init plat_time_init(void)
88{
89 setup_pit_timer();
90}
91
92void __init plat_mem_setup(void) 74void __init plat_mem_setup(void)
93{ 75{
94 int i; 76 int i;
diff --git a/arch/mips/cobalt/time.c b/arch/mips/cobalt/time.c
new file mode 100644
index 000000000000..fa819fccd5db
--- /dev/null
+++ b/arch/mips/cobalt/time.c
@@ -0,0 +1,35 @@
1/*
2 * Cobalt time initialization.
3 *
4 * Copyright (C) 2007 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19 */
20#include <linux/init.h>
21
22#include <asm/gt64120.h>
23#include <asm/i8253.h>
24#include <asm/time.h>
25
26#define GT641XX_BASE_CLOCK 50000000 /* 50MHz */
27
28void __init plat_time_init(void)
29{
30 setup_pit_timer();
31
32 gt641xx_set_base_clock(GT641XX_BASE_CLOCK);
33
34 mips_timer_state = gt641xx_timer0_state;
35}
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
index a3afa39faae5..d7745c8976f6 100644
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -9,6 +9,7 @@ obj-y += cpu-probe.o branch.o entry.o genex.o irq.o process.o \
9 time.o topology.o traps.o unaligned.o 9 time.o topology.o traps.o unaligned.o
10 10
11obj-$(CONFIG_CEVT_R4K) += cevt-r4k.o 11obj-$(CONFIG_CEVT_R4K) += cevt-r4k.o
12obj-$(CONFIG_CEVT_GT641XX) += cevt-gt641xx.o
12 13
13binfmt_irix-objs := irixelf.o irixinv.o irixioctl.o irixsig.o \ 14binfmt_irix-objs := irixelf.o irixinv.o irixioctl.o irixsig.o \
14 irix5sys.o sysirix.o 15 irix5sys.o sysirix.o
diff --git a/arch/mips/kernel/cevt-gt641xx.c b/arch/mips/kernel/cevt-gt641xx.c
new file mode 100644
index 000000000000..4c651b2680f9
--- /dev/null
+++ b/arch/mips/kernel/cevt-gt641xx.c
@@ -0,0 +1,144 @@
1/*
2 * GT641xx clockevent routines.
3 *
4 * Copyright (C) 2007 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19 */
20#include <linux/clockchips.h>
21#include <linux/init.h>
22#include <linux/interrupt.h>
23#include <linux/spinlock.h>
24
25#include <asm/gt64120.h>
26#include <asm/time.h>
27
28#include <irq.h>
29
30static DEFINE_SPINLOCK(gt641xx_timer_lock);
31static unsigned int gt641xx_base_clock;
32
33void gt641xx_set_base_clock(unsigned int clock)
34{
35 gt641xx_base_clock = clock;
36}
37
38int gt641xx_timer0_state(void)
39{
40 if (GT_READ(GT_TC0_OFS))
41 return 0;
42
43 GT_WRITE(GT_TC0_OFS, gt641xx_base_clock / HZ);
44 GT_WRITE(GT_TC_CONTROL_OFS, GT_TC_CONTROL_ENTC0_MSK);
45
46 return 1;
47}
48
49static int gt641xx_timer0_set_next_event(unsigned long delta,
50 struct clock_event_device *evt)
51{
52 unsigned long flags;
53 u32 ctrl;
54
55 spin_lock_irqsave(&gt641xx_timer_lock, flags);
56
57 ctrl = GT_READ(GT_TC_CONTROL_OFS);
58 ctrl &= ~(GT_TC_CONTROL_ENTC0_MSK | GT_TC_CONTROL_SELTC0_MSK);
59 ctrl |= GT_TC_CONTROL_ENTC0_MSK;
60
61 GT_WRITE(GT_TC0_OFS, delta);
62 GT_WRITE(GT_TC_CONTROL_OFS, ctrl);
63
64 spin_unlock_irqrestore(&gt641xx_timer_lock, flags);
65
66 return 0;
67}
68
69static void gt641xx_timer0_set_mode(enum clock_event_mode mode,
70 struct clock_event_device *evt)
71{
72 unsigned long flags;
73 u32 ctrl;
74
75 spin_lock_irqsave(&gt641xx_timer_lock, flags);
76
77 ctrl = GT_READ(GT_TC_CONTROL_OFS);
78 ctrl &= ~(GT_TC_CONTROL_ENTC0_MSK | GT_TC_CONTROL_SELTC0_MSK);
79
80 switch (mode) {
81 case CLOCK_EVT_MODE_PERIODIC:
82 ctrl |= GT_TC_CONTROL_ENTC0_MSK | GT_TC_CONTROL_SELTC0_MSK;
83 break;
84 case CLOCK_EVT_MODE_ONESHOT:
85 ctrl |= GT_TC_CONTROL_ENTC0_MSK;
86 break;
87 default:
88 break;
89 }
90
91 GT_WRITE(GT_TC_CONTROL_OFS, ctrl);
92
93 spin_unlock_irqrestore(&gt641xx_timer_lock, flags);
94}
95
96static void gt641xx_timer0_event_handler(struct clock_event_device *dev)
97{
98}
99
100static struct clock_event_device gt641xx_timer0_clockevent = {
101 .name = "gt641xx-timer0",
102 .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
103 .cpumask = CPU_MASK_CPU0,
104 .irq = GT641XX_TIMER0_IRQ,
105 .set_next_event = gt641xx_timer0_set_next_event,
106 .set_mode = gt641xx_timer0_set_mode,
107 .event_handler = gt641xx_timer0_event_handler,
108};
109
110static irqreturn_t gt641xx_timer0_interrupt(int irq, void *dev_id)
111{
112 struct clock_event_device *cd = &gt641xx_timer0_clockevent;
113
114 cd->event_handler(cd);
115
116 return IRQ_HANDLED;
117}
118
119static struct irqaction gt641xx_timer0_irqaction = {
120 .handler = gt641xx_timer0_interrupt,
121 .flags = IRQF_DISABLED | IRQF_PERCPU,
122 .name = "gt641xx_timer0",
123};
124
125static int __init gt641xx_timer0_clockevent_init(void)
126{
127 struct clock_event_device *cd;
128
129 if (!gt641xx_base_clock)
130 return 0;
131
132 GT_WRITE(GT_TC0_OFS, gt641xx_base_clock / HZ);
133
134 cd = &gt641xx_timer0_clockevent;
135 cd->rating = 200 + gt641xx_base_clock / 10000000;
136 cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd);
137 cd->min_delta_ns = clockevent_delta2ns(0x300, cd);
138 clockevent_set_clock(cd, gt641xx_base_clock);
139
140 clockevents_register_device(&gt641xx_timer0_clockevent);
141
142 return setup_irq(GT641XX_TIMER0_IRQ, &gt641xx_timer0_irqaction);
143}
144arch_initcall(gt641xx_timer0_clockevent_init);
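
Some rough numbers for the device registered above, assuming the 50 MHz base clock that the Cobalt code passes in via gt641xx_set_base_clock(): the rating works out to 205, and approximating clockevent_delta2ns() as cycles * 10^9 / clock gives the one-shot range below. Plain arithmetic only, no kernel interfaces:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint64_t clock = 50000000;                /* gt641xx_base_clock on Cobalt */

    unsigned int rating = 200 + (unsigned int)(clock / 10000000);

    /* clockevent_delta2ns(cycles, cd) ~= cycles * 1e9 / clock */
    uint64_t max_ns = 0x7fffffffULL * 1000000000ULL / clock;
    uint64_t min_ns = 0x300ULL * 1000000000ULL / clock;

    printf("rating      : %u\n", rating);                          /* 205 */
    printf("max one-shot: %llu ns (~%llu s)\n",
           (unsigned long long)max_ns,
           (unsigned long long)(max_ns / 1000000000ULL));          /* ~42 s */
    printf("min one-shot: %llu ns\n", (unsigned long long)min_ns); /* 15360 ns */
    return 0;
}
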
diff --git a/arch/mips/kernel/cevt-r4k.c b/arch/mips/kernel/cevt-r4k.c
index a915e5693421..ae2984fff580 100644
--- a/arch/mips/kernel/cevt-r4k.c
+++ b/arch/mips/kernel/cevt-r4k.c
@@ -186,7 +186,7 @@ static int c0_compare_int_usable(void)
186 * IP7 already pending? Try to clear it by acking the timer. 186 * IP7 already pending? Try to clear it by acking the timer.
187 */ 187 */
188 if (c0_compare_int_pending()) { 188 if (c0_compare_int_pending()) {
189 write_c0_compare(read_c0_compare()); 189 write_c0_compare(read_c0_count());
190 irq_disable_hazard(); 190 irq_disable_hazard();
191 if (c0_compare_int_pending()) 191 if (c0_compare_int_pending())
192 return 0; 192 return 0;
@@ -202,7 +202,7 @@ static int c0_compare_int_usable(void)
202 if (!c0_compare_int_pending()) 202 if (!c0_compare_int_pending())
203 return 0; 203 return 0;
204 204
205 write_c0_compare(read_c0_compare()); 205 write_c0_compare(read_c0_count());
206 irq_disable_hazard(); 206 irq_disable_hazard();
207 if (c0_compare_int_pending()) 207 if (c0_compare_int_pending())
208 return 0; 208 return 0;
diff --git a/arch/mips/kernel/time.c b/arch/mips/kernel/time.c
index c4e6866d5cbc..6c6849a8f136 100644
--- a/arch/mips/kernel/time.c
+++ b/arch/mips/kernel/time.c
@@ -195,8 +195,8 @@ void __cpuinit clockevent_set_clock(struct clock_event_device *cd,
195 195
196 /* Find a shift value */ 196 /* Find a shift value */
197 for (shift = 32; shift > 0; shift--) { 197 for (shift = 32; shift > 0; shift--) {
198 temp = (u64) NSEC_PER_SEC << shift; 198 temp = (u64) clock << shift;
199 do_div(temp, clock); 199 do_div(temp, NSEC_PER_SEC);
200 if ((temp >> 32) == 0) 200 if ((temp >> 32) == 0)
201 break; 201 break;
202 } 202 }
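
The swap above matters because a clock_event_device scales in the opposite direction from a clocksource: its mult converts nanoseconds into cycles, so it must be (clock << shift) / NSEC_PER_SEC, whereas the old expression computed the clocksource-style (NSEC_PER_SEC << shift) / clock. A self-contained check of what the corrected loop yields for a 50 MHz clock, with do_div() replaced by ordinary 64-bit division:

#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_SEC 1000000000ULL

int main(void)
{
    uint64_t clock = 50000000;                /* Hz */
    unsigned int shift;
    uint64_t temp = 0;

    /* find the largest shift whose mult still fits in 32 bits */
    for (shift = 32; shift > 0; shift--) {
        temp = (clock << shift) / NSEC_PER_SEC;
        if ((temp >> 32) == 0)
            break;
    }
    uint32_t mult = (uint32_t)temp;

    /* converting a 1 ms event: cycles = (ns * mult) >> shift */
    uint64_t ns = 1000000;
    uint64_t cycles = (ns * mult) >> shift;

    printf("shift=%u mult=%u\n", shift, mult);   /* shift=32, mult=214748364 */
    printf("1 ms -> %llu cycles (~50000 expected at 50 MHz)\n",
           (unsigned long long)cycles);
    return 0;
}
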
diff --git a/arch/mips/mips-boards/generic/time.c b/arch/mips/mips-boards/generic/time.c
index 1d00b778ff1e..9d6243a8c15a 100644
--- a/arch/mips/mips-boards/generic/time.c
+++ b/arch/mips/mips-boards/generic/time.c
@@ -147,21 +147,8 @@ void __init plat_time_init(void)
147#endif 147#endif
148} 148}
149 149
150//static irqreturn_t mips_perf_interrupt(int irq, void *dev_id)
151//{
152// return perf_irq();
153//}
154
155//static struct irqaction perf_irqaction = {
156// .handler = mips_perf_interrupt,
157// .flags = IRQF_DISABLED | IRQF_PERCPU,
158// .name = "performance",
159//};
160
161void __init plat_perf_setup(void) 150void __init plat_perf_setup(void)
162{ 151{
163// struct irqaction *irq = &perf_irqaction;
164
165 cp0_perfcount_irq = -1; 152 cp0_perfcount_irq = -1;
166 153
167#ifdef MSC01E_INT_BASE 154#ifdef MSC01E_INT_BASE
diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c
index 98b5e5bac02e..b1b40527658b 100644
--- a/arch/mips/mm/dma-default.c
+++ b/arch/mips/mm/dma-default.c
@@ -13,6 +13,7 @@
13#include <linux/mm.h> 13#include <linux/mm.h>
14#include <linux/module.h> 14#include <linux/module.h>
15#include <linux/string.h> 15#include <linux/string.h>
16#include <linux/scatterlist.h>
16 17
17#include <asm/cache.h> 18#include <asm/cache.h>
18#include <asm/io.h> 19#include <asm/io.h>
@@ -165,12 +166,11 @@ int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
165 for (i = 0; i < nents; i++, sg++) { 166 for (i = 0; i < nents; i++, sg++) {
166 unsigned long addr; 167 unsigned long addr;
167 168
168 addr = (unsigned long) page_address(sg->page); 169 addr = (unsigned long) sg_virt(sg);
169 if (!plat_device_is_coherent(dev) && addr) 170 if (!plat_device_is_coherent(dev) && addr)
170 __dma_sync(addr + sg->offset, sg->length, direction); 171 __dma_sync(addr, sg->length, direction);
171 sg->dma_address = plat_map_dma_mem(dev, 172 sg->dma_address = plat_map_dma_mem(dev,
172 (void *)(addr + sg->offset), 173 (void *)addr, sg->length);
173 sg->length);
174 } 174 }
175 175
176 return nents; 176 return nents;
@@ -223,10 +223,9 @@ void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
223 for (i = 0; i < nhwentries; i++, sg++) { 223 for (i = 0; i < nhwentries; i++, sg++) {
224 if (!plat_device_is_coherent(dev) && 224 if (!plat_device_is_coherent(dev) &&
225 direction != DMA_TO_DEVICE) { 225 direction != DMA_TO_DEVICE) {
226 addr = (unsigned long) page_address(sg->page); 226 addr = (unsigned long) sg_virt(sg);
227 if (addr) 227 if (addr)
228 __dma_sync(addr + sg->offset, sg->length, 228 __dma_sync(addr, sg->length, direction);
229 direction);
230 } 229 }
231 plat_unmap_dma_mem(sg->dma_address); 230 plat_unmap_dma_mem(sg->dma_address);
232 } 231 }
@@ -304,7 +303,7 @@ void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
304 /* Make sure that gcc doesn't leave the empty loop body. */ 303 /* Make sure that gcc doesn't leave the empty loop body. */
305 for (i = 0; i < nelems; i++, sg++) { 304 for (i = 0; i < nelems; i++, sg++) {
306 if (cpu_is_noncoherent_r10000(dev)) 305 if (cpu_is_noncoherent_r10000(dev))
307 __dma_sync((unsigned long)page_address(sg->page), 306 __dma_sync((unsigned long)page_address(sg_page(sg)),
308 sg->length, direction); 307 sg->length, direction);
309 plat_unmap_dma_mem(sg->dma_address); 308 plat_unmap_dma_mem(sg->dma_address);
310 } 309 }
@@ -322,7 +321,7 @@ void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nele
322 /* Make sure that gcc doesn't leave the empty loop body. */ 321 /* Make sure that gcc doesn't leave the empty loop body. */
323 for (i = 0; i < nelems; i++, sg++) { 322 for (i = 0; i < nelems; i++, sg++) {
324 if (!plat_device_is_coherent(dev)) 323 if (!plat_device_is_coherent(dev))
325 __dma_sync((unsigned long)page_address(sg->page), 324 __dma_sync((unsigned long)page_address(sg_page(sg)),
326 sg->length, direction); 325 sg->length, direction);
327 plat_unmap_dma_mem(sg->dma_address); 326 plat_unmap_dma_mem(sg->dma_address);
328 } 327 }
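
One detail worth calling out in the scatterlist conversion above: sg_virt() already folds the intra-page offset into the address it returns (in the kernel it is essentially page_address(sg_page(sg)) + sg->offset), which is why the explicit "+ sg->offset" disappears from the __dma_sync() and plat_map_dma_mem() arguments. A toy model of that identity, with a cut-down structure standing in for the real scatterlist:

#include <stdio.h>
#include <stdint.h>

/* toy stand-ins; the real struct scatterlist and helpers live in the kernel */
struct toy_page { unsigned char data[4096]; };

struct toy_scatterlist {
    struct toy_page *page;
    unsigned int offset;
    unsigned int length;
};

static void *toy_page_address(struct toy_page *p) { return p->data; }

static void *toy_sg_virt(struct toy_scatterlist *sg)
{
    /* offset is already included, like the kernel's sg_virt() */
    return (unsigned char *)toy_page_address(sg->page) + sg->offset;
}

int main(void)
{
    static struct toy_page page;
    struct toy_scatterlist sg = { .page = &page, .offset = 128, .length = 256 };

    unsigned char *old_style = (unsigned char *)toy_page_address(sg.page) + sg.offset;
    unsigned char *new_style = toy_sg_virt(&sg);

    /* adding sg.offset again on top of sg_virt() would overshoot by 128 */
    printf("same address: %s\n", old_style == new_style ? "yes" : "no");
    printf("double-added offset is off by %td bytes\n",
           (new_style + sg.offset) - old_style);
    return 0;
}
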
diff --git a/arch/mips/sgi-ip27/ip27-init.c b/arch/mips/sgi-ip27/ip27-init.c
index 681b593071cb..3305fa9ae66d 100644
--- a/arch/mips/sgi-ip27/ip27-init.c
+++ b/arch/mips/sgi-ip27/ip27-init.c
@@ -110,7 +110,7 @@ static void __init per_hub_init(cnodeid_t cnode)
110 } 110 }
111} 111}
112 112
113void __init per_cpu_init(void) 113void __cpuinit per_cpu_init(void)
114{ 114{
115 int cpu = smp_processor_id(); 115 int cpu = smp_processor_id();
116 int slice = LOCAL_HUB_L(PI_CPU_NUM); 116 int slice = LOCAL_HUB_L(PI_CPU_NUM);
diff --git a/arch/mips/sgi-ip27/ip27-timer.c b/arch/mips/sgi-ip27/ip27-timer.c
index d467bf4f6c3f..f5dccf01da11 100644
--- a/arch/mips/sgi-ip27/ip27-timer.c
+++ b/arch/mips/sgi-ip27/ip27-timer.c
@@ -111,8 +111,24 @@ unsigned long read_persistent_clock(void)
111 return mktime(year, month, date, hour, min, sec); 111 return mktime(year, month, date, hour, min, sec);
112} 112}
113 113
114static int rt_set_next_event(unsigned long delta, 114static void enable_rt_irq(unsigned int irq)
115 struct clock_event_device *evt) 115{
116}
117
118static void disable_rt_irq(unsigned int irq)
119{
120}
121
122static struct irq_chip rt_irq_type = {
123 .name = "SN HUB RT timer",
124 .ack = disable_rt_irq,
125 .mask = disable_rt_irq,
126 .mask_ack = disable_rt_irq,
127 .unmask = enable_rt_irq,
128 .eoi = enable_rt_irq,
129};
130
131static int rt_next_event(unsigned long delta, struct clock_event_device *evt)
116{ 132{
117 unsigned int cpu = smp_processor_id(); 133 unsigned int cpu = smp_processor_id();
118 int slice = cputoslice(cpu) == 0; 134 int slice = cputoslice(cpu) == 0;
@@ -129,50 +145,24 @@ static void rt_set_mode(enum clock_event_mode mode,
129 struct clock_event_device *evt) 145 struct clock_event_device *evt)
130{ 146{
131 switch (mode) { 147 switch (mode) {
132 case CLOCK_EVT_MODE_PERIODIC: 148 case CLOCK_EVT_MODE_ONESHOT:
133 /* The only mode supported */ 149 /* The only mode supported */
134 break; 150 break;
135 151
152 case CLOCK_EVT_MODE_PERIODIC:
136 case CLOCK_EVT_MODE_UNUSED: 153 case CLOCK_EVT_MODE_UNUSED:
137 case CLOCK_EVT_MODE_SHUTDOWN: 154 case CLOCK_EVT_MODE_SHUTDOWN:
138 case CLOCK_EVT_MODE_ONESHOT:
139 case CLOCK_EVT_MODE_RESUME: 155 case CLOCK_EVT_MODE_RESUME:
140 /* Nothing to do */ 156 /* Nothing to do */
141 break; 157 break;
142 } 158 }
143} 159}
144 160
145struct clock_event_device rt_clock_event_device = {
146 .name = "HUB-RT",
147 .features = CLOCK_EVT_FEAT_ONESHOT,
148
149 .rating = 300,
150 .set_next_event = rt_set_next_event,
151 .set_mode = rt_set_mode,
152};
153
154static void enable_rt_irq(unsigned int irq)
155{
156}
157
158static void disable_rt_irq(unsigned int irq)
159{
160}
161
162static struct irq_chip rt_irq_type = {
163 .name = "SN HUB RT timer",
164 .ack = disable_rt_irq,
165 .mask = disable_rt_irq,
166 .mask_ack = disable_rt_irq,
167 .unmask = enable_rt_irq,
168 .eoi = enable_rt_irq,
169};
170
171unsigned int rt_timer_irq; 161unsigned int rt_timer_irq;
172 162
173static irqreturn_t ip27_rt_timer_interrupt(int irq, void *dev_id) 163static irqreturn_t hub_rt_counter_handler(int irq, void *dev_id)
174{ 164{
175 struct clock_event_device *cd = &rt_clock_event_device; 165 struct clock_event_device *cd = dev_id;
176 unsigned int cpu = smp_processor_id(); 166 unsigned int cpu = smp_processor_id();
177 int slice = cputoslice(cpu) == 0; 167 int slice = cputoslice(cpu) == 0;
178 168
@@ -182,11 +172,10 @@ static irqreturn_t ip27_rt_timer_interrupt(int irq, void *dev_id)
182 return IRQ_HANDLED; 172 return IRQ_HANDLED;
183} 173}
184 174
185static struct irqaction rt_irqaction = { 175struct irqaction hub_rt_irqaction = {
186 .handler = (irq_handler_t) ip27_rt_timer_interrupt, 176 .handler = hub_rt_counter_handler,
187 .flags = IRQF_DISABLED, 177 .flags = IRQF_DISABLED | IRQF_PERCPU,
188 .mask = CPU_MASK_NONE, 178 .name = "hub-rt",
189 .name = "timer"
190}; 179};
191 180
192/* 181/*
@@ -200,32 +189,48 @@ static struct irqaction rt_irqaction = {
200#define NSEC_PER_CYCLE 800 189#define NSEC_PER_CYCLE 800
201#define CYCLES_PER_SEC (NSEC_PER_SEC / NSEC_PER_CYCLE) 190#define CYCLES_PER_SEC (NSEC_PER_SEC / NSEC_PER_CYCLE)
202 191
203static void __init ip27_rt_clock_event_init(void) 192static DEFINE_PER_CPU(struct clock_event_device, hub_rt_clockevent);
193static DEFINE_PER_CPU(char [11], hub_rt_name);
194
195static void __cpuinit hub_rt_clock_event_init(void)
204{ 196{
205 struct clock_event_device *cd = &rt_clock_event_device;
206 unsigned int cpu = smp_processor_id(); 197 unsigned int cpu = smp_processor_id();
207 int irq = allocate_irqno(); 198 struct clock_event_device *cd = &per_cpu(hub_rt_clockevent, cpu);
208 199 unsigned char *name = per_cpu(hub_rt_name, cpu);
209 if (irq < 0) 200 int irq = rt_timer_irq;
210 panic("Can't allocate interrupt number for timer interrupt"); 201
211 202 sprintf(name, "hub-rt %d", cpu);
212 rt_timer_irq = irq; 203 cd->name = "HUB-RT",
213 204 cd->features = CLOCK_EVT_FEAT_ONESHOT,
205 clockevent_set_clock(cd, CYCLES_PER_SEC);
206 cd->max_delta_ns = clockevent_delta2ns(0xfffffffffffff, cd);
207 cd->min_delta_ns = clockevent_delta2ns(0x300, cd);
208 cd->rating = 200,
214 cd->irq = irq, 209 cd->irq = irq,
215 cd->cpumask = cpumask_of_cpu(cpu), 210 cd->cpumask = cpumask_of_cpu(cpu),
216 211 cd->rating = 300,
217 /* 212 cd->set_next_event = rt_next_event,
218 * Calculate the min / max delta 213 cd->set_mode = rt_set_mode,
219 */
220 cd->mult =
221 div_sc((unsigned long) CYCLES_PER_SEC, NSEC_PER_SEC, 32);
222 cd->shift = 32;
223 cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd);
224 cd->min_delta_ns = clockevent_delta2ns(0x300, cd);
225 clockevents_register_device(cd); 214 clockevents_register_device(cd);
215}
216
217static void __init hub_rt_clock_event_global_init(void)
218{
219 unsigned int irq;
220
221 do {
222 smp_wmb();
223 irq = rt_timer_irq;
224 if (irq)
225 break;
226
227 irq = allocate_irqno();
228 if (irq < 0)
229 panic("Allocation of irq number for timer failed");
230 } while (xchg(&rt_timer_irq, irq));
226 231
227 set_irq_chip_and_handler(irq, &rt_irq_type, handle_percpu_irq); 232 set_irq_chip_and_handler(irq, &rt_irq_type, handle_percpu_irq);
228 setup_irq(irq, &rt_irqaction); 233 setup_irq(irq, &hub_rt_irqaction);
229} 234}
230 235
231static cycle_t hub_rt_read(void) 236static cycle_t hub_rt_read(void)
@@ -233,27 +238,29 @@ static cycle_t hub_rt_read(void)
233 return REMOTE_HUB_L(cputonasid(0), PI_RT_COUNT); 238 return REMOTE_HUB_L(cputonasid(0), PI_RT_COUNT);
234} 239}
235 240
236struct clocksource ht_rt_clocksource = { 241struct clocksource hub_rt_clocksource = {
237 .name = "HUB-RT", 242 .name = "HUB-RT",
238 .rating = 200, 243 .rating = 200,
239 .read = hub_rt_read, 244 .read = hub_rt_read,
240 .mask = CLOCKSOURCE_MASK(52), 245 .mask = CLOCKSOURCE_MASK(52),
241 .shift = 32,
242 .flags = CLOCK_SOURCE_IS_CONTINUOUS, 246 .flags = CLOCK_SOURCE_IS_CONTINUOUS,
243}; 247};
244 248
245static void __init ip27_rt_clocksource_init(void) 249static void __init hub_rt_clocksource_init(void)
246{ 250{
247 clocksource_register(&ht_rt_clocksource); 251 struct clocksource *cs = &hub_rt_clocksource;
252
253 clocksource_set_clock(cs, CYCLES_PER_SEC);
254 clocksource_register(cs);
248} 255}
249 256
250void __init plat_time_init(void) 257void __init plat_time_init(void)
251{ 258{
252 ip27_rt_clock_event_init(); 259 hub_rt_clocksource_init();
253 ip27_rt_clocksource_init(); 260 hub_rt_clock_event_global_init();
254} 261}
255 262
256void __init cpu_time_init(void) 263void __cpuinit cpu_time_init(void)
257{ 264{
258 lboard_t *board; 265 lboard_t *board;
259 klcpu_t *cpu; 266 klcpu_t *cpu;
@@ -271,6 +278,7 @@ void __init cpu_time_init(void)
271 278
272 printk("CPU %d clock is %dMHz.\n", smp_processor_id(), cpu->cpu_speed); 279 printk("CPU %d clock is %dMHz.\n", smp_processor_id(), cpu->cpu_speed);
273 280
281 hub_rt_clock_event_init();
274 set_c0_status(SRB_TIMOCLK); 282 set_c0_status(SRB_TIMOCLK);
275} 283}
276 284
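
Two structural changes above: the HUB-RT clock_event_device and its name buffer became per-CPU, and the shared interrupt handler now finds its device through dev_id instead of the single global rt_clock_event_device. A compact user-space analogue of the dev_id half, with an array playing the role of the per-CPU area and every name below being a stand-in:

#include <stdio.h>

#define NR_CPUS 2

struct toy_clockevent {
    char name[16];
    void (*event_handler)(struct toy_clockevent *);
};

static struct toy_clockevent per_cpu_cd[NR_CPUS];   /* "DEFINE_PER_CPU" */

static void tick_handle(struct toy_clockevent *cd)
{
    printf("tick on %s\n", cd->name);
}

/* shared handler: no global device, the right one arrives as dev_id */
static int hub_rt_counter_handler(int irq, void *dev_id)
{
    struct toy_clockevent *cd = dev_id;

    (void)irq;
    cd->event_handler(cd);
    return 1;                                       /* IRQ_HANDLED */
}

int main(void)
{
    for (int cpu = 0; cpu < NR_CPUS; cpu++) {
        snprintf(per_cpu_cd[cpu].name, sizeof(per_cpu_cd[cpu].name),
                 "hub-rt %d", cpu);
        per_cpu_cd[cpu].event_handler = tick_handle;
        /* registering would pass &per_cpu_cd[cpu] as the handler's dev_id */
    }

    /* simulate one timer interrupt per cpu */
    for (int cpu = 0; cpu < NR_CPUS; cpu++)
        hub_rt_counter_handler(64, &per_cpu_cd[cpu]);
    return 0;
}
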
diff --git a/arch/mips/sibyte/bcm1480/irq.c b/arch/mips/sibyte/bcm1480/irq.c
index 7aa79bf63c4a..10299bafeab7 100644
--- a/arch/mips/sibyte/bcm1480/irq.c
+++ b/arch/mips/sibyte/bcm1480/irq.c
@@ -452,6 +452,43 @@ static void bcm1480_kgdb_interrupt(void)
452 452
453extern void bcm1480_mailbox_interrupt(void); 453extern void bcm1480_mailbox_interrupt(void);
454 454
455static inline void dispatch_ip4(void)
456{
457 int cpu = smp_processor_id();
458 int irq = K_BCM1480_INT_TIMER_0 + cpu;
459
460 /* Reset the timer */
461 __raw_writeq(M_SCD_TIMER_ENABLE|M_SCD_TIMER_MODE_CONTINUOUS,
462 IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG)));
463
464 do_IRQ(irq);
465}
466
467static inline void dispatch_ip2(void)
468{
469 unsigned long long mask_h, mask_l;
470 unsigned int cpu = smp_processor_id();
471 unsigned long base;
472
473 /*
474 * Default...we've hit an IP[2] interrupt, which means we've got to
475 * check the 1480 interrupt registers to figure out what to do. Need
476 * to detect which CPU we're on, now that smp_affinity is supported.
477 */
478 base = A_BCM1480_IMR_MAPPER(cpu);
479 mask_h = __raw_readq(
480 IOADDR(base + R_BCM1480_IMR_INTERRUPT_STATUS_BASE_H));
481 mask_l = __raw_readq(
482 IOADDR(base + R_BCM1480_IMR_INTERRUPT_STATUS_BASE_L));
483
484 if (mask_h) {
485 if (mask_h ^ 1)
486 do_IRQ(fls64(mask_h) - 1);
487 else if (mask_l)
488 do_IRQ(63 + fls64(mask_l));
489 }
490}
491
455asmlinkage void plat_irq_dispatch(void) 492asmlinkage void plat_irq_dispatch(void)
456{ 493{
457 unsigned int pending; 494 unsigned int pending;
@@ -469,17 +506,8 @@ asmlinkage void plat_irq_dispatch(void)
469 else 506 else
470#endif 507#endif
471 508
472 if (pending & CAUSEF_IP4) { 509 if (pending & CAUSEF_IP4)
473 int cpu = smp_processor_id(); 510 dispatch_ip4();
474 int irq = K_BCM1480_INT_TIMER_0 + cpu;
475
476 /* Reset the timer */
477 __raw_writeq(M_SCD_TIMER_ENABLE|M_SCD_TIMER_MODE_CONTINUOUS,
478 IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG)));
479
480 do_IRQ(irq);
481 }
482
483#ifdef CONFIG_SMP 511#ifdef CONFIG_SMP
484 else if (pending & CAUSEF_IP3) 512 else if (pending & CAUSEF_IP3)
485 bcm1480_mailbox_interrupt(); 513 bcm1480_mailbox_interrupt();
@@ -490,27 +518,6 @@ asmlinkage void plat_irq_dispatch(void)
490 bcm1480_kgdb_interrupt(); /* KGDB (uart 1) */ 518 bcm1480_kgdb_interrupt(); /* KGDB (uart 1) */
491#endif 519#endif
492 520
493 else if (pending & CAUSEF_IP2) { 521 else if (pending & CAUSEF_IP2)
494 unsigned long long mask_h, mask_l; 522 dispatch_ip2();
495 unsigned long base;
496
497 /*
498 * Default...we've hit an IP[2] interrupt, which means we've
499 * got to check the 1480 interrupt registers to figure out what
500 * to do. Need to detect which CPU we're on, now that
501 * smp_affinity is supported.
502 */
503 base = A_BCM1480_IMR_MAPPER(smp_processor_id());
504 mask_h = __raw_readq(
505 IOADDR(base + R_BCM1480_IMR_INTERRUPT_STATUS_BASE_H));
506 mask_l = __raw_readq(
507 IOADDR(base + R_BCM1480_IMR_INTERRUPT_STATUS_BASE_L));
508
509 if (mask_h) {
510 if (mask_h ^ 1)
511 do_IRQ(fls64(mask_h) - 1);
512 else
513 do_IRQ(63 + fls64(mask_l));
514 }
515 }
516} 523}
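
dispatch_ip2() above keeps the existing dispatch rule in a named helper: fls64() returns the 1-based index of the most significant set bit, so the highest-numbered pending source in the high status word wins, and the low word (consulted only when the high word is exactly bit 0) maps to IRQs 64 and up via 63 + fls64(mask_l). A stand-alone illustration, using a compiler builtin in place of the kernel's fls64():

#include <stdio.h>
#include <stdint.h>

/* same contract as the kernel's fls64(): 1-based index of the top set bit,
 * 0 for an empty word (here via the GCC/Clang builtin) */
static int toy_fls64(uint64_t x)
{
    return x ? 64 - __builtin_clzll(x) : 0;
}

static void toy_do_IRQ(int irq)
{
    printf("dispatch irq %d\n", irq);
}

static void toy_dispatch_ip2(uint64_t mask_h, uint64_t mask_l)
{
    if (mask_h) {
        if (mask_h ^ 1)                     /* anything besides bit 0 alone */
            toy_do_IRQ(toy_fls64(mask_h) - 1);
        else if (mask_l)                    /* only bit 0 set: use low word */
            toy_do_IRQ(63 + toy_fls64(mask_l));
    }
}

int main(void)
{
    toy_dispatch_ip2(1ULL << 20, 0);                 /* -> irq 20 */
    toy_dispatch_ip2((1ULL << 20) | (1ULL << 5), 0); /* -> irq 20, highest wins */
    toy_dispatch_ip2(1, 1ULL << 3);                  /* -> irq 63 + 4 = 67 */
    return 0;
}
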
diff --git a/arch/mips/sibyte/bcm1480/smp.c b/arch/mips/sibyte/bcm1480/smp.c
index 02b266a31c46..436ba78359ab 100644
--- a/arch/mips/sibyte/bcm1480/smp.c
+++ b/arch/mips/sibyte/bcm1480/smp.c
@@ -58,7 +58,7 @@ static void *mailbox_0_regs[] = {
58/* 58/*
59 * SMP init and finish on secondary CPUs 59 * SMP init and finish on secondary CPUs
60 */ 60 */
61void bcm1480_smp_init(void) 61void __cpuinit bcm1480_smp_init(void)
62{ 62{
63 unsigned int imask = STATUSF_IP4 | STATUSF_IP3 | STATUSF_IP2 | 63 unsigned int imask = STATUSF_IP4 | STATUSF_IP3 | STATUSF_IP2 |
64 STATUSF_IP1 | STATUSF_IP0; 64 STATUSF_IP1 | STATUSF_IP0;
@@ -67,7 +67,7 @@ void bcm1480_smp_init(void)
67 change_c0_status(ST0_IM, imask); 67 change_c0_status(ST0_IM, imask);
68} 68}
69 69
70void bcm1480_smp_finish(void) 70void __cpuinit bcm1480_smp_finish(void)
71{ 71{
72 extern void sb1480_clockevent_init(void); 72 extern void sb1480_clockevent_init(void);
73 73
diff --git a/arch/mips/sibyte/bcm1480/time.c b/arch/mips/sibyte/bcm1480/time.c
index c730744aa474..610f0253954d 100644
--- a/arch/mips/sibyte/bcm1480/time.c
+++ b/arch/mips/sibyte/bcm1480/time.c
@@ -15,22 +15,12 @@
15 * along with this program; if not, write to the Free Software 15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 16 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17 */ 17 */
18
19/*
20 * These are routines to set up and handle interrupts from the
21 * bcm1480 general purpose timer 0. We're using the timer as a
22 * system clock, so we set it up to run at 100 Hz. On every
23 * interrupt, we update our idea of what the time of day is,
24 * then call do_timer() in the architecture-independent kernel
25 * code to do general bookkeeping (e.g. update jiffies, run
26 * bottom halves, etc.)
27 */
28#include <linux/clockchips.h> 18#include <linux/clockchips.h>
29#include <linux/interrupt.h> 19#include <linux/interrupt.h>
20#include <linux/irq.h>
30#include <linux/percpu.h> 21#include <linux/percpu.h>
31#include <linux/spinlock.h> 22#include <linux/spinlock.h>
32 23
33#include <asm/irq.h>
34#include <asm/addrspace.h> 24#include <asm/addrspace.h>
35#include <asm/time.h> 25#include <asm/time.h>
36#include <asm/io.h> 26#include <asm/io.h>
@@ -47,33 +37,10 @@
47#define IMR_IP3_VAL K_BCM1480_INT_MAP_I1 37#define IMR_IP3_VAL K_BCM1480_INT_MAP_I1
48#define IMR_IP4_VAL K_BCM1480_INT_MAP_I2 38#define IMR_IP4_VAL K_BCM1480_INT_MAP_I2
49 39
50#ifdef CONFIG_SIMULATION
51#define BCM1480_HPT_VALUE 50000
52#else
53#define BCM1480_HPT_VALUE 1000000
54#endif
55
56extern int bcm1480_steal_irq(int irq); 40extern int bcm1480_steal_irq(int irq);
57 41
58void __init plat_time_init(void)
59{
60 unsigned int cpu = smp_processor_id();
61 unsigned int irq = K_BCM1480_INT_TIMER_0 + cpu;
62
63 BUG_ON(cpu > 3); /* Only have 4 general purpose timers */
64
65 bcm1480_mask_irq(cpu, irq);
66
67 /* Map the timer interrupt to ip[4] of this cpu */
68 __raw_writeq(IMR_IP4_VAL, IOADDR(A_BCM1480_IMR_REGISTER(cpu, R_BCM1480_IMR_INTERRUPT_MAP_BASE_H)
69 + (irq<<3)));
70
71 bcm1480_unmask_irq(cpu, irq);
72 bcm1480_steal_irq(irq);
73}
74
75/* 42/*
76 * The general purpose timer ticks at 1 Mhz independent if 43 * The general purpose timer ticks at 1MHz independent if
77 * the rest of the system 44 * the rest of the system
78 */ 45 */
79static void sibyte_set_mode(enum clock_event_mode mode, 46static void sibyte_set_mode(enum clock_event_mode mode,
@@ -88,7 +55,7 @@ static void sibyte_set_mode(enum clock_event_mode mode,
88 switch (mode) { 55 switch (mode) {
89 case CLOCK_EVT_MODE_PERIODIC: 56 case CLOCK_EVT_MODE_PERIODIC:
90 __raw_writeq(0, timer_cfg); 57 __raw_writeq(0, timer_cfg);
91 __raw_writeq(BCM1480_HPT_VALUE / HZ - 1, timer_init); 58 __raw_writeq((V_SCD_TIMER_FREQ / HZ) - 1, timer_init);
92 __raw_writeq(M_SCD_TIMER_ENABLE | M_SCD_TIMER_MODE_CONTINUOUS, 59 __raw_writeq(M_SCD_TIMER_ENABLE | M_SCD_TIMER_MODE_CONTINUOUS,
93 timer_cfg); 60 timer_cfg);
94 break; 61 break;
@@ -121,80 +88,96 @@ static int sibyte_next_event(unsigned long delta, struct clock_event_device *cd)
121 return res; 88 return res;
122} 89}
123 90
124static DEFINE_PER_CPU(struct clock_event_device, sibyte_hpt_clockevent);
125
126static irqreturn_t sibyte_counter_handler(int irq, void *dev_id) 91static irqreturn_t sibyte_counter_handler(int irq, void *dev_id)
127{ 92{
128 unsigned int cpu = smp_processor_id(); 93 unsigned int cpu = smp_processor_id();
129 struct clock_event_device *cd = &per_cpu(sibyte_hpt_clockevent, cpu); 94 struct clock_event_device *cd = dev_id;
95 void __iomem *timer_cfg;
96
97 timer_cfg = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG));
130 98
131 /* Reset the timer */ 99 /* Reset the timer */
132 __raw_writeq(M_SCD_TIMER_ENABLE | M_SCD_TIMER_MODE_CONTINUOUS, 100 __raw_writeq(M_SCD_TIMER_ENABLE | M_SCD_TIMER_MODE_CONTINUOUS,
133 IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG))); 101 timer_cfg);
134 cd->event_handler(cd); 102 cd->event_handler(cd);
135 103
136 return IRQ_HANDLED; 104 return IRQ_HANDLED;
137} 105}
138 106
139static struct irqaction sibyte_counter_irqaction = { 107static DEFINE_PER_CPU(struct clock_event_device, sibyte_hpt_clockevent);
140 .handler = sibyte_counter_handler, 108static DEFINE_PER_CPU(struct irqaction, sibyte_hpt_irqaction);
141 .flags = IRQF_DISABLED | IRQF_PERCPU, 109static DEFINE_PER_CPU(char [18], sibyte_hpt_name);
142 .name = "timer",
143};
144 110
145/*
146 * This interrupt is "special" in that it doesn't use the request_irq
147 * way to hook the irq line. The timer interrupt is initialized early
148 * enough to make this a major pain, and it's also firing enough to
149 * warrant a bit of special case code. bcm1480_timer_interrupt is
150 * called directly from irq_handler.S when IP[4] is set during an
151 * interrupt
152 */
153void __cpuinit sb1480_clockevent_init(void) 111void __cpuinit sb1480_clockevent_init(void)
154{ 112{
155 unsigned int cpu = smp_processor_id(); 113 unsigned int cpu = smp_processor_id();
156 unsigned int irq = K_BCM1480_INT_TIMER_0 + cpu; 114 unsigned int irq = K_BCM1480_INT_TIMER_0 + cpu;
115 struct irqaction *action = &per_cpu(sibyte_hpt_irqaction, cpu);
157 struct clock_event_device *cd = &per_cpu(sibyte_hpt_clockevent, cpu); 116 struct clock_event_device *cd = &per_cpu(sibyte_hpt_clockevent, cpu);
117 unsigned char *name = per_cpu(sibyte_hpt_name, cpu);
118
119 BUG_ON(cpu > 3); /* Only have 4 general purpose timers */
158 120
159 cd->name = "bcm1480-counter"; 121 sprintf(name, "bcm1480-counter %d", cpu);
122 cd->name = name;
160 cd->features = CLOCK_EVT_FEAT_PERIODIC | 123 cd->features = CLOCK_EVT_FEAT_PERIODIC |
161 CLOCK_EVT_MODE_ONESHOT; 124 CLOCK_EVT_MODE_ONESHOT;
125 clockevent_set_clock(cd, V_SCD_TIMER_FREQ);
126 cd->max_delta_ns = clockevent_delta2ns(0x7fffff, cd);
127 cd->min_delta_ns = clockevent_delta2ns(1, cd);
128 cd->rating = 200;
129 cd->irq = irq;
130 cd->cpumask = cpumask_of_cpu(cpu);
162 cd->set_next_event = sibyte_next_event; 131 cd->set_next_event = sibyte_next_event;
163 cd->set_mode = sibyte_set_mode; 132 cd->set_mode = sibyte_set_mode;
164 cd->irq = irq; 133 clockevents_register_device(cd);
165 clockevent_set_clock(cd, BCM1480_HPT_VALUE); 134
135 bcm1480_mask_irq(cpu, irq);
136
137 /*
138 * Map timer interrupt to IP[4] of this cpu
139 */
140 __raw_writeq(IMR_IP4_VAL,
141 IOADDR(A_BCM1480_IMR_REGISTER(cpu,
142 R_BCM1480_IMR_INTERRUPT_MAP_BASE_H) + (irq << 3)));
166 143
167 setup_irq(irq, &sibyte_counter_irqaction); 144 bcm1480_unmask_irq(cpu, irq);
145 bcm1480_steal_irq(irq);
146
147 action->handler = sibyte_counter_handler;
148 action->flags = IRQF_DISABLED | IRQF_PERCPU;
149 action->name = name;
150 action->dev_id = cd;
151 setup_irq(irq, action);
168} 152}
169 153
170static cycle_t bcm1480_hpt_read(void) 154static cycle_t bcm1480_hpt_read(void)
171{ 155{
172 /* We assume this function is called with xtime_lock held. */ 156 return (cycle_t) __raw_readq(IOADDR(A_SCD_ZBBUS_CYCLE_COUNT));
173 unsigned long count =
174 __raw_readq(IOADDR(A_SCD_TIMER_REGISTER(0, R_SCD_TIMER_CNT)));
175 return (jiffies + 1) * (BCM1480_HPT_VALUE / HZ) - count;
176} 157}
177 158
178struct clocksource bcm1480_clocksource = { 159struct clocksource bcm1480_clocksource = {
179 .name = "MIPS", 160 .name = "zbbus-cycles",
180 .rating = 200, 161 .rating = 200,
181 .read = bcm1480_hpt_read, 162 .read = bcm1480_hpt_read,
182 .mask = CLOCKSOURCE_MASK(32), 163 .mask = CLOCKSOURCE_MASK(64),
183 .shift = 32,
184 .flags = CLOCK_SOURCE_IS_CONTINUOUS, 164 .flags = CLOCK_SOURCE_IS_CONTINUOUS,
185}; 165};
186 166
187void __init sb1480_clocksource_init(void) 167void __init sb1480_clocksource_init(void)
188{ 168{
189 struct clocksource *cs = &bcm1480_clocksource; 169 struct clocksource *cs = &bcm1480_clocksource;
170 unsigned int plldiv;
171 unsigned long zbbus;
190 172
191 clocksource_set_clock(cs, BCM1480_HPT_VALUE); 173 plldiv = G_BCM1480_SYS_PLL_DIV(__raw_readq(IOADDR(A_SCD_SYSTEM_CFG)));
174 zbbus = ((plldiv >> 1) * 50000000) + ((plldiv & 1) * 25000000);
175 clocksource_set_clock(cs, zbbus);
192 clocksource_register(cs); 176 clocksource_register(cs);
193} 177}
194 178
195void __init bcm1480_hpt_setup(void) 179void __init plat_time_init(void)
196{ 180{
197 mips_hpt_frequency = BCM1480_HPT_VALUE;
198 sb1480_clocksource_init(); 181 sb1480_clocksource_init();
199 sb1480_clockevent_init(); 182 sb1480_clockevent_init();
200} 183}
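
Editor's note: the bcm1480 conversion above stops hard-coding BCM1480_HPT_VALUE and instead derives the ZBbus rate from the system PLL divider read out of A_SCD_SYSTEM_CFG in sb1480_clocksource_init(). A minimal, self-contained sketch of that arithmetic follows (plain userspace C; the plldiv value is invented for illustration, on hardware it comes from the register read shown above). The expression works out to plldiv * 25 MHz:

    /*
     * Illustrative sketch, not part of the patch: how the ZBbus clock
     * rate falls out of the PLL divider used in sb1480_clocksource_init().
     */
    #include <stdio.h>

    static unsigned long zbbus_hz(unsigned int plldiv)
    {
            /* 50 MHz per divider pair plus 25 MHz for the odd bit */
            return ((plldiv >> 1) * 50000000UL) + ((plldiv & 1) * 25000000UL);
    }

    int main(void)
    {
            /* plldiv = 10 is an example value only, not read from hardware */
            printf("%lu Hz\n", zbbus_hz(10));       /* prints 250000000 Hz */
            return 0;
    }

With plldiv = 10 the clocksource would be registered at 250 MHz; any other divider just scales in 25 MHz steps.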
diff --git a/arch/mips/sibyte/sb1250/irq.c b/arch/mips/sibyte/sb1250/irq.c
index 500d17e84c09..53780a179d1d 100644
--- a/arch/mips/sibyte/sb1250/irq.c
+++ b/arch/mips/sibyte/sb1250/irq.c
@@ -402,6 +402,22 @@ static void sb1250_kgdb_interrupt(void)
402 402
403extern void sb1250_mailbox_interrupt(void); 403extern void sb1250_mailbox_interrupt(void);
404 404
405static inline void dispatch_ip2(void)
406{
407 unsigned int cpu = smp_processor_id();
408 unsigned long long mask;
409
410 /*
411 * Default...we've hit an IP[2] interrupt, which means we've got to
412 * check the 1250 interrupt registers to figure out what to do. Need
413 * to detect which CPU we're on, now that smp_affinity is supported.
414 */
415 mask = __raw_readq(IOADDR(A_IMR_REGISTER(cpu,
416 R_IMR_INTERRUPT_STATUS_BASE)));
417 if (mask)
418 do_IRQ(fls64(mask) - 1);
419}
420
405asmlinkage void plat_irq_dispatch(void) 421asmlinkage void plat_irq_dispatch(void)
406{ 422{
407 unsigned int cpu = smp_processor_id(); 423 unsigned int cpu = smp_processor_id();
@@ -434,21 +450,8 @@ asmlinkage void plat_irq_dispatch(void)
434 sb1250_kgdb_interrupt(); 450 sb1250_kgdb_interrupt();
435#endif 451#endif
436 452
437 else if (pending & CAUSEF_IP2) { 453 else if (pending & CAUSEF_IP2)
438 unsigned long long mask; 454 dispatch_ip2();
439 455 else
440 /*
441 * Default...we've hit an IP[2] interrupt, which means we've
442 * got to check the 1250 interrupt registers to figure out what
443 * to do. Need to detect which CPU we're on, now that
444 * smp_affinity is supported.
445 */
446 mask = __raw_readq(IOADDR(A_IMR_REGISTER(smp_processor_id(),
447 R_IMR_INTERRUPT_STATUS_BASE)));
448 if (mask)
449 do_IRQ(fls64(mask) - 1);
450 else
451 spurious_interrupt();
452 } else
453 spurious_interrupt(); 456 spurious_interrupt();
454} 457}
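
Editor's note: the new dispatch_ip2() helper above reads the per-CPU interrupt status word and services the highest-numbered pending source first via do_IRQ(fls64(mask) - 1). A small userspace stand-in for that priority decode, assuming GCC's __builtin_clzll() (the status value below is made up):

    /*
     * Illustrative sketch, not from the patch: what fls64(mask) - 1
     * picks when several status bits are pending at once.
     */
    #include <stdio.h>

    static int fls64_demo(unsigned long long x)
    {
            /* kernel fls64(): position of the most significant set bit, 1-based */
            return x ? 64 - __builtin_clzll(x) : 0;
    }

    int main(void)
    {
            unsigned long long status = (1ULL << 5) | (1ULL << 12); /* fake IMR status */

            /* bits 5 and 12 pending -> fls64() is 13, so source 12 is dispatched first */
            printf("irq = %d\n", fls64_demo(status) - 1);
            return 0;
    }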
diff --git a/arch/mips/sibyte/sb1250/smp.c b/arch/mips/sibyte/sb1250/smp.c
index aaa4f30dda79..3f52c95a4eb8 100644
--- a/arch/mips/sibyte/sb1250/smp.c
+++ b/arch/mips/sibyte/sb1250/smp.c
@@ -46,7 +46,7 @@ static void *mailbox_regs[] = {
46/* 46/*
47 * SMP init and finish on secondary CPUs 47 * SMP init and finish on secondary CPUs
48 */ 48 */
49void sb1250_smp_init(void) 49void __cpuinit sb1250_smp_init(void)
50{ 50{
51 unsigned int imask = STATUSF_IP4 | STATUSF_IP3 | STATUSF_IP2 | 51 unsigned int imask = STATUSF_IP4 | STATUSF_IP3 | STATUSF_IP2 |
52 STATUSF_IP1 | STATUSF_IP0; 52 STATUSF_IP1 | STATUSF_IP0;
@@ -55,7 +55,7 @@ void sb1250_smp_init(void)
55 change_c0_status(ST0_IM, imask); 55 change_c0_status(ST0_IM, imask);
56} 56}
57 57
58void sb1250_smp_finish(void) 58void __cpuinit sb1250_smp_finish(void)
59{ 59{
60 extern void sb1250_clockevent_init(void); 60 extern void sb1250_clockevent_init(void);
61 61
diff --git a/arch/mips/sibyte/sb1250/time.c b/arch/mips/sibyte/sb1250/time.c
index 9ef54628bc9c..a41e908bc218 100644
--- a/arch/mips/sibyte/sb1250/time.c
+++ b/arch/mips/sibyte/sb1250/time.c
@@ -52,26 +52,6 @@
52 52
53extern int sb1250_steal_irq(int irq); 53extern int sb1250_steal_irq(int irq);
54 54
55static cycle_t sb1250_hpt_read(void);
56
57void __init sb1250_hpt_setup(void)
58{
59 int cpu = smp_processor_id();
60
61 if (!cpu) {
62 /* Setup hpt using timer #3 but do not enable irq for it */
63 __raw_writeq(0, IOADDR(A_SCD_TIMER_REGISTER(SB1250_HPT_NUM, R_SCD_TIMER_CFG)));
64 __raw_writeq(SB1250_HPT_VALUE,
65 IOADDR(A_SCD_TIMER_REGISTER(SB1250_HPT_NUM, R_SCD_TIMER_INIT)));
66 __raw_writeq(M_SCD_TIMER_ENABLE | M_SCD_TIMER_MODE_CONTINUOUS,
67 IOADDR(A_SCD_TIMER_REGISTER(SB1250_HPT_NUM, R_SCD_TIMER_CFG)));
68
69 mips_hpt_frequency = V_SCD_TIMER_FREQ;
70 clocksource_mips.read = sb1250_hpt_read;
71 clocksource_mips.mask = M_SCD_TIMER_INIT;
72 }
73}
74
75/* 55/*
76 * The general purpose timer ticks at 1 MHz independent of 56 * The general purpose timer ticks at 1 MHz independent of
77 * the rest of the system 57 * the rest of the system
@@ -121,18 +101,14 @@ sibyte_next_event(unsigned long delta, struct clock_event_device *evt)
121 return 0; 101 return 0;
122} 102}
123 103
124struct clock_event_device sibyte_hpt_clockevent = {
125 .name = "sb1250-counter",
126 .features = CLOCK_EVT_FEAT_PERIODIC,
127 .set_mode = sibyte_set_mode,
128 .set_next_event = sibyte_next_event,
129 .shift = 32,
130 .irq = 0,
131};
132
133static irqreturn_t sibyte_counter_handler(int irq, void *dev_id) 104static irqreturn_t sibyte_counter_handler(int irq, void *dev_id)
134{ 105{
135 struct clock_event_device *cd = &sibyte_hpt_clockevent; 106 unsigned int cpu = smp_processor_id();
107 struct clock_event_device *cd = dev_id;
108
109 /* ACK interrupt */
110 ____raw_writeq(M_SCD_TIMER_ENABLE | M_SCD_TIMER_MODE_CONTINUOUS,
111 IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG)));
136 112
137 cd->event_handler(cd); 113 cd->event_handler(cd);
138 114
@@ -145,15 +121,35 @@ static struct irqaction sibyte_irqaction = {
145 .name = "timer", 121 .name = "timer",
146}; 122};
147 123
124static DEFINE_PER_CPU(struct clock_event_device, sibyte_hpt_clockevent);
125static DEFINE_PER_CPU(struct irqaction, sibyte_hpt_irqaction);
126static DEFINE_PER_CPU(char [18], sibyte_hpt_name);
127
148void __cpuinit sb1250_clockevent_init(void) 128void __cpuinit sb1250_clockevent_init(void)
149{ 129{
150 struct clock_event_device *cd = &sibyte_hpt_clockevent;
151 unsigned int cpu = smp_processor_id(); 130 unsigned int cpu = smp_processor_id();
152 int irq = K_INT_TIMER_0 + cpu; 131 unsigned int irq = K_INT_TIMER_0 + cpu;
132 struct irqaction *action = &per_cpu(sibyte_hpt_irqaction, cpu);
133 struct clock_event_device *cd = &per_cpu(sibyte_hpt_clockevent, cpu);
134 unsigned char *name = per_cpu(sibyte_hpt_name, cpu);
153 135
154 /* Only have 4 general purpose timers, and we use the last one as the hpt */ 136 /* Only have 4 general purpose timers, and we use the last one as the hpt */
155 BUG_ON(cpu > 2); 137 BUG_ON(cpu > 2);
156 138
139 sprintf(name, "bcm1480-counter %d", cpu);
140 cd->name = name;
141 cd->features = CLOCK_EVT_FEAT_PERIODIC |
142 CLOCK_EVT_MODE_ONESHOT;
143 clockevent_set_clock(cd, V_SCD_TIMER_FREQ);
144 cd->max_delta_ns = clockevent_delta2ns(0x7fffff, cd);
145 cd->min_delta_ns = clockevent_delta2ns(1, cd);
146 cd->rating = 200;
147 cd->irq = irq;
148 cd->cpumask = cpumask_of_cpu(cpu);
149 cd->set_next_event = sibyte_next_event;
150 cd->set_mode = sibyte_set_mode;
151 clockevents_register_device(cd);
152
157 sb1250_mask_irq(cpu, irq); 153 sb1250_mask_irq(cpu, irq);
158 154
159 /* Map the timer interrupt to ip[4] of this cpu */ 155 /* Map the timer interrupt to ip[4] of this cpu */
@@ -165,17 +161,11 @@ void __cpuinit sb1250_clockevent_init(void)
165 sb1250_unmask_irq(cpu, irq); 161 sb1250_unmask_irq(cpu, irq);
166 sb1250_steal_irq(irq); 162 sb1250_steal_irq(irq);
167 163
168 /* 164 action->handler = sibyte_counter_handler;
169 * This interrupt is "special" in that it doesn't use the request_irq 165 action->flags = IRQF_DISABLED | IRQF_PERCPU;
170 * way to hook the irq line. The timer interrupt is initialized early 166 action->name = name;
171 * enough to make this a major pain, and it's also firing enough to 167 action->dev_id = cd;
172 * warrant a bit of special case code. sb1250_timer_interrupt is
173 * called directly from irq_handler.S when IP[4] is set during an
174 * interrupt
175 */
176 setup_irq(irq, &sibyte_irqaction); 168 setup_irq(irq, &sibyte_irqaction);
177
178 clockevents_register_device(cd);
179} 169}
180 170
181/* 171/*
@@ -195,8 +185,7 @@ struct clocksource bcm1250_clocksource = {
195 .name = "MIPS", 185 .name = "MIPS",
196 .rating = 200, 186 .rating = 200,
197 .read = sb1250_hpt_read, 187 .read = sb1250_hpt_read,
198 .mask = CLOCKSOURCE_MASK(32), 188 .mask = CLOCKSOURCE_MASK(23),
199 .shift = 32,
200 .flags = CLOCK_SOURCE_IS_CONTINUOUS, 189 .flags = CLOCK_SOURCE_IS_CONTINUOUS,
201}; 190};
202 191
@@ -204,6 +193,17 @@ void __init sb1250_clocksource_init(void)
204{ 193{
205 struct clocksource *cs = &bcm1250_clocksource; 194 struct clocksource *cs = &bcm1250_clocksource;
206 195
196 /* Setup hpt using timer #3 but do not enable irq for it */
197 __raw_writeq(0,
198 IOADDR(A_SCD_TIMER_REGISTER(SB1250_HPT_NUM,
199 R_SCD_TIMER_CFG)));
200 __raw_writeq(SB1250_HPT_VALUE,
201 IOADDR(A_SCD_TIMER_REGISTER(SB1250_HPT_NUM,
202 R_SCD_TIMER_INIT)));
203 __raw_writeq(M_SCD_TIMER_ENABLE | M_SCD_TIMER_MODE_CONTINUOUS,
204 IOADDR(A_SCD_TIMER_REGISTER(SB1250_HPT_NUM,
205 R_SCD_TIMER_CFG)));
206
207 clocksource_set_clock(cs, V_SCD_TIMER_FREQ); 207 clocksource_set_clock(cs, V_SCD_TIMER_FREQ);
208 clocksource_register(cs); 208 clocksource_register(cs);
209} 209}
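
Editor's note on the numbers behind the sb1250 changes above: the SCD timers tick at 1 MHz (per the comment in this file), so periodic mode reloads (V_SCD_TIMER_FREQ / HZ) - 1, and the 23-bit counter (hence CLOCKSOURCE_MASK(23) and the 0x7fffff max delta) caps how far ahead a one-shot event can land. A sketch with example values; HZ = 250 is an assumption here, it is a kernel configuration choice:

    /* Illustrative numbers only, not part of the patch. */
    #include <stdio.h>

    #define V_SCD_TIMER_FREQ 1000000UL  /* 1 MHz, per the comment in the file */
    #define HZ 250                      /* example value; a kernel config choice */

    int main(void)
    {
            /* value written to timer_init for periodic mode: 3999 ticks */
            printf("periodic reload: %lu ticks\n", V_SCD_TIMER_FREQ / HZ - 1);
            /* 0x7fffff ticks at 1 MHz: roughly 8.39 seconds */
            printf("max one-shot:    %.2f s\n", (double)0x7fffff / V_SCD_TIMER_FREQ);
            return 0;
    }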
diff --git a/arch/parisc/kernel/pci-dma.c b/arch/parisc/kernel/pci-dma.c
index 41f8e321e34c..9448d4e91142 100644
--- a/arch/parisc/kernel/pci-dma.c
+++ b/arch/parisc/kernel/pci-dma.c
@@ -25,6 +25,7 @@
25#include <linux/slab.h> 25#include <linux/slab.h>
26#include <linux/string.h> 26#include <linux/string.h>
27#include <linux/types.h> 27#include <linux/types.h>
28#include <linux/scatterlist.h>
28 29
29#include <asm/cacheflush.h> 30#include <asm/cacheflush.h>
30#include <asm/dma.h> /* for DMA_CHUNK_SIZE */ 31#include <asm/dma.h> /* for DMA_CHUNK_SIZE */
diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug
index c939fe86a9e0..6a79fe43e229 100644
--- a/arch/powerpc/Kconfig.debug
+++ b/arch/powerpc/Kconfig.debug
@@ -216,7 +216,6 @@ config PPC_EARLY_DEBUG_BEAT
216config PPC_EARLY_DEBUG_44x 216config PPC_EARLY_DEBUG_44x
217 bool "Early serial debugging for IBM/AMCC 44x CPUs" 217 bool "Early serial debugging for IBM/AMCC 44x CPUs"
218 depends on 44x 218 depends on 44x
219 select PPC_UDBG_16550
220 help 219 help
221 Select this to enable early debugging for IBM 44x chips via the 220 Select this to enable early debugging for IBM 44x chips via the
222 inbuilt serial port. 221 inbuilt serial port.
diff --git a/arch/powerpc/boot/dts/bamboo.dts b/arch/powerpc/boot/dts/bamboo.dts
index a88ae3d218a5..cb2fb50a281c 100644
--- a/arch/powerpc/boot/dts/bamboo.dts
+++ b/arch/powerpc/boot/dts/bamboo.dts
@@ -98,11 +98,13 @@
98 interrupt-parent = <&MAL0>; 98 interrupt-parent = <&MAL0>;
99 interrupts = <0 1 2 3 4>; 99 interrupts = <0 1 2 3 4>;
100 #interrupt-cells = <1>; 100 #interrupt-cells = <1>;
101 #address-cells = <0>;
102 #size-cells = <0>;
101 interrupt-map = </*TXEOB*/ 0 &UIC0 a 4 103 interrupt-map = </*TXEOB*/ 0 &UIC0 a 4
102 /*RXEOB*/ 1 &UIC0 b 4 104 /*RXEOB*/ 1 &UIC0 b 4
103 /*SERR*/ 2 &UIC1 0 4 105 /*SERR*/ 2 &UIC1 0 4
104 /*TXDE*/ 3 &UIC1 1 4 106 /*TXDE*/ 3 &UIC1 1 4
105 /*RXDE*/ 4 &UIC1 3 4>; 107 /*RXDE*/ 4 &UIC1 2 4>;
106 }; 108 };
107 109
108 POB0: opb { 110 POB0: opb {
@@ -196,6 +198,7 @@
196 }; 198 };
197 199
198 EMAC0: ethernet@ef600e00 { 200 EMAC0: ethernet@ef600e00 {
201 linux,network-index = <0>;
199 device_type = "network"; 202 device_type = "network";
200 compatible = "ibm,emac-440ep", "ibm,emac-440gp", "ibm,emac"; 203 compatible = "ibm,emac-440ep", "ibm,emac-440gp", "ibm,emac";
201 interrupt-parent = <&UIC1>; 204 interrupt-parent = <&UIC1>;
@@ -210,12 +213,13 @@
210 rx-fifo-size = <1000>; 213 rx-fifo-size = <1000>;
211 tx-fifo-size = <800>; 214 tx-fifo-size = <800>;
212 phy-mode = "rmii"; 215 phy-mode = "rmii";
213 phy-map = <00000001>; 216 phy-map = <00000000>;
214 zmii-device = <&ZMII0>; 217 zmii-device = <&ZMII0>;
215 zmii-channel = <0>; 218 zmii-channel = <0>;
216 }; 219 };
217 220
218 EMAC1: ethernet@ef600f00 { 221 EMAC1: ethernet@ef600f00 {
222 linux,network-index = <1>;
219 device_type = "network"; 223 device_type = "network";
220 compatible = "ibm,emac-440ep", "ibm,emac-440gp", "ibm,emac"; 224 compatible = "ibm,emac-440ep", "ibm,emac-440gp", "ibm,emac";
221 interrupt-parent = <&UIC1>; 225 interrupt-parent = <&UIC1>;
@@ -230,7 +234,7 @@
230 rx-fifo-size = <1000>; 234 rx-fifo-size = <1000>;
231 tx-fifo-size = <800>; 235 tx-fifo-size = <800>;
232 phy-mode = "rmii"; 236 phy-mode = "rmii";
233 phy-map = <00000001>; 237 phy-map = <00000000>;
234 zmii-device = <&ZMII0>; 238 zmii-device = <&ZMII0>;
235 zmii-channel = <1>; 239 zmii-channel = <1>;
236 }; 240 };
diff --git a/arch/powerpc/boot/dts/lite5200.dts b/arch/powerpc/boot/dts/lite5200.dts
index bc45f5fbb060..6731763f0282 100644
--- a/arch/powerpc/boot/dts/lite5200.dts
+++ b/arch/powerpc/boot/dts/lite5200.dts
@@ -70,18 +70,16 @@
70 }; 70 };
71 71
72 gpt@600 { // General Purpose Timer 72 gpt@600 { // General Purpose Timer
73 compatible = "mpc5200-gpt"; 73 compatible = "fsl,mpc5200-gpt";
74 device_type = "gpt";
75 cell-index = <0>; 74 cell-index = <0>;
76 reg = <600 10>; 75 reg = <600 10>;
77 interrupts = <1 9 0>; 76 interrupts = <1 9 0>;
78 interrupt-parent = <&mpc5200_pic>; 77 interrupt-parent = <&mpc5200_pic>;
79 has-wdt; 78 fsl,has-wdt;
80 }; 79 };
81 80
82 gpt@610 { // General Purpose Timer 81 gpt@610 { // General Purpose Timer
83 compatible = "mpc5200-gpt"; 82 compatible = "fsl,mpc5200-gpt";
84 device_type = "gpt";
85 cell-index = <1>; 83 cell-index = <1>;
86 reg = <610 10>; 84 reg = <610 10>;
87 interrupts = <1 a 0>; 85 interrupts = <1 a 0>;
@@ -89,8 +87,7 @@
89 }; 87 };
90 88
91 gpt@620 { // General Purpose Timer 89 gpt@620 { // General Purpose Timer
92 compatible = "mpc5200-gpt"; 90 compatible = "fsl,mpc5200-gpt";
93 device_type = "gpt";
94 cell-index = <2>; 91 cell-index = <2>;
95 reg = <620 10>; 92 reg = <620 10>;
96 interrupts = <1 b 0>; 93 interrupts = <1 b 0>;
@@ -98,8 +95,7 @@
98 }; 95 };
99 96
100 gpt@630 { // General Purpose Timer 97 gpt@630 { // General Purpose Timer
101 compatible = "mpc5200-gpt"; 98 compatible = "fsl,mpc5200-gpt";
102 device_type = "gpt";
103 cell-index = <3>; 99 cell-index = <3>;
104 reg = <630 10>; 100 reg = <630 10>;
105 interrupts = <1 c 0>; 101 interrupts = <1 c 0>;
@@ -107,8 +103,7 @@
107 }; 103 };
108 104
109 gpt@640 { // General Purpose Timer 105 gpt@640 { // General Purpose Timer
110 compatible = "mpc5200-gpt"; 106 compatible = "fsl,mpc5200-gpt";
111 device_type = "gpt";
112 cell-index = <4>; 107 cell-index = <4>;
113 reg = <640 10>; 108 reg = <640 10>;
114 interrupts = <1 d 0>; 109 interrupts = <1 d 0>;
@@ -116,8 +111,7 @@
116 }; 111 };
117 112
118 gpt@650 { // General Purpose Timer 113 gpt@650 { // General Purpose Timer
119 compatible = "mpc5200-gpt"; 114 compatible = "fsl,mpc5200-gpt";
120 device_type = "gpt";
121 cell-index = <5>; 115 cell-index = <5>;
122 reg = <650 10>; 116 reg = <650 10>;
123 interrupts = <1 e 0>; 117 interrupts = <1 e 0>;
@@ -125,8 +119,7 @@
125 }; 119 };
126 120
127 gpt@660 { // General Purpose Timer 121 gpt@660 { // General Purpose Timer
128 compatible = "mpc5200-gpt"; 122 compatible = "fsl,mpc5200-gpt";
129 device_type = "gpt";
130 cell-index = <6>; 123 cell-index = <6>;
131 reg = <660 10>; 124 reg = <660 10>;
132 interrupts = <1 f 0>; 125 interrupts = <1 f 0>;
@@ -134,8 +127,7 @@
134 }; 127 };
135 128
136 gpt@670 { // General Purpose Timer 129 gpt@670 { // General Purpose Timer
137 compatible = "mpc5200-gpt"; 130 compatible = "fsl,mpc5200-gpt";
138 device_type = "gpt";
139 cell-index = <7>; 131 cell-index = <7>;
140 reg = <670 10>; 132 reg = <670 10>;
141 interrupts = <1 10 0>; 133 interrupts = <1 10 0>;
diff --git a/arch/powerpc/boot/dts/lite5200b.dts b/arch/powerpc/boot/dts/lite5200b.dts
index 6582c9a39b27..b540388c608c 100644
--- a/arch/powerpc/boot/dts/lite5200b.dts
+++ b/arch/powerpc/boot/dts/lite5200b.dts
@@ -70,18 +70,16 @@
70 }; 70 };
71 71
72 gpt@600 { // General Purpose Timer 72 gpt@600 { // General Purpose Timer
73 compatible = "mpc5200b-gpt","mpc5200-gpt"; 73 compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt";
74 device_type = "gpt";
75 cell-index = <0>; 74 cell-index = <0>;
76 reg = <600 10>; 75 reg = <600 10>;
77 interrupts = <1 9 0>; 76 interrupts = <1 9 0>;
78 interrupt-parent = <&mpc5200_pic>; 77 interrupt-parent = <&mpc5200_pic>;
79 has-wdt; 78 fsl,has-wdt;
80 }; 79 };
81 80
82 gpt@610 { // General Purpose Timer 81 gpt@610 { // General Purpose Timer
83 compatible = "mpc5200b-gpt","mpc5200-gpt"; 82 compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt";
84 device_type = "gpt";
85 cell-index = <1>; 83 cell-index = <1>;
86 reg = <610 10>; 84 reg = <610 10>;
87 interrupts = <1 a 0>; 85 interrupts = <1 a 0>;
@@ -89,8 +87,7 @@
89 }; 87 };
90 88
91 gpt@620 { // General Purpose Timer 89 gpt@620 { // General Purpose Timer
92 compatible = "mpc5200b-gpt","mpc5200-gpt"; 90 compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt";
93 device_type = "gpt";
94 cell-index = <2>; 91 cell-index = <2>;
95 reg = <620 10>; 92 reg = <620 10>;
96 interrupts = <1 b 0>; 93 interrupts = <1 b 0>;
@@ -98,8 +95,7 @@
98 }; 95 };
99 96
100 gpt@630 { // General Purpose Timer 97 gpt@630 { // General Purpose Timer
101 compatible = "mpc5200b-gpt","mpc5200-gpt"; 98 compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt";
102 device_type = "gpt";
103 cell-index = <3>; 99 cell-index = <3>;
104 reg = <630 10>; 100 reg = <630 10>;
105 interrupts = <1 c 0>; 101 interrupts = <1 c 0>;
@@ -107,8 +103,7 @@
107 }; 103 };
108 104
109 gpt@640 { // General Purpose Timer 105 gpt@640 { // General Purpose Timer
110 compatible = "mpc5200b-gpt","mpc5200-gpt"; 106 compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt";
111 device_type = "gpt";
112 cell-index = <4>; 107 cell-index = <4>;
113 reg = <640 10>; 108 reg = <640 10>;
114 interrupts = <1 d 0>; 109 interrupts = <1 d 0>;
@@ -116,8 +111,7 @@
116 }; 111 };
117 112
118 gpt@650 { // General Purpose Timer 113 gpt@650 { // General Purpose Timer
119 compatible = "mpc5200b-gpt","mpc5200-gpt"; 114 compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt";
120 device_type = "gpt";
121 cell-index = <5>; 115 cell-index = <5>;
122 reg = <650 10>; 116 reg = <650 10>;
123 interrupts = <1 e 0>; 117 interrupts = <1 e 0>;
@@ -125,8 +119,7 @@
125 }; 119 };
126 120
127 gpt@660 { // General Purpose Timer 121 gpt@660 { // General Purpose Timer
128 compatible = "mpc5200b-gpt","mpc5200-gpt"; 122 compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt";
129 device_type = "gpt";
130 cell-index = <6>; 123 cell-index = <6>;
131 reg = <660 10>; 124 reg = <660 10>;
132 interrupts = <1 f 0>; 125 interrupts = <1 f 0>;
@@ -134,8 +127,7 @@
134 }; 127 };
135 128
136 gpt@670 { // General Purpose Timer 129 gpt@670 { // General Purpose Timer
137 compatible = "mpc5200b-gpt","mpc5200-gpt"; 130 compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt";
138 device_type = "gpt";
139 cell-index = <7>; 131 cell-index = <7>;
140 reg = <670 10>; 132 reg = <670 10>;
141 interrupts = <1 10 0>; 133 interrupts = <1 10 0>;
diff --git a/arch/powerpc/boot/dts/sequoia.dts b/arch/powerpc/boot/dts/sequoia.dts
index 36be75b04de1..8833dfe2e8b4 100644
--- a/arch/powerpc/boot/dts/sequoia.dts
+++ b/arch/powerpc/boot/dts/sequoia.dts
@@ -241,6 +241,12 @@
241 reg = <ef600d00 c>; 241 reg = <ef600d00 c>;
242 }; 242 };
243 243
244 RGMII0: emac-rgmii@ef601000 {
245 device_type = "rgmii-interface";
246 compatible = "ibm,rgmii-440epx", "ibm,rgmii";
247 reg = <ef601000 8>;
248 };
249
244 EMAC0: ethernet@ef600e00 { 250 EMAC0: ethernet@ef600e00 {
245 linux,network-index = <0>; 251 linux,network-index = <0>;
246 device_type = "network"; 252 device_type = "network";
@@ -261,10 +267,12 @@
261 max-frame-size = <5dc>; 267 max-frame-size = <5dc>;
262 rx-fifo-size = <1000>; 268 rx-fifo-size = <1000>;
263 tx-fifo-size = <800>; 269 tx-fifo-size = <800>;
264 phy-mode = "rmii"; 270 phy-mode = "rgmii";
265 phy-map = <00000000>; 271 phy-map = <00000000>;
266 zmii-device = <&ZMII0>; 272 zmii-device = <&ZMII0>;
267 zmii-channel = <0>; 273 zmii-channel = <0>;
274 rgmii-device = <&RGMII0>;
275 rgmii-channel = <0>;
268 }; 276 };
269 277
270 EMAC1: ethernet@ef600f00 { 278 EMAC1: ethernet@ef600f00 {
@@ -287,10 +295,12 @@
287 max-frame-size = <5dc>; 295 max-frame-size = <5dc>;
288 rx-fifo-size = <1000>; 296 rx-fifo-size = <1000>;
289 tx-fifo-size = <800>; 297 tx-fifo-size = <800>;
290 phy-mode = "rmii"; 298 phy-mode = "rgmii";
291 phy-map = <00000000>; 299 phy-map = <00000000>;
292 zmii-device = <&ZMII0>; 300 zmii-device = <&ZMII0>;
293 zmii-channel = <1>; 301 zmii-channel = <1>;
302 rgmii-device = <&RGMII0>;
303 rgmii-channel = <1>;
294 }; 304 };
295 }; 305 };
296 }; 306 };
diff --git a/arch/powerpc/boot/dts/walnut.dts b/arch/powerpc/boot/dts/walnut.dts
index ec54f4e04ad6..fa681f5343fe 100644
--- a/arch/powerpc/boot/dts/walnut.dts
+++ b/arch/powerpc/boot/dts/walnut.dts
@@ -64,10 +64,15 @@
64 MAL: mcmal { 64 MAL: mcmal {
65 compatible = "ibm,mcmal-405gp", "ibm,mcmal"; 65 compatible = "ibm,mcmal-405gp", "ibm,mcmal";
66 dcr-reg = <180 62>; 66 dcr-reg = <180 62>;
67 num-tx-chans = <2>; 67 num-tx-chans = <1>;
68 num-rx-chans = <1>; 68 num-rx-chans = <1>;
69 interrupt-parent = <&UIC0>; 69 interrupt-parent = <&UIC0>;
70 interrupts = <a 4 b 4 c 4 d 4 e 4>; 70 interrupts = <
71 b 4 /* TXEOB */
72 c 4 /* RXEOB */
73 a 4 /* SERR */
74 d 4 /* TXDE */
75 e 4 /* RXDE */>;
71 }; 76 };
72 77
73 POB0: opb { 78 POB0: opb {
@@ -118,9 +123,10 @@
118 compatible = "ibm,emac-405gp", "ibm,emac"; 123 compatible = "ibm,emac-405gp", "ibm,emac";
119 interrupt-parent = <&UIC0>; 124 interrupt-parent = <&UIC0>;
120 interrupts = <9 4 f 4>; 125 interrupts = <9 4 f 4>;
126 local-mac-address = [000000000000]; /* Filled in by zImage */
121 reg = <ef600800 70>; 127 reg = <ef600800 70>;
122 mal-device = <&MAL>; 128 mal-device = <&MAL>;
123 mal-tx-channel = <0 1>; 129 mal-tx-channel = <0>;
124 mal-rx-channel = <0>; 130 mal-rx-channel = <0>;
125 cell-index = <0>; 131 cell-index = <0>;
126 max-frame-size = <5dc>; 132 max-frame-size = <5dc>;
diff --git a/arch/powerpc/boot/treeboot-walnut.c b/arch/powerpc/boot/treeboot-walnut.c
index 3adf2d08a230..bb2c309d70fc 100644
--- a/arch/powerpc/boot/treeboot-walnut.c
+++ b/arch/powerpc/boot/treeboot-walnut.c
@@ -57,8 +57,8 @@ void ibm405gp_fixup_clocks(unsigned int sysclk, unsigned int ser_clk)
57 } 57 }
58 58
59 /* setup the timebase clock to tick at the cpu frequency */ 59 /* setup the timebase clock to tick at the cpu frequency */
60 cpc0_cr1 = cpc0_cr1 & ~ 0x00800000; 60 cpc0_cr1 = cpc0_cr1 & ~0x00800000;
61 mtdcr(DCRN_CPC0_CR1, cpc0_cr1); 61 mtdcr(DCRN_405_CPC0_CR1, cpc0_cr1);
62 tb = cpu; 62 tb = cpu;
63 63
64 dt_fixup_cpu_clocks(cpu, tb, 0); 64 dt_fixup_cpu_clocks(cpu, tb, 0);
@@ -109,6 +109,7 @@ static void walnut_flashsel_fixup(void)
109 setprop(sram, "reg", reg_sram, sizeof(reg_sram)); 109 setprop(sram, "reg", reg_sram, sizeof(reg_sram));
110} 110}
111 111
112#define WALNUT_OPENBIOS_MAC_OFF 0xfffffe0b
112static void walnut_fixups(void) 113static void walnut_fixups(void)
113{ 114{
114 ibm4xx_fixup_memsize(); 115 ibm4xx_fixup_memsize();
@@ -116,6 +117,7 @@ static void walnut_fixups(void)
116 ibm4xx_quiesce_eth((u32 *)0xef600800, NULL); 117 ibm4xx_quiesce_eth((u32 *)0xef600800, NULL);
117 ibm4xx_fixup_ebc_ranges("/plb/ebc"); 118 ibm4xx_fixup_ebc_ranges("/plb/ebc");
118 walnut_flashsel_fixup(); 119 walnut_flashsel_fixup();
120 dt_fixup_mac_addresses((u8 *) WALNUT_OPENBIOS_MAC_OFF);
119} 121}
120 122
121void platform_init(void) 123void platform_init(void)
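
Editor's note: the treeboot-walnut.c hunk above is a plain read-modify-write of CPC0_CR1; per the comment in the file, clearing the 0x00800000 bit makes the timebase tick at the CPU frequency before the clock values are written into the device tree. A runnable illustration of the pattern (the register access is faked with a variable, and the bit name below is invented for the example):

    /* Illustrative sketch, not part of the patch. */
    #include <stdio.h>

    #define CR1_TIMEBASE_SEL 0x00800000u    /* bit name made up for the example */

    int main(void)
    {
            unsigned int cpc0_cr1 = 0x00900000u;    /* stand-in for a value read via mfdcr() */

            cpc0_cr1 &= ~CR1_TIMEBASE_SEL;          /* timebase now follows the CPU clock */
            printf("CPC0_CR1 = 0x%08x\n", cpc0_cr1); /* prints 0x00100000 */
            return 0;
    }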
diff --git a/arch/powerpc/configs/bamboo_defconfig b/arch/powerpc/configs/bamboo_defconfig
index d22fed6d2cd9..844808ebf245 100644
--- a/arch/powerpc/configs/bamboo_defconfig
+++ b/arch/powerpc/configs/bamboo_defconfig
@@ -1,7 +1,7 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.23-rc1 3# Linux kernel version: 2.6.23
4# Fri Aug 3 10:46:53 2007 4# Fri Oct 19 09:01:11 2007
5# 5#
6# CONFIG_PPC64 is not set 6# CONFIG_PPC64 is not set
7 7
@@ -22,8 +22,13 @@ CONFIG_PHYS_64BIT=y
22# CONFIG_PPC_MM_SLICES is not set 22# CONFIG_PPC_MM_SLICES is not set
23CONFIG_NOT_COHERENT_CACHE=y 23CONFIG_NOT_COHERENT_CACHE=y
24CONFIG_PPC32=y 24CONFIG_PPC32=y
25CONFIG_WORD_SIZE=32
25CONFIG_PPC_MERGE=y 26CONFIG_PPC_MERGE=y
26CONFIG_MMU=y 27CONFIG_MMU=y
28CONFIG_GENERIC_CMOS_UPDATE=y
29CONFIG_GENERIC_TIME=y
30CONFIG_GENERIC_TIME_VSYSCALL=y
31CONFIG_GENERIC_CLOCKEVENTS=y
27CONFIG_GENERIC_HARDIRQS=y 32CONFIG_GENERIC_HARDIRQS=y
28CONFIG_IRQ_PER_CPU=y 33CONFIG_IRQ_PER_CPU=y
29CONFIG_RWSEM_XCHGADD_ALGORITHM=y 34CONFIG_RWSEM_XCHGADD_ALGORITHM=y
@@ -67,6 +72,8 @@ CONFIG_POSIX_MQUEUE=y
67# CONFIG_AUDIT is not set 72# CONFIG_AUDIT is not set
68# CONFIG_IKCONFIG is not set 73# CONFIG_IKCONFIG is not set
69CONFIG_LOG_BUF_SHIFT=14 74CONFIG_LOG_BUF_SHIFT=14
75CONFIG_FAIR_GROUP_SCHED=y
76CONFIG_FAIR_USER_SCHED=y
70CONFIG_SYSFS_DEPRECATED=y 77CONFIG_SYSFS_DEPRECATED=y
71# CONFIG_RELAY is not set 78# CONFIG_RELAY is not set
72CONFIG_BLK_DEV_INITRD=y 79CONFIG_BLK_DEV_INITRD=y
@@ -87,7 +94,6 @@ CONFIG_FUTEX=y
87CONFIG_ANON_INODES=y 94CONFIG_ANON_INODES=y
88CONFIG_EPOLL=y 95CONFIG_EPOLL=y
89CONFIG_SIGNALFD=y 96CONFIG_SIGNALFD=y
90CONFIG_TIMERFD=y
91CONFIG_EVENTFD=y 97CONFIG_EVENTFD=y
92CONFIG_SHMEM=y 98CONFIG_SHMEM=y
93CONFIG_VM_EVENT_COUNTERS=y 99CONFIG_VM_EVENT_COUNTERS=y
@@ -133,6 +139,7 @@ CONFIG_DEFAULT_IOSCHED="anticipatory"
133# CONFIG_PQ2ADS is not set 139# CONFIG_PQ2ADS is not set
134CONFIG_BAMBOO=y 140CONFIG_BAMBOO=y
135# CONFIG_EBONY is not set 141# CONFIG_EBONY is not set
142# CONFIG_SEQUOIA is not set
136CONFIG_440EP=y 143CONFIG_440EP=y
137CONFIG_IBM440EP_ERR42=y 144CONFIG_IBM440EP_ERR42=y
138# CONFIG_MPIC is not set 145# CONFIG_MPIC is not set
@@ -146,11 +153,16 @@ CONFIG_IBM440EP_ERR42=y
146# CONFIG_GENERIC_IOMAP is not set 153# CONFIG_GENERIC_IOMAP is not set
147# CONFIG_CPU_FREQ is not set 154# CONFIG_CPU_FREQ is not set
148# CONFIG_CPM2 is not set 155# CONFIG_CPM2 is not set
156# CONFIG_FSL_ULI1575 is not set
149 157
150# 158#
151# Kernel options 159# Kernel options
152# 160#
153# CONFIG_HIGHMEM is not set 161# CONFIG_HIGHMEM is not set
162# CONFIG_TICK_ONESHOT is not set
163# CONFIG_NO_HZ is not set
164# CONFIG_HIGH_RES_TIMERS is not set
165CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
154# CONFIG_HZ_100 is not set 166# CONFIG_HZ_100 is not set
155CONFIG_HZ_250=y 167CONFIG_HZ_250=y
156# CONFIG_HZ_300 is not set 168# CONFIG_HZ_300 is not set
@@ -172,6 +184,7 @@ CONFIG_FLATMEM_MANUAL=y
172CONFIG_FLATMEM=y 184CONFIG_FLATMEM=y
173CONFIG_FLAT_NODE_MEM_MAP=y 185CONFIG_FLAT_NODE_MEM_MAP=y
174# CONFIG_SPARSEMEM_STATIC is not set 186# CONFIG_SPARSEMEM_STATIC is not set
187# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set
175CONFIG_SPLIT_PTLOCK_CPUS=4 188CONFIG_SPLIT_PTLOCK_CPUS=4
176CONFIG_RESOURCES_64BIT=y 189CONFIG_RESOURCES_64BIT=y
177CONFIG_ZONE_DMA_FLAG=1 190CONFIG_ZONE_DMA_FLAG=1
@@ -197,10 +210,6 @@ CONFIG_PCI_SYSCALL=y
197CONFIG_ARCH_SUPPORTS_MSI=y 210CONFIG_ARCH_SUPPORTS_MSI=y
198# CONFIG_PCI_MSI is not set 211# CONFIG_PCI_MSI is not set
199# CONFIG_PCI_DEBUG is not set 212# CONFIG_PCI_DEBUG is not set
200
201#
202# PCCARD (PCMCIA/CardBus) support
203#
204# CONFIG_PCCARD is not set 213# CONFIG_PCCARD is not set
205# CONFIG_HOTPLUG_PCI is not set 214# CONFIG_HOTPLUG_PCI is not set
206 215
@@ -215,7 +224,7 @@ CONFIG_ARCH_SUPPORTS_MSI=y
215CONFIG_HIGHMEM_START=0xfe000000 224CONFIG_HIGHMEM_START=0xfe000000
216CONFIG_LOWMEM_SIZE=0x30000000 225CONFIG_LOWMEM_SIZE=0x30000000
217CONFIG_KERNEL_START=0xc0000000 226CONFIG_KERNEL_START=0xc0000000
218CONFIG_TASK_SIZE=0x80000000 227CONFIG_TASK_SIZE=0xc0000000
219CONFIG_CONSISTENT_START=0xff100000 228CONFIG_CONSISTENT_START=0xff100000
220CONFIG_CONSISTENT_SIZE=0x00200000 229CONFIG_CONSISTENT_SIZE=0x00200000
221CONFIG_BOOT_LOAD=0x01000000 230CONFIG_BOOT_LOAD=0x01000000
@@ -252,6 +261,7 @@ CONFIG_IP_PNP_BOOTP=y
252# CONFIG_INET_XFRM_MODE_TRANSPORT is not set 261# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
253# CONFIG_INET_XFRM_MODE_TUNNEL is not set 262# CONFIG_INET_XFRM_MODE_TUNNEL is not set
254# CONFIG_INET_XFRM_MODE_BEET is not set 263# CONFIG_INET_XFRM_MODE_BEET is not set
264# CONFIG_INET_LRO is not set
255CONFIG_INET_DIAG=y 265CONFIG_INET_DIAG=y
256CONFIG_INET_TCP_DIAG=y 266CONFIG_INET_TCP_DIAG=y
257# CONFIG_TCP_CONG_ADVANCED is not set 267# CONFIG_TCP_CONG_ADVANCED is not set
@@ -309,6 +319,7 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
309# 319#
310# Generic Driver Options 320# Generic Driver Options
311# 321#
322CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
312CONFIG_STANDALONE=y 323CONFIG_STANDALONE=y
313CONFIG_PREVENT_FIRMWARE_BUILD=y 324CONFIG_PREVENT_FIRMWARE_BUILD=y
314CONFIG_FW_LOADER=y 325CONFIG_FW_LOADER=y
@@ -353,10 +364,6 @@ CONFIG_MISC_DEVICES=y
353# CONFIG_SCSI_NETLINK is not set 364# CONFIG_SCSI_NETLINK is not set
354# CONFIG_ATA is not set 365# CONFIG_ATA is not set
355# CONFIG_MD is not set 366# CONFIG_MD is not set
356
357#
358# Fusion MPT device support
359#
360# CONFIG_FUSION is not set 367# CONFIG_FUSION is not set
361 368
362# 369#
@@ -375,12 +382,36 @@ CONFIG_NETDEVICES=y
375# CONFIG_MACVLAN is not set 382# CONFIG_MACVLAN is not set
376# CONFIG_EQUALIZER is not set 383# CONFIG_EQUALIZER is not set
377# CONFIG_TUN is not set 384# CONFIG_TUN is not set
385# CONFIG_VETH is not set
386# CONFIG_IP1000 is not set
378# CONFIG_ARCNET is not set 387# CONFIG_ARCNET is not set
379# CONFIG_NET_ETHERNET is not set 388# CONFIG_PHYLIB is not set
389CONFIG_NET_ETHERNET=y
390# CONFIG_MII is not set
391# CONFIG_HAPPYMEAL is not set
392# CONFIG_SUNGEM is not set
393# CONFIG_CASSINI is not set
394# CONFIG_NET_VENDOR_3COM is not set
395# CONFIG_NET_TULIP is not set
396# CONFIG_HP100 is not set
397CONFIG_IBM_NEW_EMAC=y
398CONFIG_IBM_NEW_EMAC_RXB=128
399CONFIG_IBM_NEW_EMAC_TXB=64
400CONFIG_IBM_NEW_EMAC_POLL_WEIGHT=32
401CONFIG_IBM_NEW_EMAC_RX_COPY_THRESHOLD=256
402CONFIG_IBM_NEW_EMAC_RX_SKB_HEADROOM=0
403# CONFIG_IBM_NEW_EMAC_DEBUG is not set
404CONFIG_IBM_NEW_EMAC_ZMII=y
405# CONFIG_IBM_NEW_EMAC_RGMII is not set
406# CONFIG_IBM_NEW_EMAC_TAH is not set
407# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
408# CONFIG_NET_PCI is not set
409# CONFIG_B44 is not set
380CONFIG_NETDEV_1000=y 410CONFIG_NETDEV_1000=y
381# CONFIG_ACENIC is not set 411# CONFIG_ACENIC is not set
382# CONFIG_DL2K is not set 412# CONFIG_DL2K is not set
383# CONFIG_E1000 is not set 413# CONFIG_E1000 is not set
414# CONFIG_E1000E is not set
384# CONFIG_NS83820 is not set 415# CONFIG_NS83820 is not set
385# CONFIG_HAMACHI is not set 416# CONFIG_HAMACHI is not set
386# CONFIG_YELLOWFIN is not set 417# CONFIG_YELLOWFIN is not set
@@ -388,6 +419,7 @@ CONFIG_NETDEV_1000=y
388# CONFIG_SIS190 is not set 419# CONFIG_SIS190 is not set
389# CONFIG_SKGE is not set 420# CONFIG_SKGE is not set
390# CONFIG_SKY2 is not set 421# CONFIG_SKY2 is not set
422# CONFIG_SK98LIN is not set
391# CONFIG_VIA_VELOCITY is not set 423# CONFIG_VIA_VELOCITY is not set
392# CONFIG_TIGON3 is not set 424# CONFIG_TIGON3 is not set
393# CONFIG_BNX2 is not set 425# CONFIG_BNX2 is not set
@@ -396,11 +428,14 @@ CONFIG_NETDEV_1000=y
396CONFIG_NETDEV_10000=y 428CONFIG_NETDEV_10000=y
397# CONFIG_CHELSIO_T1 is not set 429# CONFIG_CHELSIO_T1 is not set
398# CONFIG_CHELSIO_T3 is not set 430# CONFIG_CHELSIO_T3 is not set
431# CONFIG_IXGBE is not set
399# CONFIG_IXGB is not set 432# CONFIG_IXGB is not set
400# CONFIG_S2IO is not set 433# CONFIG_S2IO is not set
401# CONFIG_MYRI10GE is not set 434# CONFIG_MYRI10GE is not set
402# CONFIG_NETXEN_NIC is not set 435# CONFIG_NETXEN_NIC is not set
436# CONFIG_NIU is not set
403# CONFIG_MLX4_CORE is not set 437# CONFIG_MLX4_CORE is not set
438# CONFIG_TEHUTI is not set
404# CONFIG_TR is not set 439# CONFIG_TR is not set
405 440
406# 441#
@@ -463,14 +498,11 @@ CONFIG_UNIX98_PTYS=y
463CONFIG_LEGACY_PTYS=y 498CONFIG_LEGACY_PTYS=y
464CONFIG_LEGACY_PTY_COUNT=256 499CONFIG_LEGACY_PTY_COUNT=256
465# CONFIG_IPMI_HANDLER is not set 500# CONFIG_IPMI_HANDLER is not set
466# CONFIG_WATCHDOG is not set
467# CONFIG_HW_RANDOM is not set 501# CONFIG_HW_RANDOM is not set
468# CONFIG_NVRAM is not set 502# CONFIG_NVRAM is not set
469# CONFIG_GEN_RTC is not set 503# CONFIG_GEN_RTC is not set
470# CONFIG_R3964 is not set 504# CONFIG_R3964 is not set
471# CONFIG_APPLICOM is not set 505# CONFIG_APPLICOM is not set
472# CONFIG_AGP is not set
473# CONFIG_DRM is not set
474# CONFIG_RAW_DRIVER is not set 506# CONFIG_RAW_DRIVER is not set
475# CONFIG_TCG_TPM is not set 507# CONFIG_TCG_TPM is not set
476CONFIG_DEVPORT=y 508CONFIG_DEVPORT=y
@@ -484,6 +516,13 @@ CONFIG_DEVPORT=y
484# CONFIG_W1 is not set 516# CONFIG_W1 is not set
485# CONFIG_POWER_SUPPLY is not set 517# CONFIG_POWER_SUPPLY is not set
486# CONFIG_HWMON is not set 518# CONFIG_HWMON is not set
519# CONFIG_WATCHDOG is not set
520
521#
522# Sonics Silicon Backplane
523#
524CONFIG_SSB_POSSIBLE=y
525# CONFIG_SSB is not set
487 526
488# 527#
489# Multifunction device drivers 528# Multifunction device drivers
@@ -500,16 +539,17 @@ CONFIG_DAB=y
500# 539#
501# Graphics support 540# Graphics support
502# 541#
542# CONFIG_AGP is not set
543# CONFIG_DRM is not set
544# CONFIG_VGASTATE is not set
545CONFIG_VIDEO_OUTPUT_CONTROL=m
546# CONFIG_FB is not set
503# CONFIG_BACKLIGHT_LCD_SUPPORT is not set 547# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
504 548
505# 549#
506# Display device support 550# Display device support
507# 551#
508# CONFIG_DISPLAY_SUPPORT is not set 552# CONFIG_DISPLAY_SUPPORT is not set
509# CONFIG_VGASTATE is not set
510CONFIG_VIDEO_OUTPUT_CONTROL=m
511# CONFIG_FB is not set
512# CONFIG_FB_IBM_GXT4500 is not set
513 553
514# 554#
515# Sound 555# Sound
@@ -536,19 +576,6 @@ CONFIG_USB_ARCH_HAS_EHCI=y
536# CONFIG_RTC_CLASS is not set 576# CONFIG_RTC_CLASS is not set
537 577
538# 578#
539# DMA Engine support
540#
541# CONFIG_DMA_ENGINE is not set
542
543#
544# DMA Clients
545#
546
547#
548# DMA Devices
549#
550
551#
552# Userspace I/O 579# Userspace I/O
553# 580#
554# CONFIG_UIO is not set 581# CONFIG_UIO is not set
@@ -600,7 +627,6 @@ CONFIG_SYSFS=y
600CONFIG_TMPFS=y 627CONFIG_TMPFS=y
601# CONFIG_TMPFS_POSIX_ACL is not set 628# CONFIG_TMPFS_POSIX_ACL is not set
602# CONFIG_HUGETLB_PAGE is not set 629# CONFIG_HUGETLB_PAGE is not set
603CONFIG_RAMFS=y
604# CONFIG_CONFIGFS_FS is not set 630# CONFIG_CONFIGFS_FS is not set
605 631
606# 632#
@@ -619,10 +645,7 @@ CONFIG_CRAMFS=y
619# CONFIG_QNX4FS_FS is not set 645# CONFIG_QNX4FS_FS is not set
620# CONFIG_SYSV_FS is not set 646# CONFIG_SYSV_FS is not set
621# CONFIG_UFS_FS is not set 647# CONFIG_UFS_FS is not set
622 648CONFIG_NETWORK_FILESYSTEMS=y
623#
624# Network File Systems
625#
626CONFIG_NFS_FS=y 649CONFIG_NFS_FS=y
627CONFIG_NFS_V3=y 650CONFIG_NFS_V3=y
628# CONFIG_NFS_V3_ACL is not set 651# CONFIG_NFS_V3_ACL is not set
@@ -648,15 +671,7 @@ CONFIG_SUNRPC=y
648# 671#
649# CONFIG_PARTITION_ADVANCED is not set 672# CONFIG_PARTITION_ADVANCED is not set
650CONFIG_MSDOS_PARTITION=y 673CONFIG_MSDOS_PARTITION=y
651
652#
653# Native Language Support
654#
655# CONFIG_NLS is not set 674# CONFIG_NLS is not set
656
657#
658# Distributed Lock Manager
659#
660# CONFIG_DLM is not set 675# CONFIG_DLM is not set
661# CONFIG_UCC_SLOW is not set 676# CONFIG_UCC_SLOW is not set
662 677
@@ -709,6 +724,7 @@ CONFIG_SCHED_DEBUG=y
709# CONFIG_DEBUG_VM is not set 724# CONFIG_DEBUG_VM is not set
710# CONFIG_DEBUG_LIST is not set 725# CONFIG_DEBUG_LIST is not set
711CONFIG_FORCED_INLINING=y 726CONFIG_FORCED_INLINING=y
727# CONFIG_BOOT_PRINTK_DELAY is not set
712# CONFIG_RCU_TORTURE_TEST is not set 728# CONFIG_RCU_TORTURE_TEST is not set
713# CONFIG_FAULT_INJECTION is not set 729# CONFIG_FAULT_INJECTION is not set
714# CONFIG_DEBUG_STACKOVERFLOW is not set 730# CONFIG_DEBUG_STACKOVERFLOW is not set
@@ -728,6 +744,7 @@ CONFIG_PPC_EARLY_DEBUG=y
728# CONFIG_PPC_EARLY_DEBUG_PAS_REALMODE is not set 744# CONFIG_PPC_EARLY_DEBUG_PAS_REALMODE is not set
729# CONFIG_PPC_EARLY_DEBUG_BEAT is not set 745# CONFIG_PPC_EARLY_DEBUG_BEAT is not set
730CONFIG_PPC_EARLY_DEBUG_44x=y 746CONFIG_PPC_EARLY_DEBUG_44x=y
747# CONFIG_PPC_EARLY_DEBUG_CPM is not set
731CONFIG_PPC_EARLY_DEBUG_44x_PHYSLOW=0xef600300 748CONFIG_PPC_EARLY_DEBUG_44x_PHYSLOW=0xef600300
732CONFIG_PPC_EARLY_DEBUG_44x_PHYSHIGH=0x0 749CONFIG_PPC_EARLY_DEBUG_44x_PHYSHIGH=0x0
733 750
@@ -736,6 +753,7 @@ CONFIG_PPC_EARLY_DEBUG_44x_PHYSHIGH=0x0
736# 753#
737# CONFIG_KEYS is not set 754# CONFIG_KEYS is not set
738# CONFIG_SECURITY is not set 755# CONFIG_SECURITY is not set
756# CONFIG_SECURITY_FILE_CAPABILITIES is not set
739CONFIG_CRYPTO=y 757CONFIG_CRYPTO=y
740CONFIG_CRYPTO_ALGAPI=y 758CONFIG_CRYPTO_ALGAPI=y
741CONFIG_CRYPTO_BLKCIPHER=y 759CONFIG_CRYPTO_BLKCIPHER=y
@@ -755,6 +773,7 @@ CONFIG_CRYPTO_ECB=y
755CONFIG_CRYPTO_CBC=y 773CONFIG_CRYPTO_CBC=y
756CONFIG_CRYPTO_PCBC=y 774CONFIG_CRYPTO_PCBC=y
757# CONFIG_CRYPTO_LRW is not set 775# CONFIG_CRYPTO_LRW is not set
776# CONFIG_CRYPTO_XTS is not set
758# CONFIG_CRYPTO_CRYPTD is not set 777# CONFIG_CRYPTO_CRYPTD is not set
759CONFIG_CRYPTO_DES=y 778CONFIG_CRYPTO_DES=y
760# CONFIG_CRYPTO_FCRYPT is not set 779# CONFIG_CRYPTO_FCRYPT is not set
@@ -768,9 +787,12 @@ CONFIG_CRYPTO_DES=y
768# CONFIG_CRYPTO_ARC4 is not set 787# CONFIG_CRYPTO_ARC4 is not set
769# CONFIG_CRYPTO_KHAZAD is not set 788# CONFIG_CRYPTO_KHAZAD is not set
770# CONFIG_CRYPTO_ANUBIS is not set 789# CONFIG_CRYPTO_ANUBIS is not set
790# CONFIG_CRYPTO_SEED is not set
771# CONFIG_CRYPTO_DEFLATE is not set 791# CONFIG_CRYPTO_DEFLATE is not set
772# CONFIG_CRYPTO_MICHAEL_MIC is not set 792# CONFIG_CRYPTO_MICHAEL_MIC is not set
773# CONFIG_CRYPTO_CRC32C is not set 793# CONFIG_CRYPTO_CRC32C is not set
774# CONFIG_CRYPTO_CAMELLIA is not set 794# CONFIG_CRYPTO_CAMELLIA is not set
775# CONFIG_CRYPTO_TEST is not set 795# CONFIG_CRYPTO_TEST is not set
796# CONFIG_CRYPTO_AUTHENC is not set
776CONFIG_CRYPTO_HW=y 797CONFIG_CRYPTO_HW=y
798# CONFIG_PPC_CLOCK is not set
diff --git a/arch/powerpc/configs/ebony_defconfig b/arch/powerpc/configs/ebony_defconfig
index 35a95dda681e..d3ef642811ef 100644
--- a/arch/powerpc/configs/ebony_defconfig
+++ b/arch/powerpc/configs/ebony_defconfig
@@ -1,7 +1,7 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.23-rc4 3# Linux kernel version: 2.6.23
4# Thu Aug 30 16:34:11 2007 4# Thu Oct 18 08:01:57 2007
5# 5#
6# CONFIG_PPC64 is not set 6# CONFIG_PPC64 is not set
7 7
@@ -21,8 +21,13 @@ CONFIG_PHYS_64BIT=y
21# CONFIG_PPC_MM_SLICES is not set 21# CONFIG_PPC_MM_SLICES is not set
22CONFIG_NOT_COHERENT_CACHE=y 22CONFIG_NOT_COHERENT_CACHE=y
23CONFIG_PPC32=y 23CONFIG_PPC32=y
24CONFIG_WORD_SIZE=32
24CONFIG_PPC_MERGE=y 25CONFIG_PPC_MERGE=y
25CONFIG_MMU=y 26CONFIG_MMU=y
27CONFIG_GENERIC_CMOS_UPDATE=y
28CONFIG_GENERIC_TIME=y
29CONFIG_GENERIC_TIME_VSYSCALL=y
30CONFIG_GENERIC_CLOCKEVENTS=y
26CONFIG_GENERIC_HARDIRQS=y 31CONFIG_GENERIC_HARDIRQS=y
27CONFIG_IRQ_PER_CPU=y 32CONFIG_IRQ_PER_CPU=y
28CONFIG_RWSEM_XCHGADD_ALGORITHM=y 33CONFIG_RWSEM_XCHGADD_ALGORITHM=y
@@ -66,6 +71,8 @@ CONFIG_POSIX_MQUEUE=y
66# CONFIG_AUDIT is not set 71# CONFIG_AUDIT is not set
67# CONFIG_IKCONFIG is not set 72# CONFIG_IKCONFIG is not set
68CONFIG_LOG_BUF_SHIFT=14 73CONFIG_LOG_BUF_SHIFT=14
74CONFIG_FAIR_GROUP_SCHED=y
75CONFIG_FAIR_USER_SCHED=y
69CONFIG_SYSFS_DEPRECATED=y 76CONFIG_SYSFS_DEPRECATED=y
70# CONFIG_RELAY is not set 77# CONFIG_RELAY is not set
71CONFIG_BLK_DEV_INITRD=y 78CONFIG_BLK_DEV_INITRD=y
@@ -86,7 +93,6 @@ CONFIG_FUTEX=y
86CONFIG_ANON_INODES=y 93CONFIG_ANON_INODES=y
87CONFIG_EPOLL=y 94CONFIG_EPOLL=y
88CONFIG_SIGNALFD=y 95CONFIG_SIGNALFD=y
89CONFIG_TIMERFD=y
90CONFIG_EVENTFD=y 96CONFIG_EVENTFD=y
91CONFIG_SHMEM=y 97CONFIG_SHMEM=y
92CONFIG_VM_EVENT_COUNTERS=y 98CONFIG_VM_EVENT_COUNTERS=y
@@ -130,7 +136,9 @@ CONFIG_DEFAULT_IOSCHED="anticipatory"
130# CONFIG_PPC_CELL is not set 136# CONFIG_PPC_CELL is not set
131# CONFIG_PPC_CELL_NATIVE is not set 137# CONFIG_PPC_CELL_NATIVE is not set
132# CONFIG_PQ2ADS is not set 138# CONFIG_PQ2ADS is not set
139# CONFIG_BAMBOO is not set
133CONFIG_EBONY=y 140CONFIG_EBONY=y
141# CONFIG_SEQUOIA is not set
134CONFIG_440GP=y 142CONFIG_440GP=y
135# CONFIG_MPIC is not set 143# CONFIG_MPIC is not set
136# CONFIG_MPIC_WEIRD is not set 144# CONFIG_MPIC_WEIRD is not set
@@ -149,6 +157,10 @@ CONFIG_440GP=y
149# Kernel options 157# Kernel options
150# 158#
151# CONFIG_HIGHMEM is not set 159# CONFIG_HIGHMEM is not set
160# CONFIG_TICK_ONESHOT is not set
161# CONFIG_NO_HZ is not set
162# CONFIG_HIGH_RES_TIMERS is not set
163CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
152# CONFIG_HZ_100 is not set 164# CONFIG_HZ_100 is not set
153CONFIG_HZ_250=y 165CONFIG_HZ_250=y
154# CONFIG_HZ_300 is not set 166# CONFIG_HZ_300 is not set
@@ -170,6 +182,7 @@ CONFIG_FLATMEM_MANUAL=y
170CONFIG_FLATMEM=y 182CONFIG_FLATMEM=y
171CONFIG_FLAT_NODE_MEM_MAP=y 183CONFIG_FLAT_NODE_MEM_MAP=y
172# CONFIG_SPARSEMEM_STATIC is not set 184# CONFIG_SPARSEMEM_STATIC is not set
185# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set
173CONFIG_SPLIT_PTLOCK_CPUS=4 186CONFIG_SPLIT_PTLOCK_CPUS=4
174CONFIG_RESOURCES_64BIT=y 187CONFIG_RESOURCES_64BIT=y
175CONFIG_ZONE_DMA_FLAG=1 188CONFIG_ZONE_DMA_FLAG=1
@@ -194,10 +207,6 @@ CONFIG_PCI_SYSCALL=y
194CONFIG_ARCH_SUPPORTS_MSI=y 207CONFIG_ARCH_SUPPORTS_MSI=y
195# CONFIG_PCI_MSI is not set 208# CONFIG_PCI_MSI is not set
196# CONFIG_PCI_DEBUG is not set 209# CONFIG_PCI_DEBUG is not set
197
198#
199# PCCARD (PCMCIA/CardBus) support
200#
201# CONFIG_PCCARD is not set 210# CONFIG_PCCARD is not set
202# CONFIG_HOTPLUG_PCI is not set 211# CONFIG_HOTPLUG_PCI is not set
203 212
@@ -212,7 +221,7 @@ CONFIG_ARCH_SUPPORTS_MSI=y
212CONFIG_HIGHMEM_START=0xfe000000 221CONFIG_HIGHMEM_START=0xfe000000
213CONFIG_LOWMEM_SIZE=0x30000000 222CONFIG_LOWMEM_SIZE=0x30000000
214CONFIG_KERNEL_START=0xc0000000 223CONFIG_KERNEL_START=0xc0000000
215CONFIG_TASK_SIZE=0x80000000 224CONFIG_TASK_SIZE=0xc0000000
216CONFIG_CONSISTENT_START=0xff100000 225CONFIG_CONSISTENT_START=0xff100000
217CONFIG_CONSISTENT_SIZE=0x00200000 226CONFIG_CONSISTENT_SIZE=0x00200000
218CONFIG_BOOT_LOAD=0x01000000 227CONFIG_BOOT_LOAD=0x01000000
@@ -249,6 +258,7 @@ CONFIG_IP_PNP_BOOTP=y
249# CONFIG_INET_XFRM_MODE_TRANSPORT is not set 258# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
250# CONFIG_INET_XFRM_MODE_TUNNEL is not set 259# CONFIG_INET_XFRM_MODE_TUNNEL is not set
251# CONFIG_INET_XFRM_MODE_BEET is not set 260# CONFIG_INET_XFRM_MODE_BEET is not set
261# CONFIG_INET_LRO is not set
252CONFIG_INET_DIAG=y 262CONFIG_INET_DIAG=y
253CONFIG_INET_TCP_DIAG=y 263CONFIG_INET_TCP_DIAG=y
254# CONFIG_TCP_CONG_ADVANCED is not set 264# CONFIG_TCP_CONG_ADVANCED is not set
@@ -306,6 +316,7 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
306# 316#
307# Generic Driver Options 317# Generic Driver Options
308# 318#
319CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
309CONFIG_STANDALONE=y 320CONFIG_STANDALONE=y
310CONFIG_PREVENT_FIRMWARE_BUILD=y 321CONFIG_PREVENT_FIRMWARE_BUILD=y
311CONFIG_FW_LOADER=y 322CONFIG_FW_LOADER=y
@@ -332,6 +343,7 @@ CONFIG_MTD_BLOCK=y
332# CONFIG_INFTL is not set 343# CONFIG_INFTL is not set
333# CONFIG_RFD_FTL is not set 344# CONFIG_RFD_FTL is not set
334# CONFIG_SSFDC is not set 345# CONFIG_SSFDC is not set
346# CONFIG_MTD_OOPS is not set
335 347
336# 348#
337# RAM/ROM/Flash chip drivers 349# RAM/ROM/Flash chip drivers
@@ -364,6 +376,7 @@ CONFIG_MTD_CFI_UTIL=y
364# CONFIG_MTD_COMPLEX_MAPPINGS is not set 376# CONFIG_MTD_COMPLEX_MAPPINGS is not set
365# CONFIG_MTD_PHYSMAP is not set 377# CONFIG_MTD_PHYSMAP is not set
366CONFIG_MTD_PHYSMAP_OF=y 378CONFIG_MTD_PHYSMAP_OF=y
379# CONFIG_MTD_INTEL_VR_NOR is not set
367# CONFIG_MTD_PLATRAM is not set 380# CONFIG_MTD_PLATRAM is not set
368 381
369# 382#
@@ -423,10 +436,6 @@ CONFIG_MISC_DEVICES=y
423# CONFIG_SCSI_NETLINK is not set 436# CONFIG_SCSI_NETLINK is not set
424# CONFIG_ATA is not set 437# CONFIG_ATA is not set
425# CONFIG_MD is not set 438# CONFIG_MD is not set
426
427#
428# Fusion MPT device support
429#
430# CONFIG_FUSION is not set 439# CONFIG_FUSION is not set
431 440
432# 441#
@@ -443,12 +452,36 @@ CONFIG_NETDEVICES=y
443# CONFIG_MACVLAN is not set 452# CONFIG_MACVLAN is not set
444# CONFIG_EQUALIZER is not set 453# CONFIG_EQUALIZER is not set
445# CONFIG_TUN is not set 454# CONFIG_TUN is not set
455# CONFIG_VETH is not set
456# CONFIG_IP1000 is not set
446# CONFIG_ARCNET is not set 457# CONFIG_ARCNET is not set
447# CONFIG_NET_ETHERNET is not set 458# CONFIG_PHYLIB is not set
459CONFIG_NET_ETHERNET=y
460# CONFIG_MII is not set
461# CONFIG_HAPPYMEAL is not set
462# CONFIG_SUNGEM is not set
463# CONFIG_CASSINI is not set
464# CONFIG_NET_VENDOR_3COM is not set
465# CONFIG_NET_TULIP is not set
466# CONFIG_HP100 is not set
467CONFIG_IBM_NEW_EMAC=y
468CONFIG_IBM_NEW_EMAC_RXB=128
469CONFIG_IBM_NEW_EMAC_TXB=64
470CONFIG_IBM_NEW_EMAC_POLL_WEIGHT=32
471CONFIG_IBM_NEW_EMAC_RX_COPY_THRESHOLD=256
472CONFIG_IBM_NEW_EMAC_RX_SKB_HEADROOM=0
473# CONFIG_IBM_NEW_EMAC_DEBUG is not set
474CONFIG_IBM_NEW_EMAC_ZMII=y
475# CONFIG_IBM_NEW_EMAC_RGMII is not set
476# CONFIG_IBM_NEW_EMAC_TAH is not set
477# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
478# CONFIG_NET_PCI is not set
479# CONFIG_B44 is not set
448CONFIG_NETDEV_1000=y 480CONFIG_NETDEV_1000=y
449# CONFIG_ACENIC is not set 481# CONFIG_ACENIC is not set
450# CONFIG_DL2K is not set 482# CONFIG_DL2K is not set
451# CONFIG_E1000 is not set 483# CONFIG_E1000 is not set
484# CONFIG_E1000E is not set
452# CONFIG_NS83820 is not set 485# CONFIG_NS83820 is not set
453# CONFIG_HAMACHI is not set 486# CONFIG_HAMACHI is not set
454# CONFIG_YELLOWFIN is not set 487# CONFIG_YELLOWFIN is not set
@@ -456,6 +489,7 @@ CONFIG_NETDEV_1000=y
456# CONFIG_SIS190 is not set 489# CONFIG_SIS190 is not set
457# CONFIG_SKGE is not set 490# CONFIG_SKGE is not set
458# CONFIG_SKY2 is not set 491# CONFIG_SKY2 is not set
492# CONFIG_SK98LIN is not set
459# CONFIG_VIA_VELOCITY is not set 493# CONFIG_VIA_VELOCITY is not set
460# CONFIG_TIGON3 is not set 494# CONFIG_TIGON3 is not set
461# CONFIG_BNX2 is not set 495# CONFIG_BNX2 is not set
@@ -464,11 +498,14 @@ CONFIG_NETDEV_1000=y
464CONFIG_NETDEV_10000=y 498CONFIG_NETDEV_10000=y
465# CONFIG_CHELSIO_T1 is not set 499# CONFIG_CHELSIO_T1 is not set
466# CONFIG_CHELSIO_T3 is not set 500# CONFIG_CHELSIO_T3 is not set
501# CONFIG_IXGBE is not set
467# CONFIG_IXGB is not set 502# CONFIG_IXGB is not set
468# CONFIG_S2IO is not set 503# CONFIG_S2IO is not set
469# CONFIG_MYRI10GE is not set 504# CONFIG_MYRI10GE is not set
470# CONFIG_NETXEN_NIC is not set 505# CONFIG_NETXEN_NIC is not set
506# CONFIG_NIU is not set
471# CONFIG_MLX4_CORE is not set 507# CONFIG_MLX4_CORE is not set
508# CONFIG_TEHUTI is not set
472# CONFIG_TR is not set 509# CONFIG_TR is not set
473 510
474# 511#
@@ -537,8 +574,6 @@ CONFIG_LEGACY_PTY_COUNT=256
537# CONFIG_GEN_RTC is not set 574# CONFIG_GEN_RTC is not set
538# CONFIG_R3964 is not set 575# CONFIG_R3964 is not set
539# CONFIG_APPLICOM is not set 576# CONFIG_APPLICOM is not set
540# CONFIG_AGP is not set
541# CONFIG_DRM is not set
542# CONFIG_RAW_DRIVER is not set 577# CONFIG_RAW_DRIVER is not set
543# CONFIG_TCG_TPM is not set 578# CONFIG_TCG_TPM is not set
544CONFIG_DEVPORT=y 579CONFIG_DEVPORT=y
@@ -554,6 +589,12 @@ CONFIG_DEVPORT=y
554# CONFIG_HWMON is not set 589# CONFIG_HWMON is not set
555 590
556# 591#
592# Sonics Silicon Backplane
593#
594CONFIG_SSB_POSSIBLE=y
595# CONFIG_SSB is not set
596
597#
557# Multifunction device drivers 598# Multifunction device drivers
558# 599#
559# CONFIG_MFD_SM501 is not set 600# CONFIG_MFD_SM501 is not set
@@ -568,16 +609,17 @@ CONFIG_DEVPORT=y
568# 609#
569# Graphics support 610# Graphics support
570# 611#
612# CONFIG_AGP is not set
613# CONFIG_DRM is not set
614# CONFIG_VGASTATE is not set
615# CONFIG_VIDEO_OUTPUT_CONTROL is not set
616# CONFIG_FB is not set
571# CONFIG_BACKLIGHT_LCD_SUPPORT is not set 617# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
572 618
573# 619#
574# Display device support 620# Display device support
575# 621#
576# CONFIG_DISPLAY_SUPPORT is not set 622# CONFIG_DISPLAY_SUPPORT is not set
577# CONFIG_VGASTATE is not set
578# CONFIG_VIDEO_OUTPUT_CONTROL is not set
579# CONFIG_FB is not set
580# CONFIG_FB_IBM_GXT4500 is not set
581 623
582# 624#
583# Sound 625# Sound
@@ -604,19 +646,6 @@ CONFIG_USB_ARCH_HAS_EHCI=y
604# CONFIG_RTC_CLASS is not set 646# CONFIG_RTC_CLASS is not set
605 647
606# 648#
607# DMA Engine support
608#
609# CONFIG_DMA_ENGINE is not set
610
611#
612# DMA Clients
613#
614
615#
616# DMA Devices
617#
618
619#
620# Userspace I/O 649# Userspace I/O
621# 650#
622# CONFIG_UIO is not set 651# CONFIG_UIO is not set
@@ -668,7 +697,6 @@ CONFIG_SYSFS=y
668CONFIG_TMPFS=y 697CONFIG_TMPFS=y
669# CONFIG_TMPFS_POSIX_ACL is not set 698# CONFIG_TMPFS_POSIX_ACL is not set
670# CONFIG_HUGETLB_PAGE is not set 699# CONFIG_HUGETLB_PAGE is not set
671CONFIG_RAMFS=y
672# CONFIG_CONFIGFS_FS is not set 700# CONFIG_CONFIGFS_FS is not set
673 701
674# 702#
@@ -684,10 +712,12 @@ CONFIG_RAMFS=y
684CONFIG_JFFS2_FS=y 712CONFIG_JFFS2_FS=y
685CONFIG_JFFS2_FS_DEBUG=0 713CONFIG_JFFS2_FS_DEBUG=0
686CONFIG_JFFS2_FS_WRITEBUFFER=y 714CONFIG_JFFS2_FS_WRITEBUFFER=y
715# CONFIG_JFFS2_FS_WBUF_VERIFY is not set
687# CONFIG_JFFS2_SUMMARY is not set 716# CONFIG_JFFS2_SUMMARY is not set
688# CONFIG_JFFS2_FS_XATTR is not set 717# CONFIG_JFFS2_FS_XATTR is not set
689# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set 718# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set
690CONFIG_JFFS2_ZLIB=y 719CONFIG_JFFS2_ZLIB=y
720# CONFIG_JFFS2_LZO is not set
691CONFIG_JFFS2_RTIME=y 721CONFIG_JFFS2_RTIME=y
692# CONFIG_JFFS2_RUBIN is not set 722# CONFIG_JFFS2_RUBIN is not set
693CONFIG_CRAMFS=y 723CONFIG_CRAMFS=y
@@ -696,10 +726,7 @@ CONFIG_CRAMFS=y
696# CONFIG_QNX4FS_FS is not set 726# CONFIG_QNX4FS_FS is not set
697# CONFIG_SYSV_FS is not set 727# CONFIG_SYSV_FS is not set
698# CONFIG_UFS_FS is not set 728# CONFIG_UFS_FS is not set
699 729CONFIG_NETWORK_FILESYSTEMS=y
700#
701# Network File Systems
702#
703CONFIG_NFS_FS=y 730CONFIG_NFS_FS=y
704CONFIG_NFS_V3=y 731CONFIG_NFS_V3=y
705# CONFIG_NFS_V3_ACL is not set 732# CONFIG_NFS_V3_ACL is not set
@@ -725,15 +752,7 @@ CONFIG_SUNRPC=y
725# 752#
726# CONFIG_PARTITION_ADVANCED is not set 753# CONFIG_PARTITION_ADVANCED is not set
727CONFIG_MSDOS_PARTITION=y 754CONFIG_MSDOS_PARTITION=y
728
729#
730# Native Language Support
731#
732# CONFIG_NLS is not set 755# CONFIG_NLS is not set
733
734#
735# Distributed Lock Manager
736#
737# CONFIG_DLM is not set 756# CONFIG_DLM is not set
738# CONFIG_UCC_SLOW is not set 757# CONFIG_UCC_SLOW is not set
739 758
@@ -787,6 +806,7 @@ CONFIG_DEBUG_BUGVERBOSE=y
787# CONFIG_DEBUG_VM is not set 806# CONFIG_DEBUG_VM is not set
788# CONFIG_DEBUG_LIST is not set 807# CONFIG_DEBUG_LIST is not set
789CONFIG_FORCED_INLINING=y 808CONFIG_FORCED_INLINING=y
809# CONFIG_BOOT_PRINTK_DELAY is not set
790# CONFIG_RCU_TORTURE_TEST is not set 810# CONFIG_RCU_TORTURE_TEST is not set
791# CONFIG_FAULT_INJECTION is not set 811# CONFIG_FAULT_INJECTION is not set
792# CONFIG_DEBUG_STACKOVERFLOW is not set 812# CONFIG_DEBUG_STACKOVERFLOW is not set
@@ -801,6 +821,7 @@ CONFIG_FORCED_INLINING=y
801# 821#
802# CONFIG_KEYS is not set 822# CONFIG_KEYS is not set
803# CONFIG_SECURITY is not set 823# CONFIG_SECURITY is not set
824# CONFIG_SECURITY_FILE_CAPABILITIES is not set
804CONFIG_CRYPTO=y 825CONFIG_CRYPTO=y
805CONFIG_CRYPTO_ALGAPI=y 826CONFIG_CRYPTO_ALGAPI=y
806CONFIG_CRYPTO_BLKCIPHER=y 827CONFIG_CRYPTO_BLKCIPHER=y
@@ -820,6 +841,7 @@ CONFIG_CRYPTO_ECB=y
820CONFIG_CRYPTO_CBC=y 841CONFIG_CRYPTO_CBC=y
821CONFIG_CRYPTO_PCBC=y 842CONFIG_CRYPTO_PCBC=y
822# CONFIG_CRYPTO_LRW is not set 843# CONFIG_CRYPTO_LRW is not set
844# CONFIG_CRYPTO_XTS is not set
823# CONFIG_CRYPTO_CRYPTD is not set 845# CONFIG_CRYPTO_CRYPTD is not set
824CONFIG_CRYPTO_DES=y 846CONFIG_CRYPTO_DES=y
825# CONFIG_CRYPTO_FCRYPT is not set 847# CONFIG_CRYPTO_FCRYPT is not set
@@ -833,9 +855,12 @@ CONFIG_CRYPTO_DES=y
833# CONFIG_CRYPTO_ARC4 is not set 855# CONFIG_CRYPTO_ARC4 is not set
834# CONFIG_CRYPTO_KHAZAD is not set 856# CONFIG_CRYPTO_KHAZAD is not set
835# CONFIG_CRYPTO_ANUBIS is not set 857# CONFIG_CRYPTO_ANUBIS is not set
858# CONFIG_CRYPTO_SEED is not set
836# CONFIG_CRYPTO_DEFLATE is not set 859# CONFIG_CRYPTO_DEFLATE is not set
837# CONFIG_CRYPTO_MICHAEL_MIC is not set 860# CONFIG_CRYPTO_MICHAEL_MIC is not set
838# CONFIG_CRYPTO_CRC32C is not set 861# CONFIG_CRYPTO_CRC32C is not set
839# CONFIG_CRYPTO_CAMELLIA is not set 862# CONFIG_CRYPTO_CAMELLIA is not set
840# CONFIG_CRYPTO_TEST is not set 863# CONFIG_CRYPTO_TEST is not set
864# CONFIG_CRYPTO_AUTHENC is not set
841# CONFIG_CRYPTO_HW is not set 865# CONFIG_CRYPTO_HW is not set
866# CONFIG_PPC_CLOCK is not set
diff --git a/arch/powerpc/configs/walnut_defconfig b/arch/powerpc/configs/walnut_defconfig
index 7724292cc06d..02896ecba490 100644
--- a/arch/powerpc/configs/walnut_defconfig
+++ b/arch/powerpc/configs/walnut_defconfig
@@ -1,7 +1,7 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.23-rc4 3# Linux kernel version: 2.6.23
4# Wed Sep 5 12:06:37 2007 4# Thu Oct 18 12:54:18 2007
5# 5#
6# CONFIG_PPC64 is not set 6# CONFIG_PPC64 is not set
7 7
@@ -18,8 +18,13 @@ CONFIG_4xx=y
18# CONFIG_PPC_MM_SLICES is not set 18# CONFIG_PPC_MM_SLICES is not set
19CONFIG_NOT_COHERENT_CACHE=y 19CONFIG_NOT_COHERENT_CACHE=y
20CONFIG_PPC32=y 20CONFIG_PPC32=y
21CONFIG_WORD_SIZE=32
21CONFIG_PPC_MERGE=y 22CONFIG_PPC_MERGE=y
22CONFIG_MMU=y 23CONFIG_MMU=y
24CONFIG_GENERIC_CMOS_UPDATE=y
25CONFIG_GENERIC_TIME=y
26CONFIG_GENERIC_TIME_VSYSCALL=y
27CONFIG_GENERIC_CLOCKEVENTS=y
23CONFIG_GENERIC_HARDIRQS=y 28CONFIG_GENERIC_HARDIRQS=y
24CONFIG_IRQ_PER_CPU=y 29CONFIG_IRQ_PER_CPU=y
25CONFIG_RWSEM_XCHGADD_ALGORITHM=y 30CONFIG_RWSEM_XCHGADD_ALGORITHM=y
@@ -63,6 +68,8 @@ CONFIG_POSIX_MQUEUE=y
63# CONFIG_AUDIT is not set 68# CONFIG_AUDIT is not set
64# CONFIG_IKCONFIG is not set 69# CONFIG_IKCONFIG is not set
65CONFIG_LOG_BUF_SHIFT=14 70CONFIG_LOG_BUF_SHIFT=14
71CONFIG_FAIR_GROUP_SCHED=y
72CONFIG_FAIR_USER_SCHED=y
66CONFIG_SYSFS_DEPRECATED=y 73CONFIG_SYSFS_DEPRECATED=y
67# CONFIG_RELAY is not set 74# CONFIG_RELAY is not set
68CONFIG_BLK_DEV_INITRD=y 75CONFIG_BLK_DEV_INITRD=y
@@ -83,7 +90,6 @@ CONFIG_FUTEX=y
83CONFIG_ANON_INODES=y 90CONFIG_ANON_INODES=y
84CONFIG_EPOLL=y 91CONFIG_EPOLL=y
85CONFIG_SIGNALFD=y 92CONFIG_SIGNALFD=y
86CONFIG_TIMERFD=y
87CONFIG_EVENTFD=y 93CONFIG_EVENTFD=y
88CONFIG_SHMEM=y 94CONFIG_SHMEM=y
89CONFIG_VM_EVENT_COUNTERS=y 95CONFIG_VM_EVENT_COUNTERS=y
@@ -127,7 +133,9 @@ CONFIG_DEFAULT_IOSCHED="anticipatory"
127# CONFIG_PPC_CELL is not set 133# CONFIG_PPC_CELL is not set
128# CONFIG_PPC_CELL_NATIVE is not set 134# CONFIG_PPC_CELL_NATIVE is not set
129# CONFIG_PQ2ADS is not set 135# CONFIG_PQ2ADS is not set
136# CONFIG_KILAUEA is not set
130CONFIG_WALNUT=y 137CONFIG_WALNUT=y
138# CONFIG_XILINX_VIRTEX_GENERIC_BOARD is not set
131CONFIG_405GP=y 139CONFIG_405GP=y
132CONFIG_IBM405_ERR77=y 140CONFIG_IBM405_ERR77=y
133CONFIG_IBM405_ERR51=y 141CONFIG_IBM405_ERR51=y
@@ -148,6 +156,10 @@ CONFIG_IBM405_ERR51=y
148# Kernel options 156# Kernel options
149# 157#
150# CONFIG_HIGHMEM is not set 158# CONFIG_HIGHMEM is not set
159# CONFIG_TICK_ONESHOT is not set
160# CONFIG_NO_HZ is not set
161# CONFIG_HIGH_RES_TIMERS is not set
162CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
151# CONFIG_HZ_100 is not set 163# CONFIG_HZ_100 is not set
152CONFIG_HZ_250=y 164CONFIG_HZ_250=y
153# CONFIG_HZ_300 is not set 165# CONFIG_HZ_300 is not set
@@ -169,6 +181,7 @@ CONFIG_FLATMEM_MANUAL=y
169CONFIG_FLATMEM=y 181CONFIG_FLATMEM=y
170CONFIG_FLAT_NODE_MEM_MAP=y 182CONFIG_FLAT_NODE_MEM_MAP=y
171# CONFIG_SPARSEMEM_STATIC is not set 183# CONFIG_SPARSEMEM_STATIC is not set
184# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set
172CONFIG_SPLIT_PTLOCK_CPUS=4 185CONFIG_SPLIT_PTLOCK_CPUS=4
173CONFIG_RESOURCES_64BIT=y 186CONFIG_RESOURCES_64BIT=y
174CONFIG_ZONE_DMA_FLAG=1 187CONFIG_ZONE_DMA_FLAG=1
@@ -177,6 +190,8 @@ CONFIG_VIRT_TO_BUS=y
177CONFIG_PROC_DEVICETREE=y 190CONFIG_PROC_DEVICETREE=y
178# CONFIG_CMDLINE_BOOL is not set 191# CONFIG_CMDLINE_BOOL is not set
179# CONFIG_PM is not set 192# CONFIG_PM is not set
193CONFIG_SUSPEND_UP_POSSIBLE=y
194CONFIG_HIBERNATION_UP_POSSIBLE=y
180CONFIG_SECCOMP=y 195CONFIG_SECCOMP=y
181CONFIG_WANT_DEVICE_TREE=y 196CONFIG_WANT_DEVICE_TREE=y
182CONFIG_DEVICE_TREE="walnut.dts" 197CONFIG_DEVICE_TREE="walnut.dts"
@@ -190,10 +205,6 @@ CONFIG_ZONE_DMA=y
190# CONFIG_PCI_DOMAINS is not set 205# CONFIG_PCI_DOMAINS is not set
191# CONFIG_PCI_SYSCALL is not set 206# CONFIG_PCI_SYSCALL is not set
192# CONFIG_ARCH_SUPPORTS_MSI is not set 207# CONFIG_ARCH_SUPPORTS_MSI is not set
193
194#
195# PCCARD (PCMCIA/CardBus) support
196#
197# CONFIG_PCCARD is not set 208# CONFIG_PCCARD is not set
198 209
199# 210#
@@ -207,7 +218,7 @@ CONFIG_ZONE_DMA=y
207CONFIG_HIGHMEM_START=0xfe000000 218CONFIG_HIGHMEM_START=0xfe000000
208CONFIG_LOWMEM_SIZE=0x30000000 219CONFIG_LOWMEM_SIZE=0x30000000
209CONFIG_KERNEL_START=0xc0000000 220CONFIG_KERNEL_START=0xc0000000
210CONFIG_TASK_SIZE=0x80000000 221CONFIG_TASK_SIZE=0xc0000000
211CONFIG_CONSISTENT_START=0xff100000 222CONFIG_CONSISTENT_START=0xff100000
212CONFIG_CONSISTENT_SIZE=0x00200000 223CONFIG_CONSISTENT_SIZE=0x00200000
213CONFIG_BOOT_LOAD=0x00400000 224CONFIG_BOOT_LOAD=0x00400000
@@ -244,6 +255,7 @@ CONFIG_IP_PNP_BOOTP=y
244# CONFIG_INET_XFRM_MODE_TRANSPORT is not set 255# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
245# CONFIG_INET_XFRM_MODE_TUNNEL is not set 256# CONFIG_INET_XFRM_MODE_TUNNEL is not set
246# CONFIG_INET_XFRM_MODE_BEET is not set 257# CONFIG_INET_XFRM_MODE_BEET is not set
258# CONFIG_INET_LRO is not set
247CONFIG_INET_DIAG=y 259CONFIG_INET_DIAG=y
248CONFIG_INET_TCP_DIAG=y 260CONFIG_INET_TCP_DIAG=y
249# CONFIG_TCP_CONG_ADVANCED is not set 261# CONFIG_TCP_CONG_ADVANCED is not set
@@ -301,6 +313,7 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
301# 313#
302# Generic Driver Options 314# Generic Driver Options
303# 315#
316CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
304CONFIG_STANDALONE=y 317CONFIG_STANDALONE=y
305CONFIG_PREVENT_FIRMWARE_BUILD=y 318CONFIG_PREVENT_FIRMWARE_BUILD=y
306CONFIG_FW_LOADER=y 319CONFIG_FW_LOADER=y
@@ -328,6 +341,7 @@ CONFIG_MTD_BLOCK=m
328# CONFIG_INFTL is not set 341# CONFIG_INFTL is not set
329# CONFIG_RFD_FTL is not set 342# CONFIG_RFD_FTL is not set
330# CONFIG_SSFDC is not set 343# CONFIG_SSFDC is not set
344# CONFIG_MTD_OOPS is not set
331 345
332# 346#
333# RAM/ROM/Flash chip drivers 347# RAM/ROM/Flash chip drivers
@@ -360,7 +374,6 @@ CONFIG_MTD_CFI_UTIL=y
360# CONFIG_MTD_COMPLEX_MAPPINGS is not set 374# CONFIG_MTD_COMPLEX_MAPPINGS is not set
361# CONFIG_MTD_PHYSMAP is not set 375# CONFIG_MTD_PHYSMAP is not set
362CONFIG_MTD_PHYSMAP_OF=y 376CONFIG_MTD_PHYSMAP_OF=y
363# CONFIG_MTD_WALNUT is not set
364# CONFIG_MTD_PLATRAM is not set 377# CONFIG_MTD_PLATRAM is not set
365 378
366# 379#
@@ -419,7 +432,22 @@ CONFIG_NETDEVICES=y
419# CONFIG_MACVLAN is not set 432# CONFIG_MACVLAN is not set
420# CONFIG_EQUALIZER is not set 433# CONFIG_EQUALIZER is not set
421# CONFIG_TUN is not set 434# CONFIG_TUN is not set
422# CONFIG_NET_ETHERNET is not set 435# CONFIG_VETH is not set
436# CONFIG_PHYLIB is not set
437CONFIG_NET_ETHERNET=y
438# CONFIG_MII is not set
439CONFIG_IBM_NEW_EMAC=y
440CONFIG_IBM_NEW_EMAC_RXB=128
441CONFIG_IBM_NEW_EMAC_TXB=64
442CONFIG_IBM_NEW_EMAC_POLL_WEIGHT=32
443CONFIG_IBM_NEW_EMAC_RX_COPY_THRESHOLD=256
444CONFIG_IBM_NEW_EMAC_RX_SKB_HEADROOM=0
445# CONFIG_IBM_NEW_EMAC_DEBUG is not set
446CONFIG_IBM_NEW_EMAC_ZMII=y
447# CONFIG_IBM_NEW_EMAC_RGMII is not set
448# CONFIG_IBM_NEW_EMAC_TAH is not set
449# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
450# CONFIG_B44 is not set
423CONFIG_NETDEV_1000=y 451CONFIG_NETDEV_1000=y
424CONFIG_NETDEV_10000=y 452CONFIG_NETDEV_10000=y
425 453
@@ -498,6 +526,12 @@ CONFIG_LEGACY_PTY_COUNT=256
498# CONFIG_HWMON is not set 526# CONFIG_HWMON is not set
499 527
500# 528#
529# Sonics Silicon Backplane
530#
531CONFIG_SSB_POSSIBLE=y
532# CONFIG_SSB is not set
533
534#
501# Multifunction device drivers 535# Multifunction device drivers
502# 536#
503# CONFIG_MFD_SM501 is not set 537# CONFIG_MFD_SM501 is not set
@@ -512,16 +546,15 @@ CONFIG_LEGACY_PTY_COUNT=256
512# 546#
513# Graphics support 547# Graphics support
514# 548#
549# CONFIG_VGASTATE is not set
550CONFIG_VIDEO_OUTPUT_CONTROL=m
551# CONFIG_FB is not set
515# CONFIG_BACKLIGHT_LCD_SUPPORT is not set 552# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
516 553
517# 554#
518# Display device support 555# Display device support
519# 556#
520# CONFIG_DISPLAY_SUPPORT is not set 557# CONFIG_DISPLAY_SUPPORT is not set
521# CONFIG_VGASTATE is not set
522CONFIG_VIDEO_OUTPUT_CONTROL=m
523# CONFIG_FB is not set
524# CONFIG_FB_IBM_GXT4500 is not set
525 558
526# 559#
527# Sound 560# Sound
@@ -546,19 +579,6 @@ CONFIG_USB_SUPPORT=y
546# CONFIG_RTC_CLASS is not set 579# CONFIG_RTC_CLASS is not set
547 580
548# 581#
549# DMA Engine support
550#
551# CONFIG_DMA_ENGINE is not set
552
553#
554# DMA Clients
555#
556
557#
558# DMA Devices
559#
560
561#
562# Userspace I/O 582# Userspace I/O
563# 583#
564# CONFIG_UIO is not set 584# CONFIG_UIO is not set
@@ -610,7 +630,6 @@ CONFIG_SYSFS=y
610CONFIG_TMPFS=y 630CONFIG_TMPFS=y
611# CONFIG_TMPFS_POSIX_ACL is not set 631# CONFIG_TMPFS_POSIX_ACL is not set
612# CONFIG_HUGETLB_PAGE is not set 632# CONFIG_HUGETLB_PAGE is not set
613CONFIG_RAMFS=y
614# CONFIG_CONFIGFS_FS is not set 633# CONFIG_CONFIGFS_FS is not set
615 634
616# 635#
@@ -630,10 +649,7 @@ CONFIG_CRAMFS=y
630# CONFIG_QNX4FS_FS is not set 649# CONFIG_QNX4FS_FS is not set
631# CONFIG_SYSV_FS is not set 650# CONFIG_SYSV_FS is not set
632# CONFIG_UFS_FS is not set 651# CONFIG_UFS_FS is not set
633 652CONFIG_NETWORK_FILESYSTEMS=y
634#
635# Network File Systems
636#
637CONFIG_NFS_FS=y 653CONFIG_NFS_FS=y
638CONFIG_NFS_V3=y 654CONFIG_NFS_V3=y
639# CONFIG_NFS_V3_ACL is not set 655# CONFIG_NFS_V3_ACL is not set
@@ -659,15 +675,7 @@ CONFIG_SUNRPC=y
659# 675#
660# CONFIG_PARTITION_ADVANCED is not set 676# CONFIG_PARTITION_ADVANCED is not set
661CONFIG_MSDOS_PARTITION=y 677CONFIG_MSDOS_PARTITION=y
662
663#
664# Native Language Support
665#
666# CONFIG_NLS is not set 678# CONFIG_NLS is not set
667
668#
669# Distributed Lock Manager
670#
671# CONFIG_DLM is not set 679# CONFIG_DLM is not set
672# CONFIG_UCC_SLOW is not set 680# CONFIG_UCC_SLOW is not set
673 681
@@ -720,6 +728,7 @@ CONFIG_DEBUG_BUGVERBOSE=y
720# CONFIG_DEBUG_VM is not set 728# CONFIG_DEBUG_VM is not set
721# CONFIG_DEBUG_LIST is not set 729# CONFIG_DEBUG_LIST is not set
722CONFIG_FORCED_INLINING=y 730CONFIG_FORCED_INLINING=y
731# CONFIG_BOOT_PRINTK_DELAY is not set
723# CONFIG_RCU_TORTURE_TEST is not set 732# CONFIG_RCU_TORTURE_TEST is not set
724# CONFIG_FAULT_INJECTION is not set 733# CONFIG_FAULT_INJECTION is not set
725# CONFIG_DEBUG_STACKOVERFLOW is not set 734# CONFIG_DEBUG_STACKOVERFLOW is not set
@@ -734,6 +743,7 @@ CONFIG_FORCED_INLINING=y
734# 743#
735# CONFIG_KEYS is not set 744# CONFIG_KEYS is not set
736# CONFIG_SECURITY is not set 745# CONFIG_SECURITY is not set
746# CONFIG_SECURITY_FILE_CAPABILITIES is not set
737CONFIG_CRYPTO=y 747CONFIG_CRYPTO=y
738CONFIG_CRYPTO_ALGAPI=y 748CONFIG_CRYPTO_ALGAPI=y
739CONFIG_CRYPTO_BLKCIPHER=y 749CONFIG_CRYPTO_BLKCIPHER=y
@@ -753,6 +763,7 @@ CONFIG_CRYPTO_ECB=y
753CONFIG_CRYPTO_CBC=y 763CONFIG_CRYPTO_CBC=y
754CONFIG_CRYPTO_PCBC=y 764CONFIG_CRYPTO_PCBC=y
755# CONFIG_CRYPTO_LRW is not set 765# CONFIG_CRYPTO_LRW is not set
766# CONFIG_CRYPTO_XTS is not set
756# CONFIG_CRYPTO_CRYPTD is not set 767# CONFIG_CRYPTO_CRYPTD is not set
757CONFIG_CRYPTO_DES=y 768CONFIG_CRYPTO_DES=y
758# CONFIG_CRYPTO_FCRYPT is not set 769# CONFIG_CRYPTO_FCRYPT is not set
@@ -766,9 +777,12 @@ CONFIG_CRYPTO_DES=y
766# CONFIG_CRYPTO_ARC4 is not set 777# CONFIG_CRYPTO_ARC4 is not set
767# CONFIG_CRYPTO_KHAZAD is not set 778# CONFIG_CRYPTO_KHAZAD is not set
768# CONFIG_CRYPTO_ANUBIS is not set 779# CONFIG_CRYPTO_ANUBIS is not set
780# CONFIG_CRYPTO_SEED is not set
769# CONFIG_CRYPTO_DEFLATE is not set 781# CONFIG_CRYPTO_DEFLATE is not set
770# CONFIG_CRYPTO_MICHAEL_MIC is not set 782# CONFIG_CRYPTO_MICHAEL_MIC is not set
771# CONFIG_CRYPTO_CRC32C is not set 783# CONFIG_CRYPTO_CRC32C is not set
772# CONFIG_CRYPTO_CAMELLIA is not set 784# CONFIG_CRYPTO_CAMELLIA is not set
773# CONFIG_CRYPTO_TEST is not set 785# CONFIG_CRYPTO_TEST is not set
786# CONFIG_CRYPTO_AUTHENC is not set
774CONFIG_CRYPTO_HW=y 787CONFIG_CRYPTO_HW=y
788# CONFIG_PPC_CLOCK is not set
diff --git a/arch/powerpc/kernel/dma_64.c b/arch/powerpc/kernel/dma_64.c
index 9001104b56b0..14206e3f0819 100644
--- a/arch/powerpc/kernel/dma_64.c
+++ b/arch/powerpc/kernel/dma_64.c
@@ -161,8 +161,7 @@ static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
161 int i; 161 int i;
162 162
163 for_each_sg(sgl, sg, nents, i) { 163 for_each_sg(sgl, sg, nents, i) {
164 sg->dma_address = (page_to_phys(sg->page) + sg->offset) | 164 sg->dma_address = sg_phys(sg) | dma_direct_offset;
165 dma_direct_offset;
166 sg->dma_length = sg->length; 165 sg->dma_length = sg->length;
167 } 166 }
168 167
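
This hunk, and the ibmebus, iommu and ps3 hunks that follow, replace open-coded page/offset arithmetic with the generic scatterlist accessors. As a rough sketch (the in-tree definitions live in <linux/scatterlist.h> and reach the page through sg_page() once chained scatterlists hide the bare page pointer), the two helpers amount to:

#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <asm/io.h>

/* physical address of the buffer an sg entry describes (sketch only) */
static inline unsigned long sketch_sg_phys(struct scatterlist *sg)
{
	return page_to_phys(sg_page(sg)) + sg->offset;
}

/* kernel virtual address; only meaningful for lowmem pages (sketch only) */
static inline void *sketch_sg_virt(struct scatterlist *sg)
{
	return page_address(sg_page(sg)) + sg->offset;
}

The sketch_ prefix marks these as illustrations, not the real definitions; callers should use sg_phys()/sg_virt() rather than open-coding the arithmetic, so the layout of struct scatterlist can change underneath them.
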
diff --git a/arch/powerpc/kernel/ibmebus.c b/arch/powerpc/kernel/ibmebus.c
index 289d7e935918..72fd87156b24 100644
--- a/arch/powerpc/kernel/ibmebus.c
+++ b/arch/powerpc/kernel/ibmebus.c
@@ -102,8 +102,7 @@ static int ibmebus_map_sg(struct device *dev,
102 int i; 102 int i;
103 103
104 for_each_sg(sgl, sg, nents, i) { 104 for_each_sg(sgl, sg, nents, i) {
105 sg->dma_address = (dma_addr_t)page_address(sg->page) 105 sg->dma_address = (dma_addr_t) sg_virt(sg);
106 + sg->offset;
107 sg->dma_length = sg->length; 106 sg->dma_length = sg->length;
108 } 107 }
109 108
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index 306a6f75b6c5..2d0c9ef555e9 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -307,7 +307,7 @@ int iommu_map_sg(struct iommu_table *tbl, struct scatterlist *sglist,
307 continue; 307 continue;
308 } 308 }
309 /* Allocate iommu entries for that segment */ 309 /* Allocate iommu entries for that segment */
310 vaddr = (unsigned long)page_address(s->page) + s->offset; 310 vaddr = (unsigned long) sg_virt(s);
311 npages = iommu_num_pages(vaddr, slen); 311 npages = iommu_num_pages(vaddr, slen);
312 entry = iommu_range_alloc(tbl, npages, &handle, mask >> IOMMU_PAGE_SHIFT, 0); 312 entry = iommu_range_alloc(tbl, npages, &handle, mask >> IOMMU_PAGE_SHIFT, 0);
313 313
diff --git a/arch/powerpc/platforms/40x/Kconfig b/arch/powerpc/platforms/40x/Kconfig
index 47b3b0a3864a..8f6699fcc145 100644
--- a/arch/powerpc/platforms/40x/Kconfig
+++ b/arch/powerpc/platforms/40x/Kconfig
@@ -100,6 +100,7 @@ config 405GP
100 bool 100 bool
101 select IBM405_ERR77 101 select IBM405_ERR77
102 select IBM405_ERR51 102 select IBM405_ERR51
103 select IBM_NEW_EMAC_ZMII
103 104
104config 405EP 105config 405EP
105 bool 106 bool
diff --git a/arch/powerpc/platforms/44x/Kconfig b/arch/powerpc/platforms/44x/Kconfig
index 51f3ea40a285..8390cc164135 100644
--- a/arch/powerpc/platforms/44x/Kconfig
+++ b/arch/powerpc/platforms/44x/Kconfig
@@ -43,14 +43,14 @@ config 440EP
43 bool 43 bool
44 select PPC_FPU 44 select PPC_FPU
45 select IBM440EP_ERR42 45 select IBM440EP_ERR42
46# select IBM_NEW_EMAC_ZMII 46 select IBM_NEW_EMAC_ZMII
47 47
48config 440EPX 48config 440EPX
49 bool 49 bool
50 select PPC_FPU 50 select PPC_FPU
51# Disabled until the new EMAC Driver is merged. 51 select IBM_NEW_EMAC_EMAC4
52# select IBM_NEW_EMAC_EMAC4 52 select IBM_NEW_EMAC_RGMII
53# select IBM_NEW_EMAC_ZMII 53 select IBM_NEW_EMAC_ZMII
54 54
55config 440GP 55config 440GP
56 bool 56 bool
diff --git a/arch/powerpc/platforms/52xx/lite5200.c b/arch/powerpc/platforms/52xx/lite5200.c
index 65b7ae426238..25d2bfa3d9dc 100644
--- a/arch/powerpc/platforms/52xx/lite5200.c
+++ b/arch/powerpc/platforms/52xx/lite5200.c
@@ -145,6 +145,9 @@ static void __init lite5200_setup_arch(void)
145 /* Some mpc5200 & mpc5200b related configuration */ 145 /* Some mpc5200 & mpc5200b related configuration */
146 mpc5200_setup_xlb_arbiter(); 146 mpc5200_setup_xlb_arbiter();
147 147
148 /* Map wdt for mpc52xx_restart() */
149 mpc52xx_map_wdt();
150
148#ifdef CONFIG_PM 151#ifdef CONFIG_PM
149 mpc52xx_suspend.board_suspend_prepare = lite5200_suspend_prepare; 152 mpc52xx_suspend.board_suspend_prepare = lite5200_suspend_prepare;
150 mpc52xx_suspend.board_resume_finish = lite5200_resume_finish; 153 mpc52xx_suspend.board_resume_finish = lite5200_resume_finish;
@@ -183,5 +186,6 @@ define_machine(lite5200) {
183 .init = mpc52xx_declare_of_platform_devices, 186 .init = mpc52xx_declare_of_platform_devices,
184 .init_IRQ = mpc52xx_init_irq, 187 .init_IRQ = mpc52xx_init_irq,
185 .get_irq = mpc52xx_get_irq, 188 .get_irq = mpc52xx_get_irq,
189 .restart = mpc52xx_restart,
186 .calibrate_decr = generic_calibrate_decr, 190 .calibrate_decr = generic_calibrate_decr,
187}; 191};
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_common.c b/arch/powerpc/platforms/52xx/mpc52xx_common.c
index 3bc201e07e6b..9850685c5429 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_common.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_common.c
@@ -18,15 +18,20 @@
18#include <asm/prom.h> 18#include <asm/prom.h>
19#include <asm/mpc52xx.h> 19#include <asm/mpc52xx.h>
20 20
21/*
22 * This variable is mapped in mpc52xx_map_wdt() and used in mpc52xx_restart().
23 * Permanent mapping is required because mpc52xx_restart() can be called
24 * from interrupt context, while node mapping (which calls ioremap())
25 * cannot be done at that point.
26 */
27static volatile struct mpc52xx_gpt *mpc52xx_wdt = NULL;
21 28
22void __iomem * 29static void __iomem *
23mpc52xx_find_and_map(const char *compatible) 30mpc52xx_map_node(struct device_node *ofn)
24{ 31{
25 struct device_node *ofn;
26 const u32 *regaddr_p; 32 const u32 *regaddr_p;
27 u64 regaddr64, size64; 33 u64 regaddr64, size64;
28 34
29 ofn = of_find_compatible_node(NULL, NULL, compatible);
30 if (!ofn) 35 if (!ofn)
31 return NULL; 36 return NULL;
32 37
@@ -42,8 +47,23 @@ mpc52xx_find_and_map(const char *compatible)
42 47
43 return ioremap((u32)regaddr64, (u32)size64); 48 return ioremap((u32)regaddr64, (u32)size64);
44} 49}
50
51void __iomem *
52mpc52xx_find_and_map(const char *compatible)
53{
54 return mpc52xx_map_node(
55 of_find_compatible_node(NULL, NULL, compatible));
56}
57
45EXPORT_SYMBOL(mpc52xx_find_and_map); 58EXPORT_SYMBOL(mpc52xx_find_and_map);
46 59
60void __iomem *
61mpc52xx_find_and_map_path(const char *path)
62{
63 return mpc52xx_map_node(of_find_node_by_path(path));
64}
65
66EXPORT_SYMBOL(mpc52xx_find_and_map_path);
47 67
48/** 68/**
49 * mpc52xx_find_ipb_freq - Find the IPB bus frequency for a device 69 * mpc52xx_find_ipb_freq - Find the IPB bus frequency for a device
@@ -113,3 +133,46 @@ mpc52xx_declare_of_platform_devices(void)
113 "Error while probing of_platform bus\n"); 133 "Error while probing of_platform bus\n");
114} 134}
115 135
136void __init
137mpc52xx_map_wdt(void)
138{
139 const void *has_wdt;
140 struct device_node *np;
141
142 /* mpc52xx_wdt is mapped here and used in mpc52xx_restart,
143 * possibly from an interrupt context. The wdt is only implemented
144 * on gpt0, so check the has-wdt property before mapping.
145 */
146 for_each_compatible_node(np, NULL, "fsl,mpc5200-gpt") {
147 has_wdt = of_get_property(np, "fsl,has-wdt", NULL);
148 if (has_wdt) {
149 mpc52xx_wdt = mpc52xx_map_node(np);
150 return;
151 }
152 }
153 for_each_compatible_node(np, NULL, "mpc5200-gpt") {
154 has_wdt = of_get_property(np, "has-wdt", NULL);
155 if (has_wdt) {
156 mpc52xx_wdt = mpc52xx_map_node(np);
157 return;
158 }
159 }
160}
161
162void
163mpc52xx_restart(char *cmd)
164{
165 local_irq_disable();
166
167 /* Turn on the watchdog and wait for it to expire.
168 * It effectively does a reset. */
169 if (mpc52xx_wdt) {
170 out_be32(&mpc52xx_wdt->mode, 0x00000000);
171 out_be32(&mpc52xx_wdt->count, 0x000000ff);
172 out_be32(&mpc52xx_wdt->mode, 0x00009004);
173 } else
174 printk("mpc52xx_restart: Can't access wdt. "
175 "Restart impossible, system halted.\n");
176
177 while (1);
178}
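
Other mpc52xx board ports are expected to follow the lite5200 hunk above: map the watchdog GPT during setup_arch() and point the machine's .restart hook at mpc52xx_restart(). A minimal sketch, with "myboard" and its setup function as hypothetical placeholders and the remaining machdep fields elided:

static void __init myboard_setup_arch(void)
{
	mpc5200_setup_xlb_arbiter();
	/* must run before the first possible restart, e.g. from a panic */
	mpc52xx_map_wdt();
}

define_machine(myboard) {
	.name		= "myboard",
	.setup_arch	= myboard_setup_arch,
	.init		= mpc52xx_declare_of_platform_devices,
	.init_IRQ	= mpc52xx_init_irq,
	.get_irq	= mpc52xx_get_irq,
	.restart	= mpc52xx_restart,
	.calibrate_decr	= generic_calibrate_decr,
	/* .probe and the other fields follow the usual board pattern */
};
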
diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
index 3c7325ec36ec..99684ea606af 100644
--- a/arch/powerpc/platforms/Kconfig.cputype
+++ b/arch/powerpc/platforms/Kconfig.cputype
@@ -48,6 +48,7 @@ config 44x
48 bool "AMCC 44x" 48 bool "AMCC 44x"
49 select PPC_DCR_NATIVE 49 select PPC_DCR_NATIVE
50 select WANT_DEVICE_TREE 50 select WANT_DEVICE_TREE
51 select PPC_UDBG_16550
51 52
52config E200 53config E200
53 bool "Freescale e200" 54 bool "Freescale e200"
diff --git a/arch/powerpc/platforms/ps3/system-bus.c b/arch/powerpc/platforms/ps3/system-bus.c
index 07e64b48e7fc..6405f4a36763 100644
--- a/arch/powerpc/platforms/ps3/system-bus.c
+++ b/arch/powerpc/platforms/ps3/system-bus.c
@@ -628,9 +628,8 @@ static int ps3_sb_map_sg(struct device *_dev, struct scatterlist *sgl,
628 int i; 628 int i;
629 629
630 for_each_sg(sgl, sg, nents, i) { 630 for_each_sg(sgl, sg, nents, i) {
631 int result = ps3_dma_map(dev->d_region, 631 int result = ps3_dma_map(dev->d_region, sg_phys(sg),
632 page_to_phys(sg->page) + sg->offset, sg->length, 632 sg->length, &sg->dma_address, 0);
633 &sg->dma_address, 0);
634 633
635 if (result) { 634 if (result) {
636 pr_debug("%s:%d: ps3_dma_map failed (%d)\n", 635 pr_debug("%s:%d: ps3_dma_map failed (%d)\n",
diff --git a/arch/powerpc/sysdev/bestcomm/bestcomm.c b/arch/powerpc/sysdev/bestcomm/bestcomm.c
index 48492a83e5a7..740ad73ce5cc 100644
--- a/arch/powerpc/sysdev/bestcomm/bestcomm.c
+++ b/arch/powerpc/sysdev/bestcomm/bestcomm.c
@@ -269,6 +269,7 @@ bcom_engine_init(void)
269 int task; 269 int task;
270 phys_addr_t tdt_pa, ctx_pa, var_pa, fdt_pa; 270 phys_addr_t tdt_pa, ctx_pa, var_pa, fdt_pa;
271 unsigned int tdt_size, ctx_size, var_size, fdt_size; 271 unsigned int tdt_size, ctx_size, var_size, fdt_size;
272 u16 regval;
272 273
273 /* Allocate & clear SRAM zones for FDT, TDTs, contexts and vars/incs */ 274 /* Allocate & clear SRAM zones for FDT, TDTs, contexts and vars/incs */
274 tdt_size = BCOM_MAX_TASKS * sizeof(struct bcom_tdt); 275 tdt_size = BCOM_MAX_TASKS * sizeof(struct bcom_tdt);
@@ -319,9 +320,11 @@ bcom_engine_init(void)
319 /* Init 'always' initiator */ 320 /* Init 'always' initiator */
320 out_8(&bcom_eng->regs->ipr[BCOM_INITIATOR_ALWAYS], BCOM_IPR_ALWAYS); 321 out_8(&bcom_eng->regs->ipr[BCOM_INITIATOR_ALWAYS], BCOM_IPR_ALWAYS);
321 322
322 /* Disable COMM Bus Prefetch, apparently it's not reliable yet */ 323 /* Disable COMM Bus Prefetch on the original 5200; it's broken */
323 /* FIXME: This should be done on 5200 and not 5200B ... */ 324 if ((mfspr(SPRN_SVR) & MPC5200_SVR_MASK) == MPC5200_SVR) {
324 out_be16(&bcom_eng->regs->PtdCntrl, in_be16(&bcom_eng->regs->PtdCntrl) | 1); 325 regval = in_be16(&bcom_eng->regs->PtdCntrl);
326 out_be16(&bcom_eng->regs->PtdCntrl, regval | 1);
327 }
325 328
326 /* Init lock */ 329 /* Init lock */
327 spin_lock_init(&bcom_eng->lock); 330 spin_lock_init(&bcom_eng->lock);
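
The prefetch workaround is now gated on the System Version Register so it only applies to the original MPC5200 and not the 5200B. A sketch of that gate as a standalone predicate (SPRN_SVR, MPC5200_SVR and MPC5200_SVR_MASK are taken on faith from the mpc52xx headers this hunk relies on):

#include <linux/types.h>
#include <asm/reg.h>
#include <asm/mpc52xx.h>

/* true only on the original 5200, whose COMM bus prefetch is broken */
static inline bool mpc52xx_is_original_5200(void)
{
	return (mfspr(SPRN_SVR) & MPC5200_SVR_MASK) == MPC5200_SVR;
}
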
diff --git a/arch/s390/defconfig b/arch/s390/defconfig
index 2aae23dba4bb..ece7b99da895 100644
--- a/arch/s390/defconfig
+++ b/arch/s390/defconfig
@@ -1,7 +1,7 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.22 3# Linux kernel version: 2.6.23
4# Tue Jul 17 12:50:23 2007 4# Mon Oct 22 12:10:44 2007
5# 5#
6CONFIG_MMU=y 6CONFIG_MMU=y
7CONFIG_ZONE_DMA=y 7CONFIG_ZONE_DMA=y
@@ -19,15 +19,11 @@ CONFIG_S390=y
19CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" 19CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
20 20
21# 21#
22# Code maturity level options 22# General setup
23# 23#
24CONFIG_EXPERIMENTAL=y 24CONFIG_EXPERIMENTAL=y
25CONFIG_LOCK_KERNEL=y 25CONFIG_LOCK_KERNEL=y
26CONFIG_INIT_ENV_ARG_LIMIT=32 26CONFIG_INIT_ENV_ARG_LIMIT=32
27
28#
29# General setup
30#
31CONFIG_LOCALVERSION="" 27CONFIG_LOCALVERSION=""
32CONFIG_LOCALVERSION_AUTO=y 28CONFIG_LOCALVERSION_AUTO=y
33CONFIG_SWAP=y 29CONFIG_SWAP=y
@@ -42,7 +38,14 @@ CONFIG_AUDIT=y
42CONFIG_IKCONFIG=y 38CONFIG_IKCONFIG=y
43CONFIG_IKCONFIG_PROC=y 39CONFIG_IKCONFIG_PROC=y
44CONFIG_LOG_BUF_SHIFT=17 40CONFIG_LOG_BUF_SHIFT=17
41CONFIG_CGROUPS=y
42# CONFIG_CGROUP_DEBUG is not set
43CONFIG_CGROUP_NS=y
44CONFIG_CGROUP_CPUACCT=y
45# CONFIG_CPUSETS is not set 45# CONFIG_CPUSETS is not set
46CONFIG_FAIR_GROUP_SCHED=y
47CONFIG_FAIR_USER_SCHED=y
48# CONFIG_FAIR_CGROUP_SCHED is not set
46CONFIG_SYSFS_DEPRECATED=y 49CONFIG_SYSFS_DEPRECATED=y
47# CONFIG_RELAY is not set 50# CONFIG_RELAY is not set
48CONFIG_BLK_DEV_INITRD=y 51CONFIG_BLK_DEV_INITRD=y
@@ -63,7 +66,6 @@ CONFIG_FUTEX=y
63CONFIG_ANON_INODES=y 66CONFIG_ANON_INODES=y
64CONFIG_EPOLL=y 67CONFIG_EPOLL=y
65CONFIG_SIGNALFD=y 68CONFIG_SIGNALFD=y
66CONFIG_TIMERFD=y
67CONFIG_EVENTFD=y 69CONFIG_EVENTFD=y
68CONFIG_SHMEM=y 70CONFIG_SHMEM=y
69CONFIG_VM_EVENT_COUNTERS=y 71CONFIG_VM_EVENT_COUNTERS=y
@@ -83,6 +85,7 @@ CONFIG_STOP_MACHINE=y
83CONFIG_BLOCK=y 85CONFIG_BLOCK=y
84# CONFIG_BLK_DEV_IO_TRACE is not set 86# CONFIG_BLK_DEV_IO_TRACE is not set
85CONFIG_BLK_DEV_BSG=y 87CONFIG_BLK_DEV_BSG=y
88CONFIG_BLOCK_COMPAT=y
86 89
87# 90#
88# IO Schedulers 91# IO Schedulers
@@ -108,7 +111,6 @@ CONFIG_64BIT=y
108CONFIG_SMP=y 111CONFIG_SMP=y
109CONFIG_NR_CPUS=32 112CONFIG_NR_CPUS=32
110CONFIG_HOTPLUG_CPU=y 113CONFIG_HOTPLUG_CPU=y
111CONFIG_DEFAULT_MIGRATION_COST=1000000
112CONFIG_COMPAT=y 114CONFIG_COMPAT=y
113CONFIG_SYSVIPC_COMPAT=y 115CONFIG_SYSVIPC_COMPAT=y
114CONFIG_AUDIT_ARCH=y 116CONFIG_AUDIT_ARCH=y
@@ -143,9 +145,11 @@ CONFIG_FLATMEM_MANUAL=y
143CONFIG_FLATMEM=y 145CONFIG_FLATMEM=y
144CONFIG_FLAT_NODE_MEM_MAP=y 146CONFIG_FLAT_NODE_MEM_MAP=y
145# CONFIG_SPARSEMEM_STATIC is not set 147# CONFIG_SPARSEMEM_STATIC is not set
148# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set
146CONFIG_SPLIT_PTLOCK_CPUS=4 149CONFIG_SPLIT_PTLOCK_CPUS=4
147CONFIG_RESOURCES_64BIT=y 150CONFIG_RESOURCES_64BIT=y
148CONFIG_ZONE_DMA_FLAG=1 151CONFIG_ZONE_DMA_FLAG=1
152CONFIG_BOUNCE=y
149CONFIG_VIRT_TO_BUS=y 153CONFIG_VIRT_TO_BUS=y
150CONFIG_HOLES_IN_ZONE=y 154CONFIG_HOLES_IN_ZONE=y
151 155
@@ -219,12 +223,14 @@ CONFIG_INET_TUNNEL=y
219CONFIG_INET_XFRM_MODE_TRANSPORT=y 223CONFIG_INET_XFRM_MODE_TRANSPORT=y
220CONFIG_INET_XFRM_MODE_TUNNEL=y 224CONFIG_INET_XFRM_MODE_TUNNEL=y
221CONFIG_INET_XFRM_MODE_BEET=y 225CONFIG_INET_XFRM_MODE_BEET=y
226CONFIG_INET_LRO=y
222CONFIG_INET_DIAG=y 227CONFIG_INET_DIAG=y
223CONFIG_INET_TCP_DIAG=y 228CONFIG_INET_TCP_DIAG=y
224# CONFIG_TCP_CONG_ADVANCED is not set 229# CONFIG_TCP_CONG_ADVANCED is not set
225CONFIG_TCP_CONG_CUBIC=y 230CONFIG_TCP_CONG_CUBIC=y
226CONFIG_DEFAULT_TCP_CONG="cubic" 231CONFIG_DEFAULT_TCP_CONG="cubic"
227# CONFIG_TCP_MD5SIG is not set 232# CONFIG_TCP_MD5SIG is not set
233# CONFIG_IP_VS is not set
228CONFIG_IPV6=y 234CONFIG_IPV6=y
229# CONFIG_IPV6_PRIVACY is not set 235# CONFIG_IPV6_PRIVACY is not set
230# CONFIG_IPV6_ROUTER_PREF is not set 236# CONFIG_IPV6_ROUTER_PREF is not set
@@ -243,7 +249,48 @@ CONFIG_IPV6_SIT=y
243# CONFIG_IPV6_TUNNEL is not set 249# CONFIG_IPV6_TUNNEL is not set
244# CONFIG_IPV6_MULTIPLE_TABLES is not set 250# CONFIG_IPV6_MULTIPLE_TABLES is not set
245# CONFIG_NETWORK_SECMARK is not set 251# CONFIG_NETWORK_SECMARK is not set
246# CONFIG_NETFILTER is not set 252CONFIG_NETFILTER=y
253# CONFIG_NETFILTER_DEBUG is not set
254
255#
256# Core Netfilter Configuration
257#
258CONFIG_NETFILTER_NETLINK=m
259CONFIG_NETFILTER_NETLINK_QUEUE=m
260CONFIG_NETFILTER_NETLINK_LOG=m
261CONFIG_NF_CONNTRACK_ENABLED=m
262CONFIG_NF_CONNTRACK=m
263# CONFIG_NF_CT_ACCT is not set
264# CONFIG_NF_CONNTRACK_MARK is not set
265# CONFIG_NF_CONNTRACK_EVENTS is not set
266# CONFIG_NF_CT_PROTO_SCTP is not set
267# CONFIG_NF_CT_PROTO_UDPLITE is not set
268# CONFIG_NF_CONNTRACK_AMANDA is not set
269# CONFIG_NF_CONNTRACK_FTP is not set
270# CONFIG_NF_CONNTRACK_H323 is not set
271# CONFIG_NF_CONNTRACK_IRC is not set
272# CONFIG_NF_CONNTRACK_NETBIOS_NS is not set
273# CONFIG_NF_CONNTRACK_PPTP is not set
274# CONFIG_NF_CONNTRACK_SANE is not set
275# CONFIG_NF_CONNTRACK_SIP is not set
276# CONFIG_NF_CONNTRACK_TFTP is not set
277# CONFIG_NF_CT_NETLINK is not set
278# CONFIG_NETFILTER_XTABLES is not set
279
280#
281# IP: Netfilter Configuration
282#
283# CONFIG_NF_CONNTRACK_IPV4 is not set
284# CONFIG_IP_NF_QUEUE is not set
285# CONFIG_IP_NF_IPTABLES is not set
286# CONFIG_IP_NF_ARPTABLES is not set
287
288#
289# IPv6: Netfilter Configuration (EXPERIMENTAL)
290#
291# CONFIG_NF_CONNTRACK_IPV6 is not set
292# CONFIG_IP6_NF_QUEUE is not set
293# CONFIG_IP6_NF_IPTABLES is not set
247# CONFIG_IP_DCCP is not set 294# CONFIG_IP_DCCP is not set
248CONFIG_IP_SCTP=m 295CONFIG_IP_SCTP=m
249# CONFIG_SCTP_DBG_MSG is not set 296# CONFIG_SCTP_DBG_MSG is not set
@@ -263,12 +310,7 @@ CONFIG_SCTP_HMAC_MD5=y
263# CONFIG_LAPB is not set 310# CONFIG_LAPB is not set
264# CONFIG_ECONET is not set 311# CONFIG_ECONET is not set
265# CONFIG_WAN_ROUTER is not set 312# CONFIG_WAN_ROUTER is not set
266
267#
268# QoS and/or fair queueing
269#
270CONFIG_NET_SCHED=y 313CONFIG_NET_SCHED=y
271CONFIG_NET_SCH_FIFO=y
272 314
273# 315#
274# Queueing/Scheduling 316# Queueing/Scheduling
@@ -306,10 +348,12 @@ CONFIG_NET_CLS_ACT=y
306CONFIG_NET_ACT_POLICE=y 348CONFIG_NET_ACT_POLICE=y
307# CONFIG_NET_ACT_GACT is not set 349# CONFIG_NET_ACT_GACT is not set
308# CONFIG_NET_ACT_MIRRED is not set 350# CONFIG_NET_ACT_MIRRED is not set
351CONFIG_NET_ACT_NAT=m
309# CONFIG_NET_ACT_PEDIT is not set 352# CONFIG_NET_ACT_PEDIT is not set
310# CONFIG_NET_ACT_SIMP is not set 353# CONFIG_NET_ACT_SIMP is not set
311CONFIG_NET_CLS_POLICE=y 354CONFIG_NET_CLS_POLICE=y
312# CONFIG_NET_CLS_IND is not set 355# CONFIG_NET_CLS_IND is not set
356CONFIG_NET_SCH_FIFO=y
313 357
314# 358#
315# Network testing 359# Network testing
@@ -329,6 +373,7 @@ CONFIG_CCW=y
329# 373#
330# Generic Driver Options 374# Generic Driver Options
331# 375#
376CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
332CONFIG_STANDALONE=y 377CONFIG_STANDALONE=y
333CONFIG_PREVENT_FIRMWARE_BUILD=y 378CONFIG_PREVENT_FIRMWARE_BUILD=y
334# CONFIG_FW_LOADER is not set 379# CONFIG_FW_LOADER is not set
@@ -400,17 +445,11 @@ CONFIG_SCSI_FC_ATTRS=y
400# CONFIG_SCSI_ISCSI_ATTRS is not set 445# CONFIG_SCSI_ISCSI_ATTRS is not set
401# CONFIG_SCSI_SAS_ATTRS is not set 446# CONFIG_SCSI_SAS_ATTRS is not set
402# CONFIG_SCSI_SAS_LIBSAS is not set 447# CONFIG_SCSI_SAS_LIBSAS is not set
403 448# CONFIG_SCSI_SRP_ATTRS is not set
404# 449CONFIG_SCSI_LOWLEVEL=y
405# SCSI low-level drivers
406#
407# CONFIG_ISCSI_TCP is not set 450# CONFIG_ISCSI_TCP is not set
408# CONFIG_SCSI_DEBUG is not set 451# CONFIG_SCSI_DEBUG is not set
409CONFIG_ZFCP=y 452CONFIG_ZFCP=y
410
411#
412# Multi-device support (RAID and LVM)
413#
414CONFIG_MD=y 453CONFIG_MD=y
415CONFIG_BLK_DEV_MD=y 454CONFIG_BLK_DEV_MD=y
416CONFIG_MD_LINEAR=m 455CONFIG_MD_LINEAR=m
@@ -429,7 +468,9 @@ CONFIG_DM_ZERO=y
429CONFIG_DM_MULTIPATH=y 468CONFIG_DM_MULTIPATH=y
430# CONFIG_DM_MULTIPATH_EMC is not set 469# CONFIG_DM_MULTIPATH_EMC is not set
431# CONFIG_DM_MULTIPATH_RDAC is not set 470# CONFIG_DM_MULTIPATH_RDAC is not set
471# CONFIG_DM_MULTIPATH_HP is not set
432# CONFIG_DM_DELAY is not set 472# CONFIG_DM_DELAY is not set
473# CONFIG_DM_UEVENT is not set
433CONFIG_NETDEVICES=y 474CONFIG_NETDEVICES=y
434# CONFIG_NETDEVICES_MULTIQUEUE is not set 475# CONFIG_NETDEVICES_MULTIQUEUE is not set
435# CONFIG_IFB is not set 476# CONFIG_IFB is not set
@@ -438,8 +479,13 @@ CONFIG_BONDING=m
438# CONFIG_MACVLAN is not set 479# CONFIG_MACVLAN is not set
439CONFIG_EQUALIZER=m 480CONFIG_EQUALIZER=m
440CONFIG_TUN=m 481CONFIG_TUN=m
482CONFIG_VETH=m
441CONFIG_NET_ETHERNET=y 483CONFIG_NET_ETHERNET=y
442# CONFIG_MII is not set 484# CONFIG_MII is not set
485# CONFIG_IBM_NEW_EMAC_ZMII is not set
486# CONFIG_IBM_NEW_EMAC_RGMII is not set
487# CONFIG_IBM_NEW_EMAC_TAH is not set
488# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
443CONFIG_NETDEV_1000=y 489CONFIG_NETDEV_1000=y
444CONFIG_NETDEV_10000=y 490CONFIG_NETDEV_10000=y
445# CONFIG_TR is not set 491# CONFIG_TR is not set
@@ -473,7 +519,6 @@ CONFIG_CCWGROUP=y
473CONFIG_UNIX98_PTYS=y 519CONFIG_UNIX98_PTYS=y
474CONFIG_LEGACY_PTYS=y 520CONFIG_LEGACY_PTYS=y
475CONFIG_LEGACY_PTY_COUNT=256 521CONFIG_LEGACY_PTY_COUNT=256
476# CONFIG_WATCHDOG is not set
477CONFIG_HW_RANDOM=m 522CONFIG_HW_RANDOM=m
478# CONFIG_R3964 is not set 523# CONFIG_R3964 is not set
479CONFIG_RAW_DRIVER=m 524CONFIG_RAW_DRIVER=m
@@ -490,7 +535,6 @@ CONFIG_TN3270_CONSOLE=y
490CONFIG_TN3215=y 535CONFIG_TN3215=y
491CONFIG_TN3215_CONSOLE=y 536CONFIG_TN3215_CONSOLE=y
492CONFIG_CCW_CONSOLE=y 537CONFIG_CCW_CONSOLE=y
493CONFIG_SCLP=y
494CONFIG_SCLP_TTY=y 538CONFIG_SCLP_TTY=y
495CONFIG_SCLP_CONSOLE=y 539CONFIG_SCLP_CONSOLE=y
496CONFIG_SCLP_VT220_TTY=y 540CONFIG_SCLP_VT220_TTY=y
@@ -514,6 +558,11 @@ CONFIG_S390_TAPE_34XX=m
514CONFIG_MONWRITER=m 558CONFIG_MONWRITER=m
515CONFIG_S390_VMUR=m 559CONFIG_S390_VMUR=m
516# CONFIG_POWER_SUPPLY is not set 560# CONFIG_POWER_SUPPLY is not set
561# CONFIG_WATCHDOG is not set
562
563#
564# Sonics Silicon Backplane
565#
517 566
518# 567#
519# File systems 568# File systems
@@ -569,7 +618,6 @@ CONFIG_SYSFS=y
569CONFIG_TMPFS=y 618CONFIG_TMPFS=y
570CONFIG_TMPFS_POSIX_ACL=y 619CONFIG_TMPFS_POSIX_ACL=y
571# CONFIG_HUGETLB_PAGE is not set 620# CONFIG_HUGETLB_PAGE is not set
572CONFIG_RAMFS=y
573CONFIG_CONFIGFS_FS=m 621CONFIG_CONFIGFS_FS=m
574 622
575# 623#
@@ -588,10 +636,7 @@ CONFIG_CONFIGFS_FS=m
588# CONFIG_QNX4FS_FS is not set 636# CONFIG_QNX4FS_FS is not set
589# CONFIG_SYSV_FS is not set 637# CONFIG_SYSV_FS is not set
590# CONFIG_UFS_FS is not set 638# CONFIG_UFS_FS is not set
591 639CONFIG_NETWORK_FILESYSTEMS=y
592#
593# Network File Systems
594#
595CONFIG_NFS_FS=y 640CONFIG_NFS_FS=y
596CONFIG_NFS_V3=y 641CONFIG_NFS_V3=y
597# CONFIG_NFS_V3_ACL is not set 642# CONFIG_NFS_V3_ACL is not set
@@ -638,27 +683,13 @@ CONFIG_MSDOS_PARTITION=y
638# CONFIG_KARMA_PARTITION is not set 683# CONFIG_KARMA_PARTITION is not set
639# CONFIG_EFI_PARTITION is not set 684# CONFIG_EFI_PARTITION is not set
640# CONFIG_SYSV68_PARTITION is not set 685# CONFIG_SYSV68_PARTITION is not set
641
642#
643# Native Language Support
644#
645# CONFIG_NLS is not set 686# CONFIG_NLS is not set
646
647#
648# Distributed Lock Manager
649#
650CONFIG_DLM=m 687CONFIG_DLM=m
651# CONFIG_DLM_DEBUG is not set 688# CONFIG_DLM_DEBUG is not set
652 689CONFIG_INSTRUMENTATION=y
653#
654# Instrumentation Support
655#
656
657#
658# Profiling support
659#
660# CONFIG_PROFILING is not set 690# CONFIG_PROFILING is not set
661CONFIG_KPROBES=y 691CONFIG_KPROBES=y
692# CONFIG_MARKERS is not set
662 693
663# 694#
664# Kernel hacking 695# Kernel hacking
@@ -682,6 +713,7 @@ CONFIG_DEBUG_SPINLOCK=y
682CONFIG_DEBUG_MUTEXES=y 713CONFIG_DEBUG_MUTEXES=y
683# CONFIG_DEBUG_LOCK_ALLOC is not set 714# CONFIG_DEBUG_LOCK_ALLOC is not set
684# CONFIG_PROVE_LOCKING is not set 715# CONFIG_PROVE_LOCKING is not set
716# CONFIG_LOCK_STAT is not set
685CONFIG_DEBUG_SPINLOCK_SLEEP=y 717CONFIG_DEBUG_SPINLOCK_SLEEP=y
686# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set 718# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
687# CONFIG_DEBUG_KOBJECT is not set 719# CONFIG_DEBUG_KOBJECT is not set
@@ -694,14 +726,17 @@ CONFIG_FORCED_INLINING=y
694# CONFIG_RCU_TORTURE_TEST is not set 726# CONFIG_RCU_TORTURE_TEST is not set
695# CONFIG_LKDTM is not set 727# CONFIG_LKDTM is not set
696# CONFIG_FAULT_INJECTION is not set 728# CONFIG_FAULT_INJECTION is not set
729CONFIG_SAMPLES=y
697 730
698# 731#
699# Security options 732# Security options
700# 733#
701# CONFIG_KEYS is not set 734# CONFIG_KEYS is not set
702# CONFIG_SECURITY is not set 735# CONFIG_SECURITY is not set
736# CONFIG_SECURITY_FILE_CAPABILITIES is not set
703CONFIG_CRYPTO=y 737CONFIG_CRYPTO=y
704CONFIG_CRYPTO_ALGAPI=y 738CONFIG_CRYPTO_ALGAPI=y
739CONFIG_CRYPTO_AEAD=m
705CONFIG_CRYPTO_BLKCIPHER=y 740CONFIG_CRYPTO_BLKCIPHER=y
706CONFIG_CRYPTO_HASH=m 741CONFIG_CRYPTO_HASH=m
707CONFIG_CRYPTO_MANAGER=y 742CONFIG_CRYPTO_MANAGER=y
@@ -720,6 +755,7 @@ CONFIG_CRYPTO_ECB=m
720CONFIG_CRYPTO_CBC=y 755CONFIG_CRYPTO_CBC=y
721CONFIG_CRYPTO_PCBC=m 756CONFIG_CRYPTO_PCBC=m
722# CONFIG_CRYPTO_LRW is not set 757# CONFIG_CRYPTO_LRW is not set
758# CONFIG_CRYPTO_XTS is not set
723# CONFIG_CRYPTO_CRYPTD is not set 759# CONFIG_CRYPTO_CRYPTD is not set
724# CONFIG_CRYPTO_DES is not set 760# CONFIG_CRYPTO_DES is not set
725CONFIG_CRYPTO_FCRYPT=m 761CONFIG_CRYPTO_FCRYPT=m
@@ -733,11 +769,13 @@ CONFIG_CRYPTO_FCRYPT=m
733# CONFIG_CRYPTO_ARC4 is not set 769# CONFIG_CRYPTO_ARC4 is not set
734# CONFIG_CRYPTO_KHAZAD is not set 770# CONFIG_CRYPTO_KHAZAD is not set
735# CONFIG_CRYPTO_ANUBIS is not set 771# CONFIG_CRYPTO_ANUBIS is not set
772CONFIG_CRYPTO_SEED=m
736# CONFIG_CRYPTO_DEFLATE is not set 773# CONFIG_CRYPTO_DEFLATE is not set
737# CONFIG_CRYPTO_MICHAEL_MIC is not set 774# CONFIG_CRYPTO_MICHAEL_MIC is not set
738# CONFIG_CRYPTO_CRC32C is not set 775# CONFIG_CRYPTO_CRC32C is not set
739CONFIG_CRYPTO_CAMELLIA=m 776CONFIG_CRYPTO_CAMELLIA=m
740# CONFIG_CRYPTO_TEST is not set 777# CONFIG_CRYPTO_TEST is not set
778CONFIG_CRYPTO_AUTHENC=m
741CONFIG_CRYPTO_HW=y 779CONFIG_CRYPTO_HW=y
742# CONFIG_CRYPTO_SHA1_S390 is not set 780# CONFIG_CRYPTO_SHA1_S390 is not set
743# CONFIG_CRYPTO_SHA256_S390 is not set 781# CONFIG_CRYPTO_SHA256_S390 is not set
@@ -755,5 +793,6 @@ CONFIG_BITREVERSE=m
755# CONFIG_CRC16 is not set 793# CONFIG_CRC16 is not set
756# CONFIG_CRC_ITU_T is not set 794# CONFIG_CRC_ITU_T is not set
757CONFIG_CRC32=m 795CONFIG_CRC32=m
796CONFIG_CRC7=m
758# CONFIG_LIBCRC32C is not set 797# CONFIG_LIBCRC32C is not set
759CONFIG_PLIST=y 798CONFIG_PLIST=y
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index 66b51901c87d..ce0856d32500 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -648,6 +648,8 @@ static int dump_set_type(enum dump_type type)
648 case DUMP_TYPE_CCW: 648 case DUMP_TYPE_CCW:
649 if (MACHINE_IS_VM) 649 if (MACHINE_IS_VM)
650 dump_method = DUMP_METHOD_CCW_VM; 650 dump_method = DUMP_METHOD_CCW_VM;
651 else if (diag308_set_works)
652 dump_method = DUMP_METHOD_CCW_DIAG;
651 else 653 else
652 dump_method = DUMP_METHOD_CCW_CIO; 654 dump_method = DUMP_METHOD_CCW_CIO;
653 break; 655 break;
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index 70c57378f426..96492cf2d491 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -44,6 +44,7 @@
44#include <asm/processor.h> 44#include <asm/processor.h>
45#include <asm/irq.h> 45#include <asm/irq.h>
46#include <asm/timer.h> 46#include <asm/timer.h>
47#include <asm/cpu.h>
47 48
48asmlinkage void ret_from_fork(void) asm ("ret_from_fork"); 49asmlinkage void ret_from_fork(void) asm ("ret_from_fork");
49 50
@@ -91,6 +92,14 @@ EXPORT_SYMBOL(unregister_idle_notifier);
91 92
92void do_monitor_call(struct pt_regs *regs, long interruption_code) 93void do_monitor_call(struct pt_regs *regs, long interruption_code)
93{ 94{
95 struct s390_idle_data *idle;
96
97 idle = &__get_cpu_var(s390_idle);
98 spin_lock(&idle->lock);
99 idle->idle_time += get_clock() - idle->idle_enter;
100 idle->in_idle = 0;
101 spin_unlock(&idle->lock);
102
94 /* disable monitor call class 0 */ 103 /* disable monitor call class 0 */
95 __ctl_clear_bit(8, 15); 104 __ctl_clear_bit(8, 15);
96 105
@@ -105,6 +114,7 @@ extern void s390_handle_mcck(void);
105static void default_idle(void) 114static void default_idle(void)
106{ 115{
107 int cpu, rc; 116 int cpu, rc;
117 struct s390_idle_data *idle;
108 118
109 /* CPU is going idle. */ 119 /* CPU is going idle. */
110 cpu = smp_processor_id(); 120 cpu = smp_processor_id();
@@ -142,6 +152,12 @@ static void default_idle(void)
142 return; 152 return;
143 } 153 }
144 154
155 idle = &__get_cpu_var(s390_idle);
156 spin_lock(&idle->lock);
157 idle->idle_count++;
158 idle->in_idle = 1;
159 idle->idle_enter = get_clock();
160 spin_unlock(&idle->lock);
145 trace_hardirqs_on(); 161 trace_hardirqs_on();
146 /* Wait for external, I/O or machine check interrupt. */ 162 /* Wait for external, I/O or machine check interrupt. */
147 __load_psw_mask(psw_kernel_bits | PSW_MASK_WAIT | 163 __load_psw_mask(psw_kernel_bits | PSW_MASK_WAIT |
@@ -254,14 +270,12 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long new_stackp,
254 save_fp_regs(&current->thread.fp_regs); 270 save_fp_regs(&current->thread.fp_regs);
255 memcpy(&p->thread.fp_regs, &current->thread.fp_regs, 271 memcpy(&p->thread.fp_regs, &current->thread.fp_regs,
256 sizeof(s390_fp_regs)); 272 sizeof(s390_fp_regs));
257 p->thread.user_seg = __pa((unsigned long) p->mm->pgd) | _SEGMENT_TABLE;
258 /* Set a new TLS ? */ 273 /* Set a new TLS ? */
259 if (clone_flags & CLONE_SETTLS) 274 if (clone_flags & CLONE_SETTLS)
260 p->thread.acrs[0] = regs->gprs[6]; 275 p->thread.acrs[0] = regs->gprs[6];
261#else /* CONFIG_64BIT */ 276#else /* CONFIG_64BIT */
262 /* Save the fpu registers to new thread structure. */ 277 /* Save the fpu registers to new thread structure. */
263 save_fp_regs(&p->thread.fp_regs); 278 save_fp_regs(&p->thread.fp_regs);
264 p->thread.user_seg = __pa((unsigned long) p->mm->pgd) | _REGION_TABLE;
265 /* Set a new TLS ? */ 279 /* Set a new TLS ? */
266 if (clone_flags & CLONE_SETTLS) { 280 if (clone_flags & CLONE_SETTLS) {
267 if (test_thread_flag(TIF_31BIT)) { 281 if (test_thread_flag(TIF_31BIT)) {
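
The idle bookkeeping added here and in smp.c below assumes a per-CPU s390_idle_data structure declared in asm/cpu.h. That header is not part of the quoted hunks, so the following layout is only what the accesses imply, not the authoritative definition:

#include <linux/percpu.h>
#include <linux/spinlock.h>

struct s390_idle_data {
	spinlock_t lock;		/* serializes updates and sysfs reads */
	unsigned int in_idle;		/* set while the CPU sits in enabled wait */
	unsigned long long idle_count;	/* completed idle periods */
	unsigned long long idle_enter;	/* TOD clock value at idle entry */
	unsigned long long idle_time;	/* accumulated idle time in TOD units */
};

DECLARE_PER_CPU(struct s390_idle_data, s390_idle);
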
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 35edbef1d222..1d97fe1c0e53 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -42,6 +42,7 @@
42#include <asm/tlbflush.h> 42#include <asm/tlbflush.h>
43#include <asm/timer.h> 43#include <asm/timer.h>
44#include <asm/lowcore.h> 44#include <asm/lowcore.h>
45#include <asm/cpu.h>
45 46
46/* 47/*
47 * An array with a pointer the lowcore of every CPU. 48 * An array with a pointer the lowcore of every CPU.
@@ -325,7 +326,7 @@ static void smp_ext_bitcall(int cpu, ec_bit_sig sig)
325 */ 326 */
326void smp_ptlb_callback(void *info) 327void smp_ptlb_callback(void *info)
327{ 328{
328 local_flush_tlb(); 329 __tlb_flush_local();
329} 330}
330 331
331void smp_ptlb_all(void) 332void smp_ptlb_all(void)
@@ -494,6 +495,8 @@ int __cpuinit start_secondary(void *cpuvoid)
494 return 0; 495 return 0;
495} 496}
496 497
498DEFINE_PER_CPU(struct s390_idle_data, s390_idle);
499
497static void __init smp_create_idle(unsigned int cpu) 500static void __init smp_create_idle(unsigned int cpu)
498{ 501{
499 struct task_struct *p; 502 struct task_struct *p;
@@ -506,6 +509,7 @@ static void __init smp_create_idle(unsigned int cpu)
506 if (IS_ERR(p)) 509 if (IS_ERR(p))
507 panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p)); 510 panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p));
508 current_set[cpu] = p; 511 current_set[cpu] = p;
512 spin_lock_init(&(&per_cpu(s390_idle, cpu))->lock);
509} 513}
510 514
511static int cpu_stopped(int cpu) 515static int cpu_stopped(int cpu)
@@ -724,6 +728,7 @@ void __init smp_prepare_boot_cpu(void)
724 cpu_set(0, cpu_online_map); 728 cpu_set(0, cpu_online_map);
725 S390_lowcore.percpu_offset = __per_cpu_offset[0]; 729 S390_lowcore.percpu_offset = __per_cpu_offset[0];
726 current_set[0] = current; 730 current_set[0] = current;
731 spin_lock_init(&(&__get_cpu_var(s390_idle))->lock);
727} 732}
728 733
729void __init smp_cpus_done(unsigned int max_cpus) 734void __init smp_cpus_done(unsigned int max_cpus)
@@ -756,22 +761,71 @@ static ssize_t show_capability(struct sys_device *dev, char *buf)
756} 761}
757static SYSDEV_ATTR(capability, 0444, show_capability, NULL); 762static SYSDEV_ATTR(capability, 0444, show_capability, NULL);
758 763
764static ssize_t show_idle_count(struct sys_device *dev, char *buf)
765{
766 struct s390_idle_data *idle;
767 unsigned long long idle_count;
768
769 idle = &per_cpu(s390_idle, dev->id);
770 spin_lock_irq(&idle->lock);
771 idle_count = idle->idle_count;
772 spin_unlock_irq(&idle->lock);
773 return sprintf(buf, "%llu\n", idle_count);
774}
775static SYSDEV_ATTR(idle_count, 0444, show_idle_count, NULL);
776
777static ssize_t show_idle_time(struct sys_device *dev, char *buf)
778{
779 struct s390_idle_data *idle;
780 unsigned long long new_time;
781
782 idle = &per_cpu(s390_idle, dev->id);
783 spin_lock_irq(&idle->lock);
784 if (idle->in_idle) {
785 new_time = get_clock();
786 idle->idle_time += new_time - idle->idle_enter;
787 idle->idle_enter = new_time;
788 }
789 new_time = idle->idle_time;
790 spin_unlock_irq(&idle->lock);
791 return sprintf(buf, "%llu us\n", new_time >> 12);
792}
793static SYSDEV_ATTR(idle_time, 0444, show_idle_time, NULL);
794
795static struct attribute *cpu_attrs[] = {
796 &attr_capability.attr,
797 &attr_idle_count.attr,
798 &attr_idle_time.attr,
799 NULL,
800};
801
802static struct attribute_group cpu_attr_group = {
803 .attrs = cpu_attrs,
804};
805
759static int __cpuinit smp_cpu_notify(struct notifier_block *self, 806static int __cpuinit smp_cpu_notify(struct notifier_block *self,
760 unsigned long action, void *hcpu) 807 unsigned long action, void *hcpu)
761{ 808{
762 unsigned int cpu = (unsigned int)(long)hcpu; 809 unsigned int cpu = (unsigned int)(long)hcpu;
763 struct cpu *c = &per_cpu(cpu_devices, cpu); 810 struct cpu *c = &per_cpu(cpu_devices, cpu);
764 struct sys_device *s = &c->sysdev; 811 struct sys_device *s = &c->sysdev;
812 struct s390_idle_data *idle;
765 813
766 switch (action) { 814 switch (action) {
767 case CPU_ONLINE: 815 case CPU_ONLINE:
768 case CPU_ONLINE_FROZEN: 816 case CPU_ONLINE_FROZEN:
769 if (sysdev_create_file(s, &attr_capability)) 817 idle = &per_cpu(s390_idle, cpu);
818 spin_lock_irq(&idle->lock);
819 idle->idle_enter = 0;
820 idle->idle_time = 0;
821 idle->idle_count = 0;
822 spin_unlock_irq(&idle->lock);
823 if (sysfs_create_group(&s->kobj, &cpu_attr_group))
770 return NOTIFY_BAD; 824 return NOTIFY_BAD;
771 break; 825 break;
772 case CPU_DEAD: 826 case CPU_DEAD:
773 case CPU_DEAD_FROZEN: 827 case CPU_DEAD_FROZEN:
774 sysdev_remove_file(s, &attr_capability); 828 sysfs_remove_group(&s->kobj, &cpu_attr_group);
775 break; 829 break;
776 } 830 }
777 return NOTIFY_OK; 831 return NOTIFY_OK;
@@ -784,6 +838,7 @@ static struct notifier_block __cpuinitdata smp_cpu_nb = {
784static int __init topology_init(void) 838static int __init topology_init(void)
785{ 839{
786 int cpu; 840 int cpu;
841 int rc;
787 842
788 register_cpu_notifier(&smp_cpu_nb); 843 register_cpu_notifier(&smp_cpu_nb);
789 844
@@ -796,7 +851,9 @@ static int __init topology_init(void)
796 if (!cpu_online(cpu)) 851 if (!cpu_online(cpu))
797 continue; 852 continue;
798 s = &c->sysdev; 853 s = &c->sysdev;
799 sysdev_create_file(s, &attr_capability); 854 rc = sysfs_create_group(&s->kobj, &cpu_attr_group);
855 if (rc)
856 return rc;
800 } 857 }
801 return 0; 858 return 0;
802} 859}
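
The new idle_count and idle_time attributes are attached to each CPU's sysdev, so they should appear next to the existing capability file, presumably under /sys/devices/system/cpu/cpuN/. show_idle_time() shifts the TOD delta right by 12 bits, which converts TOD clock units into microseconds, matching the "us" suffix in the output. A small userspace sketch, with the path treated as an assumption:

#include <stdio.h>

int main(void)
{
	char buf[64];
	FILE *f = fopen("/sys/devices/system/cpu/cpu0/idle_time", "r");

	if (!f)
		return 1;
	if (fgets(buf, sizeof(buf), f))
		printf("cpu0 idle time: %s", buf);	/* e.g. "123456 us" */
	fclose(f);
	return 0;
}
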
diff --git a/arch/s390/lib/uaccess_pt.c b/arch/s390/lib/uaccess_pt.c
index b159a9d65680..7e8efaade2ea 100644
--- a/arch/s390/lib/uaccess_pt.c
+++ b/arch/s390/lib/uaccess_pt.c
@@ -15,6 +15,27 @@
15#include <asm/futex.h> 15#include <asm/futex.h>
16#include "uaccess.h" 16#include "uaccess.h"
17 17
18static inline pte_t *follow_table(struct mm_struct *mm, unsigned long addr)
19{
20 pgd_t *pgd;
21 pud_t *pud;
22 pmd_t *pmd;
23
24 pgd = pgd_offset(mm, addr);
25 if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
26 return NULL;
27
28 pud = pud_offset(pgd, addr);
29 if (pud_none(*pud) || unlikely(pud_bad(*pud)))
30 return NULL;
31
32 pmd = pmd_offset(pud, addr);
33 if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
34 return NULL;
35
36 return pte_offset_map(pmd, addr);
37}
38
18static int __handle_fault(struct mm_struct *mm, unsigned long address, 39static int __handle_fault(struct mm_struct *mm, unsigned long address,
19 int write_access) 40 int write_access)
20{ 41{
@@ -85,8 +106,6 @@ static size_t __user_copy_pt(unsigned long uaddr, void *kptr,
85{ 106{
86 struct mm_struct *mm = current->mm; 107 struct mm_struct *mm = current->mm;
87 unsigned long offset, pfn, done, size; 108 unsigned long offset, pfn, done, size;
88 pgd_t *pgd;
89 pmd_t *pmd;
90 pte_t *pte; 109 pte_t *pte;
91 void *from, *to; 110 void *from, *to;
92 111
@@ -94,15 +113,7 @@ static size_t __user_copy_pt(unsigned long uaddr, void *kptr,
94retry: 113retry:
95 spin_lock(&mm->page_table_lock); 114 spin_lock(&mm->page_table_lock);
96 do { 115 do {
97 pgd = pgd_offset(mm, uaddr); 116 pte = follow_table(mm, uaddr);
98 if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
99 goto fault;
100
101 pmd = pmd_offset(pgd, uaddr);
102 if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
103 goto fault;
104
105 pte = pte_offset_map(pmd, uaddr);
106 if (!pte || !pte_present(*pte) || 117 if (!pte || !pte_present(*pte) ||
107 (write_user && !pte_write(*pte))) 118 (write_user && !pte_write(*pte)))
108 goto fault; 119 goto fault;
@@ -142,22 +153,12 @@ static unsigned long __dat_user_addr(unsigned long uaddr)
142{ 153{
143 struct mm_struct *mm = current->mm; 154 struct mm_struct *mm = current->mm;
144 unsigned long pfn, ret; 155 unsigned long pfn, ret;
145 pgd_t *pgd;
146 pmd_t *pmd;
147 pte_t *pte; 156 pte_t *pte;
148 int rc; 157 int rc;
149 158
150 ret = 0; 159 ret = 0;
151retry: 160retry:
152 pgd = pgd_offset(mm, uaddr); 161 pte = follow_table(mm, uaddr);
153 if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
154 goto fault;
155
156 pmd = pmd_offset(pgd, uaddr);
157 if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
158 goto fault;
159
160 pte = pte_offset_map(pmd, uaddr);
161 if (!pte || !pte_present(*pte)) 162 if (!pte || !pte_present(*pte))
162 goto fault; 163 goto fault;
163 164
@@ -229,8 +230,6 @@ static size_t strnlen_user_pt(size_t count, const char __user *src)
229 unsigned long uaddr = (unsigned long) src; 230 unsigned long uaddr = (unsigned long) src;
230 struct mm_struct *mm = current->mm; 231 struct mm_struct *mm = current->mm;
231 unsigned long offset, pfn, done, len; 232 unsigned long offset, pfn, done, len;
232 pgd_t *pgd;
233 pmd_t *pmd;
234 pte_t *pte; 233 pte_t *pte;
235 size_t len_str; 234 size_t len_str;
236 235
@@ -240,15 +239,7 @@ static size_t strnlen_user_pt(size_t count, const char __user *src)
240retry: 239retry:
241 spin_lock(&mm->page_table_lock); 240 spin_lock(&mm->page_table_lock);
242 do { 241 do {
243 pgd = pgd_offset(mm, uaddr); 242 pte = follow_table(mm, uaddr);
244 if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
245 goto fault;
246
247 pmd = pmd_offset(pgd, uaddr);
248 if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
249 goto fault;
250
251 pte = pte_offset_map(pmd, uaddr);
252 if (!pte || !pte_present(*pte)) 243 if (!pte || !pte_present(*pte))
253 goto fault; 244 goto fault;
254 245
@@ -308,8 +299,6 @@ static size_t copy_in_user_pt(size_t n, void __user *to,
308 uaddr, done, size; 299 uaddr, done, size;
309 unsigned long uaddr_from = (unsigned long) from; 300 unsigned long uaddr_from = (unsigned long) from;
310 unsigned long uaddr_to = (unsigned long) to; 301 unsigned long uaddr_to = (unsigned long) to;
311 pgd_t *pgd_from, *pgd_to;
312 pmd_t *pmd_from, *pmd_to;
313 pte_t *pte_from, *pte_to; 302 pte_t *pte_from, *pte_to;
314 int write_user; 303 int write_user;
315 304
@@ -317,39 +306,14 @@ static size_t copy_in_user_pt(size_t n, void __user *to,
317retry: 306retry:
318 spin_lock(&mm->page_table_lock); 307 spin_lock(&mm->page_table_lock);
319 do { 308 do {
320 pgd_from = pgd_offset(mm, uaddr_from); 309 pte_from = follow_table(mm, uaddr_from);
321 if (pgd_none(*pgd_from) || unlikely(pgd_bad(*pgd_from))) {
322 uaddr = uaddr_from;
323 write_user = 0;
324 goto fault;
325 }
326 pgd_to = pgd_offset(mm, uaddr_to);
327 if (pgd_none(*pgd_to) || unlikely(pgd_bad(*pgd_to))) {
328 uaddr = uaddr_to;
329 write_user = 1;
330 goto fault;
331 }
332
333 pmd_from = pmd_offset(pgd_from, uaddr_from);
334 if (pmd_none(*pmd_from) || unlikely(pmd_bad(*pmd_from))) {
335 uaddr = uaddr_from;
336 write_user = 0;
337 goto fault;
338 }
339 pmd_to = pmd_offset(pgd_to, uaddr_to);
340 if (pmd_none(*pmd_to) || unlikely(pmd_bad(*pmd_to))) {
341 uaddr = uaddr_to;
342 write_user = 1;
343 goto fault;
344 }
345
346 pte_from = pte_offset_map(pmd_from, uaddr_from);
347 if (!pte_from || !pte_present(*pte_from)) { 310 if (!pte_from || !pte_present(*pte_from)) {
348 uaddr = uaddr_from; 311 uaddr = uaddr_from;
349 write_user = 0; 312 write_user = 0;
350 goto fault; 313 goto fault;
351 } 314 }
352 pte_to = pte_offset_map(pmd_to, uaddr_to); 315
316 pte_to = follow_table(mm, uaddr_to);
353 if (!pte_to || !pte_present(*pte_to) || !pte_write(*pte_to)) { 317 if (!pte_to || !pte_present(*pte_to) || !pte_write(*pte_to)) {
354 uaddr = uaddr_to; 318 uaddr = uaddr_to;
355 write_user = 1; 319 write_user = 1;
diff --git a/arch/s390/mm/Makefile b/arch/s390/mm/Makefile
index f95449b29fa5..66401930f83e 100644
--- a/arch/s390/mm/Makefile
+++ b/arch/s390/mm/Makefile
@@ -2,6 +2,6 @@
2# Makefile for the linux s390-specific parts of the memory manager. 2# Makefile for the linux s390-specific parts of the memory manager.
3# 3#
4 4
5obj-y := init.o fault.o extmem.o mmap.o vmem.o 5obj-y := init.o fault.o extmem.o mmap.o vmem.o pgtable.o
6obj-$(CONFIG_CMM) += cmm.o 6obj-$(CONFIG_CMM) += cmm.o
7 7
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index 3a25bbf2eb0a..b234bb4a6da7 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -81,6 +81,7 @@ void show_mem(void)
81static void __init setup_ro_region(void) 81static void __init setup_ro_region(void)
82{ 82{
83 pgd_t *pgd; 83 pgd_t *pgd;
84 pud_t *pud;
84 pmd_t *pmd; 85 pmd_t *pmd;
85 pte_t *pte; 86 pte_t *pte;
86 pte_t new_pte; 87 pte_t new_pte;
@@ -91,7 +92,8 @@ static void __init setup_ro_region(void)
91 92
92 for (; address < end; address += PAGE_SIZE) { 93 for (; address < end; address += PAGE_SIZE) {
93 pgd = pgd_offset_k(address); 94 pgd = pgd_offset_k(address);
94 pmd = pmd_offset(pgd, address); 95 pud = pud_offset(pgd, address);
96 pmd = pmd_offset(pud, address);
95 pte = pte_offset_kernel(pmd, address); 97 pte = pte_offset_kernel(pmd, address);
96 new_pte = mk_pte_phys(address, __pgprot(_PAGE_RO)); 98 new_pte = mk_pte_phys(address, __pgprot(_PAGE_RO));
97 *pte = new_pte; 99 *pte = new_pte;
@@ -103,32 +105,28 @@ static void __init setup_ro_region(void)
103 */ 105 */
104void __init paging_init(void) 106void __init paging_init(void)
105{ 107{
106 pgd_t *pg_dir;
107 int i;
108 unsigned long pgdir_k;
109 static const int ssm_mask = 0x04000000L; 108 static const int ssm_mask = 0x04000000L;
110 unsigned long max_zone_pfns[MAX_NR_ZONES]; 109 unsigned long max_zone_pfns[MAX_NR_ZONES];
110 unsigned long pgd_type;
111 111
112 pg_dir = swapper_pg_dir; 112 init_mm.pgd = swapper_pg_dir;
113 113 S390_lowcore.kernel_asce = __pa(init_mm.pgd) & PAGE_MASK;
114#ifdef CONFIG_64BIT 114#ifdef CONFIG_64BIT
115 pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) | _KERN_REGION_TABLE; 115 S390_lowcore.kernel_asce |= _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
116 for (i = 0; i < PTRS_PER_PGD; i++) 116 pgd_type = _REGION3_ENTRY_EMPTY;
117 pgd_clear_kernel(pg_dir + i);
118#else 117#else
119 pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) | _KERNSEG_TABLE; 118 S390_lowcore.kernel_asce |= _ASCE_TABLE_LENGTH;
120 for (i = 0; i < PTRS_PER_PGD; i++) 119 pgd_type = _SEGMENT_ENTRY_EMPTY;
121 pmd_clear_kernel((pmd_t *)(pg_dir + i));
122#endif 120#endif
121 clear_table((unsigned long *) init_mm.pgd, pgd_type,
122 sizeof(unsigned long)*2048);
123 vmem_map_init(); 123 vmem_map_init();
124 setup_ro_region(); 124 setup_ro_region();
125 125
126 S390_lowcore.kernel_asce = pgdir_k;
127
128 /* enable virtual mapping in kernel mode */ 126 /* enable virtual mapping in kernel mode */
129 __ctl_load(pgdir_k, 1, 1); 127 __ctl_load(S390_lowcore.kernel_asce, 1, 1);
130 __ctl_load(pgdir_k, 7, 7); 128 __ctl_load(S390_lowcore.kernel_asce, 7, 7);
131 __ctl_load(pgdir_k, 13, 13); 129 __ctl_load(S390_lowcore.kernel_asce, 13, 13);
132 __raw_local_irq_ssm(ssm_mask); 130 __raw_local_irq_ssm(ssm_mask);
133 131
134 memset(max_zone_pfns, 0, sizeof(max_zone_pfns)); 132 memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
new file mode 100644
index 000000000000..e60e0ae13402
--- /dev/null
+++ b/arch/s390/mm/pgtable.c
@@ -0,0 +1,94 @@
1/*
2 * arch/s390/mm/pgtable.c
3 *
4 * Copyright IBM Corp. 2007
5 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
6 */
7
8#include <linux/sched.h>
9#include <linux/kernel.h>
10#include <linux/errno.h>
11#include <linux/mm.h>
12#include <linux/swap.h>
13#include <linux/smp.h>
14#include <linux/highmem.h>
15#include <linux/slab.h>
16#include <linux/pagemap.h>
17#include <linux/spinlock.h>
18#include <linux/module.h>
19#include <linux/quicklist.h>
20
21#include <asm/system.h>
22#include <asm/pgtable.h>
23#include <asm/pgalloc.h>
24#include <asm/tlb.h>
25#include <asm/tlbflush.h>
26
27#ifndef CONFIG_64BIT
28#define ALLOC_ORDER 1
29#else
30#define ALLOC_ORDER 2
31#endif
32
33unsigned long *crst_table_alloc(struct mm_struct *mm, int noexec)
34{
35 struct page *page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
36
37 if (!page)
38 return NULL;
39 page->index = 0;
40 if (noexec) {
41 struct page *shadow = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
42 if (!shadow) {
43 __free_pages(page, ALLOC_ORDER);
44 return NULL;
45 }
46 page->index = page_to_phys(shadow);
47 }
48 return (unsigned long *) page_to_phys(page);
49}
50
51void crst_table_free(unsigned long *table)
52{
53 unsigned long *shadow = get_shadow_table(table);
54
55 if (shadow)
56 free_pages((unsigned long) shadow, ALLOC_ORDER);
57 free_pages((unsigned long) table, ALLOC_ORDER);
58}
59
60/*
61 * page table entry allocation/free routines.
62 */
63unsigned long *page_table_alloc(int noexec)
64{
65 struct page *page = alloc_page(GFP_KERNEL);
66 unsigned long *table;
67
68 if (!page)
69 return NULL;
70 page->index = 0;
71 if (noexec) {
72 struct page *shadow = alloc_page(GFP_KERNEL);
73 if (!shadow) {
74 __free_page(page);
75 return NULL;
76 }
77 table = (unsigned long *) page_to_phys(shadow);
78 clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE);
79 page->index = (addr_t) table;
80 }
81 table = (unsigned long *) page_to_phys(page);
82 clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE);
83 return table;
84}
85
86void page_table_free(unsigned long *table)
87{
88 unsigned long *shadow = get_shadow_pte(table);
89
90 if (shadow)
91 free_page((unsigned long) shadow);
92 free_page((unsigned long) table);
93
94}
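
The new allocators keep an optional shadow table for the no-execute support: when noexec is set, a second table is allocated and its physical address is parked in page->index, which is presumably what get_shadow_table()/get_shadow_pte() read back on the free path. A usage sketch (error handling trimmed, the caller invented for illustration):

static int example_alloc_tables(struct mm_struct *mm, int noexec)
{
	unsigned long *crst, *ptes;

	crst = crst_table_alloc(mm, noexec);	/* region/segment table */
	if (!crst)
		return -ENOMEM;

	ptes = page_table_alloc(noexec);	/* one page worth of ptes */
	if (!ptes) {
		crst_table_free(crst);
		return -ENOMEM;
	}

	/* ... hook the tables into the address space ... */

	page_table_free(ptes);
	crst_table_free(crst);
	return 0;
}
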
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index fd594d5fe142..fb9c5a85aa56 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -73,31 +73,28 @@ static void __init_refok *vmem_alloc_pages(unsigned int order)
73 return alloc_bootmem_pages((1 << order) * PAGE_SIZE); 73 return alloc_bootmem_pages((1 << order) * PAGE_SIZE);
74} 74}
75 75
76#define vmem_pud_alloc() ({ BUG(); ((pud_t *) NULL); })
77
76static inline pmd_t *vmem_pmd_alloc(void) 78static inline pmd_t *vmem_pmd_alloc(void)
77{ 79{
78 pmd_t *pmd; 80 pmd_t *pmd = NULL;
79 int i;
80 81
81 pmd = vmem_alloc_pages(PMD_ALLOC_ORDER); 82#ifdef CONFIG_64BIT
83 pmd = vmem_alloc_pages(2);
82 if (!pmd) 84 if (!pmd)
83 return NULL; 85 return NULL;
84 for (i = 0; i < PTRS_PER_PMD; i++) 86 clear_table((unsigned long *) pmd, _SEGMENT_ENTRY_EMPTY, PAGE_SIZE*4);
85 pmd_clear_kernel(pmd + i); 87#endif
86 return pmd; 88 return pmd;
87} 89}
88 90
89static inline pte_t *vmem_pte_alloc(void) 91static inline pte_t *vmem_pte_alloc(void)
90{ 92{
91 pte_t *pte; 93 pte_t *pte = vmem_alloc_pages(0);
92 pte_t empty_pte;
93 int i;
94 94
95 pte = vmem_alloc_pages(PTE_ALLOC_ORDER);
96 if (!pte) 95 if (!pte)
97 return NULL; 96 return NULL;
98 pte_val(empty_pte) = _PAGE_TYPE_EMPTY; 97 clear_table((unsigned long *) pte, _PAGE_TYPE_EMPTY, PAGE_SIZE);
99 for (i = 0; i < PTRS_PER_PTE; i++)
100 pte[i] = empty_pte;
101 return pte; 98 return pte;
102} 99}
103 100
@@ -108,6 +105,7 @@ static int vmem_add_range(unsigned long start, unsigned long size)
108{ 105{
109 unsigned long address; 106 unsigned long address;
110 pgd_t *pg_dir; 107 pgd_t *pg_dir;
108 pud_t *pu_dir;
111 pmd_t *pm_dir; 109 pmd_t *pm_dir;
112 pte_t *pt_dir; 110 pte_t *pt_dir;
113 pte_t pte; 111 pte_t pte;
@@ -116,13 +114,21 @@ static int vmem_add_range(unsigned long start, unsigned long size)
116 for (address = start; address < start + size; address += PAGE_SIZE) { 114 for (address = start; address < start + size; address += PAGE_SIZE) {
117 pg_dir = pgd_offset_k(address); 115 pg_dir = pgd_offset_k(address);
118 if (pgd_none(*pg_dir)) { 116 if (pgd_none(*pg_dir)) {
117 pu_dir = vmem_pud_alloc();
118 if (!pu_dir)
119 goto out;
120 pgd_populate_kernel(&init_mm, pg_dir, pu_dir);
121 }
122
123 pu_dir = pud_offset(pg_dir, address);
124 if (pud_none(*pu_dir)) {
119 pm_dir = vmem_pmd_alloc(); 125 pm_dir = vmem_pmd_alloc();
120 if (!pm_dir) 126 if (!pm_dir)
121 goto out; 127 goto out;
122 pgd_populate_kernel(&init_mm, pg_dir, pm_dir); 128 pud_populate_kernel(&init_mm, pu_dir, pm_dir);
123 } 129 }
124 130
125 pm_dir = pmd_offset(pg_dir, address); 131 pm_dir = pmd_offset(pu_dir, address);
126 if (pmd_none(*pm_dir)) { 132 if (pmd_none(*pm_dir)) {
127 pt_dir = vmem_pte_alloc(); 133 pt_dir = vmem_pte_alloc();
128 if (!pt_dir) 134 if (!pt_dir)
@@ -148,6 +154,7 @@ static void vmem_remove_range(unsigned long start, unsigned long size)
148{ 154{
149 unsigned long address; 155 unsigned long address;
150 pgd_t *pg_dir; 156 pgd_t *pg_dir;
157 pud_t *pu_dir;
151 pmd_t *pm_dir; 158 pmd_t *pm_dir;
152 pte_t *pt_dir; 159 pte_t *pt_dir;
153 pte_t pte; 160 pte_t pte;
@@ -155,9 +162,10 @@ static void vmem_remove_range(unsigned long start, unsigned long size)
155 pte_val(pte) = _PAGE_TYPE_EMPTY; 162 pte_val(pte) = _PAGE_TYPE_EMPTY;
156 for (address = start; address < start + size; address += PAGE_SIZE) { 163 for (address = start; address < start + size; address += PAGE_SIZE) {
157 pg_dir = pgd_offset_k(address); 164 pg_dir = pgd_offset_k(address);
158 if (pgd_none(*pg_dir)) 165 pu_dir = pud_offset(pg_dir, address);
166 if (pud_none(*pu_dir))
159 continue; 167 continue;
160 pm_dir = pmd_offset(pg_dir, address); 168 pm_dir = pmd_offset(pu_dir, address);
161 if (pmd_none(*pm_dir)) 169 if (pmd_none(*pm_dir))
162 continue; 170 continue;
163 pt_dir = pte_offset_kernel(pm_dir, address); 171 pt_dir = pte_offset_kernel(pm_dir, address);
@@ -174,6 +182,7 @@ static int vmem_add_mem_map(unsigned long start, unsigned long size)
174 unsigned long address, start_addr, end_addr; 182 unsigned long address, start_addr, end_addr;
175 struct page *map_start, *map_end; 183 struct page *map_start, *map_end;
176 pgd_t *pg_dir; 184 pgd_t *pg_dir;
185 pud_t *pu_dir;
177 pmd_t *pm_dir; 186 pmd_t *pm_dir;
178 pte_t *pt_dir; 187 pte_t *pt_dir;
179 pte_t pte; 188 pte_t pte;
@@ -188,13 +197,21 @@ static int vmem_add_mem_map(unsigned long start, unsigned long size)
188 for (address = start_addr; address < end_addr; address += PAGE_SIZE) { 197 for (address = start_addr; address < end_addr; address += PAGE_SIZE) {
189 pg_dir = pgd_offset_k(address); 198 pg_dir = pgd_offset_k(address);
190 if (pgd_none(*pg_dir)) { 199 if (pgd_none(*pg_dir)) {
200 pu_dir = vmem_pud_alloc();
201 if (!pu_dir)
202 goto out;
203 pgd_populate_kernel(&init_mm, pg_dir, pu_dir);
204 }
205
206 pu_dir = pud_offset(pg_dir, address);
207 if (pud_none(*pu_dir)) {
191 pm_dir = vmem_pmd_alloc(); 208 pm_dir = vmem_pmd_alloc();
192 if (!pm_dir) 209 if (!pm_dir)
193 goto out; 210 goto out;
194 pgd_populate_kernel(&init_mm, pg_dir, pm_dir); 211 pud_populate_kernel(&init_mm, pu_dir, pm_dir);
195 } 212 }
196 213
197 pm_dir = pmd_offset(pg_dir, address); 214 pm_dir = pmd_offset(pu_dir, address);
198 if (pmd_none(*pm_dir)) { 215 if (pmd_none(*pm_dir)) {
199 pt_dir = vmem_pte_alloc(); 216 pt_dir = vmem_pte_alloc();
200 if (!pt_dir) 217 if (!pt_dir)
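The vmem.c hunks above insert a pud level between pgd and pmd in every kernel-mapping walk. For reference only, the resulting four-level lookup pattern written as a standalone helper; lookup_kernel_pte() is a made-up name, not part of the patch, and pud_offset() collapses to a no-op on configurations where the pud level is folded into the pgd.

#include <linux/mm.h>
#include <asm/pgtable.h>

/* Illustrative: resolve a kernel virtual address to its pte, walking
 * pgd -> pud -> pmd -> pte as the patched vmem code now does. */
static pte_t *lookup_kernel_pte(unsigned long address)
{
        pgd_t *pgd = pgd_offset_k(address);
        pud_t *pud;
        pmd_t *pmd;

        if (pgd_none(*pgd))
                return NULL;
        pud = pud_offset(pgd, address);
        if (pud_none(*pud))
                return NULL;
        pmd = pmd_offset(pud, address);
        if (pmd_none(*pmd))
                return NULL;
        return pte_offset_kernel(pmd, address);
}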
diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c
index 9c3ed88853f3..97aa50d1e4ae 100644
--- a/arch/sparc/kernel/ioport.c
+++ b/arch/sparc/kernel/ioport.c
@@ -727,9 +727,8 @@ int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sgl, int nents,
727 BUG_ON(direction == PCI_DMA_NONE); 727 BUG_ON(direction == PCI_DMA_NONE);
728 /* IIep is write-through, not flushing. */ 728 /* IIep is write-through, not flushing. */
729 for_each_sg(sgl, sg, nents, n) { 729 for_each_sg(sgl, sg, nents, n) {
730 BUG_ON(page_address(sg->page) == NULL); 730 BUG_ON(page_address(sg_page(sg)) == NULL);
731 sg->dvma_address = 731 sg->dvma_address = virt_to_phys(sg_virt(sg));
732 virt_to_phys(page_address(sg->page)) + sg->offset;
733 sg->dvma_length = sg->length; 732 sg->dvma_length = sg->length;
734 } 733 }
735 return nents; 734 return nents;
@@ -748,9 +747,9 @@ void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sgl, int nents,
748 BUG_ON(direction == PCI_DMA_NONE); 747 BUG_ON(direction == PCI_DMA_NONE);
749 if (direction != PCI_DMA_TODEVICE) { 748 if (direction != PCI_DMA_TODEVICE) {
750 for_each_sg(sgl, sg, nents, n) { 749 for_each_sg(sgl, sg, nents, n) {
751 BUG_ON(page_address(sg->page) == NULL); 750 BUG_ON(page_address(sg_page(sg)) == NULL);
752 mmu_inval_dma_area( 751 mmu_inval_dma_area(
753 (unsigned long) page_address(sg->page), 752 (unsigned long) page_address(sg_page(sg)),
754 (sg->length + PAGE_SIZE-1) & PAGE_MASK); 753 (sg->length + PAGE_SIZE-1) & PAGE_MASK);
755 } 754 }
756 } 755 }
@@ -798,9 +797,9 @@ void pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sgl, int
798 BUG_ON(direction == PCI_DMA_NONE); 797 BUG_ON(direction == PCI_DMA_NONE);
799 if (direction != PCI_DMA_TODEVICE) { 798 if (direction != PCI_DMA_TODEVICE) {
800 for_each_sg(sgl, sg, nents, n) { 799 for_each_sg(sgl, sg, nents, n) {
801 BUG_ON(page_address(sg->page) == NULL); 800 BUG_ON(page_address(sg_page(sg)) == NULL);
802 mmu_inval_dma_area( 801 mmu_inval_dma_area(
803 (unsigned long) page_address(sg->page), 802 (unsigned long) page_address(sg_page(sg)),
804 (sg->length + PAGE_SIZE-1) & PAGE_MASK); 803 (sg->length + PAGE_SIZE-1) & PAGE_MASK);
805 } 804 }
806 } 805 }
@@ -814,9 +813,9 @@ void pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sgl,
814 BUG_ON(direction == PCI_DMA_NONE); 813 BUG_ON(direction == PCI_DMA_NONE);
815 if (direction != PCI_DMA_TODEVICE) { 814 if (direction != PCI_DMA_TODEVICE) {
816 for_each_sg(sgl, sg, nents, n) { 815 for_each_sg(sgl, sg, nents, n) {
817 BUG_ON(page_address(sg->page) == NULL); 816 BUG_ON(page_address(sg_page(sg)) == NULL);
818 mmu_inval_dma_area( 817 mmu_inval_dma_area(
819 (unsigned long) page_address(sg->page), 818 (unsigned long) page_address(sg_page(sg)),
820 (sg->length + PAGE_SIZE-1) & PAGE_MASK); 819 (sg->length + PAGE_SIZE-1) & PAGE_MASK);
821 } 820 }
822 } 821 }
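The scatterlist changes in this and the following sparc files all rest on one identity: sg_virt(sg) is defined as page_address(sg_page(sg)) + sg->offset, so the conversions are mechanical. A throwaway check that states the equivalence; check_sg_virt() is illustrative, not part of the patch, and only meaningful for lowmem pages where page_address() is non-NULL.

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>

static void *check_sg_virt(struct scatterlist *sg)
{
        void *old_style = page_address(sg_page(sg)) + sg->offset;  /* pre-patch form  */
        void *new_style = sg_virt(sg);                             /* post-patch form */

        BUG_ON(old_style != new_style);
        return new_style;
}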
diff --git a/arch/sparc/mm/io-unit.c b/arch/sparc/mm/io-unit.c
index 375b4db63704..1666087c5b80 100644
--- a/arch/sparc/mm/io-unit.c
+++ b/arch/sparc/mm/io-unit.c
@@ -144,7 +144,7 @@ static void iounit_get_scsi_sgl(struct scatterlist *sg, int sz, struct sbus_bus
144 spin_lock_irqsave(&iounit->lock, flags); 144 spin_lock_irqsave(&iounit->lock, flags);
145 while (sz != 0) { 145 while (sz != 0) {
146 --sz; 146 --sz;
147 sg->dvma_address = iounit_get_area(iounit, (unsigned long)page_address(sg->page) + sg->offset, sg->length); 147 sg->dvma_address = iounit_get_area(iounit, sg_virt(sg), sg->length);
148 sg->dvma_length = sg->length; 148 sg->dvma_length = sg->length;
149 sg = sg_next(sg); 149 sg = sg_next(sg);
150 } 150 }
diff --git a/arch/sparc/mm/iommu.c b/arch/sparc/mm/iommu.c
index 283656d9f6ea..4b934270f05e 100644
--- a/arch/sparc/mm/iommu.c
+++ b/arch/sparc/mm/iommu.c
@@ -238,7 +238,7 @@ static void iommu_get_scsi_sgl_noflush(struct scatterlist *sg, int sz, struct sb
238 while (sz != 0) { 238 while (sz != 0) {
239 --sz; 239 --sz;
240 n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT; 240 n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;
241 sg->dvma_address = iommu_get_one(sg->page, n, sbus) + sg->offset; 241 sg->dvma_address = iommu_get_one(sg_page(sg), n, sbus) + sg->offset;
242 sg->dvma_length = (__u32) sg->length; 242 sg->dvma_length = (__u32) sg->length;
243 sg = sg_next(sg); 243 sg = sg_next(sg);
244 } 244 }
@@ -252,7 +252,7 @@ static void iommu_get_scsi_sgl_gflush(struct scatterlist *sg, int sz, struct sbu
252 while (sz != 0) { 252 while (sz != 0) {
253 --sz; 253 --sz;
254 n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT; 254 n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;
255 sg->dvma_address = iommu_get_one(sg->page, n, sbus) + sg->offset; 255 sg->dvma_address = iommu_get_one(sg_page(sg), n, sbus) + sg->offset;
256 sg->dvma_length = (__u32) sg->length; 256 sg->dvma_length = (__u32) sg->length;
257 sg = sg_next(sg); 257 sg = sg_next(sg);
258 } 258 }
@@ -273,7 +273,7 @@ static void iommu_get_scsi_sgl_pflush(struct scatterlist *sg, int sz, struct sbu
273 * XXX Is this a good assumption? 273 * XXX Is this a good assumption?
274 * XXX What if someone else unmaps it here and races us? 274 * XXX What if someone else unmaps it here and races us?
275 */ 275 */
276 if ((page = (unsigned long) page_address(sg->page)) != 0) { 276 if ((page = (unsigned long) page_address(sg_page(sg))) != 0) {
277 for (i = 0; i < n; i++) { 277 for (i = 0; i < n; i++) {
278 if (page != oldpage) { /* Already flushed? */ 278 if (page != oldpage) { /* Already flushed? */
279 flush_page_for_dma(page); 279 flush_page_for_dma(page);
@@ -283,7 +283,7 @@ static void iommu_get_scsi_sgl_pflush(struct scatterlist *sg, int sz, struct sbu
283 } 283 }
284 } 284 }
285 285
286 sg->dvma_address = iommu_get_one(sg->page, n, sbus) + sg->offset; 286 sg->dvma_address = iommu_get_one(sg_page(sg), n, sbus) + sg->offset;
287 sg->dvma_length = (__u32) sg->length; 287 sg->dvma_length = (__u32) sg->length;
288 sg = sg_next(sg); 288 sg = sg_next(sg);
289 } 289 }
diff --git a/arch/sparc/mm/sun4c.c b/arch/sparc/mm/sun4c.c
index ee6708fc4492..a2cc141291c7 100644
--- a/arch/sparc/mm/sun4c.c
+++ b/arch/sparc/mm/sun4c.c
@@ -1228,7 +1228,7 @@ static void sun4c_get_scsi_sgl(struct scatterlist *sg, int sz, struct sbus_bus *
1228{ 1228{
1229 while (sz != 0) { 1229 while (sz != 0) {
1230 --sz; 1230 --sz;
1231 sg->dvma_address = (__u32)sun4c_lockarea(page_address(sg->page) + sg->offset, sg->length); 1231 sg->dvma_address = (__u32)sun4c_lockarea(sg_virt(sg), sg->length);
1232 sg->dvma_length = sg->length; 1232 sg->dvma_length = sg->length;
1233 sg = sg_next(sg); 1233 sg = sg_next(sg);
1234 } 1234 }
diff --git a/arch/sparc64/Kconfig b/arch/sparc64/Kconfig
index c7a74e376985..03c4e5c1b94a 100644
--- a/arch/sparc64/Kconfig
+++ b/arch/sparc64/Kconfig
@@ -72,6 +72,10 @@ config ARCH_NO_VIRT_TO_BUS
72config OF 72config OF
73 def_bool y 73 def_bool y
74 74
75config GENERIC_HARDIRQS_NO__DO_IRQ
76 bool
77 def_bool y
78
75choice 79choice
76 prompt "Kernel page size" 80 prompt "Kernel page size"
77 default SPARC64_PAGE_SIZE_8KB 81 default SPARC64_PAGE_SIZE_8KB
diff --git a/arch/sparc64/Makefile b/arch/sparc64/Makefile
index 6c92a42efe76..01159cb5f16d 100644
--- a/arch/sparc64/Makefile
+++ b/arch/sparc64/Makefile
@@ -18,8 +18,6 @@ NEW_GCC := $(call cc-option-yn, -m64 -mcmodel=medlow)
18NEW_GAS := $(shell if $(LD) -V 2>&1 | grep 'elf64_sparc' > /dev/null; then echo y; else echo n; fi) 18NEW_GAS := $(shell if $(LD) -V 2>&1 | grep 'elf64_sparc' > /dev/null; then echo y; else echo n; fi)
19UNDECLARED_REGS := $(shell if $(CC) -c -x assembler /dev/null -Wa,--help | grep undeclared-regs > /dev/null; then echo y; else echo n; fi; ) 19UNDECLARED_REGS := $(shell if $(CC) -c -x assembler /dev/null -Wa,--help | grep undeclared-regs > /dev/null; then echo y; else echo n; fi; )
20 20
21export NEW_GCC
22
23ifneq ($(NEW_GAS),y) 21ifneq ($(NEW_GAS),y)
24AS = sparc64-linux-as 22AS = sparc64-linux-as
25LD = sparc64-linux-ld 23LD = sparc64-linux-ld
@@ -58,8 +56,6 @@ core-y += arch/sparc64/kernel/ arch/sparc64/mm/
58core-$(CONFIG_SOLARIS_EMUL) += arch/sparc64/solaris/ 56core-$(CONFIG_SOLARIS_EMUL) += arch/sparc64/solaris/
59core-y += arch/sparc64/math-emu/ 57core-y += arch/sparc64/math-emu/
60libs-y += arch/sparc64/prom/ arch/sparc64/lib/ 58libs-y += arch/sparc64/prom/ arch/sparc64/lib/
61
62# FIXME: is drivers- right?
63drivers-$(CONFIG_OPROFILE) += arch/sparc64/oprofile/ 59drivers-$(CONFIG_OPROFILE) += arch/sparc64/oprofile/
64 60
65boot := arch/sparc64/boot 61boot := arch/sparc64/boot
diff --git a/arch/sparc64/defconfig b/arch/sparc64/defconfig
index 1aa2c4048e4b..e023d4b2fef4 100644
--- a/arch/sparc64/defconfig
+++ b/arch/sparc64/defconfig
@@ -1,7 +1,7 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.23 3# Linux kernel version: 2.6.23
4# Sat Oct 13 21:53:54 2007 4# Sun Oct 21 19:57:44 2007
5# 5#
6CONFIG_SPARC=y 6CONFIG_SPARC=y
7CONFIG_SPARC64=y 7CONFIG_SPARC64=y
@@ -49,6 +49,10 @@ CONFIG_POSIX_MQUEUE=y
49# CONFIG_AUDIT is not set 49# CONFIG_AUDIT is not set
50# CONFIG_IKCONFIG is not set 50# CONFIG_IKCONFIG is not set
51CONFIG_LOG_BUF_SHIFT=18 51CONFIG_LOG_BUF_SHIFT=18
52# CONFIG_CGROUPS is not set
53CONFIG_FAIR_GROUP_SCHED=y
54CONFIG_FAIR_USER_SCHED=y
55# CONFIG_FAIR_CGROUP_SCHED is not set
52CONFIG_SYSFS_DEPRECATED=y 56CONFIG_SYSFS_DEPRECATED=y
53CONFIG_RELAY=y 57CONFIG_RELAY=y
54# CONFIG_BLK_DEV_INITRD is not set 58# CONFIG_BLK_DEV_INITRD is not set
@@ -145,7 +149,10 @@ CONFIG_SELECT_MEMORY_MODEL=y
145CONFIG_SPARSEMEM_MANUAL=y 149CONFIG_SPARSEMEM_MANUAL=y
146CONFIG_SPARSEMEM=y 150CONFIG_SPARSEMEM=y
147CONFIG_HAVE_MEMORY_PRESENT=y 151CONFIG_HAVE_MEMORY_PRESENT=y
148CONFIG_SPARSEMEM_STATIC=y 152# CONFIG_SPARSEMEM_STATIC is not set
153CONFIG_SPARSEMEM_EXTREME=y
154CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y
155CONFIG_SPARSEMEM_VMEMMAP=y
149CONFIG_SPLIT_PTLOCK_CPUS=4 156CONFIG_SPLIT_PTLOCK_CPUS=4
150CONFIG_RESOURCES_64BIT=y 157CONFIG_RESOURCES_64BIT=y
151CONFIG_ZONE_DMA_FLAG=0 158CONFIG_ZONE_DMA_FLAG=0
@@ -275,10 +282,6 @@ CONFIG_VLAN_8021Q=m
275# CONFIG_LAPB is not set 282# CONFIG_LAPB is not set
276# CONFIG_ECONET is not set 283# CONFIG_ECONET is not set
277# CONFIG_WAN_ROUTER is not set 284# CONFIG_WAN_ROUTER is not set
278
279#
280# QoS and/or fair queueing
281#
282# CONFIG_NET_SCHED is not set 285# CONFIG_NET_SCHED is not set
283 286
284# 287#
@@ -372,8 +375,6 @@ CONFIG_IDEPCI_PCIBUS_ORDER=y
372# CONFIG_BLK_DEV_GENERIC is not set 375# CONFIG_BLK_DEV_GENERIC is not set
373# CONFIG_BLK_DEV_OPTI621 is not set 376# CONFIG_BLK_DEV_OPTI621 is not set
374CONFIG_BLK_DEV_IDEDMA_PCI=y 377CONFIG_BLK_DEV_IDEDMA_PCI=y
375# CONFIG_BLK_DEV_IDEDMA_FORCED is not set
376CONFIG_IDEDMA_ONLYDISK=y
377# CONFIG_BLK_DEV_AEC62XX is not set 378# CONFIG_BLK_DEV_AEC62XX is not set
378CONFIG_BLK_DEV_ALI15X3=y 379CONFIG_BLK_DEV_ALI15X3=y
379# CONFIG_WDC_ALI15X3 is not set 380# CONFIG_WDC_ALI15X3 is not set
@@ -401,6 +402,7 @@ CONFIG_BLK_DEV_ALI15X3=y
401# CONFIG_BLK_DEV_TC86C001 is not set 402# CONFIG_BLK_DEV_TC86C001 is not set
402# CONFIG_IDE_ARM is not set 403# CONFIG_IDE_ARM is not set
403CONFIG_BLK_DEV_IDEDMA=y 404CONFIG_BLK_DEV_IDEDMA=y
405CONFIG_IDE_ARCH_OBSOLETE_INIT=y
404# CONFIG_BLK_DEV_HD is not set 406# CONFIG_BLK_DEV_HD is not set
405 407
406# 408#
@@ -441,6 +443,7 @@ CONFIG_SCSI_FC_ATTRS=y
441CONFIG_SCSI_ISCSI_ATTRS=m 443CONFIG_SCSI_ISCSI_ATTRS=m
442# CONFIG_SCSI_SAS_ATTRS is not set 444# CONFIG_SCSI_SAS_ATTRS is not set
443# CONFIG_SCSI_SAS_LIBSAS is not set 445# CONFIG_SCSI_SAS_LIBSAS is not set
446# CONFIG_SCSI_SRP_ATTRS is not set
444CONFIG_SCSI_LOWLEVEL=y 447CONFIG_SCSI_LOWLEVEL=y
445CONFIG_ISCSI_TCP=m 448CONFIG_ISCSI_TCP=m
446# CONFIG_BLK_DEV_3W_XXXX_RAID is not set 449# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
@@ -492,14 +495,8 @@ CONFIG_DM_MIRROR=m
492CONFIG_DM_ZERO=m 495CONFIG_DM_ZERO=m
493# CONFIG_DM_MULTIPATH is not set 496# CONFIG_DM_MULTIPATH is not set
494# CONFIG_DM_DELAY is not set 497# CONFIG_DM_DELAY is not set
495 498# CONFIG_DM_UEVENT is not set
496#
497# Fusion MPT device support
498#
499# CONFIG_FUSION is not set 499# CONFIG_FUSION is not set
500# CONFIG_FUSION_SPI is not set
501# CONFIG_FUSION_FC is not set
502# CONFIG_FUSION_SAS is not set
503 500
504# 501#
505# IEEE 1394 (FireWire) support 502# IEEE 1394 (FireWire) support
@@ -638,7 +635,6 @@ CONFIG_INPUT_MOUSEDEV_PSAUX=y
638CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024 635CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
639CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768 636CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
640# CONFIG_INPUT_JOYDEV is not set 637# CONFIG_INPUT_JOYDEV is not set
641# CONFIG_INPUT_TSDEV is not set
642CONFIG_INPUT_EVDEV=y 638CONFIG_INPUT_EVDEV=y
643# CONFIG_INPUT_EVBUG is not set 639# CONFIG_INPUT_EVBUG is not set
644 640
@@ -714,11 +710,9 @@ CONFIG_SERIAL_CORE_CONSOLE=y
714CONFIG_UNIX98_PTYS=y 710CONFIG_UNIX98_PTYS=y
715# CONFIG_LEGACY_PTYS is not set 711# CONFIG_LEGACY_PTYS is not set
716# CONFIG_IPMI_HANDLER is not set 712# CONFIG_IPMI_HANDLER is not set
717# CONFIG_WATCHDOG is not set
718# CONFIG_HW_RANDOM is not set 713# CONFIG_HW_RANDOM is not set
719# CONFIG_R3964 is not set 714# CONFIG_R3964 is not set
720# CONFIG_APPLICOM is not set 715# CONFIG_APPLICOM is not set
721# CONFIG_DRM is not set
722# CONFIG_RAW_DRIVER is not set 716# CONFIG_RAW_DRIVER is not set
723# CONFIG_TCG_TPM is not set 717# CONFIG_TCG_TPM is not set
724CONFIG_DEVPORT=y 718CONFIG_DEVPORT=y
@@ -786,8 +780,6 @@ CONFIG_I2C_ALGOBIT=y
786# CONFIG_POWER_SUPPLY is not set 780# CONFIG_POWER_SUPPLY is not set
787CONFIG_HWMON=y 781CONFIG_HWMON=y
788# CONFIG_HWMON_VID is not set 782# CONFIG_HWMON_VID is not set
789# CONFIG_SENSORS_ABITUGURU is not set
790# CONFIG_SENSORS_ABITUGURU3 is not set
791# CONFIG_SENSORS_AD7418 is not set 783# CONFIG_SENSORS_AD7418 is not set
792# CONFIG_SENSORS_ADM1021 is not set 784# CONFIG_SENSORS_ADM1021 is not set
793# CONFIG_SENSORS_ADM1025 is not set 785# CONFIG_SENSORS_ADM1025 is not set
@@ -795,12 +787,12 @@ CONFIG_HWMON=y
795# CONFIG_SENSORS_ADM1029 is not set 787# CONFIG_SENSORS_ADM1029 is not set
796# CONFIG_SENSORS_ADM1031 is not set 788# CONFIG_SENSORS_ADM1031 is not set
797# CONFIG_SENSORS_ADM9240 is not set 789# CONFIG_SENSORS_ADM9240 is not set
798# CONFIG_SENSORS_ASB100 is not set 790# CONFIG_SENSORS_ADT7470 is not set
799# CONFIG_SENSORS_ATXP1 is not set 791# CONFIG_SENSORS_ATXP1 is not set
800# CONFIG_SENSORS_DS1621 is not set 792# CONFIG_SENSORS_DS1621 is not set
801# CONFIG_SENSORS_F71805F is not set 793# CONFIG_SENSORS_F71805F is not set
802# CONFIG_SENSORS_FSCHER is not set 794# CONFIG_SENSORS_F71882FG is not set
803# CONFIG_SENSORS_FSCPOS is not set 795# CONFIG_SENSORS_F75375S is not set
804# CONFIG_SENSORS_GL518SM is not set 796# CONFIG_SENSORS_GL518SM is not set
805# CONFIG_SENSORS_GL520SM is not set 797# CONFIG_SENSORS_GL520SM is not set
806# CONFIG_SENSORS_IT87 is not set 798# CONFIG_SENSORS_IT87 is not set
@@ -836,6 +828,7 @@ CONFIG_HWMON=y
836# CONFIG_SENSORS_W83627HF is not set 828# CONFIG_SENSORS_W83627HF is not set
837# CONFIG_SENSORS_W83627EHF is not set 829# CONFIG_SENSORS_W83627EHF is not set
838# CONFIG_HWMON_DEBUG_CHIP is not set 830# CONFIG_HWMON_DEBUG_CHIP is not set
831# CONFIG_WATCHDOG is not set
839 832
840# 833#
841# Sonics Silicon Backplane 834# Sonics Silicon Backplane
@@ -858,12 +851,7 @@ CONFIG_SSB_POSSIBLE=y
858# 851#
859# Graphics support 852# Graphics support
860# 853#
861# CONFIG_BACKLIGHT_LCD_SUPPORT is not set 854# CONFIG_DRM is not set
862
863#
864# Display device support
865#
866# CONFIG_DISPLAY_SUPPORT is not set
867# CONFIG_VGASTATE is not set 855# CONFIG_VGASTATE is not set
868# CONFIG_VIDEO_OUTPUT_CONTROL is not set 856# CONFIG_VIDEO_OUTPUT_CONTROL is not set
869CONFIG_FB=y 857CONFIG_FB=y
@@ -872,6 +860,7 @@ CONFIG_FB_DDC=y
872CONFIG_FB_CFB_FILLRECT=y 860CONFIG_FB_CFB_FILLRECT=y
873CONFIG_FB_CFB_COPYAREA=y 861CONFIG_FB_CFB_COPYAREA=y
874CONFIG_FB_CFB_IMAGEBLIT=y 862CONFIG_FB_CFB_IMAGEBLIT=y
863# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set
875# CONFIG_FB_SYS_FILLRECT is not set 864# CONFIG_FB_SYS_FILLRECT is not set
876# CONFIG_FB_SYS_COPYAREA is not set 865# CONFIG_FB_SYS_COPYAREA is not set
877# CONFIG_FB_SYS_IMAGEBLIT is not set 866# CONFIG_FB_SYS_IMAGEBLIT is not set
@@ -890,6 +879,7 @@ CONFIG_FB_TILEBLITTING=y
890# CONFIG_FB_PM2 is not set 879# CONFIG_FB_PM2 is not set
891# CONFIG_FB_ASILIANT is not set 880# CONFIG_FB_ASILIANT is not set
892# CONFIG_FB_IMSTT is not set 881# CONFIG_FB_IMSTT is not set
882# CONFIG_FB_UVESA is not set
893# CONFIG_FB_SBUS is not set 883# CONFIG_FB_SBUS is not set
894# CONFIG_FB_XVR500 is not set 884# CONFIG_FB_XVR500 is not set
895# CONFIG_FB_XVR2500 is not set 885# CONFIG_FB_XVR2500 is not set
@@ -915,6 +905,12 @@ CONFIG_FB_RADEON_I2C=y
915# CONFIG_FB_ARK is not set 905# CONFIG_FB_ARK is not set
916# CONFIG_FB_PM3 is not set 906# CONFIG_FB_PM3 is not set
917# CONFIG_FB_VIRTUAL is not set 907# CONFIG_FB_VIRTUAL is not set
908# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
909
910#
911# Display device support
912#
913# CONFIG_DISPLAY_SUPPORT is not set
918 914
919# 915#
920# Console display driver support 916# Console display driver support
@@ -1066,6 +1062,7 @@ CONFIG_AC97_BUS=m
1066CONFIG_HID_SUPPORT=y 1062CONFIG_HID_SUPPORT=y
1067CONFIG_HID=y 1063CONFIG_HID=y
1068# CONFIG_HID_DEBUG is not set 1064# CONFIG_HID_DEBUG is not set
1065# CONFIG_HIDRAW is not set
1069 1066
1070# 1067#
1071# USB Input Devices 1068# USB Input Devices
@@ -1187,19 +1184,6 @@ CONFIG_USB_STORAGE=m
1187# CONFIG_RTC_CLASS is not set 1184# CONFIG_RTC_CLASS is not set
1188 1185
1189# 1186#
1190# DMA Engine support
1191#
1192# CONFIG_DMA_ENGINE is not set
1193
1194#
1195# DMA Clients
1196#
1197
1198#
1199# DMA Devices
1200#
1201
1202#
1203# Userspace I/O 1187# Userspace I/O
1204# 1188#
1205# CONFIG_UIO is not set 1189# CONFIG_UIO is not set
@@ -1275,7 +1259,6 @@ CONFIG_TMPFS=y
1275# CONFIG_TMPFS_POSIX_ACL is not set 1259# CONFIG_TMPFS_POSIX_ACL is not set
1276CONFIG_HUGETLBFS=y 1260CONFIG_HUGETLBFS=y
1277CONFIG_HUGETLB_PAGE=y 1261CONFIG_HUGETLB_PAGE=y
1278CONFIG_RAMFS=y
1279# CONFIG_CONFIGFS_FS is not set 1262# CONFIG_CONFIGFS_FS is not set
1280 1263
1281# 1264#
@@ -1295,10 +1278,7 @@ CONFIG_RAMFS=y
1295# CONFIG_QNX4FS_FS is not set 1278# CONFIG_QNX4FS_FS is not set
1296# CONFIG_SYSV_FS is not set 1279# CONFIG_SYSV_FS is not set
1297# CONFIG_UFS_FS is not set 1280# CONFIG_UFS_FS is not set
1298 1281CONFIG_NETWORK_FILESYSTEMS=y
1299#
1300# Network File Systems
1301#
1302# CONFIG_NFS_FS is not set 1282# CONFIG_NFS_FS is not set
1303# CONFIG_NFSD is not set 1283# CONFIG_NFSD is not set
1304# CONFIG_SMB_FS is not set 1284# CONFIG_SMB_FS is not set
@@ -1313,10 +1293,6 @@ CONFIG_RAMFS=y
1313# CONFIG_PARTITION_ADVANCED is not set 1293# CONFIG_PARTITION_ADVANCED is not set
1314CONFIG_MSDOS_PARTITION=y 1294CONFIG_MSDOS_PARTITION=y
1315CONFIG_SUN_PARTITION=y 1295CONFIG_SUN_PARTITION=y
1316
1317#
1318# Native Language Support
1319#
1320CONFIG_NLS=m 1296CONFIG_NLS=m
1321CONFIG_NLS_DEFAULT="iso8859-1" 1297CONFIG_NLS_DEFAULT="iso8859-1"
1322# CONFIG_NLS_CODEPAGE_437 is not set 1298# CONFIG_NLS_CODEPAGE_437 is not set
@@ -1357,18 +1333,12 @@ CONFIG_NLS_DEFAULT="iso8859-1"
1357# CONFIG_NLS_KOI8_R is not set 1333# CONFIG_NLS_KOI8_R is not set
1358# CONFIG_NLS_KOI8_U is not set 1334# CONFIG_NLS_KOI8_U is not set
1359# CONFIG_NLS_UTF8 is not set 1335# CONFIG_NLS_UTF8 is not set
1360
1361#
1362# Distributed Lock Manager
1363#
1364# CONFIG_DLM is not set 1336# CONFIG_DLM is not set
1365 1337CONFIG_INSTRUMENTATION=y
1366#
1367# Instrumentation Support
1368#
1369CONFIG_PROFILING=y 1338CONFIG_PROFILING=y
1370CONFIG_OPROFILE=m 1339CONFIG_OPROFILE=m
1371CONFIG_KPROBES=y 1340CONFIG_KPROBES=y
1341# CONFIG_MARKERS is not set
1372 1342
1373# 1343#
1374# Kernel hacking 1344# Kernel hacking
@@ -1402,9 +1372,11 @@ CONFIG_DEBUG_BUGVERBOSE=y
1402# CONFIG_DEBUG_VM is not set 1372# CONFIG_DEBUG_VM is not set
1403# CONFIG_DEBUG_LIST is not set 1373# CONFIG_DEBUG_LIST is not set
1404CONFIG_FORCED_INLINING=y 1374CONFIG_FORCED_INLINING=y
1375# CONFIG_BOOT_PRINTK_DELAY is not set
1405# CONFIG_RCU_TORTURE_TEST is not set 1376# CONFIG_RCU_TORTURE_TEST is not set
1406# CONFIG_LKDTM is not set 1377# CONFIG_LKDTM is not set
1407# CONFIG_FAULT_INJECTION is not set 1378# CONFIG_FAULT_INJECTION is not set
1379# CONFIG_SAMPLES is not set
1408# CONFIG_DEBUG_STACK_USAGE is not set 1380# CONFIG_DEBUG_STACK_USAGE is not set
1409# CONFIG_DEBUG_DCFLUSH is not set 1381# CONFIG_DEBUG_DCFLUSH is not set
1410# CONFIG_STACK_DEBUG is not set 1382# CONFIG_STACK_DEBUG is not set
@@ -1417,6 +1389,7 @@ CONFIG_FORCED_INLINING=y
1417CONFIG_KEYS=y 1389CONFIG_KEYS=y
1418# CONFIG_KEYS_DEBUG_PROC_KEYS is not set 1390# CONFIG_KEYS_DEBUG_PROC_KEYS is not set
1419# CONFIG_SECURITY is not set 1391# CONFIG_SECURITY is not set
1392# CONFIG_SECURITY_FILE_CAPABILITIES is not set
1420CONFIG_XOR_BLOCKS=m 1393CONFIG_XOR_BLOCKS=m
1421CONFIG_ASYNC_CORE=m 1394CONFIG_ASYNC_CORE=m
1422CONFIG_ASYNC_MEMCPY=m 1395CONFIG_ASYNC_MEMCPY=m
diff --git a/arch/sparc64/kernel/Makefile b/arch/sparc64/kernel/Makefile
index 112c46e66578..ef50d217432f 100644
--- a/arch/sparc64/kernel/Makefile
+++ b/arch/sparc64/kernel/Makefile
@@ -39,12 +39,3 @@ else
39 obj-y += sys_sunos32.o sunos_ioctl32.o 39 obj-y += sys_sunos32.o sunos_ioctl32.o
40 endif 40 endif
41endif 41endif
42
43ifneq ($(NEW_GCC),y)
44 CMODEL_CFLAG := -mmedlow
45else
46 CMODEL_CFLAG := -m64 -mcmodel=medlow
47endif
48
49head.o: head.S ttable.S itlb_miss.S dtlb_miss.S ktlb.S tsb.S \
50 etrap.S rtrap.S winfixup.S entry.S
diff --git a/arch/sparc64/kernel/iommu.c b/arch/sparc64/kernel/iommu.c
index 29af777d7ac9..070a4846c0cb 100644
--- a/arch/sparc64/kernel/iommu.c
+++ b/arch/sparc64/kernel/iommu.c
@@ -472,8 +472,7 @@ static void dma_4u_unmap_single(struct device *dev, dma_addr_t bus_addr,
472 spin_unlock_irqrestore(&iommu->lock, flags); 472 spin_unlock_irqrestore(&iommu->lock, flags);
473} 473}
474 474
475#define SG_ENT_PHYS_ADDRESS(SG) \ 475#define SG_ENT_PHYS_ADDRESS(SG) (__pa(sg_virt((SG))))
476 (__pa(page_address((SG)->page)) + (SG)->offset)
477 476
478static void fill_sg(iopte_t *iopte, struct scatterlist *sg, 477static void fill_sg(iopte_t *iopte, struct scatterlist *sg,
479 int nused, int nelems, 478 int nused, int nelems,
@@ -565,9 +564,7 @@ static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
565 /* Fast path single entry scatterlists. */ 564 /* Fast path single entry scatterlists. */
566 if (nelems == 1) { 565 if (nelems == 1) {
567 sglist->dma_address = 566 sglist->dma_address =
568 dma_4u_map_single(dev, 567 dma_4u_map_single(dev, sg_virt(sglist),
569 (page_address(sglist->page) +
570 sglist->offset),
571 sglist->length, direction); 568 sglist->length, direction);
572 if (unlikely(sglist->dma_address == DMA_ERROR_CODE)) 569 if (unlikely(sglist->dma_address == DMA_ERROR_CODE))
573 return 0; 570 return 0;
diff --git a/arch/sparc64/kernel/iommu_common.c b/arch/sparc64/kernel/iommu_common.c
index d7ca900ec51d..b70324e0d83d 100644
--- a/arch/sparc64/kernel/iommu_common.c
+++ b/arch/sparc64/kernel/iommu_common.c
@@ -73,7 +73,7 @@ static int verify_one_map(struct scatterlist *dma_sg, struct scatterlist **__sg,
73 73
74 daddr = dma_sg->dma_address; 74 daddr = dma_sg->dma_address;
75 sglen = sg->length; 75 sglen = sg->length;
76 sgaddr = (unsigned long) (page_address(sg->page) + sg->offset); 76 sgaddr = (unsigned long) sg_virt(sg);
77 while (dlen > 0) { 77 while (dlen > 0) {
78 unsigned long paddr; 78 unsigned long paddr;
79 79
@@ -123,7 +123,7 @@ static int verify_one_map(struct scatterlist *dma_sg, struct scatterlist **__sg,
123 sg = sg_next(sg); 123 sg = sg_next(sg);
124 if (--nents <= 0) 124 if (--nents <= 0)
125 break; 125 break;
126 sgaddr = (unsigned long) (page_address(sg->page) + sg->offset); 126 sgaddr = (unsigned long) sg_virt(sg);
127 sglen = sg->length; 127 sglen = sg->length;
128 } 128 }
129 if (dlen < 0) { 129 if (dlen < 0) {
@@ -191,7 +191,7 @@ void verify_sglist(struct scatterlist *sglist, int nents, iopte_t *iopte, int np
191 printk("sg(%d): page_addr(%p) off(%x) length(%x) " 191 printk("sg(%d): page_addr(%p) off(%x) length(%x) "
192 "dma_address[%016x] dma_length[%016x]\n", 192 "dma_address[%016x] dma_length[%016x]\n",
193 i, 193 i,
194 page_address(sg->page), sg->offset, 194 page_address(sg_page(sg)), sg->offset,
195 sg->length, 195 sg->length,
196 sg->dma_address, sg->dma_length); 196 sg->dma_address, sg->dma_length);
197 } 197 }
@@ -207,15 +207,14 @@ unsigned long prepare_sg(struct scatterlist *sg, int nents)
207 unsigned long prev; 207 unsigned long prev;
208 u32 dent_addr, dent_len; 208 u32 dent_addr, dent_len;
209 209
210 prev = (unsigned long) (page_address(sg->page) + sg->offset); 210 prev = (unsigned long) sg_virt(sg);
211 prev += (unsigned long) (dent_len = sg->length); 211 prev += (unsigned long) (dent_len = sg->length);
212 dent_addr = (u32) ((unsigned long)(page_address(sg->page) + sg->offset) 212 dent_addr = (u32) ((unsigned long)(sg_virt(sg)) & (IO_PAGE_SIZE - 1UL));
213 & (IO_PAGE_SIZE - 1UL));
214 while (--nents) { 213 while (--nents) {
215 unsigned long addr; 214 unsigned long addr;
216 215
217 sg = sg_next(sg); 216 sg = sg_next(sg);
218 addr = (unsigned long) (page_address(sg->page) + sg->offset); 217 addr = (unsigned long) sg_virt(sg);
219 if (! VCONTIG(prev, addr)) { 218 if (! VCONTIG(prev, addr)) {
220 dma_sg->dma_address = dent_addr; 219 dma_sg->dma_address = dent_addr;
221 dma_sg->dma_length = dent_len; 220 dma_sg->dma_length = dent_len;
@@ -234,6 +233,11 @@ unsigned long prepare_sg(struct scatterlist *sg, int nents)
234 dma_sg->dma_address = dent_addr; 233 dma_sg->dma_address = dent_addr;
235 dma_sg->dma_length = dent_len; 234 dma_sg->dma_length = dent_len;
236 235
236 if (dma_sg != sg) {
237 dma_sg = next_sg(dma_sg);
238 dma_sg->dma_length = 0;
239 }
240
237 return ((unsigned long) dent_addr + 241 return ((unsigned long) dent_addr +
238 (unsigned long) dent_len + 242 (unsigned long) dent_len +
239 (IO_PAGE_SIZE - 1UL)) >> IO_PAGE_SHIFT; 243 (IO_PAGE_SIZE - 1UL)) >> IO_PAGE_SHIFT;
diff --git a/arch/sparc64/kernel/irq.c b/arch/sparc64/kernel/irq.c
index 2c3bea228159..30431bd24e1e 100644
--- a/arch/sparc64/kernel/irq.c
+++ b/arch/sparc64/kernel/irq.c
@@ -257,8 +257,8 @@ struct irq_handler_data {
257 unsigned long imap; 257 unsigned long imap;
258 258
259 void (*pre_handler)(unsigned int, void *, void *); 259 void (*pre_handler)(unsigned int, void *, void *);
260 void *pre_handler_arg1; 260 void *arg1;
261 void *pre_handler_arg2; 261 void *arg2;
262}; 262};
263 263
264#ifdef CONFIG_SMP 264#ifdef CONFIG_SMP
@@ -346,7 +346,7 @@ static void sun4u_irq_disable(unsigned int virt_irq)
346 } 346 }
347} 347}
348 348
349static void sun4u_irq_end(unsigned int virt_irq) 349static void sun4u_irq_eoi(unsigned int virt_irq)
350{ 350{
351 struct irq_handler_data *data = get_irq_chip_data(virt_irq); 351 struct irq_handler_data *data = get_irq_chip_data(virt_irq);
352 struct irq_desc *desc = irq_desc + virt_irq; 352 struct irq_desc *desc = irq_desc + virt_irq;
@@ -401,7 +401,7 @@ static void sun4v_irq_disable(unsigned int virt_irq)
401 "err(%d)\n", ino, err); 401 "err(%d)\n", ino, err);
402} 402}
403 403
404static void sun4v_irq_end(unsigned int virt_irq) 404static void sun4v_irq_eoi(unsigned int virt_irq)
405{ 405{
406 unsigned int ino = virt_irq_table[virt_irq].dev_ino; 406 unsigned int ino = virt_irq_table[virt_irq].dev_ino;
407 struct irq_desc *desc = irq_desc + virt_irq; 407 struct irq_desc *desc = irq_desc + virt_irq;
@@ -478,7 +478,7 @@ static void sun4v_virq_disable(unsigned int virt_irq)
478 dev_handle, dev_ino, err); 478 dev_handle, dev_ino, err);
479} 479}
480 480
481static void sun4v_virq_end(unsigned int virt_irq) 481static void sun4v_virq_eoi(unsigned int virt_irq)
482{ 482{
483 struct irq_desc *desc = irq_desc + virt_irq; 483 struct irq_desc *desc = irq_desc + virt_irq;
484 unsigned long dev_handle, dev_ino; 484 unsigned long dev_handle, dev_ino;
@@ -498,33 +498,11 @@ static void sun4v_virq_end(unsigned int virt_irq)
498 dev_handle, dev_ino, err); 498 dev_handle, dev_ino, err);
499} 499}
500 500
501static void run_pre_handler(unsigned int virt_irq)
502{
503 struct irq_handler_data *data = get_irq_chip_data(virt_irq);
504 unsigned int ino;
505
506 ino = virt_irq_table[virt_irq].dev_ino;
507 if (likely(data->pre_handler)) {
508 data->pre_handler(ino,
509 data->pre_handler_arg1,
510 data->pre_handler_arg2);
511 }
512}
513
514static struct irq_chip sun4u_irq = { 501static struct irq_chip sun4u_irq = {
515 .typename = "sun4u", 502 .typename = "sun4u",
516 .enable = sun4u_irq_enable, 503 .enable = sun4u_irq_enable,
517 .disable = sun4u_irq_disable, 504 .disable = sun4u_irq_disable,
518 .end = sun4u_irq_end, 505 .eoi = sun4u_irq_eoi,
519 .set_affinity = sun4u_set_affinity,
520};
521
522static struct irq_chip sun4u_irq_ack = {
523 .typename = "sun4u+ack",
524 .enable = sun4u_irq_enable,
525 .disable = sun4u_irq_disable,
526 .ack = run_pre_handler,
527 .end = sun4u_irq_end,
528 .set_affinity = sun4u_set_affinity, 506 .set_affinity = sun4u_set_affinity,
529}; 507};
530 508
@@ -532,7 +510,7 @@ static struct irq_chip sun4v_irq = {
532 .typename = "sun4v", 510 .typename = "sun4v",
533 .enable = sun4v_irq_enable, 511 .enable = sun4v_irq_enable,
534 .disable = sun4v_irq_disable, 512 .disable = sun4v_irq_disable,
535 .end = sun4v_irq_end, 513 .eoi = sun4v_irq_eoi,
536 .set_affinity = sun4v_set_affinity, 514 .set_affinity = sun4v_set_affinity,
537}; 515};
538 516
@@ -540,31 +518,33 @@ static struct irq_chip sun4v_virq = {
540 .typename = "vsun4v", 518 .typename = "vsun4v",
541 .enable = sun4v_virq_enable, 519 .enable = sun4v_virq_enable,
542 .disable = sun4v_virq_disable, 520 .disable = sun4v_virq_disable,
543 .end = sun4v_virq_end, 521 .eoi = sun4v_virq_eoi,
544 .set_affinity = sun4v_virt_set_affinity, 522 .set_affinity = sun4v_virt_set_affinity,
545}; 523};
546 524
525static void fastcall pre_flow_handler(unsigned int virt_irq,
526 struct irq_desc *desc)
527{
528 struct irq_handler_data *data = get_irq_chip_data(virt_irq);
529 unsigned int ino = virt_irq_table[virt_irq].dev_ino;
530
531 data->pre_handler(ino, data->arg1, data->arg2);
532
533 handle_fasteoi_irq(virt_irq, desc);
534}
535
547void irq_install_pre_handler(int virt_irq, 536void irq_install_pre_handler(int virt_irq,
548 void (*func)(unsigned int, void *, void *), 537 void (*func)(unsigned int, void *, void *),
549 void *arg1, void *arg2) 538 void *arg1, void *arg2)
550{ 539{
551 struct irq_handler_data *data = get_irq_chip_data(virt_irq); 540 struct irq_handler_data *data = get_irq_chip_data(virt_irq);
552 struct irq_chip *chip = get_irq_chip(virt_irq); 541 struct irq_desc *desc = irq_desc + virt_irq;
553
554 if (WARN_ON(chip == &sun4v_irq || chip == &sun4v_virq)) {
555 printk(KERN_ERR "IRQ: Trying to install pre-handler on "
556 "sun4v irq %u\n", virt_irq);
557 return;
558 }
559 542
560 data->pre_handler = func; 543 data->pre_handler = func;
561 data->pre_handler_arg1 = arg1; 544 data->arg1 = arg1;
562 data->pre_handler_arg2 = arg2; 545 data->arg2 = arg2;
563
564 if (chip == &sun4u_irq_ack)
565 return;
566 546
567 set_irq_chip(virt_irq, &sun4u_irq_ack); 547 desc->handle_irq = pre_flow_handler;
568} 548}
569 549
570unsigned int build_irq(int inofixup, unsigned long iclr, unsigned long imap) 550unsigned int build_irq(int inofixup, unsigned long iclr, unsigned long imap)
@@ -582,7 +562,10 @@ unsigned int build_irq(int inofixup, unsigned long iclr, unsigned long imap)
582 if (!virt_irq) { 562 if (!virt_irq) {
583 virt_irq = virt_irq_alloc(0, ino); 563 virt_irq = virt_irq_alloc(0, ino);
584 bucket_set_virt_irq(__pa(bucket), virt_irq); 564 bucket_set_virt_irq(__pa(bucket), virt_irq);
585 set_irq_chip(virt_irq, &sun4u_irq); 565 set_irq_chip_and_handler_name(virt_irq,
566 &sun4u_irq,
567 handle_fasteoi_irq,
568 "IVEC");
586 } 569 }
587 570
588 data = get_irq_chip_data(virt_irq); 571 data = get_irq_chip_data(virt_irq);
@@ -617,7 +600,9 @@ static unsigned int sun4v_build_common(unsigned long sysino,
617 if (!virt_irq) { 600 if (!virt_irq) {
618 virt_irq = virt_irq_alloc(0, sysino); 601 virt_irq = virt_irq_alloc(0, sysino);
619 bucket_set_virt_irq(__pa(bucket), virt_irq); 602 bucket_set_virt_irq(__pa(bucket), virt_irq);
620 set_irq_chip(virt_irq, chip); 603 set_irq_chip_and_handler_name(virt_irq, chip,
604 handle_fasteoi_irq,
605 "IVEC");
621 } 606 }
622 607
623 data = get_irq_chip_data(virt_irq); 608 data = get_irq_chip_data(virt_irq);
@@ -665,7 +650,10 @@ unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino)
665 650
666 virt_irq = virt_irq_alloc(devhandle, devino); 651 virt_irq = virt_irq_alloc(devhandle, devino);
667 bucket_set_virt_irq(__pa(bucket), virt_irq); 652 bucket_set_virt_irq(__pa(bucket), virt_irq);
668 set_irq_chip(virt_irq, &sun4v_virq); 653
654 set_irq_chip_and_handler_name(virt_irq, &sun4v_virq,
655 handle_fasteoi_irq,
656 "IVEC");
669 657
670 data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC); 658 data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
671 if (unlikely(!data)) 659 if (unlikely(!data))
@@ -724,6 +712,7 @@ void handler_irq(int irq, struct pt_regs *regs)
724 : "memory"); 712 : "memory");
725 713
726 while (bucket_pa) { 714 while (bucket_pa) {
715 struct irq_desc *desc;
727 unsigned long next_pa; 716 unsigned long next_pa;
728 unsigned int virt_irq; 717 unsigned int virt_irq;
729 718
@@ -731,7 +720,9 @@ void handler_irq(int irq, struct pt_regs *regs)
731 virt_irq = bucket_get_virt_irq(bucket_pa); 720 virt_irq = bucket_get_virt_irq(bucket_pa);
732 bucket_clear_chain_pa(bucket_pa); 721 bucket_clear_chain_pa(bucket_pa);
733 722
734 __do_IRQ(virt_irq); 723 desc = irq_desc + virt_irq;
724
725 desc->handle_irq(virt_irq, desc);
735 726
736 bucket_pa = next_pa; 727 bucket_pa = next_pa;
737 } 728 }
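The irq.c rework above replaces the __do_IRQ()/.end path with genirq flow handlers: each interrupt gets handle_fasteoi_irq at mapping time, and a pre-handler, when one is installed, becomes a wrapper flow handler that finishes by calling handle_fasteoi_irq() itself. A generic sketch of that pattern with invented names (my_chip, my_flow_wrapper, my_map_irq); the chip callbacks and error handling are left out.

#include <linux/irq.h>
#include <linux/interrupt.h>

static struct irq_chip my_chip;         /* .enable/.disable/.eoi set up elsewhere */

static void fastcall my_flow_wrapper(unsigned int irq, struct irq_desc *desc)
{
        /* ... device-specific pre-handling, e.g. repointing the
         * hardware at the next interrupt vector ... */
        handle_fasteoi_irq(irq, desc);
}

static void my_map_irq(unsigned int virt_irq, int needs_pre_handler)
{
        set_irq_chip_and_handler_name(virt_irq, &my_chip,
                                      handle_fasteoi_irq, "IVEC");
        if (needs_pre_handler)
                irq_desc[virt_irq].handle_irq = my_flow_wrapper;
}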
diff --git a/arch/sparc64/kernel/ldc.c b/arch/sparc64/kernel/ldc.c
index 85a2be0b0962..c8313cb60f0a 100644
--- a/arch/sparc64/kernel/ldc.c
+++ b/arch/sparc64/kernel/ldc.c
@@ -2057,7 +2057,7 @@ static void fill_cookies(struct cookie_state *sp, unsigned long pa,
2057 2057
2058static int sg_count_one(struct scatterlist *sg) 2058static int sg_count_one(struct scatterlist *sg)
2059{ 2059{
2060 unsigned long base = page_to_pfn(sg->page) << PAGE_SHIFT; 2060 unsigned long base = page_to_pfn(sg_page(sg)) << PAGE_SHIFT;
2061 long len = sg->length; 2061 long len = sg->length;
2062 2062
2063 if ((sg->offset | len) & (8UL - 1)) 2063 if ((sg->offset | len) & (8UL - 1))
diff --git a/arch/sparc64/kernel/pci.c b/arch/sparc64/kernel/pci.c
index 9b808640a193..63b3ebc0c3c2 100644
--- a/arch/sparc64/kernel/pci.c
+++ b/arch/sparc64/kernel/pci.c
@@ -207,8 +207,7 @@ static struct {
207 { "SUNW,sun4v-pci", sun4v_pci_init }, 207 { "SUNW,sun4v-pci", sun4v_pci_init },
208 { "pciex108e,80f0", fire_pci_init }, 208 { "pciex108e,80f0", fire_pci_init },
209}; 209};
210#define PCI_NUM_CONTROLLER_TYPES (sizeof(pci_controller_table) / \ 210#define PCI_NUM_CONTROLLER_TYPES ARRAY_SIZE(pci_controller_table)
211 sizeof(pci_controller_table[0]))
212 211
213static int __init pci_controller_init(const char *model_name, int namelen, struct device_node *dp) 212static int __init pci_controller_init(const char *model_name, int namelen, struct device_node *dp)
214{ 213{
diff --git a/arch/sparc64/kernel/pci_msi.c b/arch/sparc64/kernel/pci_msi.c
index 31a165fd3e48..d6d64b44af63 100644
--- a/arch/sparc64/kernel/pci_msi.c
+++ b/arch/sparc64/kernel/pci_msi.c
@@ -28,8 +28,15 @@ static irqreturn_t sparc64_msiq_interrupt(int irq, void *cookie)
28 unsigned long msi; 28 unsigned long msi;
29 29
30 err = ops->dequeue_msi(pbm, msiqid, &head, &msi); 30 err = ops->dequeue_msi(pbm, msiqid, &head, &msi);
31 if (likely(err > 0)) 31 if (likely(err > 0)) {
32 __do_IRQ(pbm->msi_irq_table[msi - pbm->msi_first]); 32 struct irq_desc *desc;
33 unsigned int virt_irq;
34
35 virt_irq = pbm->msi_irq_table[msi - pbm->msi_first];
36 desc = irq_desc + virt_irq;
37
38 desc->handle_irq(virt_irq, desc);
39 }
33 40
34 if (unlikely(err < 0)) 41 if (unlikely(err < 0))
35 goto err_dequeue; 42 goto err_dequeue;
@@ -128,7 +135,8 @@ int sparc64_setup_msi_irq(unsigned int *virt_irq_p,
128 if (!*virt_irq_p) 135 if (!*virt_irq_p)
129 goto out_err; 136 goto out_err;
130 137
131 set_irq_chip(*virt_irq_p, &msi_irq); 138 set_irq_chip_and_handler_name(*virt_irq_p, &msi_irq,
139 handle_simple_irq, "MSI");
132 140
133 err = alloc_msi(pbm); 141 err = alloc_msi(pbm);
134 if (unlikely(err < 0)) 142 if (unlikely(err < 0))
diff --git a/arch/sparc64/kernel/pci_sun4v.c b/arch/sparc64/kernel/pci_sun4v.c
index fe46ace3e59f..8c4875bdb4a8 100644
--- a/arch/sparc64/kernel/pci_sun4v.c
+++ b/arch/sparc64/kernel/pci_sun4v.c
@@ -365,8 +365,7 @@ static void dma_4v_unmap_single(struct device *dev, dma_addr_t bus_addr,
365 spin_unlock_irqrestore(&iommu->lock, flags); 365 spin_unlock_irqrestore(&iommu->lock, flags);
366} 366}
367 367
368#define SG_ENT_PHYS_ADDRESS(SG) \ 368#define SG_ENT_PHYS_ADDRESS(SG) (__pa(sg_virt((SG))))
369 (__pa(page_address((SG)->page)) + (SG)->offset)
370 369
371static long fill_sg(long entry, struct device *dev, 370static long fill_sg(long entry, struct device *dev,
372 struct scatterlist *sg, 371 struct scatterlist *sg,
@@ -477,9 +476,7 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
477 /* Fast path single entry scatterlists. */ 476 /* Fast path single entry scatterlists. */
478 if (nelems == 1) { 477 if (nelems == 1) {
479 sglist->dma_address = 478 sglist->dma_address =
480 dma_4v_map_single(dev, 479 dma_4v_map_single(dev, sg_virt(sglist),
481 (page_address(sglist->page) +
482 sglist->offset),
483 sglist->length, direction); 480 sglist->length, direction);
484 if (unlikely(sglist->dma_address == DMA_ERROR_CODE)) 481 if (unlikely(sglist->dma_address == DMA_ERROR_CODE))
485 return 0; 482 return 0;
diff --git a/arch/sparc64/math-emu/Makefile b/arch/sparc64/math-emu/Makefile
index a0b06fd29467..cc5cb9baf6aa 100644
--- a/arch/sparc64/math-emu/Makefile
+++ b/arch/sparc64/math-emu/Makefile
@@ -4,4 +4,4 @@
4 4
5obj-y := math.o 5obj-y := math.o
6 6
7EXTRA_CFLAGS = -I. -Iinclude/math-emu -w 7EXTRA_CFLAGS = -Iinclude/math-emu -w
diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c
index 25b248a02507..3a8cd3dfb51c 100644
--- a/arch/um/drivers/ubd_kern.c
+++ b/arch/um/drivers/ubd_kern.c
@@ -1115,7 +1115,7 @@ static void do_ubd_request(struct request_queue *q)
1115 } 1115 }
1116 prepare_request(req, io_req, 1116 prepare_request(req, io_req,
1117 (unsigned long long) req->sector << 9, 1117 (unsigned long long) req->sector << 9,
1118 sg->offset, sg->length, sg->page); 1118 sg->offset, sg->length, sg_page(sg));
1119 1119
1120 last_sectors = sg->length >> 9; 1120 last_sectors = sg->length >> 9;
1121 n = os_write_file(thread_fd, &io_req, 1121 n = os_write_file(thread_fd, &io_req,
diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
index f35ea2237522..a0ae2e7f6cec 100644
--- a/arch/x86/boot/compressed/head_32.S
+++ b/arch/x86/boot/compressed/head_32.S
@@ -27,13 +27,22 @@
27#include <asm/segment.h> 27#include <asm/segment.h>
28#include <asm/page.h> 28#include <asm/page.h>
29#include <asm/boot.h> 29#include <asm/boot.h>
30#include <asm/asm-offsets.h>
30 31
31.section ".text.head","ax",@progbits 32.section ".text.head","ax",@progbits
32 .globl startup_32 33 .globl startup_32
33 34
34startup_32: 35startup_32:
35 cld 36 /* check to see if KEEP_SEGMENTS flag is meaningful */
36 cli 37 cmpw $0x207, BP_version(%esi)
38 jb 1f
39
40 /* test KEEP_SEGMENTS flag to see if the bootloader is asking
41 * us to not reload segments */
42 testb $(1<<6), BP_loadflags(%esi)
43 jnz 2f
44
451: cli
37 movl $(__BOOT_DS),%eax 46 movl $(__BOOT_DS),%eax
38 movl %eax,%ds 47 movl %eax,%ds
39 movl %eax,%es 48 movl %eax,%es
@@ -41,6 +50,8 @@ startup_32:
41 movl %eax,%gs 50 movl %eax,%gs
42 movl %eax,%ss 51 movl %eax,%ss
43 52
532: cld
54
44/* Calculate the delta between where we were compiled to run 55/* Calculate the delta between where we were compiled to run
45 * at and where we were actually loaded at. This can only be done 56 * at and where we were actually loaded at. This can only be done
46 * with a short local call on x86. Nothing else will tell us what 57 * with a short local call on x86. Nothing else will tell us what
diff --git a/arch/x86/boot/compressed/misc_32.c b/arch/x86/boot/compressed/misc_32.c
index 1dc1e19c0a9f..b74d60d1b2fa 100644
--- a/arch/x86/boot/compressed/misc_32.c
+++ b/arch/x86/boot/compressed/misc_32.c
@@ -247,6 +247,9 @@ static void putstr(const char *s)
247 int x,y,pos; 247 int x,y,pos;
248 char c; 248 char c;
249 249
250 if (RM_SCREEN_INFO.orig_video_mode == 0 && lines == 0 && cols == 0)
251 return;
252
250 x = RM_SCREEN_INFO.orig_x; 253 x = RM_SCREEN_INFO.orig_x;
251 y = RM_SCREEN_INFO.orig_y; 254 y = RM_SCREEN_INFO.orig_y;
252 255
diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
index f3140e596d40..8353c81c41c0 100644
--- a/arch/x86/boot/header.S
+++ b/arch/x86/boot/header.S
@@ -119,7 +119,7 @@ _start:
119 # Part 2 of the header, from the old setup.S 119 # Part 2 of the header, from the old setup.S
120 120
121 .ascii "HdrS" # header signature 121 .ascii "HdrS" # header signature
122 .word 0x0206 # header version number (>= 0x0105) 122 .word 0x0207 # header version number (>= 0x0105)
123 # or else old loadlin-1.5 will fail) 123 # or else old loadlin-1.5 will fail)
124 .globl realmode_swtch 124 .globl realmode_swtch
125realmode_swtch: .word 0, 0 # default_switch, SETUPSEG 125realmode_swtch: .word 0, 0 # default_switch, SETUPSEG
@@ -214,6 +214,11 @@ cmdline_size: .long COMMAND_LINE_SIZE-1 #length of the command line,
214 #added with boot protocol 214 #added with boot protocol
215 #version 2.06 215 #version 2.06
216 216
217hardware_subarch: .long 0 # subarchitecture, added with 2.07
218 # default to 0 for normal x86 PC
219
220hardware_subarch_data: .quad 0
221
217# End of setup header ##################################################### 222# End of setup header #####################################################
218 223
219 .section ".inittext", "ax" 224 .section ".inittext", "ax"
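In C terms, the new header fields and the KEEP_SEGMENTS test performed by head_32.S and the compressed-boot stub amount to the check below. The function is purely illustrative (the real test runs in assembly before any C code), and the flag is simply bit 6 of loadflags as introduced with boot protocol 2.07.

#include <asm/bootparam.h>

static int bootloader_keeps_segments(const struct boot_params *bp)
{
        if (bp->hdr.version < 0x0207)
                return 0;                       /* flag only defined from 2.07 on */
        return (bp->hdr.loadflags & (1 << 6)) != 0;     /* "keep segments" bit */
}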
diff --git a/arch/x86/kernel/asm-offsets_32.c b/arch/x86/kernel/asm-offsets_32.c
index f1b7cdda82b3..f8764716b0c0 100644
--- a/arch/x86/kernel/asm-offsets_32.c
+++ b/arch/x86/kernel/asm-offsets_32.c
@@ -15,6 +15,7 @@
15#include <asm/fixmap.h> 15#include <asm/fixmap.h>
16#include <asm/processor.h> 16#include <asm/processor.h>
17#include <asm/thread_info.h> 17#include <asm/thread_info.h>
18#include <asm/bootparam.h>
18#include <asm/elf.h> 19#include <asm/elf.h>
19 20
20#include <xen/interface/xen.h> 21#include <xen/interface/xen.h>
@@ -146,4 +147,10 @@ void foo(void)
146 OFFSET(LGUEST_PAGES_regs_errcode, lguest_pages, regs.errcode); 147 OFFSET(LGUEST_PAGES_regs_errcode, lguest_pages, regs.errcode);
147 OFFSET(LGUEST_PAGES_regs, lguest_pages, regs); 148 OFFSET(LGUEST_PAGES_regs, lguest_pages, regs);
148#endif 149#endif
150
151 BLANK();
152 OFFSET(BP_scratch, boot_params, scratch);
153 OFFSET(BP_loadflags, boot_params, hdr.loadflags);
154 OFFSET(BP_hardware_subarch, boot_params, hdr.hardware_subarch);
155 OFFSET(BP_version, boot_params, hdr.version);
149} 156}
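The BP_* symbols used by the boot assembly come out of this file through the usual asm-offsets mechanism: asm-offsets_32.c is compiled to assembly and the "->" markers are turned into #defines in asm-offsets.h by the kbuild sed script. Roughly, with the macro bodies quoted from memory rather than from this tree:

#include <linux/stddef.h>       /* offsetof */

/* As defined at the top of asm-offsets_32.c in kernels of this era: */
#define DEFINE(sym, val) \
        asm volatile("\n->" #sym " %0 " #val : : "i" (val))
#define OFFSET(sym, str, mem) \
        DEFINE(sym, offsetof(struct str, mem))
#define BLANK() asm volatile("\n->" : : )

/*
 * So OFFSET(BP_version, boot_params, hdr.version) emits a marker line
 * into the generated assembly which kbuild rewrites into something like
 *     #define BP_version 518
 * (the numeric value follows from the struct layout; 0x206 here matches
 * the cmpw $0x207, BP_version(%esi) checks added to the boot code).
 */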
diff --git a/arch/x86/kernel/e820_32.c b/arch/x86/kernel/e820_32.c
index 58fd54eb5577..18f500d185a2 100644
--- a/arch/x86/kernel/e820_32.c
+++ b/arch/x86/kernel/e820_32.c
@@ -51,6 +51,13 @@ struct resource code_resource = {
51 .flags = IORESOURCE_BUSY | IORESOURCE_MEM 51 .flags = IORESOURCE_BUSY | IORESOURCE_MEM
52}; 52};
53 53
54struct resource bss_resource = {
55 .name = "Kernel bss",
56 .start = 0,
57 .end = 0,
58 .flags = IORESOURCE_BUSY | IORESOURCE_MEM
59};
60
54static struct resource system_rom_resource = { 61static struct resource system_rom_resource = {
55 .name = "System ROM", 62 .name = "System ROM",
56 .start = 0xf0000, 63 .start = 0xf0000,
@@ -254,7 +261,9 @@ static void __init probe_roms(void)
254 * and also for regions reported as reserved by the e820. 261 * and also for regions reported as reserved by the e820.
255 */ 262 */
256static void __init 263static void __init
257legacy_init_iomem_resources(struct resource *code_resource, struct resource *data_resource) 264legacy_init_iomem_resources(struct resource *code_resource,
265 struct resource *data_resource,
266 struct resource *bss_resource)
258{ 267{
259 int i; 268 int i;
260 269
@@ -287,6 +296,7 @@ legacy_init_iomem_resources(struct resource *code_resource, struct resource *dat
287 */ 296 */
288 request_resource(res, code_resource); 297 request_resource(res, code_resource);
289 request_resource(res, data_resource); 298 request_resource(res, data_resource);
299 request_resource(res, bss_resource);
290#ifdef CONFIG_KEXEC 300#ifdef CONFIG_KEXEC
291 if (crashk_res.start != crashk_res.end) 301 if (crashk_res.start != crashk_res.end)
292 request_resource(res, &crashk_res); 302 request_resource(res, &crashk_res);
@@ -307,9 +317,11 @@ static int __init request_standard_resources(void)
307 317
308 printk("Setting up standard PCI resources\n"); 318 printk("Setting up standard PCI resources\n");
309 if (efi_enabled) 319 if (efi_enabled)
310 efi_initialize_iomem_resources(&code_resource, &data_resource); 320 efi_initialize_iomem_resources(&code_resource,
321 &data_resource, &bss_resource);
311 else 322 else
312 legacy_init_iomem_resources(&code_resource, &data_resource); 323 legacy_init_iomem_resources(&code_resource,
324 &data_resource, &bss_resource);
313 325
314 /* EFI systems may still have VGA */ 326 /* EFI systems may still have VGA */
315 request_resource(&iomem_resource, &video_ram_resource); 327 request_resource(&iomem_resource, &video_ram_resource);
diff --git a/arch/x86/kernel/e820_64.c b/arch/x86/kernel/e820_64.c
index 57616865d8a0..04698e0b056c 100644
--- a/arch/x86/kernel/e820_64.c
+++ b/arch/x86/kernel/e820_64.c
@@ -47,7 +47,7 @@ unsigned long end_pfn_map;
47 */ 47 */
48static unsigned long __initdata end_user_pfn = MAXMEM>>PAGE_SHIFT; 48static unsigned long __initdata end_user_pfn = MAXMEM>>PAGE_SHIFT;
49 49
50extern struct resource code_resource, data_resource; 50extern struct resource code_resource, data_resource, bss_resource;
51 51
52/* Check for some hardcoded bad areas that early boot is not allowed to touch */ 52/* Check for some hardcoded bad areas that early boot is not allowed to touch */
53static inline int bad_addr(unsigned long *addrp, unsigned long size) 53static inline int bad_addr(unsigned long *addrp, unsigned long size)
@@ -225,6 +225,7 @@ void __init e820_reserve_resources(void)
225 */ 225 */
226 request_resource(res, &code_resource); 226 request_resource(res, &code_resource);
227 request_resource(res, &data_resource); 227 request_resource(res, &data_resource);
228 request_resource(res, &bss_resource);
228#ifdef CONFIG_KEXEC 229#ifdef CONFIG_KEXEC
229 if (crashk_res.start != crashk_res.end) 230 if (crashk_res.start != crashk_res.end)
230 request_resource(res, &crashk_res); 231 request_resource(res, &crashk_res);
@@ -729,3 +730,22 @@ __init void e820_setup_gap(void)
729 printk(KERN_INFO "Allocating PCI resources starting at %lx (gap: %lx:%lx)\n", 730 printk(KERN_INFO "Allocating PCI resources starting at %lx (gap: %lx:%lx)\n",
730 pci_mem_start, gapstart, gapsize); 731 pci_mem_start, gapstart, gapsize);
731} 732}
733
734int __init arch_get_ram_range(int slot, u64 *addr, u64 *size)
735{
736 int i;
737
738 if (slot < 0 || slot >= e820.nr_map)
739 return -1;
740 for (i = slot; i < e820.nr_map; i++) {
741 if (e820.map[i].type != E820_RAM)
742 continue;
743 break;
744 }
745 if (i == e820.nr_map || e820.map[i].addr > (max_pfn << PAGE_SHIFT))
746 return -1;
747 *addr = e820.map[i].addr;
748 *size = min_t(u64, e820.map[i].size + e820.map[i].addr,
749 max_pfn << PAGE_SHIFT) - *addr;
750 return i + 1;
751}
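A hypothetical caller for the arch_get_ram_range() helper added at the end of e820_64.c: the slot argument is a cursor, the function fills in the base and size of the next E820_RAM range clipped to max_pfn, and it returns the next cursor value, or -1 once the RAM ranges are exhausted. walk_e820_ram() below is only a usage sketch.

#include <linux/kernel.h>

static void walk_e820_ram(void)
{
        u64 addr, size;
        int slot = 0;

        while ((slot = arch_get_ram_range(slot, &addr, &size)) >= 0)
                printk(KERN_DEBUG "RAM: %016llx + %016llx\n",
                       (unsigned long long) addr,
                       (unsigned long long) size);
}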
diff --git a/arch/x86/kernel/efi_32.c b/arch/x86/kernel/efi_32.c
index b42558c48e9d..e2be78f49399 100644
--- a/arch/x86/kernel/efi_32.c
+++ b/arch/x86/kernel/efi_32.c
@@ -603,7 +603,8 @@ void __init efi_enter_virtual_mode(void)
603 603
604void __init 604void __init
605efi_initialize_iomem_resources(struct resource *code_resource, 605efi_initialize_iomem_resources(struct resource *code_resource,
606 struct resource *data_resource) 606 struct resource *data_resource,
607 struct resource *bss_resource)
607{ 608{
608 struct resource *res; 609 struct resource *res;
609 efi_memory_desc_t *md; 610 efi_memory_desc_t *md;
@@ -675,6 +676,7 @@ efi_initialize_iomem_resources(struct resource *code_resource,
675 if (md->type == EFI_CONVENTIONAL_MEMORY) { 676 if (md->type == EFI_CONVENTIONAL_MEMORY) {
676 request_resource(res, code_resource); 677 request_resource(res, code_resource);
677 request_resource(res, data_resource); 678 request_resource(res, data_resource);
679 request_resource(res, bss_resource);
678#ifdef CONFIG_KEXEC 680#ifdef CONFIG_KEXEC
679 request_resource(res, &crashk_res); 681 request_resource(res, &crashk_res);
680#endif 682#endif
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index 39677965e161..00b1c2c56454 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -79,22 +79,30 @@ INIT_MAP_BEYOND_END = BOOTBITMAP_SIZE + (PAGE_TABLE_SIZE + ALLOCATOR_SLOP)*PAGE_
79 */ 79 */
80.section .text.head,"ax",@progbits 80.section .text.head,"ax",@progbits
81ENTRY(startup_32) 81ENTRY(startup_32)
82 /* check to see if KEEP_SEGMENTS flag is meaningful */
83 cmpw $0x207, BP_version(%esi)
84 jb 1f
85
86 /* test KEEP_SEGMENTS flag to see if the bootloader is asking
87 us to not reload segments */
88 testb $(1<<6), BP_loadflags(%esi)
89 jnz 2f
82 90
83/* 91/*
84 * Set segments to known values. 92 * Set segments to known values.
85 */ 93 */
86 cld 941: lgdt boot_gdt_descr - __PAGE_OFFSET
87 lgdt boot_gdt_descr - __PAGE_OFFSET
88 movl $(__BOOT_DS),%eax 95 movl $(__BOOT_DS),%eax
89 movl %eax,%ds 96 movl %eax,%ds
90 movl %eax,%es 97 movl %eax,%es
91 movl %eax,%fs 98 movl %eax,%fs
92 movl %eax,%gs 99 movl %eax,%gs
1002:
93 101
94/* 102/*
95 * Clear BSS first so that there are no surprises... 103 * Clear BSS first so that there are no surprises...
96 * No need to cld as DF is already clear from cld above...
97 */ 104 */
105 cld
98 xorl %eax,%eax 106 xorl %eax,%eax
99 movl $__bss_start - __PAGE_OFFSET,%edi 107 movl $__bss_start - __PAGE_OFFSET,%edi
100 movl $__bss_stop - __PAGE_OFFSET,%ecx 108 movl $__bss_stop - __PAGE_OFFSET,%ecx
@@ -128,6 +136,35 @@ ENTRY(startup_32)
128 movsl 136 movsl
1291: 1371:
130 138
139#ifdef CONFIG_PARAVIRT
140 cmpw $0x207, (boot_params + BP_version - __PAGE_OFFSET)
141 jb default_entry
142
143 /* Paravirt-compatible boot parameters. Look to see what architecture
144 we're booting under. */
145 movl (boot_params + BP_hardware_subarch - __PAGE_OFFSET), %eax
146 cmpl $num_subarch_entries, %eax
147 jae bad_subarch
148
149 movl subarch_entries - __PAGE_OFFSET(,%eax,4), %eax
150 subl $__PAGE_OFFSET, %eax
151 jmp *%eax
152
153bad_subarch:
154WEAK(lguest_entry)
155WEAK(xen_entry)
156 /* Unknown implementation; there's really
157 nothing we can do at this point. */
158 ud2a
159.data
160subarch_entries:
161 .long default_entry /* normal x86/PC */
162 .long lguest_entry /* lguest hypervisor */
163 .long xen_entry /* Xen hypervisor */
164num_subarch_entries = (. - subarch_entries) / 4
165.previous
166#endif /* CONFIG_PARAVIRT */
167
131/* 168/*
132 * Initialize page tables. This creates a PDE and a set of page 169 * Initialize page tables. This creates a PDE and a set of page
133 * tables, which are located immediately beyond _end. The variable 170 * tables, which are located immediately beyond _end. The variable
@@ -140,6 +177,7 @@ ENTRY(startup_32)
140 */ 177 */
141page_pde_offset = (__PAGE_OFFSET >> 20); 178page_pde_offset = (__PAGE_OFFSET >> 20);
142 179
180default_entry:
143 movl $(pg0 - __PAGE_OFFSET), %edi 181 movl $(pg0 - __PAGE_OFFSET), %edi
144 movl $(swapper_pg_dir - __PAGE_OFFSET), %edx 182 movl $(swapper_pg_dir - __PAGE_OFFSET), %edx
145 movl $0x007, %eax /* 0x007 = PRESENT+RW+USER */ 183 movl $0x007, %eax /* 0x007 = PRESENT+RW+USER */
diff --git a/arch/x86/kernel/io_apic_64.c b/arch/x86/kernel/io_apic_64.c
index b3c2d268d708..953328b55a30 100644
--- a/arch/x86/kernel/io_apic_64.c
+++ b/arch/x86/kernel/io_apic_64.c
@@ -31,6 +31,7 @@
31#include <linux/sysdev.h> 31#include <linux/sysdev.h>
32#include <linux/msi.h> 32#include <linux/msi.h>
33#include <linux/htirq.h> 33#include <linux/htirq.h>
34#include <linux/dmar.h>
34#ifdef CONFIG_ACPI 35#ifdef CONFIG_ACPI
35#include <acpi/acpi_bus.h> 36#include <acpi/acpi_bus.h>
36#endif 37#endif
@@ -2031,8 +2032,64 @@ void arch_teardown_msi_irq(unsigned int irq)
2031 destroy_irq(irq); 2032 destroy_irq(irq);
2032} 2033}
2033 2034
2034#endif /* CONFIG_PCI_MSI */ 2035#ifdef CONFIG_DMAR
2036#ifdef CONFIG_SMP
2037static void dmar_msi_set_affinity(unsigned int irq, cpumask_t mask)
2038{
2039 struct irq_cfg *cfg = irq_cfg + irq;
2040 struct msi_msg msg;
2041 unsigned int dest;
2042 cpumask_t tmp;
2043
2044 cpus_and(tmp, mask, cpu_online_map);
2045 if (cpus_empty(tmp))
2046 return;
2047
2048 if (assign_irq_vector(irq, mask))
2049 return;
2050
2051 cpus_and(tmp, cfg->domain, mask);
2052 dest = cpu_mask_to_apicid(tmp);
2053
2054 dmar_msi_read(irq, &msg);
2055
2056 msg.data &= ~MSI_DATA_VECTOR_MASK;
2057 msg.data |= MSI_DATA_VECTOR(cfg->vector);
2058 msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
2059 msg.address_lo |= MSI_ADDR_DEST_ID(dest);
2060
2061 dmar_msi_write(irq, &msg);
2062 irq_desc[irq].affinity = mask;
2063}
2064#endif /* CONFIG_SMP */
2065
2066struct irq_chip dmar_msi_type = {
2067 .name = "DMAR_MSI",
2068 .unmask = dmar_msi_unmask,
2069 .mask = dmar_msi_mask,
2070 .ack = ack_apic_edge,
2071#ifdef CONFIG_SMP
2072 .set_affinity = dmar_msi_set_affinity,
2073#endif
2074 .retrigger = ioapic_retrigger_irq,
2075};
2076
2077int arch_setup_dmar_msi(unsigned int irq)
2078{
2079 int ret;
2080 struct msi_msg msg;
2081
2082 ret = msi_compose_msg(NULL, irq, &msg);
2083 if (ret < 0)
2084 return ret;
2085 dmar_msi_write(irq, &msg);
2086 set_irq_chip_and_handler_name(irq, &dmar_msi_type, handle_edge_irq,
2087 "edge");
2088 return 0;
2089}
2090#endif
2035 2091
2092#endif /* CONFIG_PCI_MSI */
2036/* 2093/*
2037 * Hypertransport interrupt support 2094 * Hypertransport interrupt support
2038 */ 2095 */
diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
index 5098f58063a5..1a20fe31338b 100644
--- a/arch/x86/kernel/pci-calgary_64.c
+++ b/arch/x86/kernel/pci-calgary_64.c
@@ -411,8 +411,10 @@ static int calgary_nontranslate_map_sg(struct device* dev,
411 int i; 411 int i;
412 412
413 for_each_sg(sg, s, nelems, i) { 413 for_each_sg(sg, s, nelems, i) {
414 BUG_ON(!s->page); 414 struct page *p = sg_page(s);
415 s->dma_address = virt_to_bus(page_address(s->page) +s->offset); 415
416 BUG_ON(!p);
417 s->dma_address = virt_to_bus(sg_virt(s));
416 s->dma_length = s->length; 418 s->dma_length = s->length;
417 } 419 }
418 return nelems; 420 return nelems;
@@ -432,9 +434,9 @@ static int calgary_map_sg(struct device *dev, struct scatterlist *sg,
432 return calgary_nontranslate_map_sg(dev, sg, nelems, direction); 434 return calgary_nontranslate_map_sg(dev, sg, nelems, direction);
433 435
434 for_each_sg(sg, s, nelems, i) { 436 for_each_sg(sg, s, nelems, i) {
435 BUG_ON(!s->page); 437 BUG_ON(!sg_page(s));
436 438
437 vaddr = (unsigned long)page_address(s->page) + s->offset; 439 vaddr = (unsigned long) sg_virt(s);
438 npages = num_dma_pages(vaddr, s->length); 440 npages = num_dma_pages(vaddr, s->length);
439 441
440 entry = iommu_range_alloc(tbl, npages); 442 entry = iommu_range_alloc(tbl, npages);
diff --git a/arch/x86/kernel/pci-dma_64.c b/arch/x86/kernel/pci-dma_64.c
index afaf9f12c032..393e2725a6e3 100644
--- a/arch/x86/kernel/pci-dma_64.c
+++ b/arch/x86/kernel/pci-dma_64.c
@@ -7,6 +7,7 @@
7#include <linux/string.h> 7#include <linux/string.h>
8#include <linux/pci.h> 8#include <linux/pci.h>
9#include <linux/module.h> 9#include <linux/module.h>
10#include <linux/dmar.h>
10#include <asm/io.h> 11#include <asm/io.h>
11#include <asm/iommu.h> 12#include <asm/iommu.h>
12#include <asm/calgary.h> 13#include <asm/calgary.h>
@@ -305,6 +306,8 @@ void __init pci_iommu_alloc(void)
305 detect_calgary(); 306 detect_calgary();
306#endif 307#endif
307 308
309 detect_intel_iommu();
310
308#ifdef CONFIG_SWIOTLB 311#ifdef CONFIG_SWIOTLB
309 pci_swiotlb_init(); 312 pci_swiotlb_init();
310#endif 313#endif
@@ -316,6 +319,8 @@ static int __init pci_iommu_init(void)
316 calgary_iommu_init(); 319 calgary_iommu_init();
317#endif 320#endif
318 321
322 intel_iommu_init();
323
319#ifdef CONFIG_IOMMU 324#ifdef CONFIG_IOMMU
320 gart_iommu_init(); 325 gart_iommu_init();
321#endif 326#endif
diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
index 5cdfab65e93f..c56e9ee64964 100644
--- a/arch/x86/kernel/pci-gart_64.c
+++ b/arch/x86/kernel/pci-gart_64.c
@@ -302,7 +302,7 @@ static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
302#endif 302#endif
303 303
304 for_each_sg(sg, s, nents, i) { 304 for_each_sg(sg, s, nents, i) {
305 unsigned long addr = page_to_phys(s->page) + s->offset; 305 unsigned long addr = sg_phys(s);
306 if (nonforced_iommu(dev, addr, s->length)) { 306 if (nonforced_iommu(dev, addr, s->length)) {
307 addr = dma_map_area(dev, addr, s->length, dir); 307 addr = dma_map_area(dev, addr, s->length, dir);
308 if (addr == bad_dma_address) { 308 if (addr == bad_dma_address) {
@@ -397,7 +397,7 @@ static int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents,
397 start_sg = sgmap = sg; 397 start_sg = sgmap = sg;
398 ps = NULL; /* shut up gcc */ 398 ps = NULL; /* shut up gcc */
399 for_each_sg(sg, s, nents, i) { 399 for_each_sg(sg, s, nents, i) {
400 dma_addr_t addr = page_to_phys(s->page) + s->offset; 400 dma_addr_t addr = sg_phys(s);
401 s->dma_address = addr; 401 s->dma_address = addr;
402 BUG_ON(s->length == 0); 402 BUG_ON(s->length == 0);
403 403
diff --git a/arch/x86/kernel/pci-nommu_64.c b/arch/x86/kernel/pci-nommu_64.c
index e85d4360360c..faf70bdca335 100644
--- a/arch/x86/kernel/pci-nommu_64.c
+++ b/arch/x86/kernel/pci-nommu_64.c
@@ -62,8 +62,8 @@ static int nommu_map_sg(struct device *hwdev, struct scatterlist *sg,
62 int i; 62 int i;
63 63
64 for_each_sg(sg, s, nents, i) { 64 for_each_sg(sg, s, nents, i) {
65 BUG_ON(!s->page); 65 BUG_ON(!sg_page(s));
66 s->dma_address = virt_to_bus(page_address(s->page) +s->offset); 66 s->dma_address = virt_to_bus(sg_virt(s));
67 if (!check_addr("map_sg", hwdev, s->dma_address, s->length)) 67 if (!check_addr("map_sg", hwdev, s->dma_address, s->length))
68 return 0; 68 return 0;
69 s->dma_length = s->length; 69 s->dma_length = s->length;
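The calgary, gart and nommu hunks above all make the same mechanical change: scatterlist entries are no longer dereferenced through s->page but through the new accessor helpers. A minimal sketch of the before/after pattern follows, assuming a hypothetical driver-local mapping loop (example_map_sg is not from the patch):

#include <linux/scatterlist.h>

/* Hypothetical mapping loop showing the accessor-based style used above. */
static void example_map_sg(struct scatterlist *sgl, int nents)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sgl, s, nents, i) {
		/* Old style, removed by the hunks above:
		 *   vaddr = page_address(s->page) + s->offset;
		 *   paddr = page_to_phys(s->page) + s->offset;
		 */
		void *vaddr = sg_virt(s);	/* kernel virtual address */
		dma_addr_t paddr = sg_phys(s);	/* physical address       */

		BUG_ON(!sg_page(s));		/* replaces BUG_ON(!s->page) */

		/* hand vaddr/paddr to the IOMMU-specific mapping code ... */
		(void)vaddr;
		(void)paddr;
	}
}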
diff --git a/arch/x86/kernel/setup_32.c b/arch/x86/kernel/setup_32.c
index ba2e165a8a0f..cc0e91447b76 100644
--- a/arch/x86/kernel/setup_32.c
+++ b/arch/x86/kernel/setup_32.c
@@ -60,6 +60,7 @@
60#include <asm/vmi.h> 60#include <asm/vmi.h>
61#include <setup_arch.h> 61#include <setup_arch.h>
62#include <bios_ebda.h> 62#include <bios_ebda.h>
63#include <asm/cacheflush.h>
63 64
64/* This value is set up by the early boot code to point to the value 65/* This value is set up by the early boot code to point to the value
65 immediately after the boot time page tables. It contains a *physical* 66 immediately after the boot time page tables. It contains a *physical*
@@ -73,6 +74,7 @@ int disable_pse __devinitdata = 0;
73 */ 74 */
74extern struct resource code_resource; 75extern struct resource code_resource;
75extern struct resource data_resource; 76extern struct resource data_resource;
77extern struct resource bss_resource;
76 78
77/* cpu data as detected by the assembly code in head.S */ 79/* cpu data as detected by the assembly code in head.S */
78struct cpuinfo_x86 new_cpu_data __cpuinitdata = { 0, 0, 0, 0, -1, 1, 0, 0, -1 }; 80struct cpuinfo_x86 new_cpu_data __cpuinitdata = { 0, 0, 0, 0, -1, 1, 0, 0, -1 };
@@ -600,6 +602,8 @@ void __init setup_arch(char **cmdline_p)
600 code_resource.end = virt_to_phys(_etext)-1; 602 code_resource.end = virt_to_phys(_etext)-1;
601 data_resource.start = virt_to_phys(_etext); 603 data_resource.start = virt_to_phys(_etext);
602 data_resource.end = virt_to_phys(_edata)-1; 604 data_resource.end = virt_to_phys(_edata)-1;
605 bss_resource.start = virt_to_phys(&__bss_start);
606 bss_resource.end = virt_to_phys(&__bss_stop)-1;
603 607
604 parse_early_param(); 608 parse_early_param();
605 609
diff --git a/arch/x86/kernel/setup_64.c b/arch/x86/kernel/setup_64.c
index 31322d42eaae..e7a9e36bd52d 100644
--- a/arch/x86/kernel/setup_64.c
+++ b/arch/x86/kernel/setup_64.c
@@ -58,6 +58,7 @@
58#include <asm/numa.h> 58#include <asm/numa.h>
59#include <asm/sections.h> 59#include <asm/sections.h>
60#include <asm/dmi.h> 60#include <asm/dmi.h>
61#include <asm/cacheflush.h>
61 62
62/* 63/*
63 * Machine setup.. 64 * Machine setup..
@@ -133,6 +134,12 @@ struct resource code_resource = {
133 .end = 0, 134 .end = 0,
134 .flags = IORESOURCE_RAM, 135 .flags = IORESOURCE_RAM,
135}; 136};
137struct resource bss_resource = {
138 .name = "Kernel bss",
139 .start = 0,
140 .end = 0,
141 .flags = IORESOURCE_RAM,
142};
136 143
137#ifdef CONFIG_PROC_VMCORE 144#ifdef CONFIG_PROC_VMCORE
138/* elfcorehdr= specifies the location of elf core header 145/* elfcorehdr= specifies the location of elf core header
@@ -276,6 +283,8 @@ void __init setup_arch(char **cmdline_p)
276 code_resource.end = virt_to_phys(&_etext)-1; 283 code_resource.end = virt_to_phys(&_etext)-1;
277 data_resource.start = virt_to_phys(&_etext); 284 data_resource.start = virt_to_phys(&_etext);
278 data_resource.end = virt_to_phys(&_edata)-1; 285 data_resource.end = virt_to_phys(&_edata)-1;
286 bss_resource.start = virt_to_phys(&__bss_start);
287 bss_resource.end = virt_to_phys(&__bss_stop)-1;
279 288
280 early_identify_cpu(&boot_cpu_data); 289 early_identify_cpu(&boot_cpu_data);
281 290
diff --git a/arch/x86/mm/pageattr_64.c b/arch/x86/mm/pageattr_64.c
index c7b7dfe1d405..c40afbaaf93d 100644
--- a/arch/x86/mm/pageattr_64.c
+++ b/arch/x86/mm/pageattr_64.c
@@ -61,10 +61,10 @@ static struct page *split_large_page(unsigned long address, pgprot_t prot,
61 return base; 61 return base;
62} 62}
63 63
64static void cache_flush_page(void *adr) 64void clflush_cache_range(void *adr, int size)
65{ 65{
66 int i; 66 int i;
67 for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size) 67 for (i = 0; i < size; i += boot_cpu_data.x86_clflush_size)
68 clflush(adr+i); 68 clflush(adr+i);
69} 69}
70 70
@@ -80,7 +80,7 @@ static void flush_kernel_map(void *arg)
80 asm volatile("wbinvd" ::: "memory"); 80 asm volatile("wbinvd" ::: "memory");
81 else list_for_each_entry(pg, l, lru) { 81 else list_for_each_entry(pg, l, lru) {
82 void *adr = page_address(pg); 82 void *adr = page_address(pg);
83 cache_flush_page(adr); 83 clflush_cache_range(adr, PAGE_SIZE);
84 } 84 }
85 __flush_tlb_all(); 85 __flush_tlb_all();
86} 86}
diff --git a/arch/x86_64/Kconfig b/arch/x86_64/Kconfig
index aab25f3ba3ce..c2d24991bb2b 100644
--- a/arch/x86_64/Kconfig
+++ b/arch/x86_64/Kconfig
@@ -750,6 +750,38 @@ config PCI_DOMAINS
750 depends on PCI 750 depends on PCI
751 default y 751 default y
752 752
753config DMAR
754 bool "Support for DMA Remapping Devices (EXPERIMENTAL)"
755 depends on PCI_MSI && ACPI && EXPERIMENTAL
756 default y
757 help
 758	  Support for DMA remapping (DMAR) devices enables independent
 759	  address translations for Direct Memory Access (DMA) from devices.
 760	  These DMA remapping devices are reported via ACPI tables and
 761	  include the PCI device scope covered by each DMA
 762	  remapping unit.
763
764config DMAR_GFX_WA
765 bool "Support for Graphics workaround"
766 depends on DMAR
767 default y
768 help
 769	  Current graphics drivers tend to use physical addresses
 770	  for DMA and avoid using the DMA APIs. Setting this config
 771	  option permits the IOMMU driver to set up a unity map for
 772	  all OS-visible memory, so such drivers can continue
 773	  to use physical addresses for DMA.
774
775config DMAR_FLOPPY_WA
776 bool
777 depends on DMAR
778 default y
779 help
 780	  Floppy disk drivers are known to bypass DMA API calls,
 781	  thereby failing to work when the IOMMU is enabled. This
 782	  workaround sets up a 1:1 mapping for the first
 783	  16M to make the floppy (an ISA device) work.
784
753source "drivers/pci/pcie/Kconfig" 785source "drivers/pci/pcie/Kconfig"
754 786
755source "drivers/pci/Kconfig" 787source "drivers/pci/Kconfig"
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 8025d646ab30..de5ba479c224 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -1351,11 +1351,22 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
1351new_segment: 1351new_segment:
1352 if (!sg) 1352 if (!sg)
1353 sg = sglist; 1353 sg = sglist;
1354 else 1354 else {
1355 /*
1356 * If the driver previously mapped a shorter
1357 * list, we could see a termination bit
1358 * prematurely unless it fully inits the sg
1359 * table on each mapping. We KNOW that there
1360 * must be more entries here or the driver
1361 * would be buggy, so force clear the
1362 * termination bit to avoid doing a full
1363 * sg_init_table() in drivers for each command.
1364 */
1365 sg->page_link &= ~0x02;
1355 sg = sg_next(sg); 1366 sg = sg_next(sg);
1367 }
1356 1368
1357 memset(sg, 0, sizeof(*sg)); 1369 sg_set_page(sg, bvec->bv_page);
1358 sg->page = bvec->bv_page;
1359 sg->length = nbytes; 1370 sg->length = nbytes;
1360 sg->offset = bvec->bv_offset; 1371 sg->offset = bvec->bv_offset;
1361 nsegs++; 1372 nsegs++;
@@ -1363,6 +1374,9 @@ new_segment:
1363 bvprv = bvec; 1374 bvprv = bvec;
1364 } /* segments in rq */ 1375 } /* segments in rq */
1365 1376
1377 if (sg)
1378 __sg_mark_end(sg);
1379
1366 return nsegs; 1380 return nsegs;
1367} 1381}
1368 1382
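Because blk_rq_map_sg() now relies on the entry termination marker instead of memset()ing every element, a driver that hands it a reusable scatterlist must initialise that table first; that is what the sg_init_table() calls added to the block drivers below do. A minimal sketch of the driver-side pattern, with an illustrative function name and segment limit (example_prepare_request and EXAMPLE_MAX_SG are not from the patch):

#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#define EXAMPLE_MAX_SG	32	/* assumed per-driver segment limit */

/* Hypothetical request setup showing the driver-side half of the change. */
static int example_prepare_request(struct request_queue *q, struct request *rq,
				   struct scatterlist *sgl)
{
	int nsegs;

	/*
	 * Initialise the whole table once per command so that an end marker
	 * left over from an earlier, longer mapping cannot terminate this
	 * list early.
	 */
	sg_init_table(sgl, EXAMPLE_MAX_SG);

	nsegs = blk_rq_map_sg(q, rq, sgl);

	/* sgl[0..nsegs-1] are valid; blk_rq_map_sg() marked the last entry. */
	return nsegs;
}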
diff --git a/crypto/digest.c b/crypto/digest.c
index e56de6748b15..8871dec8cae7 100644
--- a/crypto/digest.c
+++ b/crypto/digest.c
@@ -41,7 +41,7 @@ static int update2(struct hash_desc *desc,
41 return 0; 41 return 0;
42 42
43 for (;;) { 43 for (;;) {
44 struct page *pg = sg->page; 44 struct page *pg = sg_page(sg);
45 unsigned int offset = sg->offset; 45 unsigned int offset = sg->offset;
46 unsigned int l = sg->length; 46 unsigned int l = sg->length;
47 47
diff --git a/crypto/hmac.c b/crypto/hmac.c
index 8802fb6dd5a6..e4eb6ac53b5c 100644
--- a/crypto/hmac.c
+++ b/crypto/hmac.c
@@ -159,7 +159,8 @@ static int hmac_digest(struct hash_desc *pdesc, struct scatterlist *sg,
159 desc.flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP; 159 desc.flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
160 160
161 sg_set_buf(sg1, ipad, bs); 161 sg_set_buf(sg1, ipad, bs);
162 sg1[1].page = (void *)sg; 162
163 sg_set_page(&sg[1], (void *) sg);
163 sg1[1].length = 0; 164 sg1[1].length = 0;
164 sg_set_buf(sg2, opad, bs + ds); 165 sg_set_buf(sg2, opad, bs + ds);
165 166
diff --git a/crypto/scatterwalk.c b/crypto/scatterwalk.c
index d6852c33cfb7..b9bbda0bb9f9 100644
--- a/crypto/scatterwalk.c
+++ b/crypto/scatterwalk.c
@@ -54,7 +54,7 @@ static void scatterwalk_pagedone(struct scatter_walk *walk, int out,
54 if (out) { 54 if (out) {
55 struct page *page; 55 struct page *page;
56 56
57 page = walk->sg->page + ((walk->offset - 1) >> PAGE_SHIFT); 57 page = sg_page(walk->sg) + ((walk->offset - 1) >> PAGE_SHIFT);
58 flush_dcache_page(page); 58 flush_dcache_page(page);
59 } 59 }
60 60
diff --git a/crypto/scatterwalk.h b/crypto/scatterwalk.h
index 9c73e37a42ce..87ed681cceba 100644
--- a/crypto/scatterwalk.h
+++ b/crypto/scatterwalk.h
@@ -22,13 +22,13 @@
22 22
23static inline struct scatterlist *scatterwalk_sg_next(struct scatterlist *sg) 23static inline struct scatterlist *scatterwalk_sg_next(struct scatterlist *sg)
24{ 24{
25 return (++sg)->length ? sg : (void *)sg->page; 25 return (++sg)->length ? sg : (void *) sg_page(sg);
26} 26}
27 27
28static inline unsigned long scatterwalk_samebuf(struct scatter_walk *walk_in, 28static inline unsigned long scatterwalk_samebuf(struct scatter_walk *walk_in,
29 struct scatter_walk *walk_out) 29 struct scatter_walk *walk_out)
30{ 30{
31 return !(((walk_in->sg->page - walk_out->sg->page) << PAGE_SHIFT) + 31 return !(((sg_page(walk_in->sg) - sg_page(walk_out->sg)) << PAGE_SHIFT) +
32 (int)(walk_in->offset - walk_out->offset)); 32 (int)(walk_in->offset - walk_out->offset));
33} 33}
34 34
@@ -60,7 +60,7 @@ static inline unsigned int scatterwalk_aligned(struct scatter_walk *walk,
60 60
61static inline struct page *scatterwalk_page(struct scatter_walk *walk) 61static inline struct page *scatterwalk_page(struct scatter_walk *walk)
62{ 62{
63 return walk->sg->page + (walk->offset >> PAGE_SHIFT); 63 return sg_page(walk->sg) + (walk->offset >> PAGE_SHIFT);
64} 64}
65 65
66static inline void scatterwalk_unmap(void *vaddr, int out) 66static inline void scatterwalk_unmap(void *vaddr, int out)
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index 18d489c8b935..d741c63af42c 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -317,7 +317,7 @@ static void test_cipher(char *algo, int enc,
317 goto out; 317 goto out;
318 } 318 }
319 319
320 q = kmap(sg[0].page) + sg[0].offset; 320 q = kmap(sg_page(&sg[0])) + sg[0].offset;
321 hexdump(q, cipher_tv[i].rlen); 321 hexdump(q, cipher_tv[i].rlen);
322 322
323 printk("%s\n", 323 printk("%s\n",
@@ -390,7 +390,7 @@ static void test_cipher(char *algo, int enc,
390 temp = 0; 390 temp = 0;
391 for (k = 0; k < cipher_tv[i].np; k++) { 391 for (k = 0; k < cipher_tv[i].np; k++) {
392 printk("page %u\n", k); 392 printk("page %u\n", k);
393 q = kmap(sg[k].page) + sg[k].offset; 393 q = kmap(sg_page(&sg[k])) + sg[k].offset;
394 hexdump(q, cipher_tv[i].tap[k]); 394 hexdump(q, cipher_tv[i].tap[k]);
395 printk("%s\n", 395 printk("%s\n",
396 memcmp(q, cipher_tv[i].result + temp, 396 memcmp(q, cipher_tv[i].result + temp,
diff --git a/crypto/xcbc.c b/crypto/xcbc.c
index 9f502b86e0ea..ac68f3b62fde 100644
--- a/crypto/xcbc.c
+++ b/crypto/xcbc.c
@@ -120,7 +120,7 @@ static int crypto_xcbc_digest_update2(struct hash_desc *pdesc,
120 120
121 do { 121 do {
122 122
123 struct page *pg = sg[i].page; 123 struct page *pg = sg_page(&sg[i]);
124 unsigned int offset = sg[i].offset; 124 unsigned int offset = sg[i].offset;
125 unsigned int slen = sg[i].length; 125 unsigned int slen = sg[i].length;
126 126
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 629eadbd0ec0..69092bce1ada 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4296,7 +4296,7 @@ void ata_sg_clean(struct ata_queued_cmd *qc)
4296 sg_last(sg, qc->orig_n_elem)->length += qc->pad_len; 4296 sg_last(sg, qc->orig_n_elem)->length += qc->pad_len;
4297 if (pad_buf) { 4297 if (pad_buf) {
4298 struct scatterlist *psg = &qc->pad_sgent; 4298 struct scatterlist *psg = &qc->pad_sgent;
4299 void *addr = kmap_atomic(psg->page, KM_IRQ0); 4299 void *addr = kmap_atomic(sg_page(psg), KM_IRQ0);
4300 memcpy(addr + psg->offset, pad_buf, qc->pad_len); 4300 memcpy(addr + psg->offset, pad_buf, qc->pad_len);
4301 kunmap_atomic(addr, KM_IRQ0); 4301 kunmap_atomic(addr, KM_IRQ0);
4302 } 4302 }
@@ -4686,11 +4686,11 @@ static int ata_sg_setup(struct ata_queued_cmd *qc)
4686 * data in this function or read data in ata_sg_clean. 4686 * data in this function or read data in ata_sg_clean.
4687 */ 4687 */
4688 offset = lsg->offset + lsg->length - qc->pad_len; 4688 offset = lsg->offset + lsg->length - qc->pad_len;
4689 psg->page = nth_page(lsg->page, offset >> PAGE_SHIFT); 4689 sg_set_page(psg, nth_page(sg_page(lsg), offset >> PAGE_SHIFT));
4690 psg->offset = offset_in_page(offset); 4690 psg->offset = offset_in_page(offset);
4691 4691
4692 if (qc->tf.flags & ATA_TFLAG_WRITE) { 4692 if (qc->tf.flags & ATA_TFLAG_WRITE) {
4693 void *addr = kmap_atomic(psg->page, KM_IRQ0); 4693 void *addr = kmap_atomic(sg_page(psg), KM_IRQ0);
4694 memcpy(pad_buf, addr + psg->offset, qc->pad_len); 4694 memcpy(pad_buf, addr + psg->offset, qc->pad_len);
4695 kunmap_atomic(addr, KM_IRQ0); 4695 kunmap_atomic(addr, KM_IRQ0);
4696 } 4696 }
@@ -4836,7 +4836,7 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
4836 if (qc->curbytes == qc->nbytes - qc->sect_size) 4836 if (qc->curbytes == qc->nbytes - qc->sect_size)
4837 ap->hsm_task_state = HSM_ST_LAST; 4837 ap->hsm_task_state = HSM_ST_LAST;
4838 4838
4839 page = qc->cursg->page; 4839 page = sg_page(qc->cursg);
4840 offset = qc->cursg->offset + qc->cursg_ofs; 4840 offset = qc->cursg->offset + qc->cursg_ofs;
4841 4841
4842 /* get the current page and offset */ 4842 /* get the current page and offset */
@@ -4988,7 +4988,7 @@ next_sg:
4988 4988
4989 sg = qc->cursg; 4989 sg = qc->cursg;
4990 4990
4991 page = sg->page; 4991 page = sg_page(sg);
4992 offset = sg->offset + qc->cursg_ofs; 4992 offset = sg->offset + qc->cursg_ofs;
4993 4993
4994 /* get the current page and offset */ 4994 /* get the current page and offset */
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 9fbb39cd0f58..5b758b9ad0b8 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -1544,7 +1544,7 @@ static unsigned int ata_scsi_rbuf_get(struct scsi_cmnd *cmd, u8 **buf_out)
1544 struct scatterlist *sg = scsi_sglist(cmd); 1544 struct scatterlist *sg = scsi_sglist(cmd);
1545 1545
1546 if (sg) { 1546 if (sg) {
1547 buf = kmap_atomic(sg->page, KM_IRQ0) + sg->offset; 1547 buf = kmap_atomic(sg_page(sg), KM_IRQ0) + sg->offset;
1548 buflen = sg->length; 1548 buflen = sg->length;
1549 } else { 1549 } else {
1550 buf = NULL; 1550 buf = NULL;
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index c41d0728efe2..7868707c7eda 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -137,7 +137,7 @@ static ssize_t show_mem_state(struct sys_device *dev, char *buf)
137 return len; 137 return len;
138} 138}
139 139
140static inline int memory_notify(unsigned long val, void *v) 140int memory_notify(unsigned long val, void *v)
141{ 141{
142 return blocking_notifier_call_chain(&memory_chain, val, v); 142 return blocking_notifier_call_chain(&memory_chain, val, v);
143} 143}
@@ -183,7 +183,6 @@ memory_block_action(struct memory_block *mem, unsigned long action)
183 break; 183 break;
184 case MEM_OFFLINE: 184 case MEM_OFFLINE:
185 mem->state = MEM_GOING_OFFLINE; 185 mem->state = MEM_GOING_OFFLINE;
186 memory_notify(MEM_GOING_OFFLINE, NULL);
187 start_paddr = page_to_pfn(first_page) << PAGE_SHIFT; 186 start_paddr = page_to_pfn(first_page) << PAGE_SHIFT;
188 ret = remove_memory(start_paddr, 187 ret = remove_memory(start_paddr,
189 PAGES_PER_SECTION << PAGE_SHIFT); 188 PAGES_PER_SECTION << PAGE_SHIFT);
@@ -191,7 +190,6 @@ memory_block_action(struct memory_block *mem, unsigned long action)
191 mem->state = old_state; 190 mem->state = old_state;
192 break; 191 break;
193 } 192 }
194 memory_notify(MEM_MAPPING_INVALID, NULL);
195 break; 193 break;
196 default: 194 default:
197 printk(KERN_WARNING "%s(%p, %ld) unknown action: %ld\n", 195 printk(KERN_WARNING "%s(%p, %ld) unknown action: %ld\n",
@@ -199,11 +197,6 @@ memory_block_action(struct memory_block *mem, unsigned long action)
199 WARN_ON(1); 197 WARN_ON(1);
200 ret = -EINVAL; 198 ret = -EINVAL;
201 } 199 }
202 /*
203 * For now, only notify on successful memory operations
204 */
205 if (!ret)
206 memory_notify(action, NULL);
207 200
208 return ret; 201 return ret;
209} 202}
diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c
index 84d6aa500e26..53505422867c 100644
--- a/drivers/block/DAC960.c
+++ b/drivers/block/DAC960.c
@@ -345,6 +345,7 @@ static bool DAC960_CreateAuxiliaryStructures(DAC960_Controller_T *Controller)
345 Command->V1.ScatterGatherList = 345 Command->V1.ScatterGatherList =
346 (DAC960_V1_ScatterGatherSegment_T *)ScatterGatherCPU; 346 (DAC960_V1_ScatterGatherSegment_T *)ScatterGatherCPU;
347 Command->V1.ScatterGatherListDMA = ScatterGatherDMA; 347 Command->V1.ScatterGatherListDMA = ScatterGatherDMA;
348 sg_init_table(Command->cmd_sglist, DAC960_V1_ScatterGatherLimit);
348 } else { 349 } else {
349 Command->cmd_sglist = Command->V2.ScatterList; 350 Command->cmd_sglist = Command->V2.ScatterList;
350 Command->V2.ScatterGatherList = 351 Command->V2.ScatterGatherList =
@@ -353,6 +354,7 @@ static bool DAC960_CreateAuxiliaryStructures(DAC960_Controller_T *Controller)
353 Command->V2.RequestSense = 354 Command->V2.RequestSense =
354 (DAC960_SCSI_RequestSense_T *)RequestSenseCPU; 355 (DAC960_SCSI_RequestSense_T *)RequestSenseCPU;
355 Command->V2.RequestSenseDMA = RequestSenseDMA; 356 Command->V2.RequestSenseDMA = RequestSenseDMA;
357 sg_init_table(Command->cmd_sglist, DAC960_V2_ScatterGatherLimit);
356 } 358 }
357 } 359 }
358 return true; 360 return true;
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 7c2cfde08f18..5a6fe17fc638 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -2610,7 +2610,7 @@ static void do_cciss_request(struct request_queue *q)
2610 (int)creq->nr_sectors); 2610 (int)creq->nr_sectors);
2611#endif /* CCISS_DEBUG */ 2611#endif /* CCISS_DEBUG */
2612 2612
2613 memset(tmp_sg, 0, sizeof(tmp_sg)); 2613 sg_init_table(tmp_sg, MAXSGENTRIES);
2614 seg = blk_rq_map_sg(q, creq, tmp_sg); 2614 seg = blk_rq_map_sg(q, creq, tmp_sg);
2615 2615
2616 /* get the DMA records for the setup */ 2616 /* get the DMA records for the setup */
@@ -2621,7 +2621,7 @@ static void do_cciss_request(struct request_queue *q)
2621 2621
2622 for (i = 0; i < seg; i++) { 2622 for (i = 0; i < seg; i++) {
2623 c->SG[i].Len = tmp_sg[i].length; 2623 c->SG[i].Len = tmp_sg[i].length;
2624 temp64.val = (__u64) pci_map_page(h->pdev, tmp_sg[i].page, 2624 temp64.val = (__u64) pci_map_page(h->pdev, sg_page(&tmp_sg[i]),
2625 tmp_sg[i].offset, 2625 tmp_sg[i].offset,
2626 tmp_sg[i].length, dir); 2626 tmp_sg[i].length, dir);
2627 c->SG[i].Addr.lower = temp64.val32.lower; 2627 c->SG[i].Addr.lower = temp64.val32.lower;
diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
index 568603d3043e..efab27fa1083 100644
--- a/drivers/block/cpqarray.c
+++ b/drivers/block/cpqarray.c
@@ -918,6 +918,7 @@ queue_next:
918DBGPX( 918DBGPX(
919 printk("sector=%d, nr_sectors=%d\n", creq->sector, creq->nr_sectors); 919 printk("sector=%d, nr_sectors=%d\n", creq->sector, creq->nr_sectors);
920); 920);
921 sg_init_table(tmp_sg, SG_MAX);
921 seg = blk_rq_map_sg(q, creq, tmp_sg); 922 seg = blk_rq_map_sg(q, creq, tmp_sg);
922 923
923 /* Now do all the DMA Mappings */ 924 /* Now do all the DMA Mappings */
@@ -929,7 +930,7 @@ DBGPX(
929 { 930 {
930 c->req.sg[i].size = tmp_sg[i].length; 931 c->req.sg[i].size = tmp_sg[i].length;
931 c->req.sg[i].addr = (__u32) pci_map_page(h->pci_dev, 932 c->req.sg[i].addr = (__u32) pci_map_page(h->pci_dev,
932 tmp_sg[i].page, 933 sg_page(&tmp_sg[i]),
933 tmp_sg[i].offset, 934 tmp_sg[i].offset,
934 tmp_sg[i].length, dir); 935 tmp_sg[i].length, dir);
935 } 936 }
diff --git a/drivers/block/cryptoloop.c b/drivers/block/cryptoloop.c
index 40535036e893..1b58b010797f 100644
--- a/drivers/block/cryptoloop.c
+++ b/drivers/block/cryptoloop.c
@@ -26,6 +26,7 @@
26#include <linux/crypto.h> 26#include <linux/crypto.h>
27#include <linux/blkdev.h> 27#include <linux/blkdev.h>
28#include <linux/loop.h> 28#include <linux/loop.h>
29#include <linux/scatterlist.h>
29#include <asm/semaphore.h> 30#include <asm/semaphore.h>
30#include <asm/uaccess.h> 31#include <asm/uaccess.h>
31 32
@@ -119,14 +120,17 @@ cryptoloop_transfer(struct loop_device *lo, int cmd,
119 .tfm = tfm, 120 .tfm = tfm,
120 .flags = CRYPTO_TFM_REQ_MAY_SLEEP, 121 .flags = CRYPTO_TFM_REQ_MAY_SLEEP,
121 }; 122 };
122 struct scatterlist sg_out = { NULL, }; 123 struct scatterlist sg_out;
123 struct scatterlist sg_in = { NULL, }; 124 struct scatterlist sg_in;
124 125
125 encdec_cbc_t encdecfunc; 126 encdec_cbc_t encdecfunc;
126 struct page *in_page, *out_page; 127 struct page *in_page, *out_page;
127 unsigned in_offs, out_offs; 128 unsigned in_offs, out_offs;
128 int err; 129 int err;
129 130
131 sg_init_table(&sg_out, 1);
132 sg_init_table(&sg_in, 1);
133
130 if (cmd == READ) { 134 if (cmd == READ) {
131 in_page = raw_page; 135 in_page = raw_page;
132 in_offs = raw_off; 136 in_offs = raw_off;
@@ -146,11 +150,11 @@ cryptoloop_transfer(struct loop_device *lo, int cmd,
146 u32 iv[4] = { 0, }; 150 u32 iv[4] = { 0, };
147 iv[0] = cpu_to_le32(IV & 0xffffffff); 151 iv[0] = cpu_to_le32(IV & 0xffffffff);
148 152
149 sg_in.page = in_page; 153 sg_set_page(&sg_in, in_page);
150 sg_in.offset = in_offs; 154 sg_in.offset = in_offs;
151 sg_in.length = sz; 155 sg_in.length = sz;
152 156
153 sg_out.page = out_page; 157 sg_set_page(&sg_out, out_page);
154 sg_out.offset = out_offs; 158 sg_out.offset = out_offs;
155 sg_out.length = sz; 159 sg_out.length = sz;
156 160
diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c
index 317a790c153b..7276f7d207c2 100644
--- a/drivers/block/sunvdc.c
+++ b/drivers/block/sunvdc.c
@@ -388,6 +388,7 @@ static int __send_request(struct request *req)
388 op = VD_OP_BWRITE; 388 op = VD_OP_BWRITE;
389 } 389 }
390 390
391 sg_init_table(sg, port->ring_cookies);
391 nsg = blk_rq_map_sg(req->q, req, sg); 392 nsg = blk_rq_map_sg(req->q, req, sg);
392 393
393 len = 0; 394 len = 0;
diff --git a/drivers/block/sx8.c b/drivers/block/sx8.c
index 402209fec59a..282a69558e8a 100644
--- a/drivers/block/sx8.c
+++ b/drivers/block/sx8.c
@@ -522,6 +522,7 @@ static struct carm_request *carm_get_request(struct carm_host *host)
522 host->n_msgs++; 522 host->n_msgs++;
523 523
524 assert(host->n_msgs <= CARM_MAX_REQ); 524 assert(host->n_msgs <= CARM_MAX_REQ);
525 sg_init_table(crq->sg, CARM_MAX_REQ_SG);
525 return crq; 526 return crq;
526 } 527 }
527 528
diff --git a/drivers/block/ub.c b/drivers/block/ub.c
index c57dd2b3a0c8..14143f2c484d 100644
--- a/drivers/block/ub.c
+++ b/drivers/block/ub.c
@@ -25,6 +25,7 @@
25#include <linux/usb_usual.h> 25#include <linux/usb_usual.h>
26#include <linux/blkdev.h> 26#include <linux/blkdev.h>
27#include <linux/timer.h> 27#include <linux/timer.h>
28#include <linux/scatterlist.h>
28#include <scsi/scsi.h> 29#include <scsi/scsi.h>
29 30
30#define DRV_NAME "ub" 31#define DRV_NAME "ub"
@@ -656,6 +657,7 @@ static int ub_request_fn_1(struct ub_lun *lun, struct request *rq)
656 if ((cmd = ub_get_cmd(lun)) == NULL) 657 if ((cmd = ub_get_cmd(lun)) == NULL)
657 return -1; 658 return -1;
658 memset(cmd, 0, sizeof(struct ub_scsi_cmd)); 659 memset(cmd, 0, sizeof(struct ub_scsi_cmd));
660 sg_init_table(cmd->sgv, UB_MAX_REQ_SG);
659 661
660 blkdev_dequeue_request(rq); 662 blkdev_dequeue_request(rq);
661 663
@@ -1309,9 +1311,8 @@ static void ub_data_start(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
1309 else 1311 else
1310 pipe = sc->send_bulk_pipe; 1312 pipe = sc->send_bulk_pipe;
1311 sc->last_pipe = pipe; 1313 sc->last_pipe = pipe;
1312 usb_fill_bulk_urb(&sc->work_urb, sc->dev, pipe, 1314 usb_fill_bulk_urb(&sc->work_urb, sc->dev, pipe, sg_virt(sg),
1313 page_address(sg->page) + sg->offset, sg->length, 1315 sg->length, ub_urb_complete, sc);
1314 ub_urb_complete, sc);
1315 sc->work_urb.actual_length = 0; 1316 sc->work_urb.actual_length = 0;
1316 sc->work_urb.error_count = 0; 1317 sc->work_urb.error_count = 0;
1317 sc->work_urb.status = 0; 1318 sc->work_urb.status = 0;
@@ -1427,7 +1428,7 @@ static void ub_state_sense(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
1427 scmd->state = UB_CMDST_INIT; 1428 scmd->state = UB_CMDST_INIT;
1428 scmd->nsg = 1; 1429 scmd->nsg = 1;
1429 sg = &scmd->sgv[0]; 1430 sg = &scmd->sgv[0];
1430 sg->page = virt_to_page(sc->top_sense); 1431 sg_set_page(sg, virt_to_page(sc->top_sense));
1431 sg->offset = (unsigned long)sc->top_sense & (PAGE_SIZE-1); 1432 sg->offset = (unsigned long)sc->top_sense & (PAGE_SIZE-1);
1432 sg->length = UB_SENSE_SIZE; 1433 sg->length = UB_SENSE_SIZE;
1433 scmd->len = UB_SENSE_SIZE; 1434 scmd->len = UB_SENSE_SIZE;
@@ -1863,7 +1864,7 @@ static int ub_sync_read_cap(struct ub_dev *sc, struct ub_lun *lun,
1863 cmd->state = UB_CMDST_INIT; 1864 cmd->state = UB_CMDST_INIT;
1864 cmd->nsg = 1; 1865 cmd->nsg = 1;
1865 sg = &cmd->sgv[0]; 1866 sg = &cmd->sgv[0];
1866 sg->page = virt_to_page(p); 1867 sg_set_page(sg, virt_to_page(p));
1867 sg->offset = (unsigned long)p & (PAGE_SIZE-1); 1868 sg->offset = (unsigned long)p & (PAGE_SIZE-1);
1868 sg->length = 8; 1869 sg->length = 8;
1869 cmd->len = 8; 1870 cmd->len = 8;
diff --git a/drivers/block/viodasd.c b/drivers/block/viodasd.c
index e824b672e05a..ab5d404faa11 100644
--- a/drivers/block/viodasd.c
+++ b/drivers/block/viodasd.c
@@ -41,6 +41,7 @@
41#include <linux/dma-mapping.h> 41#include <linux/dma-mapping.h>
42#include <linux/completion.h> 42#include <linux/completion.h>
43#include <linux/device.h> 43#include <linux/device.h>
44#include <linux/scatterlist.h>
44 45
45#include <asm/uaccess.h> 46#include <asm/uaccess.h>
46#include <asm/vio.h> 47#include <asm/vio.h>
@@ -270,6 +271,7 @@ static int send_request(struct request *req)
270 d = req->rq_disk->private_data; 271 d = req->rq_disk->private_data;
271 272
272 /* Now build the scatter-gather list */ 273 /* Now build the scatter-gather list */
274 sg_init_table(sg, VIOMAXBLOCKDMA);
273 nsg = blk_rq_map_sg(req->q, req, sg); 275 nsg = blk_rq_map_sg(req->q, req, sg);
274 nsg = dma_map_sg(d->dev, sg, nsg, direction); 276 nsg = dma_map_sg(d->dev, sg, nsg, direction);
275 277
diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig
index b9fbe6e7f9ae..075598e1c502 100644
--- a/drivers/bluetooth/Kconfig
+++ b/drivers/bluetooth/Kconfig
@@ -22,6 +22,30 @@ config BT_HCIUSB_SCO
22 22
23 Say Y here to compile support for SCO over HCI USB. 23 Say Y here to compile support for SCO over HCI USB.
24 24
25config BT_HCIBTUSB
26 tristate "HCI USB driver (alternate version)"
27 depends on USB && EXPERIMENTAL && BT_HCIUSB=n
28 help
29 Bluetooth HCI USB driver.
30 This driver is required if you want to use Bluetooth devices with
 31	  a USB interface.
32
33 This driver is still experimental and has no SCO support.
34
35 Say Y here to compile support for Bluetooth USB devices into the
 36	  kernel or say M to compile it as a module (btusb).
37
38config BT_HCIBTSDIO
39 tristate "HCI SDIO driver"
40 depends on MMC
41 help
42 Bluetooth HCI SDIO driver.
 43	  This driver is required if you want to use Bluetooth devices with
 44	  an SDIO interface.
45
46 Say Y here to compile support for Bluetooth SDIO devices into the
 47	  kernel or say M to compile it as a module (btsdio).
48
25config BT_HCIUART 49config BT_HCIUART
26 tristate "HCI UART driver" 50 tristate "HCI UART driver"
27 help 51 help
@@ -55,6 +79,17 @@ config BT_HCIUART_BCSP
55 79
56 Say Y here to compile support for HCI BCSP protocol. 80 Say Y here to compile support for HCI BCSP protocol.
57 81
82config BT_HCIUART_LL
83 bool "HCILL protocol support"
84 depends on BT_HCIUART
85 help
86 HCILL (HCI Low Level) is a serial protocol for communication
 87	  between the Bluetooth device and the host. This protocol is required for
88 serial Bluetooth devices that are based on Texas Instruments'
89 BRF chips.
90
 91	  Say Y here to compile support for the HCILL protocol.
92
58config BT_HCIBCM203X 93config BT_HCIBCM203X
59 tristate "HCI BCM203x USB driver" 94 tristate "HCI BCM203x USB driver"
60 depends on USB 95 depends on USB
diff --git a/drivers/bluetooth/Makefile b/drivers/bluetooth/Makefile
index 08c10e178e02..77444afbf107 100644
--- a/drivers/bluetooth/Makefile
+++ b/drivers/bluetooth/Makefile
@@ -13,7 +13,11 @@ obj-$(CONFIG_BT_HCIBT3C) += bt3c_cs.o
13obj-$(CONFIG_BT_HCIBLUECARD) += bluecard_cs.o 13obj-$(CONFIG_BT_HCIBLUECARD) += bluecard_cs.o
14obj-$(CONFIG_BT_HCIBTUART) += btuart_cs.o 14obj-$(CONFIG_BT_HCIBTUART) += btuart_cs.o
15 15
16obj-$(CONFIG_BT_HCIBTUSB) += btusb.o
17obj-$(CONFIG_BT_HCIBTSDIO) += btsdio.o
18
16hci_uart-y := hci_ldisc.o 19hci_uart-y := hci_ldisc.o
17hci_uart-$(CONFIG_BT_HCIUART_H4) += hci_h4.o 20hci_uart-$(CONFIG_BT_HCIUART_H4) += hci_h4.o
18hci_uart-$(CONFIG_BT_HCIUART_BCSP) += hci_bcsp.o 21hci_uart-$(CONFIG_BT_HCIUART_BCSP) += hci_bcsp.o
22hci_uart-$(CONFIG_BT_HCIUART_LL) += hci_ll.o
19hci_uart-objs := $(hci_uart-y) 23hci_uart-objs := $(hci_uart-y)
diff --git a/drivers/bluetooth/bluecard_cs.c b/drivers/bluetooth/bluecard_cs.c
index 851de4d5b7de..bcf57927b7a8 100644
--- a/drivers/bluetooth/bluecard_cs.c
+++ b/drivers/bluetooth/bluecard_cs.c
@@ -503,10 +503,7 @@ static irqreturn_t bluecard_interrupt(int irq, void *dev_inst)
503 unsigned int iobase; 503 unsigned int iobase;
504 unsigned char reg; 504 unsigned char reg;
505 505
506 if (!info || !info->hdev) { 506 BUG_ON(!info->hdev);
507 BT_ERR("Call of irq %d for unknown device", irq);
508 return IRQ_NONE;
509 }
510 507
511 if (!test_bit(CARD_READY, &(info->hw_state))) 508 if (!test_bit(CARD_READY, &(info->hw_state)))
512 return IRQ_HANDLED; 509 return IRQ_HANDLED;
diff --git a/drivers/bluetooth/bpa10x.c b/drivers/bluetooth/bpa10x.c
index e8ebd5d3de86..1375b5345a0a 100644
--- a/drivers/bluetooth/bpa10x.c
+++ b/drivers/bluetooth/bpa10x.c
@@ -2,7 +2,7 @@
2 * 2 *
3 * Digianswer Bluetooth USB driver 3 * Digianswer Bluetooth USB driver
4 * 4 *
5 * Copyright (C) 2004-2005 Marcel Holtmann <marcel@holtmann.org> 5 * Copyright (C) 2004-2007 Marcel Holtmann <marcel@holtmann.org>
6 * 6 *
7 * 7 *
8 * This program is free software; you can redistribute it and/or modify 8 * This program is free software; you can redistribute it and/or modify
@@ -21,13 +21,14 @@
21 * 21 *
22 */ 22 */
23 23
24#include <linux/module.h>
25
26#include <linux/kernel.h> 24#include <linux/kernel.h>
25#include <linux/module.h>
27#include <linux/init.h> 26#include <linux/init.h>
28#include <linux/slab.h> 27#include <linux/slab.h>
29#include <linux/types.h> 28#include <linux/types.h>
29#include <linux/sched.h>
30#include <linux/errno.h> 30#include <linux/errno.h>
31#include <linux/skbuff.h>
31 32
32#include <linux/usb.h> 33#include <linux/usb.h>
33 34
@@ -39,7 +40,7 @@
39#define BT_DBG(D...) 40#define BT_DBG(D...)
40#endif 41#endif
41 42
42#define VERSION "0.8" 43#define VERSION "0.9"
43 44
44static int ignore = 0; 45static int ignore = 0;
45 46
@@ -52,393 +53,285 @@ static struct usb_device_id bpa10x_table[] = {
52 53
53MODULE_DEVICE_TABLE(usb, bpa10x_table); 54MODULE_DEVICE_TABLE(usb, bpa10x_table);
54 55
55#define BPA10X_CMD_EP 0x00
56#define BPA10X_EVT_EP 0x81
57#define BPA10X_TX_EP 0x02
58#define BPA10X_RX_EP 0x82
59
60#define BPA10X_CMD_BUF_SIZE 252
61#define BPA10X_EVT_BUF_SIZE 16
62#define BPA10X_TX_BUF_SIZE 384
63#define BPA10X_RX_BUF_SIZE 384
64
65struct bpa10x_data { 56struct bpa10x_data {
66 struct hci_dev *hdev; 57 struct hci_dev *hdev;
67 struct usb_device *udev; 58 struct usb_device *udev;
68 59
69 rwlock_t lock; 60 struct usb_anchor tx_anchor;
61 struct usb_anchor rx_anchor;
70 62
71 struct sk_buff_head cmd_queue; 63 struct sk_buff *rx_skb[2];
72 struct urb *cmd_urb;
73 struct urb *evt_urb;
74 struct sk_buff *evt_skb;
75 unsigned int evt_len;
76
77 struct sk_buff_head tx_queue;
78 struct urb *tx_urb;
79 struct urb *rx_urb;
80}; 64};
81 65
82#define HCI_VENDOR_HDR_SIZE 5 66#define HCI_VENDOR_HDR_SIZE 5
83 67
84struct hci_vendor_hdr { 68struct hci_vendor_hdr {
85 __u8 type; 69 __u8 type;
86 __le16 snum; 70 __le16 snum;
87 __le16 dlen; 71 __le16 dlen;
88} __attribute__ ((packed)); 72} __attribute__ ((packed));
89 73
90static void bpa10x_recv_bulk(struct bpa10x_data *data, unsigned char *buf, int count) 74static int bpa10x_recv(struct hci_dev *hdev, int queue, void *buf, int count)
91{ 75{
92 struct hci_acl_hdr *ah; 76 struct bpa10x_data *data = hdev->driver_data;
93 struct hci_sco_hdr *sh; 77
94 struct hci_vendor_hdr *vh; 78 BT_DBG("%s queue %d buffer %p count %d", hdev->name,
95 struct sk_buff *skb; 79 queue, buf, count);
96 int len; 80
81 if (queue < 0 || queue > 1)
82 return -EILSEQ;
83
84 hdev->stat.byte_rx += count;
97 85
98 while (count) { 86 while (count) {
99 switch (*buf++) { 87 struct sk_buff *skb = data->rx_skb[queue];
100 case HCI_ACLDATA_PKT: 88 struct { __u8 type; int expect; } *scb;
101 ah = (struct hci_acl_hdr *) buf; 89 int type, len = 0;
102 len = HCI_ACL_HDR_SIZE + __le16_to_cpu(ah->dlen);
103 skb = bt_skb_alloc(len, GFP_ATOMIC);
104 if (skb) {
105 memcpy(skb_put(skb, len), buf, len);
106 skb->dev = (void *) data->hdev;
107 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
108 hci_recv_frame(skb);
109 }
110 break;
111 90
112 case HCI_SCODATA_PKT: 91 if (!skb) {
113 sh = (struct hci_sco_hdr *) buf; 92 /* Start of the frame */
114 len = HCI_SCO_HDR_SIZE + sh->dlen; 93
115 skb = bt_skb_alloc(len, GFP_ATOMIC); 94 type = *((__u8 *) buf);
116 if (skb) { 95 count--; buf++;
117 memcpy(skb_put(skb, len), buf, len); 96
118 skb->dev = (void *) data->hdev; 97 switch (type) {
119 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT; 98 case HCI_EVENT_PKT:
120 hci_recv_frame(skb); 99 if (count >= HCI_EVENT_HDR_SIZE) {
100 struct hci_event_hdr *h = buf;
101 len = HCI_EVENT_HDR_SIZE + h->plen;
102 } else
103 return -EILSEQ;
104 break;
105
106 case HCI_ACLDATA_PKT:
107 if (count >= HCI_ACL_HDR_SIZE) {
108 struct hci_acl_hdr *h = buf;
109 len = HCI_ACL_HDR_SIZE +
110 __le16_to_cpu(h->dlen);
111 } else
112 return -EILSEQ;
113 break;
114
115 case HCI_SCODATA_PKT:
116 if (count >= HCI_SCO_HDR_SIZE) {
117 struct hci_sco_hdr *h = buf;
118 len = HCI_SCO_HDR_SIZE + h->dlen;
119 } else
120 return -EILSEQ;
121 break;
122
123 case HCI_VENDOR_PKT:
124 if (count >= HCI_VENDOR_HDR_SIZE) {
125 struct hci_vendor_hdr *h = buf;
126 len = HCI_VENDOR_HDR_SIZE +
127 __le16_to_cpu(h->dlen);
128 } else
129 return -EILSEQ;
130 break;
121 } 131 }
122 break;
123 132
124 case HCI_VENDOR_PKT:
125 vh = (struct hci_vendor_hdr *) buf;
126 len = HCI_VENDOR_HDR_SIZE + __le16_to_cpu(vh->dlen);
127 skb = bt_skb_alloc(len, GFP_ATOMIC); 133 skb = bt_skb_alloc(len, GFP_ATOMIC);
128 if (skb) { 134 if (!skb) {
129 memcpy(skb_put(skb, len), buf, len); 135 BT_ERR("%s no memory for packet", hdev->name);
130 skb->dev = (void *) data->hdev; 136 return -ENOMEM;
131 bt_cb(skb)->pkt_type = HCI_VENDOR_PKT;
132 hci_recv_frame(skb);
133 } 137 }
134 break;
135
136 default:
137 len = count - 1;
138 break;
139 }
140 138
141 buf += len; 139 skb->dev = (void *) hdev;
142 count -= (len + 1);
143 }
144}
145
146static int bpa10x_recv_event(struct bpa10x_data *data, unsigned char *buf, int size)
147{
148 BT_DBG("data %p buf %p size %d", data, buf, size);
149 140
150 if (data->evt_skb) { 141 data->rx_skb[queue] = skb;
151 struct sk_buff *skb = data->evt_skb;
152 142
153 memcpy(skb_put(skb, size), buf, size); 143 scb = (void *) skb->cb;
144 scb->type = type;
145 scb->expect = len;
146 } else {
147 /* Continuation */
154 148
155 if (skb->len == data->evt_len) { 149 scb = (void *) skb->cb;
156 data->evt_skb = NULL; 150 len = scb->expect;
157 data->evt_len = 0;
158 hci_recv_frame(skb);
159 }
160 } else {
161 struct sk_buff *skb;
162 struct hci_event_hdr *hdr;
163 unsigned char pkt_type;
164 int pkt_len = 0;
165
166 if (size < HCI_EVENT_HDR_SIZE + 1) {
167 BT_ERR("%s event packet block with size %d is too short",
168 data->hdev->name, size);
169 return -EILSEQ;
170 } 151 }
171 152
172 pkt_type = *buf++; 153 len = min(len, count);
173 size--;
174
175 if (pkt_type != HCI_EVENT_PKT) {
176 BT_ERR("%s unexpected event packet start byte 0x%02x",
177 data->hdev->name, pkt_type);
178 return -EPROTO;
179 }
180 154
181 hdr = (struct hci_event_hdr *) buf; 155 memcpy(skb_put(skb, len), buf, len);
182 pkt_len = HCI_EVENT_HDR_SIZE + hdr->plen;
183 156
184 skb = bt_skb_alloc(pkt_len, GFP_ATOMIC); 157 scb->expect -= len;
185 if (!skb) {
186 BT_ERR("%s no memory for new event packet",
187 data->hdev->name);
188 return -ENOMEM;
189 }
190 158
191 skb->dev = (void *) data->hdev; 159 if (scb->expect == 0) {
192 bt_cb(skb)->pkt_type = pkt_type; 160 /* Complete frame */
193 161
194 memcpy(skb_put(skb, size), buf, size); 162 data->rx_skb[queue] = NULL;
195 163
196 if (pkt_len == size) { 164 bt_cb(skb)->pkt_type = scb->type;
197 hci_recv_frame(skb); 165 hci_recv_frame(skb);
198 } else {
199 data->evt_skb = skb;
200 data->evt_len = pkt_len;
201 } 166 }
167
168 count -= len; buf += len;
202 } 169 }
203 170
204 return 0; 171 return 0;
205} 172}
206 173
207static void bpa10x_wakeup(struct bpa10x_data *data) 174static void bpa10x_tx_complete(struct urb *urb)
208{ 175{
209 struct urb *urb; 176 struct sk_buff *skb = urb->context;
210 struct sk_buff *skb; 177 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
211 int err;
212 178
213 BT_DBG("data %p", data); 179 BT_DBG("%s urb %p status %d count %d", hdev->name,
180 urb, urb->status, urb->actual_length);
214 181
215 urb = data->cmd_urb; 182 if (!test_bit(HCI_RUNNING, &hdev->flags))
216 if (urb->status == -EINPROGRESS) 183 goto done;
217 skb = NULL; 184
185 if (!urb->status)
186 hdev->stat.byte_tx += urb->transfer_buffer_length;
218 else 187 else
219 skb = skb_dequeue(&data->cmd_queue); 188 hdev->stat.err_tx++;
220 189
221 if (skb) { 190done:
222 struct usb_ctrlrequest *cr; 191 kfree(urb->setup_packet);
223 192
224 if (skb->len > BPA10X_CMD_BUF_SIZE) { 193 kfree_skb(skb);
225 BT_ERR("%s command packet with size %d is too big", 194}
226 data->hdev->name, skb->len); 195
227 kfree_skb(skb); 196static void bpa10x_rx_complete(struct urb *urb)
228 return; 197{
229 } 198 struct hci_dev *hdev = urb->context;
199 struct bpa10x_data *data = hdev->driver_data;
200 int err;
230 201
231 cr = (struct usb_ctrlrequest *) urb->setup_packet; 202 BT_DBG("%s urb %p status %d count %d", hdev->name,
232 cr->wLength = __cpu_to_le16(skb->len); 203 urb, urb->status, urb->actual_length);
233 204
234 skb_copy_from_linear_data(skb, urb->transfer_buffer, skb->len); 205 if (!test_bit(HCI_RUNNING, &hdev->flags))
235 urb->transfer_buffer_length = skb->len; 206 return;
236 207
237 err = usb_submit_urb(urb, GFP_ATOMIC); 208 if (urb->status == 0) {
238 if (err < 0 && err != -ENODEV) { 209 if (bpa10x_recv(hdev, usb_pipebulk(urb->pipe),
239 BT_ERR("%s submit failed for command urb %p with error %d", 210 urb->transfer_buffer,
240 data->hdev->name, urb, err); 211 urb->actual_length) < 0) {
241 skb_queue_head(&data->cmd_queue, skb); 212 BT_ERR("%s corrupted event packet", hdev->name);
242 } else 213 hdev->stat.err_rx++;
243 kfree_skb(skb); 214 }
244 } 215 }
245 216
246 urb = data->tx_urb; 217 usb_anchor_urb(urb, &data->rx_anchor);
247 if (urb->status == -EINPROGRESS) 218
248 skb = NULL; 219 err = usb_submit_urb(urb, GFP_ATOMIC);
249 else 220 if (err < 0) {
250 skb = skb_dequeue(&data->tx_queue); 221 BT_ERR("%s urb %p failed to resubmit (%d)",
251 222 hdev->name, urb, -err);
252 if (skb) { 223 usb_unanchor_urb(urb);
253 skb_copy_from_linear_data(skb, urb->transfer_buffer, skb->len);
254 urb->transfer_buffer_length = skb->len;
255
256 err = usb_submit_urb(urb, GFP_ATOMIC);
257 if (err < 0 && err != -ENODEV) {
258 BT_ERR("%s submit failed for command urb %p with error %d",
259 data->hdev->name, urb, err);
260 skb_queue_head(&data->tx_queue, skb);
261 } else
262 kfree_skb(skb);
263 } 224 }
264} 225}
265 226
266static void bpa10x_complete(struct urb *urb) 227static inline int bpa10x_submit_intr_urb(struct hci_dev *hdev)
267{ 228{
268 struct bpa10x_data *data = urb->context; 229 struct bpa10x_data *data = hdev->driver_data;
269 unsigned char *buf = urb->transfer_buffer; 230 struct urb *urb;
270 int err, count = urb->actual_length; 231 unsigned char *buf;
232 unsigned int pipe;
233 int err, size = 16;
271 234
272 BT_DBG("data %p urb %p buf %p count %d", data, urb, buf, count); 235 BT_DBG("%s", hdev->name);
273 236
274 read_lock(&data->lock); 237 urb = usb_alloc_urb(0, GFP_KERNEL);
238 if (!urb)
239 return -ENOMEM;
275 240
276 if (!test_bit(HCI_RUNNING, &data->hdev->flags)) 241 buf = kmalloc(size, GFP_KERNEL);
277 goto unlock; 242 if (!buf) {
243 usb_free_urb(urb);
244 return -ENOMEM;
245 }
278 246
279 if (urb->status < 0 || !count) 247 pipe = usb_rcvintpipe(data->udev, 0x81);
280 goto resubmit;
281 248
282 if (usb_pipein(urb->pipe)) { 249 usb_fill_int_urb(urb, data->udev, pipe, buf, size,
283 data->hdev->stat.byte_rx += count; 250 bpa10x_rx_complete, hdev, 1);
284 251
285 if (usb_pipetype(urb->pipe) == PIPE_INTERRUPT) 252 urb->transfer_flags |= URB_FREE_BUFFER;
286 bpa10x_recv_event(data, buf, count);
287 253
288 if (usb_pipetype(urb->pipe) == PIPE_BULK) 254 usb_anchor_urb(urb, &data->rx_anchor);
289 bpa10x_recv_bulk(data, buf, count);
290 } else {
291 data->hdev->stat.byte_tx += count;
292 255
293 bpa10x_wakeup(data); 256 err = usb_submit_urb(urb, GFP_KERNEL);
257 if (err < 0) {
258 BT_ERR("%s urb %p submission failed (%d)",
259 hdev->name, urb, -err);
260 usb_unanchor_urb(urb);
261 kfree(buf);
294 } 262 }
295 263
296resubmit: 264 usb_free_urb(urb);
297 if (usb_pipein(urb->pipe)) {
298 err = usb_submit_urb(urb, GFP_ATOMIC);
299 if (err < 0 && err != -ENODEV) {
300 BT_ERR("%s urb %p type %d resubmit status %d",
301 data->hdev->name, urb, usb_pipetype(urb->pipe), err);
302 }
303 }
304 265
305unlock: 266 return err;
306 read_unlock(&data->lock);
307} 267}
308 268
309static inline struct urb *bpa10x_alloc_urb(struct usb_device *udev, unsigned int pipe, 269static inline int bpa10x_submit_bulk_urb(struct hci_dev *hdev)
310 size_t size, gfp_t flags, void *data)
311{ 270{
271 struct bpa10x_data *data = hdev->driver_data;
312 struct urb *urb; 272 struct urb *urb;
313 struct usb_ctrlrequest *cr;
314 unsigned char *buf; 273 unsigned char *buf;
274 unsigned int pipe;
275 int err, size = 64;
315 276
316 BT_DBG("udev %p data %p", udev, data); 277 BT_DBG("%s", hdev->name);
317 278
318 urb = usb_alloc_urb(0, flags); 279 urb = usb_alloc_urb(0, GFP_KERNEL);
319 if (!urb) 280 if (!urb)
320 return NULL; 281 return -ENOMEM;
321 282
322 buf = kmalloc(size, flags); 283 buf = kmalloc(size, GFP_KERNEL);
323 if (!buf) { 284 if (!buf) {
324 usb_free_urb(urb); 285 usb_free_urb(urb);
325 return NULL; 286 return -ENOMEM;
326 } 287 }
327 288
328 switch (usb_pipetype(pipe)) { 289 pipe = usb_rcvbulkpipe(data->udev, 0x82);
329 case PIPE_CONTROL:
330 cr = kmalloc(sizeof(*cr), flags);
331 if (!cr) {
332 kfree(buf);
333 usb_free_urb(urb);
334 return NULL;
335 }
336 290
337 cr->bRequestType = USB_TYPE_VENDOR; 291 usb_fill_bulk_urb(urb, data->udev, pipe,
338 cr->bRequest = 0; 292 buf, size, bpa10x_rx_complete, hdev);
339 cr->wIndex = 0;
340 cr->wValue = 0;
341 cr->wLength = __cpu_to_le16(0);
342 293
343 usb_fill_control_urb(urb, udev, pipe, (void *) cr, buf, 0, bpa10x_complete, data); 294 urb->transfer_flags |= URB_FREE_BUFFER;
344 break;
345 295
346 case PIPE_INTERRUPT: 296 usb_anchor_urb(urb, &data->rx_anchor);
347 usb_fill_int_urb(urb, udev, pipe, buf, size, bpa10x_complete, data, 1);
348 break;
349 297
350 case PIPE_BULK: 298 err = usb_submit_urb(urb, GFP_KERNEL);
351 usb_fill_bulk_urb(urb, udev, pipe, buf, size, bpa10x_complete, data); 299 if (err < 0) {
352 break; 300 BT_ERR("%s urb %p submission failed (%d)",
353 301 hdev->name, urb, -err);
354 default: 302 usb_unanchor_urb(urb);
355 kfree(buf); 303 kfree(buf);
356 usb_free_urb(urb);
357 return NULL;
358 } 304 }
359 305
360 return urb;
361}
362
363static inline void bpa10x_free_urb(struct urb *urb)
364{
365 BT_DBG("urb %p", urb);
366
367 if (!urb)
368 return;
369
370 kfree(urb->setup_packet);
371 kfree(urb->transfer_buffer);
372
373 usb_free_urb(urb); 306 usb_free_urb(urb);
307
308 return err;
374} 309}
375 310
376static int bpa10x_open(struct hci_dev *hdev) 311static int bpa10x_open(struct hci_dev *hdev)
377{ 312{
378 struct bpa10x_data *data = hdev->driver_data; 313 struct bpa10x_data *data = hdev->driver_data;
379 struct usb_device *udev = data->udev;
380 unsigned long flags;
381 int err; 314 int err;
382 315
383 BT_DBG("hdev %p data %p", hdev, data); 316 BT_DBG("%s", hdev->name);
384 317
385 if (test_and_set_bit(HCI_RUNNING, &hdev->flags)) 318 if (test_and_set_bit(HCI_RUNNING, &hdev->flags))
386 return 0; 319 return 0;
387 320
388 data->cmd_urb = bpa10x_alloc_urb(udev, usb_sndctrlpipe(udev, BPA10X_CMD_EP), 321 err = bpa10x_submit_intr_urb(hdev);
389 BPA10X_CMD_BUF_SIZE, GFP_KERNEL, data); 322 if (err < 0)
390 if (!data->cmd_urb) { 323 goto error;
391 err = -ENOMEM;
392 goto done;
393 }
394
395 data->evt_urb = bpa10x_alloc_urb(udev, usb_rcvintpipe(udev, BPA10X_EVT_EP),
396 BPA10X_EVT_BUF_SIZE, GFP_KERNEL, data);
397 if (!data->evt_urb) {
398 bpa10x_free_urb(data->cmd_urb);
399 err = -ENOMEM;
400 goto done;
401 }
402
403 data->rx_urb = bpa10x_alloc_urb(udev, usb_rcvbulkpipe(udev, BPA10X_RX_EP),
404 BPA10X_RX_BUF_SIZE, GFP_KERNEL, data);
405 if (!data->rx_urb) {
406 bpa10x_free_urb(data->evt_urb);
407 bpa10x_free_urb(data->cmd_urb);
408 err = -ENOMEM;
409 goto done;
410 }
411
412 data->tx_urb = bpa10x_alloc_urb(udev, usb_sndbulkpipe(udev, BPA10X_TX_EP),
413 BPA10X_TX_BUF_SIZE, GFP_KERNEL, data);
414 if (!data->rx_urb) {
415 bpa10x_free_urb(data->rx_urb);
416 bpa10x_free_urb(data->evt_urb);
417 bpa10x_free_urb(data->cmd_urb);
418 err = -ENOMEM;
419 goto done;
420 }
421 324
422 write_lock_irqsave(&data->lock, flags); 325 err = bpa10x_submit_bulk_urb(hdev);
326 if (err < 0)
327 goto error;
423 328
424 err = usb_submit_urb(data->evt_urb, GFP_ATOMIC); 329 return 0;
425 if (err < 0) {
426 BT_ERR("%s submit failed for event urb %p with error %d",
427 data->hdev->name, data->evt_urb, err);
428 } else {
429 err = usb_submit_urb(data->rx_urb, GFP_ATOMIC);
430 if (err < 0) {
431 BT_ERR("%s submit failed for rx urb %p with error %d",
432 data->hdev->name, data->evt_urb, err);
433 usb_kill_urb(data->evt_urb);
434 }
435 }
436 330
437 write_unlock_irqrestore(&data->lock, flags); 331error:
332 usb_kill_anchored_urbs(&data->rx_anchor);
438 333
439done: 334 clear_bit(HCI_RUNNING, &hdev->flags);
440 if (err < 0)
441 clear_bit(HCI_RUNNING, &hdev->flags);
442 335
443 return err; 336 return err;
444} 337}
@@ -446,27 +339,13 @@ done:
446static int bpa10x_close(struct hci_dev *hdev) 339static int bpa10x_close(struct hci_dev *hdev)
447{ 340{
448 struct bpa10x_data *data = hdev->driver_data; 341 struct bpa10x_data *data = hdev->driver_data;
449 unsigned long flags;
450 342
451 BT_DBG("hdev %p data %p", hdev, data); 343 BT_DBG("%s", hdev->name);
452 344
453 if (!test_and_clear_bit(HCI_RUNNING, &hdev->flags)) 345 if (!test_and_clear_bit(HCI_RUNNING, &hdev->flags))
454 return 0; 346 return 0;
455 347
456 write_lock_irqsave(&data->lock, flags); 348 usb_kill_anchored_urbs(&data->rx_anchor);
457
458 skb_queue_purge(&data->cmd_queue);
459 usb_kill_urb(data->cmd_urb);
460 usb_kill_urb(data->evt_urb);
461 usb_kill_urb(data->rx_urb);
462 usb_kill_urb(data->tx_urb);
463
464 write_unlock_irqrestore(&data->lock, flags);
465
466 bpa10x_free_urb(data->cmd_urb);
467 bpa10x_free_urb(data->evt_urb);
468 bpa10x_free_urb(data->rx_urb);
469 bpa10x_free_urb(data->tx_urb);
470 349
471 return 0; 350 return 0;
472} 351}
@@ -475,9 +354,9 @@ static int bpa10x_flush(struct hci_dev *hdev)
475{ 354{
476 struct bpa10x_data *data = hdev->driver_data; 355 struct bpa10x_data *data = hdev->driver_data;
477 356
478 BT_DBG("hdev %p data %p", hdev, data); 357 BT_DBG("%s", hdev->name);
479 358
480 skb_queue_purge(&data->cmd_queue); 359 usb_kill_anchored_urbs(&data->tx_anchor);
481 360
482 return 0; 361 return 0;
483} 362}
@@ -485,45 +364,78 @@ static int bpa10x_flush(struct hci_dev *hdev)
485static int bpa10x_send_frame(struct sk_buff *skb) 364static int bpa10x_send_frame(struct sk_buff *skb)
486{ 365{
487 struct hci_dev *hdev = (struct hci_dev *) skb->dev; 366 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
488 struct bpa10x_data *data; 367 struct bpa10x_data *data = hdev->driver_data;
489 368 struct usb_ctrlrequest *dr;
490 BT_DBG("hdev %p skb %p type %d len %d", hdev, skb, bt_cb(skb)->pkt_type, skb->len); 369 struct urb *urb;
370 unsigned int pipe;
371 int err;
491 372
492 if (!hdev) { 373 BT_DBG("%s", hdev->name);
493 BT_ERR("Frame for unknown HCI device");
494 return -ENODEV;
495 }
496 374
497 if (!test_bit(HCI_RUNNING, &hdev->flags)) 375 if (!test_bit(HCI_RUNNING, &hdev->flags))
498 return -EBUSY; 376 return -EBUSY;
499 377
500 data = hdev->driver_data; 378 urb = usb_alloc_urb(0, GFP_ATOMIC);
379 if (!urb)
380 return -ENOMEM;
501 381
502 /* Prepend skb with frame type */ 382 /* Prepend skb with frame type */
503 memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1); 383 *skb_push(skb, 1) = bt_cb(skb)->pkt_type;
504 384
505 switch (bt_cb(skb)->pkt_type) { 385 switch (bt_cb(skb)->pkt_type) {
506 case HCI_COMMAND_PKT: 386 case HCI_COMMAND_PKT:
387 dr = kmalloc(sizeof(*dr), GFP_ATOMIC);
388 if (!dr) {
389 usb_free_urb(urb);
390 return -ENOMEM;
391 }
392
393 dr->bRequestType = USB_TYPE_VENDOR;
394 dr->bRequest = 0;
395 dr->wIndex = 0;
396 dr->wValue = 0;
397 dr->wLength = __cpu_to_le16(skb->len);
398
399 pipe = usb_sndctrlpipe(data->udev, 0x00);
400
401 usb_fill_control_urb(urb, data->udev, pipe, (void *) dr,
402 skb->data, skb->len, bpa10x_tx_complete, skb);
403
507 hdev->stat.cmd_tx++; 404 hdev->stat.cmd_tx++;
508 skb_queue_tail(&data->cmd_queue, skb);
509 break; 405 break;
510 406
511 case HCI_ACLDATA_PKT: 407 case HCI_ACLDATA_PKT:
408 pipe = usb_sndbulkpipe(data->udev, 0x02);
409
410 usb_fill_bulk_urb(urb, data->udev, pipe,
411 skb->data, skb->len, bpa10x_tx_complete, skb);
412
512 hdev->stat.acl_tx++; 413 hdev->stat.acl_tx++;
513 skb_queue_tail(&data->tx_queue, skb);
514 break; 414 break;
515 415
516 case HCI_SCODATA_PKT: 416 case HCI_SCODATA_PKT:
417 pipe = usb_sndbulkpipe(data->udev, 0x02);
418
419 usb_fill_bulk_urb(urb, data->udev, pipe,
420 skb->data, skb->len, bpa10x_tx_complete, skb);
421
517 hdev->stat.sco_tx++; 422 hdev->stat.sco_tx++;
518 skb_queue_tail(&data->tx_queue, skb);
519 break; 423 break;
520 };
521 424
522 read_lock(&data->lock); 425 default:
426 return -EILSEQ;
427 }
428
429 usb_anchor_urb(urb, &data->tx_anchor);
523 430
524 bpa10x_wakeup(data); 431 err = usb_submit_urb(urb, GFP_ATOMIC);
432 if (err < 0) {
433 BT_ERR("%s urb %p submission failed", hdev->name, urb);
434 kfree(urb->setup_packet);
435 usb_unanchor_urb(urb);
436 }
525 437
526 read_unlock(&data->lock); 438 usb_free_urb(urb);
527 439
528 return 0; 440 return 0;
529} 441}
@@ -532,16 +444,17 @@ static void bpa10x_destruct(struct hci_dev *hdev)
532{ 444{
533 struct bpa10x_data *data = hdev->driver_data; 445 struct bpa10x_data *data = hdev->driver_data;
534 446
535 BT_DBG("hdev %p data %p", hdev, data); 447 BT_DBG("%s", hdev->name);
536 448
449 kfree(data->rx_skb[0]);
450 kfree(data->rx_skb[1]);
537 kfree(data); 451 kfree(data);
538} 452}
539 453
540static int bpa10x_probe(struct usb_interface *intf, const struct usb_device_id *id) 454static int bpa10x_probe(struct usb_interface *intf, const struct usb_device_id *id)
541{ 455{
542 struct usb_device *udev = interface_to_usbdev(intf);
543 struct hci_dev *hdev;
544 struct bpa10x_data *data; 456 struct bpa10x_data *data;
457 struct hci_dev *hdev;
545 int err; 458 int err;
546 459
547 BT_DBG("intf %p id %p", intf, id); 460 BT_DBG("intf %p id %p", intf, id);
@@ -549,48 +462,43 @@ static int bpa10x_probe(struct usb_interface *intf, const struct usb_device_id *
549 if (ignore) 462 if (ignore)
550 return -ENODEV; 463 return -ENODEV;
551 464
552 if (intf->cur_altsetting->desc.bInterfaceNumber > 0) 465 if (intf->cur_altsetting->desc.bInterfaceNumber != 0)
553 return -ENODEV; 466 return -ENODEV;
554 467
555 data = kzalloc(sizeof(*data), GFP_KERNEL); 468 data = kzalloc(sizeof(*data), GFP_KERNEL);
556 if (!data) { 469 if (!data)
557 BT_ERR("Can't allocate data structure");
558 return -ENOMEM; 470 return -ENOMEM;
559 }
560
561 data->udev = udev;
562 471
563 rwlock_init(&data->lock); 472 data->udev = interface_to_usbdev(intf);
564 473
565 skb_queue_head_init(&data->cmd_queue); 474 init_usb_anchor(&data->tx_anchor);
566 skb_queue_head_init(&data->tx_queue); 475 init_usb_anchor(&data->rx_anchor);
567 476
568 hdev = hci_alloc_dev(); 477 hdev = hci_alloc_dev();
569 if (!hdev) { 478 if (!hdev) {
570 BT_ERR("Can't allocate HCI device");
571 kfree(data); 479 kfree(data);
572 return -ENOMEM; 480 return -ENOMEM;
573 } 481 }
574 482
575 data->hdev = hdev;
576
577 hdev->type = HCI_USB; 483 hdev->type = HCI_USB;
578 hdev->driver_data = data; 484 hdev->driver_data = data;
485
486 data->hdev = hdev;
487
579 SET_HCIDEV_DEV(hdev, &intf->dev); 488 SET_HCIDEV_DEV(hdev, &intf->dev);
580 489
581 hdev->open = bpa10x_open; 490 hdev->open = bpa10x_open;
582 hdev->close = bpa10x_close; 491 hdev->close = bpa10x_close;
583 hdev->flush = bpa10x_flush; 492 hdev->flush = bpa10x_flush;
584 hdev->send = bpa10x_send_frame; 493 hdev->send = bpa10x_send_frame;
585 hdev->destruct = bpa10x_destruct; 494 hdev->destruct = bpa10x_destruct;
586 495
587 hdev->owner = THIS_MODULE; 496 hdev->owner = THIS_MODULE;
588 497
589 err = hci_register_dev(hdev); 498 err = hci_register_dev(hdev);
590 if (err < 0) { 499 if (err < 0) {
591 BT_ERR("Can't register HCI device");
592 kfree(data);
593 hci_free_dev(hdev); 500 hci_free_dev(hdev);
501 kfree(data);
594 return err; 502 return err;
595 } 503 }
596 504
@@ -602,19 +510,17 @@ static int bpa10x_probe(struct usb_interface *intf, const struct usb_device_id *
602static void bpa10x_disconnect(struct usb_interface *intf) 510static void bpa10x_disconnect(struct usb_interface *intf)
603{ 511{
604 struct bpa10x_data *data = usb_get_intfdata(intf); 512 struct bpa10x_data *data = usb_get_intfdata(intf);
605 struct hci_dev *hdev = data->hdev;
606 513
607 BT_DBG("intf %p", intf); 514 BT_DBG("intf %p", intf);
608 515
609 if (!hdev) 516 if (!data)
610 return; 517 return;
611 518
612 usb_set_intfdata(intf, NULL); 519 usb_set_intfdata(intf, NULL);
613 520
614 if (hci_unregister_dev(hdev) < 0) 521 hci_unregister_dev(data->hdev);
615 BT_ERR("Can't unregister HCI device %s", hdev->name);
616 522
617 hci_free_dev(hdev); 523 hci_free_dev(data->hdev);
618} 524}
619 525
620static struct usb_driver bpa10x_driver = { 526static struct usb_driver bpa10x_driver = {
@@ -626,15 +532,9 @@ static struct usb_driver bpa10x_driver = {
626 532
627static int __init bpa10x_init(void) 533static int __init bpa10x_init(void)
628{ 534{
629 int err;
630
631 BT_INFO("Digianswer Bluetooth USB driver ver %s", VERSION); 535 BT_INFO("Digianswer Bluetooth USB driver ver %s", VERSION);
632 536
633 err = usb_register(&bpa10x_driver); 537 return usb_register(&bpa10x_driver);
634 if (err < 0)
635 BT_ERR("Failed to register USB driver");
636
637 return err;
638} 538}
639 539
640static void __exit bpa10x_exit(void) 540static void __exit bpa10x_exit(void)
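
For orientation while reading the bpa10x hunks above: the rewrite drops the driver's private cmd_queue/tx_queue and the read_lock'd wakeup path, and instead builds one anchored URB per frame inside bpa10x_send_frame(). Below is a condensed sketch of the new command path, reusing the names from the hunk (struct bpa10x_data and bpa10x_tx_complete are the ones defined in the patch); error handling is trimmed and this is illustrative only, not part of the patch itself.

static int bpa10x_send_cmd_sketch(struct bpa10x_data *data, struct sk_buff *skb)
{
	struct usb_ctrlrequest *dr;
	struct urb *urb;
	int err;

	urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (!urb)
		return -ENOMEM;

	dr = kmalloc(sizeof(*dr), GFP_ATOMIC);
	if (!dr) {
		usb_free_urb(urb);
		return -ENOMEM;
	}

	/* HCI commands travel as vendor-specific control transfers */
	dr->bRequestType = USB_TYPE_VENDOR;
	dr->bRequest = 0;
	dr->wIndex = 0;
	dr->wValue = 0;
	dr->wLength = __cpu_to_le16(skb->len);

	usb_fill_control_urb(urb, data->udev, usb_sndctrlpipe(data->udev, 0x00),
				(void *) dr, skb->data, skb->len,
				bpa10x_tx_complete, skb);

	/* Anchoring lets bpa10x_flush() kill every in-flight TX URB at once */
	usb_anchor_urb(urb, &data->tx_anchor);

	err = usb_submit_urb(urb, GFP_ATOMIC);
	if (err < 0) {
		kfree(urb->setup_packet);
		usb_unanchor_urb(urb);
	}

	/* Drop our reference; the USB core keeps its own until completion */
	usb_free_urb(urb);

	return 0;
}
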
diff --git a/drivers/bluetooth/bt3c_cs.c b/drivers/bluetooth/bt3c_cs.c
index 39516074636b..a18f9b8c9e12 100644
--- a/drivers/bluetooth/bt3c_cs.c
+++ b/drivers/bluetooth/bt3c_cs.c
@@ -344,10 +344,7 @@ static irqreturn_t bt3c_interrupt(int irq, void *dev_inst)
344 unsigned int iobase; 344 unsigned int iobase;
345 int iir; 345 int iir;
346 346
347 if (!info || !info->hdev) { 347 BUG_ON(!info->hdev);
348 BT_ERR("Call of irq %d for unknown device", irq);
349 return IRQ_NONE;
350 }
351 348
352 iobase = info->p_dev->io.BasePort1; 349 iobase = info->p_dev->io.BasePort1;
353 350
diff --git a/drivers/bluetooth/btsdio.c b/drivers/bluetooth/btsdio.c
new file mode 100644
index 000000000000..b786f6187902
--- /dev/null
+++ b/drivers/bluetooth/btsdio.c
@@ -0,0 +1,406 @@
1/*
2 *
3 * Generic Bluetooth SDIO driver
4 *
5 * Copyright (C) 2007 Cambridge Silicon Radio Ltd.
6 * Copyright (C) 2007 Marcel Holtmann <marcel@holtmann.org>
7 *
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 *
23 */
24
25#include <linux/kernel.h>
26#include <linux/module.h>
27#include <linux/init.h>
28#include <linux/slab.h>
29#include <linux/types.h>
30#include <linux/sched.h>
31#include <linux/errno.h>
32#include <linux/skbuff.h>
33
34#include <linux/mmc/sdio_ids.h>
35#include <linux/mmc/sdio_func.h>
36
37#include <net/bluetooth/bluetooth.h>
38#include <net/bluetooth/hci_core.h>
39
40#ifndef CONFIG_BT_HCIBTSDIO_DEBUG
41#undef BT_DBG
42#define BT_DBG(D...)
43#endif
44
45#define VERSION "0.1"
46
47static const struct sdio_device_id btsdio_table[] = {
48 /* Generic Bluetooth Type-A SDIO device */
49 { SDIO_DEVICE_CLASS(SDIO_CLASS_BT_A) },
50
51 /* Generic Bluetooth Type-B SDIO device */
52 { SDIO_DEVICE_CLASS(SDIO_CLASS_BT_B) },
53
54 { } /* Terminating entry */
55};
56
57MODULE_DEVICE_TABLE(sdio, btsdio_table);
58
59struct btsdio_data {
60 struct hci_dev *hdev;
61 struct sdio_func *func;
62
63 struct work_struct work;
64
65 struct sk_buff_head txq;
66};
67
68#define REG_RDAT 0x00 /* Receiver Data */
69#define REG_TDAT 0x00 /* Transmitter Data */
70#define REG_PC_RRT 0x10 /* Read Packet Control */
71#define REG_PC_WRT 0x11 /* Write Packet Control */
72#define REG_RTC_STAT 0x12 /* Retry Control Status */
73#define REG_RTC_SET 0x12 /* Retry Control Set */
74#define REG_INTRD 0x13 /* Interrupt Indication */
75#define REG_CL_INTRD 0x13 /* Interrupt Clear */
76#define REG_EN_INTRD 0x14 /* Interrupt Enable */
77#define REG_MD_STAT 0x20 /* Bluetooth Mode Status */
78
79static int btsdio_tx_packet(struct btsdio_data *data, struct sk_buff *skb)
80{
81 int err;
82
83 BT_DBG("%s", data->hdev->name);
84
85 /* Prepend Type-A header */
86 skb_push(skb, 4);
87 skb->data[0] = (skb->len & 0x0000ff);
88 skb->data[1] = (skb->len & 0x00ff00) >> 8;
89 skb->data[2] = (skb->len & 0xff0000) >> 16;
90 skb->data[3] = bt_cb(skb)->pkt_type;
91
92 err = sdio_writesb(data->func, REG_TDAT, skb->data, skb->len);
93 if (err < 0) {
94 sdio_writeb(data->func, 0x01, REG_PC_WRT, NULL);
95 return err;
96 }
97
98 data->hdev->stat.byte_tx += skb->len;
99
100 kfree_skb(skb);
101
102 return 0;
103}
104
105static void btsdio_work(struct work_struct *work)
106{
107 struct btsdio_data *data = container_of(work, struct btsdio_data, work);
108 struct sk_buff *skb;
109 int err;
110
111 BT_DBG("%s", data->hdev->name);
112
113 sdio_claim_host(data->func);
114
115 while ((skb = skb_dequeue(&data->txq))) {
116 err = btsdio_tx_packet(data, skb);
117 if (err < 0) {
118 data->hdev->stat.err_tx++;
119 skb_queue_head(&data->txq, skb);
120 break;
121 }
122 }
123
124 sdio_release_host(data->func);
125}
126
127static int btsdio_rx_packet(struct btsdio_data *data)
128{
129 u8 hdr[4] __attribute__ ((aligned(4)));
130 struct sk_buff *skb;
131 int err, len;
132
133 BT_DBG("%s", data->hdev->name);
134
135 err = sdio_readsb(data->func, hdr, REG_RDAT, 4);
136 if (err < 0)
137 return err;
138
139 len = hdr[0] | (hdr[1] << 8) | (hdr[2] << 16);
140 if (len < 4 || len > 65543)
141 return -EILSEQ;
142
143 skb = bt_skb_alloc(len - 4, GFP_KERNEL);
144 if (!skb) {
145 /* Out of memory. Prepare a read retry and just
146 * return with the expectation that the next time
147 * we're called we'll have more memory. */
148 return -ENOMEM;
149 }
150
151 skb_put(skb, len - 4);
152
153 err = sdio_readsb(data->func, skb->data, REG_RDAT, len - 4);
154 if (err < 0) {
155 kfree(skb);
156 return err;
157 }
158
159 data->hdev->stat.byte_rx += len;
160
161 skb->dev = (void *) data->hdev;
162 bt_cb(skb)->pkt_type = hdr[3];
163
164 err = hci_recv_frame(skb);
165 if (err < 0) {
166 kfree(skb);
167 return err;
168 }
169
170 sdio_writeb(data->func, 0x00, REG_PC_RRT, NULL);
171
172 return 0;
173}
174
175static void btsdio_interrupt(struct sdio_func *func)
176{
177 struct btsdio_data *data = sdio_get_drvdata(func);
178 int intrd;
179
180 BT_DBG("%s", data->hdev->name);
181
182 intrd = sdio_readb(func, REG_INTRD, NULL);
183 if (intrd & 0x01) {
184 sdio_writeb(func, 0x01, REG_CL_INTRD, NULL);
185
186 if (btsdio_rx_packet(data) < 0) {
187 data->hdev->stat.err_rx++;
188 sdio_writeb(data->func, 0x01, REG_PC_RRT, NULL);
189 }
190 }
191}
192
193static int btsdio_open(struct hci_dev *hdev)
194{
195 struct btsdio_data *data = hdev->driver_data;
196 int err;
197
198 BT_DBG("%s", hdev->name);
199
200 if (test_and_set_bit(HCI_RUNNING, &hdev->flags))
201 return 0;
202
203 sdio_claim_host(data->func);
204
205 err = sdio_enable_func(data->func);
206 if (err < 0) {
207 clear_bit(HCI_RUNNING, &hdev->flags);
208 goto release;
209 }
210
211 err = sdio_claim_irq(data->func, btsdio_interrupt);
212 if (err < 0) {
213 sdio_disable_func(data->func);
214 clear_bit(HCI_RUNNING, &hdev->flags);
215 goto release;
216 }
217
218 if (data->func->class == SDIO_CLASS_BT_B)
219 sdio_writeb(data->func, 0x00, REG_MD_STAT, NULL);
220
221 sdio_writeb(data->func, 0x01, REG_EN_INTRD, NULL);
222
223release:
224 sdio_release_host(data->func);
225
226 return err;
227}
228
229static int btsdio_close(struct hci_dev *hdev)
230{
231 struct btsdio_data *data = hdev->driver_data;
232
233 BT_DBG("%s", hdev->name);
234
235 if (!test_and_clear_bit(HCI_RUNNING, &hdev->flags))
236 return 0;
237
238 sdio_claim_host(data->func);
239
240 sdio_writeb(data->func, 0x00, REG_EN_INTRD, NULL);
241
242 sdio_release_irq(data->func);
243 sdio_disable_func(data->func);
244
245 sdio_release_host(data->func);
246
247 return 0;
248}
249
250static int btsdio_flush(struct hci_dev *hdev)
251{
252 struct btsdio_data *data = hdev->driver_data;
253
254 BT_DBG("%s", hdev->name);
255
256 skb_queue_purge(&data->txq);
257
258 return 0;
259}
260
261static int btsdio_send_frame(struct sk_buff *skb)
262{
263 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
264 struct btsdio_data *data = hdev->driver_data;
265
266 BT_DBG("%s", hdev->name);
267
268 if (!test_bit(HCI_RUNNING, &hdev->flags))
269 return -EBUSY;
270
271 switch (bt_cb(skb)->pkt_type) {
272 case HCI_COMMAND_PKT:
273 hdev->stat.cmd_tx++;
274 break;
275
276 case HCI_ACLDATA_PKT:
277 hdev->stat.acl_tx++;
278 break;
279
280 case HCI_SCODATA_PKT:
281 hdev->stat.sco_tx++;
282 break;
283
284 default:
285 return -EILSEQ;
286 }
287
288 skb_queue_tail(&data->txq, skb);
289
290 schedule_work(&data->work);
291
292 return 0;
293}
294
295static void btsdio_destruct(struct hci_dev *hdev)
296{
297 struct btsdio_data *data = hdev->driver_data;
298
299 BT_DBG("%s", hdev->name);
300
301 kfree(data);
302}
303
304static int btsdio_probe(struct sdio_func *func,
305 const struct sdio_device_id *id)
306{
307 struct btsdio_data *data;
308 struct hci_dev *hdev;
309 struct sdio_func_tuple *tuple = func->tuples;
310 int err;
311
312 BT_DBG("func %p id %p class 0x%04x", func, id, func->class);
313
314 while (tuple) {
315 BT_DBG("code 0x%x size %d", tuple->code, tuple->size);
316 tuple = tuple->next;
317 }
318
319 data = kzalloc(sizeof(*data), GFP_KERNEL);
320 if (!data)
321 return -ENOMEM;
322
323 data->func = func;
324
325 INIT_WORK(&data->work, btsdio_work);
326
327 skb_queue_head_init(&data->txq);
328
329 hdev = hci_alloc_dev();
330 if (!hdev) {
331 kfree(data);
332 return -ENOMEM;
333 }
334
335 hdev->type = HCI_SDIO;
336 hdev->driver_data = data;
337
338 data->hdev = hdev;
339
340 SET_HCIDEV_DEV(hdev, &func->dev);
341
342 hdev->open = btsdio_open;
343 hdev->close = btsdio_close;
344 hdev->flush = btsdio_flush;
345 hdev->send = btsdio_send_frame;
346 hdev->destruct = btsdio_destruct;
347
348 hdev->owner = THIS_MODULE;
349
350 err = hci_register_dev(hdev);
351 if (err < 0) {
352 hci_free_dev(hdev);
353 kfree(data);
354 return err;
355 }
356
357 sdio_set_drvdata(func, data);
358
359 return 0;
360}
361
362static void btsdio_remove(struct sdio_func *func)
363{
364 struct btsdio_data *data = sdio_get_drvdata(func);
365 struct hci_dev *hdev;
366
367 BT_DBG("func %p", func);
368
369 if (!data)
370 return;
371
372 hdev = data->hdev;
373
374 sdio_set_drvdata(func, NULL);
375
376 hci_unregister_dev(hdev);
377
378 hci_free_dev(hdev);
379}
380
381static struct sdio_driver btsdio_driver = {
382 .name = "btsdio",
383 .probe = btsdio_probe,
384 .remove = btsdio_remove,
385 .id_table = btsdio_table,
386};
387
388static int __init btsdio_init(void)
389{
390 BT_INFO("Generic Bluetooth SDIO driver ver %s", VERSION);
391
392 return sdio_register_driver(&btsdio_driver);
393}
394
395static void __exit btsdio_exit(void)
396{
397 sdio_unregister_driver(&btsdio_driver);
398}
399
400module_init(btsdio_init);
401module_exit(btsdio_exit);
402
403MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
404MODULE_DESCRIPTION("Generic Bluetooth SDIO driver ver " VERSION);
405MODULE_VERSION(VERSION);
406MODULE_LICENSE("GPL");
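
The Type-A framing used by btsdio_tx_packet() and btsdio_rx_packet() in the new driver above is a 4-byte header: a 24-bit little-endian length that covers the whole packet (header included) followed by the HCI packet type. A minimal standalone sketch of that encoding and the matching length check follows; it is not taken from the patch, just a restatement of the header layout.

#include <linux/types.h>
#include <linux/errno.h>

/* Type-A header layout: [len0][len1][len2][pkt_type], len = payload + 4 */
static void typea_put_hdr(u8 hdr[4], size_t payload_len, u8 pkt_type)
{
	size_t total = payload_len + 4;

	hdr[0] = total & 0xff;
	hdr[1] = (total >> 8) & 0xff;
	hdr[2] = (total >> 16) & 0xff;
	hdr[3] = pkt_type;
}

static int typea_get_len(const u8 hdr[4])
{
	int len = hdr[0] | (hdr[1] << 8) | (hdr[2] << 16);

	/* same sanity bounds as btsdio_rx_packet() */
	if (len < 4 || len > 65543)
		return -EILSEQ;

	return len;
}
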
diff --git a/drivers/bluetooth/btuart_cs.c b/drivers/bluetooth/btuart_cs.c
index d7d2ea0d86a1..08f48d577aba 100644
--- a/drivers/bluetooth/btuart_cs.c
+++ b/drivers/bluetooth/btuart_cs.c
@@ -294,10 +294,7 @@ static irqreturn_t btuart_interrupt(int irq, void *dev_inst)
294 int boguscount = 0; 294 int boguscount = 0;
295 int iir, lsr; 295 int iir, lsr;
296 296
297 if (!info || !info->hdev) { 297 BUG_ON(!info->hdev);
298 BT_ERR("Call of irq %d for unknown device", irq);
299 return IRQ_NONE;
300 }
301 298
302 iobase = info->p_dev->io.BasePort1; 299 iobase = info->p_dev->io.BasePort1;
303 300
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
new file mode 100644
index 000000000000..12e108914f19
--- /dev/null
+++ b/drivers/bluetooth/btusb.c
@@ -0,0 +1,564 @@
1/*
2 *
3 * Generic Bluetooth USB driver
4 *
5 * Copyright (C) 2005-2007 Marcel Holtmann <marcel@holtmann.org>
6 *
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 *
22 */
23
24#include <linux/kernel.h>
25#include <linux/module.h>
26#include <linux/init.h>
27#include <linux/slab.h>
28#include <linux/types.h>
29#include <linux/sched.h>
30#include <linux/errno.h>
31#include <linux/skbuff.h>
32
33#include <linux/usb.h>
34
35#include <net/bluetooth/bluetooth.h>
36#include <net/bluetooth/hci_core.h>
37
38//#define CONFIG_BT_HCIBTUSB_DEBUG
39#ifndef CONFIG_BT_HCIBTUSB_DEBUG
40#undef BT_DBG
41#define BT_DBG(D...)
42#endif
43
44#define VERSION "0.1"
45
46static struct usb_device_id btusb_table[] = {
47 /* Generic Bluetooth USB device */
48 { USB_DEVICE_INFO(0xe0, 0x01, 0x01) },
49
50 { } /* Terminating entry */
51};
52
53MODULE_DEVICE_TABLE(usb, btusb_table);
54
55static struct usb_device_id blacklist_table[] = {
56 { } /* Terminating entry */
57};
58
59#define BTUSB_INTR_RUNNING 0
60#define BTUSB_BULK_RUNNING 1
61
62struct btusb_data {
63 struct hci_dev *hdev;
64 struct usb_device *udev;
65
66 spinlock_t lock;
67
68 unsigned long flags;
69
70 struct work_struct work;
71
72 struct usb_anchor tx_anchor;
73 struct usb_anchor intr_anchor;
74 struct usb_anchor bulk_anchor;
75
76 struct usb_endpoint_descriptor *intr_ep;
77 struct usb_endpoint_descriptor *bulk_tx_ep;
78 struct usb_endpoint_descriptor *bulk_rx_ep;
79};
80
81static void btusb_intr_complete(struct urb *urb)
82{
83 struct hci_dev *hdev = urb->context;
84 struct btusb_data *data = hdev->driver_data;
85 int err;
86
87 BT_DBG("%s urb %p status %d count %d", hdev->name,
88 urb, urb->status, urb->actual_length);
89
90 if (!test_bit(HCI_RUNNING, &hdev->flags))
91 return;
92
93 if (urb->status == 0) {
94 if (hci_recv_fragment(hdev, HCI_EVENT_PKT,
95 urb->transfer_buffer,
96 urb->actual_length) < 0) {
97 BT_ERR("%s corrupted event packet", hdev->name);
98 hdev->stat.err_rx++;
99 }
100 }
101
102 if (!test_bit(BTUSB_INTR_RUNNING, &data->flags))
103 return;
104
105 usb_anchor_urb(urb, &data->intr_anchor);
106
107 err = usb_submit_urb(urb, GFP_ATOMIC);
108 if (err < 0) {
109 BT_ERR("%s urb %p failed to resubmit (%d)",
110 hdev->name, urb, -err);
111 usb_unanchor_urb(urb);
112 }
113}
114
115static inline int btusb_submit_intr_urb(struct hci_dev *hdev)
116{
117 struct btusb_data *data = hdev->driver_data;
118 struct urb *urb;
119 unsigned char *buf;
120 unsigned int pipe;
121 int err, size;
122
123 BT_DBG("%s", hdev->name);
124
125 urb = usb_alloc_urb(0, GFP_ATOMIC);
126 if (!urb)
127 return -ENOMEM;
128
129 size = le16_to_cpu(data->intr_ep->wMaxPacketSize);
130
131 buf = kmalloc(size, GFP_ATOMIC);
132 if (!buf) {
133 usb_free_urb(urb);
134 return -ENOMEM;
135 }
136
137 pipe = usb_rcvintpipe(data->udev, data->intr_ep->bEndpointAddress);
138
139 usb_fill_int_urb(urb, data->udev, pipe, buf, size,
140 btusb_intr_complete, hdev,
141 data->intr_ep->bInterval);
142
143 urb->transfer_flags |= URB_FREE_BUFFER;
144
145 usb_anchor_urb(urb, &data->intr_anchor);
146
147 err = usb_submit_urb(urb, GFP_ATOMIC);
148 if (err < 0) {
149 BT_ERR("%s urb %p submission failed (%d)",
150 hdev->name, urb, -err);
151 usb_unanchor_urb(urb);
152 kfree(buf);
153 }
154
155 usb_free_urb(urb);
156
157 return err;
158}
159
160static void btusb_bulk_complete(struct urb *urb)
161{
162 struct hci_dev *hdev = urb->context;
163 struct btusb_data *data = hdev->driver_data;
164 int err;
165
166 BT_DBG("%s urb %p status %d count %d", hdev->name,
167 urb, urb->status, urb->actual_length);
168
169 if (!test_bit(HCI_RUNNING, &hdev->flags))
170 return;
171
172 if (urb->status == 0) {
173 if (hci_recv_fragment(hdev, HCI_ACLDATA_PKT,
174 urb->transfer_buffer,
175 urb->actual_length) < 0) {
176 BT_ERR("%s corrupted ACL packet", hdev->name);
177 hdev->stat.err_rx++;
178 }
179 }
180
181 if (!test_bit(BTUSB_BULK_RUNNING, &data->flags))
182 return;
183
184 usb_anchor_urb(urb, &data->bulk_anchor);
185
186 err = usb_submit_urb(urb, GFP_ATOMIC);
187 if (err < 0) {
188 BT_ERR("%s urb %p failed to resubmit (%d)",
189 hdev->name, urb, -err);
190 usb_unanchor_urb(urb);
191 }
192}
193
194static inline int btusb_submit_bulk_urb(struct hci_dev *hdev)
195{
196 struct btusb_data *data = hdev->driver_data;
197 struct urb *urb;
198 unsigned char *buf;
199 unsigned int pipe;
200 int err, size;
201
202 BT_DBG("%s", hdev->name);
203
204 urb = usb_alloc_urb(0, GFP_KERNEL);
205 if (!urb)
206 return -ENOMEM;
207
208 size = le16_to_cpu(data->bulk_rx_ep->wMaxPacketSize);
209
210 buf = kmalloc(size, GFP_KERNEL);
211 if (!buf) {
212 usb_free_urb(urb);
213 return -ENOMEM;
214 }
215
216 pipe = usb_rcvbulkpipe(data->udev, data->bulk_rx_ep->bEndpointAddress);
217
218 usb_fill_bulk_urb(urb, data->udev, pipe,
219 buf, size, btusb_bulk_complete, hdev);
220
221 urb->transfer_flags |= URB_FREE_BUFFER;
222
223 usb_anchor_urb(urb, &data->bulk_anchor);
224
225 err = usb_submit_urb(urb, GFP_KERNEL);
226 if (err < 0) {
227 BT_ERR("%s urb %p submission failed (%d)",
228 hdev->name, urb, -err);
229 usb_unanchor_urb(urb);
230 kfree(buf);
231 }
232
233 usb_free_urb(urb);
234
235 return err;
236}
237
238static void btusb_tx_complete(struct urb *urb)
239{
240 struct sk_buff *skb = urb->context;
241 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
242
243 BT_DBG("%s urb %p status %d count %d", hdev->name,
244 urb, urb->status, urb->actual_length);
245
246 if (!test_bit(HCI_RUNNING, &hdev->flags))
247 goto done;
248
249 if (!urb->status)
250 hdev->stat.byte_tx += urb->transfer_buffer_length;
251 else
252 hdev->stat.err_tx++;
253
254done:
255 kfree(urb->setup_packet);
256
257 kfree_skb(skb);
258}
259
260static int btusb_open(struct hci_dev *hdev)
261{
262 struct btusb_data *data = hdev->driver_data;
263 int err;
264
265 BT_DBG("%s", hdev->name);
266
267 if (test_and_set_bit(HCI_RUNNING, &hdev->flags))
268 return 0;
269
270 if (test_and_set_bit(BTUSB_INTR_RUNNING, &data->flags))
271 return 0;
272
273 err = btusb_submit_intr_urb(hdev);
274 if (err < 0) {
275 clear_bit(BTUSB_INTR_RUNNING, &hdev->flags);
276 clear_bit(HCI_RUNNING, &hdev->flags);
277 }
278
279 return err;
280}
281
282static int btusb_close(struct hci_dev *hdev)
283{
284 struct btusb_data *data = hdev->driver_data;
285
286 BT_DBG("%s", hdev->name);
287
288 if (!test_and_clear_bit(HCI_RUNNING, &hdev->flags))
289 return 0;
290
291 clear_bit(BTUSB_BULK_RUNNING, &data->flags);
292 usb_kill_anchored_urbs(&data->bulk_anchor);
293
294 clear_bit(BTUSB_INTR_RUNNING, &data->flags);
295 usb_kill_anchored_urbs(&data->intr_anchor);
296
297 return 0;
298}
299
300static int btusb_flush(struct hci_dev *hdev)
301{
302 struct btusb_data *data = hdev->driver_data;
303
304 BT_DBG("%s", hdev->name);
305
306 usb_kill_anchored_urbs(&data->tx_anchor);
307
308 return 0;
309}
310
311static int btusb_send_frame(struct sk_buff *skb)
312{
313 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
314 struct btusb_data *data = hdev->driver_data;
315 struct usb_ctrlrequest *dr;
316 struct urb *urb;
317 unsigned int pipe;
318 int err;
319
320 BT_DBG("%s", hdev->name);
321
322 if (!test_bit(HCI_RUNNING, &hdev->flags))
323 return -EBUSY;
324
325 switch (bt_cb(skb)->pkt_type) {
326 case HCI_COMMAND_PKT:
327 urb = usb_alloc_urb(0, GFP_ATOMIC);
328 if (!urb)
329 return -ENOMEM;
330
331 dr = kmalloc(sizeof(*dr), GFP_ATOMIC);
332 if (!dr) {
333 usb_free_urb(urb);
334 return -ENOMEM;
335 }
336
337 dr->bRequestType = USB_TYPE_CLASS;
338 dr->bRequest = 0;
339 dr->wIndex = 0;
340 dr->wValue = 0;
341 dr->wLength = __cpu_to_le16(skb->len);
342
343 pipe = usb_sndctrlpipe(data->udev, 0x00);
344
345 usb_fill_control_urb(urb, data->udev, pipe, (void *) dr,
346 skb->data, skb->len, btusb_tx_complete, skb);
347
348 hdev->stat.cmd_tx++;
349 break;
350
351 case HCI_ACLDATA_PKT:
352 urb = usb_alloc_urb(0, GFP_ATOMIC);
353 if (!urb)
354 return -ENOMEM;
355
356 pipe = usb_sndbulkpipe(data->udev,
357 data->bulk_tx_ep->bEndpointAddress);
358
359 usb_fill_bulk_urb(urb, data->udev, pipe,
360 skb->data, skb->len, btusb_tx_complete, skb);
361
362 hdev->stat.acl_tx++;
363 break;
364
365 case HCI_SCODATA_PKT:
366 hdev->stat.sco_tx++;
367 kfree_skb(skb);
368 return 0;
369
370 default:
371 return -EILSEQ;
372 }
373
374 usb_anchor_urb(urb, &data->tx_anchor);
375
376 err = usb_submit_urb(urb, GFP_ATOMIC);
377 if (err < 0) {
378 BT_ERR("%s urb %p submission failed", hdev->name, urb);
379 kfree(urb->setup_packet);
380 usb_unanchor_urb(urb);
381 }
382
383 usb_free_urb(urb);
384
385 return err;
386}
387
388static void btusb_destruct(struct hci_dev *hdev)
389{
390 struct btusb_data *data = hdev->driver_data;
391
392 BT_DBG("%s", hdev->name);
393
394 kfree(data);
395}
396
397static void btusb_notify(struct hci_dev *hdev, unsigned int evt)
398{
399 struct btusb_data *data = hdev->driver_data;
400
401 BT_DBG("%s evt %d", hdev->name, evt);
402
403 if (evt == HCI_NOTIFY_CONN_ADD || evt == HCI_NOTIFY_CONN_DEL)
404 schedule_work(&data->work);
405}
406
407static void btusb_work(struct work_struct *work)
408{
409 struct btusb_data *data = container_of(work, struct btusb_data, work);
410 struct hci_dev *hdev = data->hdev;
411
412 if (hdev->conn_hash.acl_num == 0) {
413 clear_bit(BTUSB_BULK_RUNNING, &data->flags);
414 usb_kill_anchored_urbs(&data->bulk_anchor);
415 return;
416 }
417
418 if (!test_and_set_bit(BTUSB_BULK_RUNNING, &data->flags)) {
419 if (btusb_submit_bulk_urb(hdev) < 0)
420 clear_bit(BTUSB_BULK_RUNNING, &data->flags);
421 else
422 btusb_submit_bulk_urb(hdev);
423 }
424}
425
426static int btusb_probe(struct usb_interface *intf,
427 const struct usb_device_id *id)
428{
429 struct usb_endpoint_descriptor *ep_desc;
430 struct btusb_data *data;
431 struct hci_dev *hdev;
432 int i, err;
433
434 BT_DBG("intf %p id %p", intf, id);
435
436 if (intf->cur_altsetting->desc.bInterfaceNumber != 0)
437 return -ENODEV;
438
439 if (!id->driver_info) {
440 const struct usb_device_id *match;
441 match = usb_match_id(intf, blacklist_table);
442 if (match)
443 id = match;
444 }
445
446 data = kzalloc(sizeof(*data), GFP_KERNEL);
447 if (!data)
448 return -ENOMEM;
449
450 for (i = 0; i < intf->cur_altsetting->desc.bNumEndpoints; i++) {
451 ep_desc = &intf->cur_altsetting->endpoint[i].desc;
452
453 if (!data->intr_ep && usb_endpoint_is_int_in(ep_desc)) {
454 data->intr_ep = ep_desc;
455 continue;
456 }
457
458 if (!data->bulk_tx_ep && usb_endpoint_is_bulk_out(ep_desc)) {
459 data->bulk_tx_ep = ep_desc;
460 continue;
461 }
462
463 if (!data->bulk_rx_ep && usb_endpoint_is_bulk_in(ep_desc)) {
464 data->bulk_rx_ep = ep_desc;
465 continue;
466 }
467 }
468
469 if (!data->intr_ep || !data->bulk_tx_ep || !data->bulk_rx_ep) {
470 kfree(data);
471 return -ENODEV;
472 }
473
474 data->udev = interface_to_usbdev(intf);
475
476 spin_lock_init(&data->lock);
477
478 INIT_WORK(&data->work, btusb_work);
479
480 init_usb_anchor(&data->tx_anchor);
481 init_usb_anchor(&data->intr_anchor);
482 init_usb_anchor(&data->bulk_anchor);
483
484 hdev = hci_alloc_dev();
485 if (!hdev) {
486 kfree(data);
487 return -ENOMEM;
488 }
489
490 hdev->type = HCI_USB;
491 hdev->driver_data = data;
492
493 data->hdev = hdev;
494
495 SET_HCIDEV_DEV(hdev, &intf->dev);
496
497 hdev->open = btusb_open;
498 hdev->close = btusb_close;
499 hdev->flush = btusb_flush;
500 hdev->send = btusb_send_frame;
501 hdev->destruct = btusb_destruct;
502 hdev->notify = btusb_notify;
503
504 hdev->owner = THIS_MODULE;
505
506 set_bit(HCI_QUIRK_RESET_ON_INIT, &hdev->quirks);
507
508 err = hci_register_dev(hdev);
509 if (err < 0) {
510 hci_free_dev(hdev);
511 kfree(data);
512 return err;
513 }
514
515 usb_set_intfdata(intf, data);
516
517 return 0;
518}
519
520static void btusb_disconnect(struct usb_interface *intf)
521{
522 struct btusb_data *data = usb_get_intfdata(intf);
523 struct hci_dev *hdev;
524
525 BT_DBG("intf %p", intf);
526
527 if (!data)
528 return;
529
530 hdev = data->hdev;
531
532 usb_set_intfdata(intf, NULL);
533
534 hci_unregister_dev(hdev);
535
536 hci_free_dev(hdev);
537}
538
539static struct usb_driver btusb_driver = {
540 .name = "btusb",
541 .probe = btusb_probe,
542 .disconnect = btusb_disconnect,
543 .id_table = btusb_table,
544};
545
546static int __init btusb_init(void)
547{
548 BT_INFO("Generic Bluetooth USB driver ver %s", VERSION);
549
550 return usb_register(&btusb_driver);
551}
552
553static void __exit btusb_exit(void)
554{
555 usb_deregister(&btusb_driver);
556}
557
558module_init(btusb_init);
559module_exit(btusb_exit);
560
561MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
562MODULE_DESCRIPTION("Generic Bluetooth USB driver ver " VERSION);
563MODULE_VERSION(VERSION);
564MODULE_LICENSE("GPL");
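
In btusb_probe() above, the generic driver binds to interface 0 and sorts its endpoints purely by type and direction: the interrupt-IN endpoint carries HCI events and the bulk pair carries ACL data (SCO frames are currently counted and freed without being sent in btusb_send_frame()). A condensed sketch of that classification step, reusing the struct btusb_data fields from the patch; illustrative only.

#include <linux/usb.h>

static int btusb_classify_eps_sketch(struct usb_interface *intf,
					struct btusb_data *data)
{
	struct usb_endpoint_descriptor *ep;
	int i;

	for (i = 0; i < intf->cur_altsetting->desc.bNumEndpoints; i++) {
		ep = &intf->cur_altsetting->endpoint[i].desc;

		if (!data->intr_ep && usb_endpoint_is_int_in(ep))
			data->intr_ep = ep;		/* HCI events */
		else if (!data->bulk_tx_ep && usb_endpoint_is_bulk_out(ep))
			data->bulk_tx_ep = ep;		/* ACL out */
		else if (!data->bulk_rx_ep && usb_endpoint_is_bulk_in(ep))
			data->bulk_rx_ep = ep;		/* ACL in */
	}

	/* All three are required, otherwise the interface is unusable */
	if (!data->intr_ep || !data->bulk_tx_ep || !data->bulk_rx_ep)
		return -ENODEV;

	return 0;
}
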
diff --git a/drivers/bluetooth/dtl1_cs.c b/drivers/bluetooth/dtl1_cs.c
index 7f9c54b9964a..dae45cdf02b2 100644
--- a/drivers/bluetooth/dtl1_cs.c
+++ b/drivers/bluetooth/dtl1_cs.c
@@ -298,10 +298,7 @@ static irqreturn_t dtl1_interrupt(int irq, void *dev_inst)
298 int boguscount = 0; 298 int boguscount = 0;
299 int iir, lsr; 299 int iir, lsr;
300 300
301 if (!info || !info->hdev) { 301 BUG_ON(!info->hdev);
302 BT_ERR("Call of irq %d for unknown device", irq);
303 return IRQ_NONE;
304 }
305 302
306 iobase = info->p_dev->io.BasePort1; 303 iobase = info->p_dev->io.BasePort1;
307 304
diff --git a/drivers/bluetooth/hci_bcsp.c b/drivers/bluetooth/hci_bcsp.c
index d66064ccb31c..696f7528f022 100644
--- a/drivers/bluetooth/hci_bcsp.c
+++ b/drivers/bluetooth/hci_bcsp.c
@@ -237,7 +237,8 @@ static struct sk_buff *bcsp_prepare_pkt(struct bcsp_struct *bcsp, u8 *data,
237 if (hciextn && chan == 5) { 237 if (hciextn && chan == 5) {
238 struct hci_command_hdr *hdr = (struct hci_command_hdr *) data; 238 struct hci_command_hdr *hdr = (struct hci_command_hdr *) data;
239 239
240 if (hci_opcode_ogf(__le16_to_cpu(hdr->opcode)) == OGF_VENDOR_CMD) { 240 /* Vendor specific commands */
241 if (hci_opcode_ogf(__le16_to_cpu(hdr->opcode)) == 0x3f) {
241 u8 desc = *(data + HCI_COMMAND_HDR_SIZE); 242 u8 desc = *(data + HCI_COMMAND_HDR_SIZE);
242 if ((desc & 0xf0) == 0xc0) { 243 if ((desc & 0xf0) == 0xc0) {
243 data += HCI_COMMAND_HDR_SIZE + 1; 244 data += HCI_COMMAND_HDR_SIZE + 1;
diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
index 6055b9c0ac0f..e68821d074b0 100644
--- a/drivers/bluetooth/hci_ldisc.c
+++ b/drivers/bluetooth/hci_ldisc.c
@@ -549,7 +549,10 @@ static int __init hci_uart_init(void)
549#ifdef CONFIG_BT_HCIUART_BCSP 549#ifdef CONFIG_BT_HCIUART_BCSP
550 bcsp_init(); 550 bcsp_init();
551#endif 551#endif
552 552#ifdef CONFIG_BT_HCIUART_LL
553 ll_init();
554#endif
555
553 return 0; 556 return 0;
554} 557}
555 558
@@ -563,6 +566,9 @@ static void __exit hci_uart_exit(void)
563#ifdef CONFIG_BT_HCIUART_BCSP 566#ifdef CONFIG_BT_HCIUART_BCSP
564 bcsp_deinit(); 567 bcsp_deinit();
565#endif 568#endif
569#ifdef CONFIG_BT_HCIUART_LL
570 ll_deinit();
571#endif
566 572
567 /* Release tty registration of line discipline */ 573 /* Release tty registration of line discipline */
568 if ((err = tty_unregister_ldisc(N_HCI))) 574 if ((err = tty_unregister_ldisc(N_HCI)))
diff --git a/drivers/bluetooth/hci_ll.c b/drivers/bluetooth/hci_ll.c
new file mode 100644
index 000000000000..8c3e62a17b4a
--- /dev/null
+++ b/drivers/bluetooth/hci_ll.c
@@ -0,0 +1,531 @@
1/*
2 * Texas Instruments' Bluetooth HCILL UART protocol
3 *
4 * HCILL (HCI Low Level) is a Texas Instruments' power management
5 * protocol extension to H4.
6 *
7 * Copyright (C) 2007 Texas Instruments, Inc.
8 *
9 * Written by Ohad Ben-Cohen <ohad@bencohen.org>
10 *
11 * Acknowledgements:
12 * This file is based on hci_h4.c, which was written
13 * by Maxim Krasnyansky and Marcel Holtmann.
14 *
15 * This program is free software; you can redistribute it and/or modify
16 * it under the terms of the GNU General Public License version 2
17 * as published by the Free Software Foundation
18 *
19 * This program is distributed in the hope that it will be useful,
20 * but WITHOUT ANY WARRANTY; without even the implied warranty of
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22 * GNU General Public License for more details.
23 *
24 * You should have received a copy of the GNU General Public License
25 * along with this program; if not, write to the Free Software
26 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
27 *
28 */
29
30#include <linux/module.h>
31#include <linux/kernel.h>
32
33#include <linux/init.h>
34#include <linux/sched.h>
35#include <linux/types.h>
36#include <linux/fcntl.h>
37#include <linux/interrupt.h>
38#include <linux/ptrace.h>
39#include <linux/poll.h>
40
41#include <linux/slab.h>
42#include <linux/tty.h>
43#include <linux/errno.h>
44#include <linux/string.h>
45#include <linux/signal.h>
46#include <linux/ioctl.h>
47#include <linux/skbuff.h>
48
49#include <net/bluetooth/bluetooth.h>
50#include <net/bluetooth/hci_core.h>
51
52#include "hci_uart.h"
53
54/* HCILL commands */
55#define HCILL_GO_TO_SLEEP_IND 0x30
56#define HCILL_GO_TO_SLEEP_ACK 0x31
57#define HCILL_WAKE_UP_IND 0x32
58#define HCILL_WAKE_UP_ACK 0x33
59
60/* HCILL receiver States */
61#define HCILL_W4_PACKET_TYPE 0
62#define HCILL_W4_EVENT_HDR 1
63#define HCILL_W4_ACL_HDR 2
64#define HCILL_W4_SCO_HDR 3
65#define HCILL_W4_DATA 4
66
67/* HCILL states */
68enum hcill_states_e {
69 HCILL_ASLEEP,
70 HCILL_ASLEEP_TO_AWAKE,
71 HCILL_AWAKE,
72 HCILL_AWAKE_TO_ASLEEP
73};
74
75struct hcill_cmd {
76 u8 cmd;
77} __attribute__((packed));
78
79struct ll_struct {
80 unsigned long rx_state;
81 unsigned long rx_count;
82 struct sk_buff *rx_skb;
83 struct sk_buff_head txq;
84 spinlock_t hcill_lock; /* HCILL state lock */
85 unsigned long hcill_state; /* HCILL power state */
86 struct sk_buff_head tx_wait_q; /* HCILL wait queue */
87};
88
89/*
90 * Builds and sends an HCILL command packet.
91 * These are very simple packets with only 1 cmd byte
92 */
93static int send_hcill_cmd(u8 cmd, struct hci_uart *hu)
94{
95 int err = 0;
96 struct sk_buff *skb = NULL;
97 struct ll_struct *ll = hu->priv;
98 struct hcill_cmd *hcill_packet;
99
100 BT_DBG("hu %p cmd 0x%x", hu, cmd);
101
102 /* allocate packet */
103 skb = bt_skb_alloc(1, GFP_ATOMIC);
104 if (!skb) {
105 BT_ERR("cannot allocate memory for HCILL packet");
106 err = -ENOMEM;
107 goto out;
108 }
109
110 /* prepare packet */
111 hcill_packet = (struct hcill_cmd *) skb_put(skb, 1);
112 hcill_packet->cmd = cmd;
113 skb->dev = (void *) hu->hdev;
114
115 /* send packet */
116 skb_queue_tail(&ll->txq, skb);
117out:
118 return err;
119}
120
121/* Initialize protocol */
122static int ll_open(struct hci_uart *hu)
123{
124 struct ll_struct *ll;
125
126 BT_DBG("hu %p", hu);
127
128 ll = kzalloc(sizeof(*ll), GFP_ATOMIC);
129 if (!ll)
130 return -ENOMEM;
131
132 skb_queue_head_init(&ll->txq);
133 skb_queue_head_init(&ll->tx_wait_q);
134 spin_lock_init(&ll->hcill_lock);
135
136 ll->hcill_state = HCILL_AWAKE;
137
138 hu->priv = ll;
139
140 return 0;
141}
142
143/* Flush protocol data */
144static int ll_flush(struct hci_uart *hu)
145{
146 struct ll_struct *ll = hu->priv;
147
148 BT_DBG("hu %p", hu);
149
150 skb_queue_purge(&ll->tx_wait_q);
151 skb_queue_purge(&ll->txq);
152
153 return 0;
154}
155
156/* Close protocol */
157static int ll_close(struct hci_uart *hu)
158{
159 struct ll_struct *ll = hu->priv;
160
161 BT_DBG("hu %p", hu);
162
163 skb_queue_purge(&ll->tx_wait_q);
164 skb_queue_purge(&ll->txq);
165
166 if (ll->rx_skb)
167 kfree_skb(ll->rx_skb);
168
169 hu->priv = NULL;
170
171 kfree(ll);
172
173 return 0;
174}
175
176/*
177 * internal function, which does common work of the device wake up process:
178 * 1. places all pending packets (waiting in tx_wait_q list) in txq list.
179 * 2. changes internal state to HCILL_AWAKE.
180 * Note: assumes that hcill_lock spinlock is taken,
181 * shouldn't be called otherwise!
182 */
183static void __ll_do_awake(struct ll_struct *ll)
184{
185 struct sk_buff *skb = NULL;
186
187 while ((skb = skb_dequeue(&ll->tx_wait_q)))
188 skb_queue_tail(&ll->txq, skb);
189
190 ll->hcill_state = HCILL_AWAKE;
191}
192
193/*
194 * Called upon a wake-up-indication from the device
195 */
196static void ll_device_want_to_wakeup(struct hci_uart *hu)
197{
198 unsigned long flags;
199 struct ll_struct *ll = hu->priv;
200
201 BT_DBG("hu %p", hu);
202
203 /* lock hcill state */
204 spin_lock_irqsave(&ll->hcill_lock, flags);
205
206 switch (ll->hcill_state) {
207 case HCILL_ASLEEP:
208 /* acknowledge device wake up */
209 if (send_hcill_cmd(HCILL_WAKE_UP_ACK, hu) < 0) {
210 BT_ERR("cannot acknowledge device wake up");
211 goto out;
212 }
213 break;
214 case HCILL_ASLEEP_TO_AWAKE:
215 /*
216 * this state means that a wake-up-indication
217 * is already on its way to the device,
218 * and will serve as the required wake-up-ack
219 */
220 BT_DBG("dual wake-up-indication");
221 break;
222 default:
223 /* any other state is illegal */
224 BT_ERR("received HCILL_WAKE_UP_IND in state %ld", ll->hcill_state);
225 break;
226 }
227
228 /* send pending packets and change state to HCILL_AWAKE */
229 __ll_do_awake(ll);
230
231out:
232 spin_unlock_irqrestore(&ll->hcill_lock, flags);
233
234 /* actually send the packets */
235 hci_uart_tx_wakeup(hu);
236}
237
238/*
239 * Called upon a sleep-indication from the device
240 */
241static void ll_device_want_to_sleep(struct hci_uart *hu)
242{
243 unsigned long flags;
244 struct ll_struct *ll = hu->priv;
245
246 BT_DBG("hu %p", hu);
247
248 /* lock hcill state */
249 spin_lock_irqsave(&ll->hcill_lock, flags);
250
251 /* sanity check */
252 if (ll->hcill_state != HCILL_AWAKE)
253 BT_ERR("ERR: HCILL_GO_TO_SLEEP_IND in state %ld", ll->hcill_state);
254
255 /* acknowledge device sleep */
256 if (send_hcill_cmd(HCILL_GO_TO_SLEEP_ACK, hu) < 0) {
257 BT_ERR("cannot acknowledge device sleep");
258 goto out;
259 }
260
261 /* update state */
262 ll->hcill_state = HCILL_ASLEEP;
263
264out:
265 spin_unlock_irqrestore(&ll->hcill_lock, flags);
266
267 /* actually send the sleep ack packet */
268 hci_uart_tx_wakeup(hu);
269}
270
271/*
272 * Called upon wake-up-acknowledgement from the device
273 */
274static void ll_device_woke_up(struct hci_uart *hu)
275{
276 unsigned long flags;
277 struct ll_struct *ll = hu->priv;
278
279 BT_DBG("hu %p", hu);
280
281 /* lock hcill state */
282 spin_lock_irqsave(&ll->hcill_lock, flags);
283
284 /* sanity check */
285 if (ll->hcill_state != HCILL_ASLEEP_TO_AWAKE)
286 BT_ERR("received HCILL_WAKE_UP_ACK in state %ld", ll->hcill_state);
287
288 /* send pending packets and change state to HCILL_AWAKE */
289 __ll_do_awake(ll);
290
291 spin_unlock_irqrestore(&ll->hcill_lock, flags);
292
293 /* actually send the packets */
294 hci_uart_tx_wakeup(hu);
295}
296
297/* Enqueue frame for transmission (padding, crc, etc) */
298/* may be called from two simultaneous tasklets */
299static int ll_enqueue(struct hci_uart *hu, struct sk_buff *skb)
300{
301 unsigned long flags = 0;
302 struct ll_struct *ll = hu->priv;
303
304 BT_DBG("hu %p skb %p", hu, skb);
305
306 /* Prepend skb with frame type */
307 memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1);
308
309 /* lock hcill state */
310 spin_lock_irqsave(&ll->hcill_lock, flags);
311
312 /* act according to current state */
313 switch (ll->hcill_state) {
314 case HCILL_AWAKE:
315 BT_DBG("device awake, sending normally");
316 skb_queue_tail(&ll->txq, skb);
317 break;
318 case HCILL_ASLEEP:
319 BT_DBG("device asleep, waking up and queueing packet");
320 /* save packet for later */
321 skb_queue_tail(&ll->tx_wait_q, skb);
322 /* wake up the device */
323 if (send_hcill_cmd(HCILL_WAKE_UP_IND, hu) < 0) {
324 BT_ERR("cannot wake up device");
325 break;
326 }
327 ll->hcill_state = HCILL_ASLEEP_TO_AWAKE;
328 break;
329 case HCILL_ASLEEP_TO_AWAKE:
330 BT_DBG("device waking up, queueing packet");
331 /* transient state; just keep packet for later */
332 skb_queue_tail(&ll->tx_wait_q, skb);
333 break;
334 default:
335 BT_ERR("illegal hcill state: %ld (losing packet)", ll->hcill_state);
336 kfree_skb(skb);
337 break;
338 }
339
340 spin_unlock_irqrestore(&ll->hcill_lock, flags);
341
342 return 0;
343}
344
345static inline int ll_check_data_len(struct ll_struct *ll, int len)
346{
347 register int room = skb_tailroom(ll->rx_skb);
348
349 BT_DBG("len %d room %d", len, room);
350
351 if (!len) {
352 hci_recv_frame(ll->rx_skb);
353 } else if (len > room) {
354 BT_ERR("Data length is too large");
355 kfree_skb(ll->rx_skb);
356 } else {
357 ll->rx_state = HCILL_W4_DATA;
358 ll->rx_count = len;
359 return len;
360 }
361
362 ll->rx_state = HCILL_W4_PACKET_TYPE;
363 ll->rx_skb = NULL;
364 ll->rx_count = 0;
365
366 return 0;
367}
368
369/* Recv data */
370static int ll_recv(struct hci_uart *hu, void *data, int count)
371{
372 struct ll_struct *ll = hu->priv;
373 register char *ptr;
374 struct hci_event_hdr *eh;
375 struct hci_acl_hdr *ah;
376 struct hci_sco_hdr *sh;
377 register int len, type, dlen;
378
379 BT_DBG("hu %p count %d rx_state %ld rx_count %ld", hu, count, ll->rx_state, ll->rx_count);
380
381 ptr = data;
382 while (count) {
383 if (ll->rx_count) {
384 len = min_t(unsigned int, ll->rx_count, count);
385 memcpy(skb_put(ll->rx_skb, len), ptr, len);
386 ll->rx_count -= len; count -= len; ptr += len;
387
388 if (ll->rx_count)
389 continue;
390
391 switch (ll->rx_state) {
392 case HCILL_W4_DATA:
393 BT_DBG("Complete data");
394 hci_recv_frame(ll->rx_skb);
395
396 ll->rx_state = HCILL_W4_PACKET_TYPE;
397 ll->rx_skb = NULL;
398 continue;
399
400 case HCILL_W4_EVENT_HDR:
401 eh = (struct hci_event_hdr *) ll->rx_skb->data;
402
403 BT_DBG("Event header: evt 0x%2.2x plen %d", eh->evt, eh->plen);
404
405 ll_check_data_len(ll, eh->plen);
406 continue;
407
408 case HCILL_W4_ACL_HDR:
409 ah = (struct hci_acl_hdr *) ll->rx_skb->data;
410 dlen = __le16_to_cpu(ah->dlen);
411
412 BT_DBG("ACL header: dlen %d", dlen);
413
414 ll_check_data_len(ll, dlen);
415 continue;
416
417 case HCILL_W4_SCO_HDR:
418 sh = (struct hci_sco_hdr *) ll->rx_skb->data;
419
420 BT_DBG("SCO header: dlen %d", sh->dlen);
421
422 ll_check_data_len(ll, sh->dlen);
423 continue;
424 }
425 }
426
427 /* HCILL_W4_PACKET_TYPE */
428 switch (*ptr) {
429 case HCI_EVENT_PKT:
430 BT_DBG("Event packet");
431 ll->rx_state = HCILL_W4_EVENT_HDR;
432 ll->rx_count = HCI_EVENT_HDR_SIZE;
433 type = HCI_EVENT_PKT;
434 break;
435
436 case HCI_ACLDATA_PKT:
437 BT_DBG("ACL packet");
438 ll->rx_state = HCILL_W4_ACL_HDR;
439 ll->rx_count = HCI_ACL_HDR_SIZE;
440 type = HCI_ACLDATA_PKT;
441 break;
442
443 case HCI_SCODATA_PKT:
444 BT_DBG("SCO packet");
445 ll->rx_state = HCILL_W4_SCO_HDR;
446 ll->rx_count = HCI_SCO_HDR_SIZE;
447 type = HCI_SCODATA_PKT;
448 break;
449
450 /* HCILL signals */
451 case HCILL_GO_TO_SLEEP_IND:
452 BT_DBG("HCILL_GO_TO_SLEEP_IND packet");
453 ll_device_want_to_sleep(hu);
454 ptr++; count--;
455 continue;
456
457 case HCILL_GO_TO_SLEEP_ACK:
458 /* shouldn't happen */
459 BT_ERR("received HCILL_GO_TO_SLEEP_ACK (in state %ld)", ll->hcill_state);
460 ptr++; count--;
461 continue;
462
463 case HCILL_WAKE_UP_IND:
464 BT_DBG("HCILL_WAKE_UP_IND packet");
465 ll_device_want_to_wakeup(hu);
466 ptr++; count--;
467 continue;
468
469 case HCILL_WAKE_UP_ACK:
470 BT_DBG("HCILL_WAKE_UP_ACK packet");
471 ll_device_woke_up(hu);
472 ptr++; count--;
473 continue;
474
475 default:
476 BT_ERR("Unknown HCI packet type %2.2x", (__u8)*ptr);
477 hu->hdev->stat.err_rx++;
478 ptr++; count--;
479 continue;
480 };
481
482 ptr++; count--;
483
484 /* Allocate packet */
485 ll->rx_skb = bt_skb_alloc(HCI_MAX_FRAME_SIZE, GFP_ATOMIC);
486 if (!ll->rx_skb) {
487 BT_ERR("Can't allocate mem for new packet");
488 ll->rx_state = HCILL_W4_PACKET_TYPE;
489 ll->rx_count = 0;
490 return 0;
491 }
492
493 ll->rx_skb->dev = (void *) hu->hdev;
494 bt_cb(ll->rx_skb)->pkt_type = type;
495 }
496
497 return count;
498}
499
500static struct sk_buff *ll_dequeue(struct hci_uart *hu)
501{
502 struct ll_struct *ll = hu->priv;
503 return skb_dequeue(&ll->txq);
504}
505
506static struct hci_uart_proto llp = {
507 .id = HCI_UART_LL,
508 .open = ll_open,
509 .close = ll_close,
510 .recv = ll_recv,
511 .enqueue = ll_enqueue,
512 .dequeue = ll_dequeue,
513 .flush = ll_flush,
514};
515
516int ll_init(void)
517{
518 int err = hci_uart_register_proto(&llp);
519
520 if (!err)
521 BT_INFO("HCILL protocol initialized");
522 else
523 BT_ERR("HCILL protocol registration failed");
524
525 return err;
526}
527
528int ll_deinit(void)
529{
530 return hci_uart_unregister_proto(&llp);
531}
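
Stepping back from the hci_ll.c details above, the HCILL power handshake reduces to four states and four one-byte opcodes. The helper below is a compact restatement of the transitions implemented by ll_enqueue(), ll_device_want_to_wakeup(), ll_device_want_to_sleep() and ll_device_woke_up(); it is illustrative only and reuses the hcill_states_e values from the file.

/* event: what just happened on the link or from the stack */
enum hcill_event_sketch {
	EV_HOST_TX,		/* stack handed us a frame (ll_enqueue)        */
	EV_WAKE_UP_IND,		/* device asks to wake up                      */
	EV_WAKE_UP_ACK,		/* device acknowledges our wake-up-indication  */
	EV_GO_TO_SLEEP_IND,	/* device asks to sleep                        */
};

static enum hcill_states_e hcill_next_state_sketch(enum hcill_states_e state,
						   enum hcill_event_sketch ev)
{
	switch (ev) {
	case EV_HOST_TX:
		/* while asleep: queue on tx_wait_q, send WAKE_UP_IND, wait for ack */
		return state == HCILL_ASLEEP ? HCILL_ASLEEP_TO_AWAKE : state;
	case EV_WAKE_UP_IND:
		/* reply WAKE_UP_ACK (or treat it as the ack) and flush tx_wait_q */
		return HCILL_AWAKE;
	case EV_WAKE_UP_ACK:
		/* our wake-up-indication was accepted: flush tx_wait_q */
		return HCILL_AWAKE;
	case EV_GO_TO_SLEEP_IND:
		/* reply GO_TO_SLEEP_ACK and hold further TX until woken again */
		return HCILL_ASLEEP;
	}
	return state;
}
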
diff --git a/drivers/bluetooth/hci_uart.h b/drivers/bluetooth/hci_uart.h
index 1097ce72393f..50113db06b9f 100644
--- a/drivers/bluetooth/hci_uart.h
+++ b/drivers/bluetooth/hci_uart.h
@@ -33,12 +33,13 @@
33#define HCIUARTGETDEVICE _IOR('U', 202, int) 33#define HCIUARTGETDEVICE _IOR('U', 202, int)
34 34
35/* UART protocols */ 35/* UART protocols */
36#define HCI_UART_MAX_PROTO 4 36#define HCI_UART_MAX_PROTO 5
37 37
38#define HCI_UART_H4 0 38#define HCI_UART_H4 0
39#define HCI_UART_BCSP 1 39#define HCI_UART_BCSP 1
40#define HCI_UART_3WIRE 2 40#define HCI_UART_3WIRE 2
41#define HCI_UART_H4DS 3 41#define HCI_UART_H4DS 3
42#define HCI_UART_LL 4
42 43
43struct hci_uart; 44struct hci_uart;
44 45
@@ -85,3 +86,8 @@ int h4_deinit(void);
85int bcsp_init(void); 86int bcsp_init(void);
86int bcsp_deinit(void); 87int bcsp_deinit(void);
87#endif 88#endif
89
90#ifdef CONFIG_BT_HCIUART_LL
91int ll_init(void);
92int ll_deinit(void);
93#endif
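
With HCI_UART_LL registered above, a TI HCILL-based UART is attached from userspace the same way as the other hci_uart protocols: put the tty into the N_HCI line discipline, then select the protocol id with HCIUARTSETPROTO (this is what hciattach does). The sketch below is a hedged userspace example; N_HCI and the ioctl number are the customary values, restated here rather than taken from headers shown in this diff.

#include <sys/ioctl.h>
#include <fcntl.h>
#include <unistd.h>

#define N_HCI			15		/* Bluetooth HCI line discipline */
#define HCIUARTSETPROTO		_IOW('U', 200, int)
#define HCI_UART_LL		4		/* new protocol id from this patch */

int attach_hcill(const char *tty)
{
	int ldisc = N_HCI;
	int fd = open(tty, O_RDWR | O_NOCTTY);

	if (fd < 0)
		return -1;

	if (ioctl(fd, TIOCSETD, &ldisc) < 0 ||		/* hand the tty to hci_uart */
	    ioctl(fd, HCIUARTSETPROTO, HCI_UART_LL) < 0) {	/* pick HCILL */
		close(fd);
		return -1;
	}

	return fd;	/* keep the fd open; closing it detaches the HCI device */
}
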
diff --git a/drivers/char/cyclades.c b/drivers/char/cyclades.c
index d1bd0f08a331..e4f579c3e245 100644
--- a/drivers/char/cyclades.c
+++ b/drivers/char/cyclades.c
@@ -1602,8 +1602,8 @@ static void cyz_handle_tx(struct cyclades_port *info,
1602 info->icount.tx++; 1602 info->icount.tx++;
1603 } 1603 }
1604#endif 1604#endif
1605ztxdone:
1606 tty_wakeup(tty); 1605 tty_wakeup(tty);
1606ztxdone:
1607 /* Update tx_put */ 1607 /* Update tx_put */
1608 cy_writel(&buf_ctrl->tx_put, tx_put); 1608 cy_writel(&buf_ctrl->tx_put, tx_put);
1609 } 1609 }
diff --git a/drivers/firewire/fw-ohci.c b/drivers/firewire/fw-ohci.c
index 2f307c4df335..67588326ae56 100644
--- a/drivers/firewire/fw-ohci.c
+++ b/drivers/firewire/fw-ohci.c
@@ -606,7 +606,7 @@ static int
606at_context_queue_packet(struct context *ctx, struct fw_packet *packet) 606at_context_queue_packet(struct context *ctx, struct fw_packet *packet)
607{ 607{
608 struct fw_ohci *ohci = ctx->ohci; 608 struct fw_ohci *ohci = ctx->ohci;
609 dma_addr_t d_bus, payload_bus; 609 dma_addr_t d_bus, uninitialized_var(payload_bus);
610 struct driver_data *driver_data; 610 struct driver_data *driver_data;
611 struct descriptor *d, *last; 611 struct descriptor *d, *last;
612 __le32 *header; 612 __le32 *header;
@@ -1459,7 +1459,7 @@ ohci_allocate_iso_context(struct fw_card *card, int type, size_t header_size)
1459 /* FIXME: We need a fallback for pre 1.1 OHCI. */ 1459 /* FIXME: We need a fallback for pre 1.1 OHCI. */
1460 if (callback == handle_ir_dualbuffer_packet && 1460 if (callback == handle_ir_dualbuffer_packet &&
1461 ohci->version < OHCI_VERSION_1_1) 1461 ohci->version < OHCI_VERSION_1_1)
1462 return ERR_PTR(-EINVAL); 1462 return ERR_PTR(-ENOSYS);
1463 1463
1464 spin_lock_irqsave(&ohci->lock, flags); 1464 spin_lock_irqsave(&ohci->lock, flags);
1465 index = ffs(*mask) - 1; 1465 index = ffs(*mask) - 1;
@@ -1778,7 +1778,7 @@ ohci_queue_iso(struct fw_iso_context *base,
1778 buffer, payload); 1778 buffer, payload);
1779 else 1779 else
1780 /* FIXME: Implement fallback for OHCI 1.0 controllers. */ 1780 /* FIXME: Implement fallback for OHCI 1.0 controllers. */
1781 return -EINVAL; 1781 return -ENOSYS;
1782} 1782}
1783 1783
1784static const struct fw_card_driver ohci_driver = { 1784static const struct fw_card_driver ohci_driver = {
@@ -1898,7 +1898,12 @@ pci_probe(struct pci_dev *dev, const struct pci_device_id *ent)
1898 ohci->version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff; 1898 ohci->version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff;
1899 fw_notify("Added fw-ohci device %s, OHCI version %x.%x\n", 1899 fw_notify("Added fw-ohci device %s, OHCI version %x.%x\n",
1900 dev->dev.bus_id, ohci->version >> 16, ohci->version & 0xff); 1900 dev->dev.bus_id, ohci->version >> 16, ohci->version & 0xff);
1901 1901 if (ohci->version < OHCI_VERSION_1_1) {
1902 fw_notify(" Isochronous I/O is not yet implemented for "
1903 "OHCI 1.0 chips.\n");
1904 fw_notify(" Cameras, audio devices etc. won't work on "
1905 "this controller with this driver version.\n");
1906 }
1902 return 0; 1907 return 0;
1903 1908
1904 fail_self_id: 1909 fail_self_id:
diff --git a/drivers/ide/cris/ide-cris.c b/drivers/ide/cris/ide-cris.c
index ff20377b4c82..e196aefa2070 100644
--- a/drivers/ide/cris/ide-cris.c
+++ b/drivers/ide/cris/ide-cris.c
@@ -935,11 +935,11 @@ static int cris_ide_build_dmatable (ide_drive_t *drive)
935 * than two possibly non-adjacent physical 4kB pages. 935 * than two possibly non-adjacent physical 4kB pages.
936 */ 936 */
937 /* group sequential buffers into one large buffer */ 937 /* group sequential buffers into one large buffer */
938 addr = page_to_phys(sg->page) + sg->offset; 938 addr = sg_phys(sg);
939 size = sg_dma_len(sg); 939 size = sg_dma_len(sg);
940 while (--i) { 940 while (--i) {
941 sg = sg_next(sg); 941 sg = sg_next(sg);
942 if ((addr + size) != page_to_phys(sg->page) + sg->offset) 942 if ((addr + size) != sg_phys(sg))
943 break; 943 break;
944 size += sg_dma_len(sg); 944 size += sg_dma_len(sg);
945 } 945 }
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index d5146c57e5b3..6a6f2e066b46 100644
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -47,6 +47,7 @@
47#include <linux/spinlock.h> 47#include <linux/spinlock.h>
48#include <linux/kmod.h> 48#include <linux/kmod.h>
49#include <linux/pci.h> 49#include <linux/pci.h>
50#include <linux/scatterlist.h>
50 51
51#include <asm/byteorder.h> 52#include <asm/byteorder.h>
52#include <asm/irq.h> 53#include <asm/irq.h>
@@ -1317,12 +1318,14 @@ static int hwif_init(ide_hwif_t *hwif)
1317 if (!hwif->sg_max_nents) 1318 if (!hwif->sg_max_nents)
1318 hwif->sg_max_nents = PRD_ENTRIES; 1319 hwif->sg_max_nents = PRD_ENTRIES;
1319 1320
1320 hwif->sg_table = kzalloc(sizeof(struct scatterlist)*hwif->sg_max_nents, 1321 hwif->sg_table = kmalloc(sizeof(struct scatterlist)*hwif->sg_max_nents,
1321 GFP_KERNEL); 1322 GFP_KERNEL);
1322 if (!hwif->sg_table) { 1323 if (!hwif->sg_table) {
1323 printk(KERN_ERR "%s: unable to allocate SG table.\n", hwif->name); 1324 printk(KERN_ERR "%s: unable to allocate SG table.\n", hwif->name);
1324 goto out; 1325 goto out;
1325 } 1326 }
1327
1328 sg_init_table(hwif->sg_table, hwif->sg_max_nents);
1326 1329
1327 if (init_irq(hwif) == 0) 1330 if (init_irq(hwif) == 0)
1328 goto done; 1331 goto done;
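
The ide-cris hunk earlier and the IDE, ieee1394 and InfiniBand hunks that follow are all mechanical conversions to the new scatterlist accessors: tables get sg_init_table(), pages are attached with sg_set_page(), and readers go through sg_page()/sg_virt()/sg_phys() instead of dereferencing sg->page directly. A minimal sketch of the pattern as it stands at this point in the API (note the two-argument sg_set_page(); offset and length are still assigned by hand, as the ib umem hunk shows); illustrative only.

#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/mm.h>

static struct scatterlist *sg_from_pages_sketch(struct page **pages, int nents)
{
	struct scatterlist *sgl;
	int i;

	sgl = kmalloc(sizeof(*sgl) * nents, GFP_KERNEL);
	if (!sgl)
		return NULL;

	/* initialises the chain markers / end-of-table bits */
	sg_init_table(sgl, nents);

	for (i = 0; i < nents; i++) {
		sg_set_page(&sgl[i], pages[i]);	/* replaces sgl[i].page = pages[i] */
		sgl[i].offset = 0;
		sgl[i].length = PAGE_SIZE;
	}

	/* consumers now use sg_page(&sgl[i]), sg_virt(&sgl[i]), sg_phys(&sgl[i]) */
	return sgl;
}
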
diff --git a/drivers/ide/ide-taskfile.c b/drivers/ide/ide-taskfile.c
index 73ef6bf5fbcc..d066546f2831 100644
--- a/drivers/ide/ide-taskfile.c
+++ b/drivers/ide/ide-taskfile.c
@@ -261,7 +261,7 @@ static void ide_pio_sector(ide_drive_t *drive, unsigned int write)
261 hwif->cursg = sg; 261 hwif->cursg = sg;
262 } 262 }
263 263
264 page = cursg->page; 264 page = sg_page(cursg);
265 offset = cursg->offset + hwif->cursg_ofs * SECTOR_SIZE; 265 offset = cursg->offset + hwif->cursg_ofs * SECTOR_SIZE;
266 266
267 /* get the current page and offset */ 267 /* get the current page and offset */
diff --git a/drivers/ide/mips/au1xxx-ide.c b/drivers/ide/mips/au1xxx-ide.c
index 1de58566e5b6..a4ce3ba15d61 100644
--- a/drivers/ide/mips/au1xxx-ide.c
+++ b/drivers/ide/mips/au1xxx-ide.c
@@ -276,8 +276,7 @@ static int auide_build_dmatable(ide_drive_t *drive)
276 276
277 if (iswrite) { 277 if (iswrite) {
278 if(!put_source_flags(ahwif->tx_chan, 278 if(!put_source_flags(ahwif->tx_chan,
279 (void*)(page_address(sg->page) 279 (void*) sg_virt(sg),
280 + sg->offset),
281 tc, flags)) { 280 tc, flags)) {
282 printk(KERN_ERR "%s failed %d\n", 281 printk(KERN_ERR "%s failed %d\n",
283 __FUNCTION__, __LINE__); 282 __FUNCTION__, __LINE__);
@@ -285,8 +284,7 @@ static int auide_build_dmatable(ide_drive_t *drive)
285 } else 284 } else
286 { 285 {
287 if(!put_dest_flags(ahwif->rx_chan, 286 if(!put_dest_flags(ahwif->rx_chan,
288 (void*)(page_address(sg->page) 287 (void*) sg_virt(sg),
289 + sg->offset),
290 tc, flags)) { 288 tc, flags)) {
291 printk(KERN_ERR "%s failed %d\n", 289 printk(KERN_ERR "%s failed %d\n",
292 __FUNCTION__, __LINE__); 290 __FUNCTION__, __LINE__);
diff --git a/drivers/ieee1394/dma.c b/drivers/ieee1394/dma.c
index 45d605581922..25e113b50d86 100644
--- a/drivers/ieee1394/dma.c
+++ b/drivers/ieee1394/dma.c
@@ -111,7 +111,7 @@ int dma_region_alloc(struct dma_region *dma, unsigned long n_bytes,
111 unsigned long va = 111 unsigned long va =
112 (unsigned long)dma->kvirt + (i << PAGE_SHIFT); 112 (unsigned long)dma->kvirt + (i << PAGE_SHIFT);
113 113
114 dma->sglist[i].page = vmalloc_to_page((void *)va); 114 sg_set_page(&dma->sglist[i], vmalloc_to_page((void *)va));
115 dma->sglist[i].length = PAGE_SIZE; 115 dma->sglist[i].length = PAGE_SIZE;
116 } 116 }
117 117
diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c
index 1b353b964b33..d5dfe11aa5c6 100644
--- a/drivers/ieee1394/sbp2.c
+++ b/drivers/ieee1394/sbp2.c
@@ -1466,7 +1466,7 @@ static void sbp2_prep_command_orb_sg(struct sbp2_command_orb *orb,
1466 cmd->dma_size = sgpnt[0].length; 1466 cmd->dma_size = sgpnt[0].length;
1467 cmd->dma_type = CMD_DMA_PAGE; 1467 cmd->dma_type = CMD_DMA_PAGE;
1468 cmd->cmd_dma = dma_map_page(hi->host->device.parent, 1468 cmd->cmd_dma = dma_map_page(hi->host->device.parent,
1469 sgpnt[0].page, sgpnt[0].offset, 1469 sg_page(&sgpnt[0]), sgpnt[0].offset,
1470 cmd->dma_size, cmd->dma_dir); 1470 cmd->dma_size, cmd->dma_dir);
1471 1471
1472 orb->data_descriptor_lo = cmd->cmd_dma; 1472 orb->data_descriptor_lo = cmd->cmd_dma;
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index 2f54e29dc7a6..14159ff29408 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -55,9 +55,11 @@ static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int d
55 ib_dma_unmap_sg(dev, chunk->page_list, 55 ib_dma_unmap_sg(dev, chunk->page_list,
56 chunk->nents, DMA_BIDIRECTIONAL); 56 chunk->nents, DMA_BIDIRECTIONAL);
57 for (i = 0; i < chunk->nents; ++i) { 57 for (i = 0; i < chunk->nents; ++i) {
58 struct page *page = sg_page(&chunk->page_list[i]);
59
58 if (umem->writable && dirty) 60 if (umem->writable && dirty)
59 set_page_dirty_lock(chunk->page_list[i].page); 61 set_page_dirty_lock(page);
60 put_page(chunk->page_list[i].page); 62 put_page(page);
61 } 63 }
62 64
63 kfree(chunk); 65 kfree(chunk);
@@ -164,11 +166,12 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
164 } 166 }
165 167
166 chunk->nents = min_t(int, ret, IB_UMEM_MAX_PAGE_CHUNK); 168 chunk->nents = min_t(int, ret, IB_UMEM_MAX_PAGE_CHUNK);
169 sg_init_table(chunk->page_list, chunk->nents);
167 for (i = 0; i < chunk->nents; ++i) { 170 for (i = 0; i < chunk->nents; ++i) {
168 if (vma_list && 171 if (vma_list &&
169 !is_vm_hugetlb_page(vma_list[i + off])) 172 !is_vm_hugetlb_page(vma_list[i + off]))
170 umem->hugetlb = 0; 173 umem->hugetlb = 0;
171 chunk->page_list[i].page = page_list[i + off]; 174 sg_set_page(&chunk->page_list[i], page_list[i + off]);
172 chunk->page_list[i].offset = 0; 175 chunk->page_list[i].offset = 0;
173 chunk->page_list[i].length = PAGE_SIZE; 176 chunk->page_list[i].length = PAGE_SIZE;
174 } 177 }
@@ -179,7 +182,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
179 DMA_BIDIRECTIONAL); 182 DMA_BIDIRECTIONAL);
180 if (chunk->nmap <= 0) { 183 if (chunk->nmap <= 0) {
181 for (i = 0; i < chunk->nents; ++i) 184 for (i = 0; i < chunk->nents; ++i)
182 put_page(chunk->page_list[i].page); 185 put_page(sg_page(&chunk->page_list[i]));
183 kfree(chunk); 186 kfree(chunk);
184 187
185 ret = -ENOMEM; 188 ret = -ENOMEM;
diff --git a/drivers/infiniband/hw/ehca/ehca_mrmw.c b/drivers/infiniband/hw/ehca/ehca_mrmw.c
index da88738265ed..ead7230d7738 100644
--- a/drivers/infiniband/hw/ehca/ehca_mrmw.c
+++ b/drivers/infiniband/hw/ehca/ehca_mrmw.c
@@ -1776,7 +1776,7 @@ static int ehca_set_pagebuf_user1(struct ehca_mr_pginfo *pginfo,
1776 list_for_each_entry_continue( 1776 list_for_each_entry_continue(
1777 chunk, (&(pginfo->u.usr.region->chunk_list)), list) { 1777 chunk, (&(pginfo->u.usr.region->chunk_list)), list) {
1778 for (i = pginfo->u.usr.next_nmap; i < chunk->nmap; ) { 1778 for (i = pginfo->u.usr.next_nmap; i < chunk->nmap; ) {
1779 pgaddr = page_to_pfn(chunk->page_list[i].page) 1779 pgaddr = page_to_pfn(sg_page(&chunk->page_list[i]))
1780 << PAGE_SHIFT ; 1780 << PAGE_SHIFT ;
1781 *kpage = phys_to_abs(pgaddr + 1781 *kpage = phys_to_abs(pgaddr +
1782 (pginfo->next_hwpage * 1782 (pginfo->next_hwpage *
@@ -1832,7 +1832,7 @@ static int ehca_check_kpages_per_ate(struct scatterlist *page_list,
1832{ 1832{
1833 int t; 1833 int t;
1834 for (t = start_idx; t <= end_idx; t++) { 1834 for (t = start_idx; t <= end_idx; t++) {
1835 u64 pgaddr = page_to_pfn(page_list[t].page) << PAGE_SHIFT; 1835 u64 pgaddr = page_to_pfn(sg_page(&page_list[t])) << PAGE_SHIFT;
1836 ehca_gen_dbg("chunk_page=%lx value=%016lx", pgaddr, 1836 ehca_gen_dbg("chunk_page=%lx value=%016lx", pgaddr,
1837 *(u64 *)abs_to_virt(phys_to_abs(pgaddr))); 1837 *(u64 *)abs_to_virt(phys_to_abs(pgaddr)));
1838 if (pgaddr - PAGE_SIZE != *prev_pgaddr) { 1838 if (pgaddr - PAGE_SIZE != *prev_pgaddr) {
@@ -1867,7 +1867,7 @@ static int ehca_set_pagebuf_user2(struct ehca_mr_pginfo *pginfo,
1867 chunk, (&(pginfo->u.usr.region->chunk_list)), list) { 1867 chunk, (&(pginfo->u.usr.region->chunk_list)), list) {
1868 for (i = pginfo->u.usr.next_nmap; i < chunk->nmap; ) { 1868 for (i = pginfo->u.usr.next_nmap; i < chunk->nmap; ) {
1869 if (nr_kpages == kpages_per_hwpage) { 1869 if (nr_kpages == kpages_per_hwpage) {
1870 pgaddr = ( page_to_pfn(chunk->page_list[i].page) 1870 pgaddr = ( page_to_pfn(sg_page(&chunk->page_list[i]))
1871 << PAGE_SHIFT ); 1871 << PAGE_SHIFT );
1872 *kpage = phys_to_abs(pgaddr); 1872 *kpage = phys_to_abs(pgaddr);
1873 if ( !(*kpage) ) { 1873 if ( !(*kpage) ) {
diff --git a/drivers/infiniband/hw/ipath/ipath_dma.c b/drivers/infiniband/hw/ipath/ipath_dma.c
index 22709a4f8fc8..e90a0ea538a0 100644
--- a/drivers/infiniband/hw/ipath/ipath_dma.c
+++ b/drivers/infiniband/hw/ipath/ipath_dma.c
@@ -108,7 +108,7 @@ static int ipath_map_sg(struct ib_device *dev, struct scatterlist *sgl,
108 BUG_ON(!valid_dma_direction(direction)); 108 BUG_ON(!valid_dma_direction(direction));
109 109
110 for_each_sg(sgl, sg, nents, i) { 110 for_each_sg(sgl, sg, nents, i) {
111 addr = (u64) page_address(sg->page); 111 addr = (u64) page_address(sg_page(sg));
112 /* TODO: handle highmem pages */ 112 /* TODO: handle highmem pages */
113 if (!addr) { 113 if (!addr) {
114 ret = 0; 114 ret = 0;
@@ -127,7 +127,7 @@ static void ipath_unmap_sg(struct ib_device *dev,
127 127
128static u64 ipath_sg_dma_address(struct ib_device *dev, struct scatterlist *sg) 128static u64 ipath_sg_dma_address(struct ib_device *dev, struct scatterlist *sg)
129{ 129{
130 u64 addr = (u64) page_address(sg->page); 130 u64 addr = (u64) page_address(sg_page(sg));
131 131
132 if (addr) 132 if (addr)
133 addr += sg->offset; 133 addr += sg->offset;
diff --git a/drivers/infiniband/hw/ipath/ipath_mr.c b/drivers/infiniband/hw/ipath/ipath_mr.c
index e442470a2375..db4ba92f79fc 100644
--- a/drivers/infiniband/hw/ipath/ipath_mr.c
+++ b/drivers/infiniband/hw/ipath/ipath_mr.c
@@ -225,7 +225,7 @@ struct ib_mr *ipath_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
225 for (i = 0; i < chunk->nents; i++) { 225 for (i = 0; i < chunk->nents; i++) {
226 void *vaddr; 226 void *vaddr;
227 227
228 vaddr = page_address(chunk->page_list[i].page); 228 vaddr = page_address(sg_page(&chunk->page_list[i]));
229 if (!vaddr) { 229 if (!vaddr) {
230 ret = ERR_PTR(-EINVAL); 230 ret = ERR_PTR(-EINVAL);
231 goto bail; 231 goto bail;
diff --git a/drivers/infiniband/hw/mthca/mthca_memfree.c b/drivers/infiniband/hw/mthca/mthca_memfree.c
index e61f3e626980..007b38157fc4 100644
--- a/drivers/infiniband/hw/mthca/mthca_memfree.c
+++ b/drivers/infiniband/hw/mthca/mthca_memfree.c
@@ -71,7 +71,7 @@ static void mthca_free_icm_pages(struct mthca_dev *dev, struct mthca_icm_chunk *
71 PCI_DMA_BIDIRECTIONAL); 71 PCI_DMA_BIDIRECTIONAL);
72 72
73 for (i = 0; i < chunk->npages; ++i) 73 for (i = 0; i < chunk->npages; ++i)
74 __free_pages(chunk->mem[i].page, 74 __free_pages(sg_page(&chunk->mem[i]),
75 get_order(chunk->mem[i].length)); 75 get_order(chunk->mem[i].length));
76} 76}
77 77
@@ -81,7 +81,7 @@ static void mthca_free_icm_coherent(struct mthca_dev *dev, struct mthca_icm_chun
81 81
82 for (i = 0; i < chunk->npages; ++i) { 82 for (i = 0; i < chunk->npages; ++i) {
83 dma_free_coherent(&dev->pdev->dev, chunk->mem[i].length, 83 dma_free_coherent(&dev->pdev->dev, chunk->mem[i].length,
84 lowmem_page_address(chunk->mem[i].page), 84 lowmem_page_address(sg_page(&chunk->mem[i])),
85 sg_dma_address(&chunk->mem[i])); 85 sg_dma_address(&chunk->mem[i]));
86 } 86 }
87} 87}
@@ -107,10 +107,13 @@ void mthca_free_icm(struct mthca_dev *dev, struct mthca_icm *icm, int coherent)
107 107
108static int mthca_alloc_icm_pages(struct scatterlist *mem, int order, gfp_t gfp_mask) 108static int mthca_alloc_icm_pages(struct scatterlist *mem, int order, gfp_t gfp_mask)
109{ 109{
110 mem->page = alloc_pages(gfp_mask, order); 110 struct page *page;
111 if (!mem->page) 111
112 page = alloc_pages(gfp_mask, order);
113 if (!page)
112 return -ENOMEM; 114 return -ENOMEM;
113 115
116 sg_set_page(mem, page);
114 mem->length = PAGE_SIZE << order; 117 mem->length = PAGE_SIZE << order;
115 mem->offset = 0; 118 mem->offset = 0;
116 return 0; 119 return 0;
@@ -157,6 +160,7 @@ struct mthca_icm *mthca_alloc_icm(struct mthca_dev *dev, int npages,
157 if (!chunk) 160 if (!chunk)
158 goto fail; 161 goto fail;
159 162
163 sg_init_table(chunk->mem, MTHCA_ICM_CHUNK_LEN);
160 chunk->npages = 0; 164 chunk->npages = 0;
161 chunk->nsg = 0; 165 chunk->nsg = 0;
162 list_add_tail(&chunk->list, &icm->chunk_list); 166 list_add_tail(&chunk->list, &icm->chunk_list);
@@ -304,7 +308,7 @@ void *mthca_table_find(struct mthca_icm_table *table, int obj, dma_addr_t *dma_h
304 * so if we found the page, dma_handle has already 308 * so if we found the page, dma_handle has already
305 * been assigned to. */ 309 * been assigned to. */
306 if (chunk->mem[i].length > offset) { 310 if (chunk->mem[i].length > offset) {
307 page = chunk->mem[i].page; 311 page = sg_page(&chunk->mem[i]);
308 goto out; 312 goto out;
309 } 313 }
310 offset -= chunk->mem[i].length; 314 offset -= chunk->mem[i].length;
@@ -445,6 +449,7 @@ static u64 mthca_uarc_virt(struct mthca_dev *dev, struct mthca_uar *uar, int pag
445int mthca_map_user_db(struct mthca_dev *dev, struct mthca_uar *uar, 449int mthca_map_user_db(struct mthca_dev *dev, struct mthca_uar *uar,
446 struct mthca_user_db_table *db_tab, int index, u64 uaddr) 450 struct mthca_user_db_table *db_tab, int index, u64 uaddr)
447{ 451{
452 struct page *pages[1];
448 int ret = 0; 453 int ret = 0;
449 u8 status; 454 u8 status;
450 int i; 455 int i;
@@ -472,16 +477,17 @@ int mthca_map_user_db(struct mthca_dev *dev, struct mthca_uar *uar,
472 } 477 }
473 478
474 ret = get_user_pages(current, current->mm, uaddr & PAGE_MASK, 1, 1, 0, 479 ret = get_user_pages(current, current->mm, uaddr & PAGE_MASK, 1, 1, 0,
475 &db_tab->page[i].mem.page, NULL); 480 pages, NULL);
476 if (ret < 0) 481 if (ret < 0)
477 goto out; 482 goto out;
478 483
484 sg_set_page(&db_tab->page[i].mem, pages[0]);
479 db_tab->page[i].mem.length = MTHCA_ICM_PAGE_SIZE; 485 db_tab->page[i].mem.length = MTHCA_ICM_PAGE_SIZE;
480 db_tab->page[i].mem.offset = uaddr & ~PAGE_MASK; 486 db_tab->page[i].mem.offset = uaddr & ~PAGE_MASK;
481 487
482 ret = pci_map_sg(dev->pdev, &db_tab->page[i].mem, 1, PCI_DMA_TODEVICE); 488 ret = pci_map_sg(dev->pdev, &db_tab->page[i].mem, 1, PCI_DMA_TODEVICE);
483 if (ret < 0) { 489 if (ret < 0) {
484 put_page(db_tab->page[i].mem.page); 490 put_page(pages[0]);
485 goto out; 491 goto out;
486 } 492 }
487 493
@@ -491,7 +497,7 @@ int mthca_map_user_db(struct mthca_dev *dev, struct mthca_uar *uar,
491 ret = -EINVAL; 497 ret = -EINVAL;
492 if (ret) { 498 if (ret) {
493 pci_unmap_sg(dev->pdev, &db_tab->page[i].mem, 1, PCI_DMA_TODEVICE); 499 pci_unmap_sg(dev->pdev, &db_tab->page[i].mem, 1, PCI_DMA_TODEVICE);
494 put_page(db_tab->page[i].mem.page); 500 put_page(sg_page(&db_tab->page[i].mem));
495 goto out; 501 goto out;
496 } 502 }
497 503
@@ -557,7 +563,7 @@ void mthca_cleanup_user_db_tab(struct mthca_dev *dev, struct mthca_uar *uar,
557 if (db_tab->page[i].uvirt) { 563 if (db_tab->page[i].uvirt) {
558 mthca_UNMAP_ICM(dev, mthca_uarc_virt(dev, uar, i), 1, &status); 564 mthca_UNMAP_ICM(dev, mthca_uarc_virt(dev, uar, i), 1, &status);
559 pci_unmap_sg(dev->pdev, &db_tab->page[i].mem, 1, PCI_DMA_TODEVICE); 565 pci_unmap_sg(dev->pdev, &db_tab->page[i].mem, 1, PCI_DMA_TODEVICE);
560 put_page(db_tab->page[i].mem.page); 566 put_page(sg_page(&db_tab->page[i].mem));
561 } 567 }
562 } 568 }
563 569
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
index f3529b6f0a33..d68798061795 100644
--- a/drivers/infiniband/ulp/iser/iser_memory.c
+++ b/drivers/infiniband/ulp/iser/iser_memory.c
@@ -131,7 +131,7 @@ static int iser_start_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
131 131
132 p = mem; 132 p = mem;
133 for_each_sg(sgl, sg, data->size, i) { 133 for_each_sg(sgl, sg, data->size, i) {
134 from = kmap_atomic(sg->page, KM_USER0); 134 from = kmap_atomic(sg_page(sg), KM_USER0);
135 memcpy(p, 135 memcpy(p,
136 from + sg->offset, 136 from + sg->offset,
137 sg->length); 137 sg->length);
@@ -191,7 +191,7 @@ void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
191 191
192 p = mem; 192 p = mem;
193 for_each_sg(sgl, sg, sg_size, i) { 193 for_each_sg(sgl, sg, sg_size, i) {
194 to = kmap_atomic(sg->page, KM_SOFTIRQ0); 194 to = kmap_atomic(sg_page(sg), KM_SOFTIRQ0);
195 memcpy(to + sg->offset, 195 memcpy(to + sg->offset,
196 p, 196 p,
197 sg->length); 197 sg->length);
@@ -300,7 +300,7 @@ static unsigned int iser_data_buf_aligned_len(struct iser_data_buf *data,
300 for_each_sg(sgl, sg, data->dma_nents, i) { 300 for_each_sg(sgl, sg, data->dma_nents, i) {
301 /* iser_dbg("Checking sg iobuf [%d]: phys=0x%08lX " 301 /* iser_dbg("Checking sg iobuf [%d]: phys=0x%08lX "
302 "offset: %ld sz: %ld\n", i, 302 "offset: %ld sz: %ld\n", i,
303 (unsigned long)page_to_phys(sg->page), 303 (unsigned long)sg_phys(sg),
304 (unsigned long)sg->offset, 304 (unsigned long)sg->offset,
305 (unsigned long)sg->length); */ 305 (unsigned long)sg->length); */
306 end_addr = ib_sg_dma_address(ibdev, sg) + 306 end_addr = ib_sg_dma_address(ibdev, sg) +
@@ -336,7 +336,7 @@ static void iser_data_buf_dump(struct iser_data_buf *data,
336 iser_err("sg[%d] dma_addr:0x%lX page:0x%p " 336 iser_err("sg[%d] dma_addr:0x%lX page:0x%p "
337 "off:0x%x sz:0x%x dma_len:0x%x\n", 337 "off:0x%x sz:0x%x dma_len:0x%x\n",
338 i, (unsigned long)ib_sg_dma_address(ibdev, sg), 338 i, (unsigned long)ib_sg_dma_address(ibdev, sg),
339 sg->page, sg->offset, 339 sg_page(sg), sg->offset,
340 sg->length, ib_sg_dma_len(ibdev, sg)); 340 sg->length, ib_sg_dma_len(ibdev, sg));
341} 341}
342 342
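
In iser_memory.c only the accessors change (sg->page becomes sg_page(sg), page_to_phys() becomes sg_phys()); the surrounding logic still linearizes an unaligned scatterlist by mapping each page with kmap_atomic() and copying it into a bounce buffer. A sketch of that copy loop, assuming sgl, nents and a sufficiently large buffer mem are in scope:

    /* Sketch only: flatten a scatterlist into a contiguous bounce buffer,
     * mirroring iser_start_rdma_unaligned_sg() above. */
    char *p = mem;
    struct scatterlist *sg;
    int i;

    for_each_sg(sgl, sg, nents, i) {
            void *from = kmap_atomic(sg_page(sg), KM_USER0);

            memcpy(p, from + sg->offset, sg->length);
            kunmap_atomic(from, KM_USER0);
            p += sg->length;
    }
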
diff --git a/drivers/input/keyboard/bf54x-keys.c b/drivers/input/keyboard/bf54x-keys.c
index a67b29b089ef..e5f4da928340 100644
--- a/drivers/input/keyboard/bf54x-keys.c
+++ b/drivers/input/keyboard/bf54x-keys.c
@@ -256,7 +256,6 @@ static int __devinit bfin_kpad_probe(struct platform_device *pdev)
256 printk(KERN_ERR DRV_NAME 256 printk(KERN_ERR DRV_NAME
257 ": unable to claim irq %d; error %d\n", 257 ": unable to claim irq %d; error %d\n",
258 bf54x_kpad->irq, error); 258 bf54x_kpad->irq, error);
259 error = -EBUSY;
260 goto out2; 259 goto out2;
261 } 260 }
262 261
diff --git a/drivers/input/mouse/appletouch.c b/drivers/input/mouse/appletouch.c
index 0117817bf538..f132702d137d 100644
--- a/drivers/input/mouse/appletouch.c
+++ b/drivers/input/mouse/appletouch.c
@@ -504,25 +504,22 @@ static void atp_complete(struct urb* urb)
504 memset(dev->xy_acc, 0, sizeof(dev->xy_acc)); 504 memset(dev->xy_acc, 0, sizeof(dev->xy_acc));
505 } 505 }
506 506
507 /* Geyser 3 will continue to send packets continually after 507 input_report_key(dev->input, BTN_LEFT, key);
508 input_sync(dev->input);
509
510 /* Many Geysers will continue to send packets continually after
508 the first touch unless reinitialised. Do so if it's been 511 the first touch unless reinitialised. Do so if it's been
509 idle for a while in order to avoid waking the kernel up 512 idle for a while in order to avoid waking the kernel up
510 several hundred times a second */ 513 several hundred times a second */
511 514
512 if (atp_is_geyser_3(dev)) { 515 if (!x && !y && !key) {
513 if (!x && !y && !key) { 516 dev->idlecount++;
514 dev->idlecount++; 517 if (dev->idlecount == 10) {
515 if (dev->idlecount == 10) { 518 dev->valid = 0;
516 dev->valid = 0; 519 schedule_work(&dev->work);
517 schedule_work(&dev->work);
518 }
519 } 520 }
520 else 521 } else
521 dev->idlecount = 0; 522 dev->idlecount = 0;
522 }
523
524 input_report_key(dev->input, BTN_LEFT, key);
525 input_sync(dev->input);
526 523
527exit: 524exit:
528 retval = usb_submit_urb(dev->urb, GFP_ATOMIC); 525 retval = usb_submit_urb(dev->urb, GFP_ATOMIC);
diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
index 11dafc0ee994..1a0cea3c5294 100644
--- a/drivers/input/serio/i8042.c
+++ b/drivers/input/serio/i8042.c
@@ -20,6 +20,7 @@
20#include <linux/err.h> 20#include <linux/err.h>
21#include <linux/rcupdate.h> 21#include <linux/rcupdate.h>
22#include <linux/platform_device.h> 22#include <linux/platform_device.h>
23#include <linux/i8042.h>
23 24
24#include <asm/io.h> 25#include <asm/io.h>
25 26
@@ -208,7 +209,7 @@ static int __i8042_command(unsigned char *param, int command)
208 return 0; 209 return 0;
209} 210}
210 211
211static int i8042_command(unsigned char *param, int command) 212int i8042_command(unsigned char *param, int command)
212{ 213{
213 unsigned long flags; 214 unsigned long flags;
214 int retval; 215 int retval;
@@ -219,6 +220,7 @@ static int i8042_command(unsigned char *param, int command)
219 220
220 return retval; 221 return retval;
221} 222}
223EXPORT_SYMBOL(i8042_command);
222 224
223/* 225/*
224 * i8042_kbd_write() sends a byte out through the keyboard interface. 226 * i8042_kbd_write() sends a byte out through the keyboard interface.
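
With i8042_command() no longer static and exported, other drivers can issue controller commands through <linux/i8042.h>; the following hunk removes the I8042_CMD_* constants from the private header, which implies they now live in the shared one. A hedged usage sketch (the helper name is made up):

    #include <linux/i8042.h>

    /* Hypothetical caller: read the i8042 controller command byte. */
    static int read_i8042_ctr(unsigned char *ctr)
    {
            return i8042_command(ctr, I8042_CMD_CTL_RCTR);
    }
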
diff --git a/drivers/input/serio/i8042.h b/drivers/input/serio/i8042.h
index b3eb7a72d961..dd22d91f8b39 100644
--- a/drivers/input/serio/i8042.h
+++ b/drivers/input/serio/i8042.h
@@ -61,28 +61,6 @@
61#define I8042_CTR_XLATE 0x40 61#define I8042_CTR_XLATE 0x40
62 62
63/* 63/*
64 * Commands.
65 */
66
67#define I8042_CMD_CTL_RCTR 0x0120
68#define I8042_CMD_CTL_WCTR 0x1060
69#define I8042_CMD_CTL_TEST 0x01aa
70
71#define I8042_CMD_KBD_DISABLE 0x00ad
72#define I8042_CMD_KBD_ENABLE 0x00ae
73#define I8042_CMD_KBD_TEST 0x01ab
74#define I8042_CMD_KBD_LOOP 0x11d2
75
76#define I8042_CMD_AUX_DISABLE 0x00a7
77#define I8042_CMD_AUX_ENABLE 0x00a8
78#define I8042_CMD_AUX_TEST 0x01a9
79#define I8042_CMD_AUX_SEND 0x10d4
80#define I8042_CMD_AUX_LOOP 0x11d3
81
82#define I8042_CMD_MUX_PFX 0x0090
83#define I8042_CMD_MUX_SEND 0x1090
84
85/*
86 * Return codes. 64 * Return codes.
87 */ 65 */
88 66
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index e3e0baa1a158..fa8442b6241c 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -202,6 +202,7 @@ config TOUCHSCREEN_USB_COMPOSITE
202 - DMC TSC-10/25 202 - DMC TSC-10/25
203 - IRTOUCHSYSTEMS/UNITOP 203 - IRTOUCHSYSTEMS/UNITOP
204 - IdealTEK URTC1000 204 - IdealTEK URTC1000
205 - GoTop Super_Q2/GogoPen/PenPower tablets
205 206
206 Have a look at <http://linux.chapter7.ch/touchkit/> for 207 Have a look at <http://linux.chapter7.ch/touchkit/> for
207 a usage description and the required user-space stuff. 208 a usage description and the required user-space stuff.
@@ -259,4 +260,9 @@ config TOUCHSCREEN_USB_GENERAL_TOUCH
259 bool "GeneralTouch Touchscreen device support" if EMBEDDED 260 bool "GeneralTouch Touchscreen device support" if EMBEDDED
260 depends on TOUCHSCREEN_USB_COMPOSITE 261 depends on TOUCHSCREEN_USB_COMPOSITE
261 262
263config TOUCHSCREEN_USB_GOTOP
264 default y
265 bool "GoTop Super_Q2/GogoPen/PenPower tablet device support" if EMBEDDED
266 depends on TOUCHSCREEN_USB_COMPOSITE
267
262endif 268endif
diff --git a/drivers/input/touchscreen/usbtouchscreen.c b/drivers/input/touchscreen/usbtouchscreen.c
index 5f34b78d5ddb..19055e7381f8 100644
--- a/drivers/input/touchscreen/usbtouchscreen.c
+++ b/drivers/input/touchscreen/usbtouchscreen.c
@@ -11,8 +11,9 @@
11 * - DMC TSC-10/25 11 * - DMC TSC-10/25
12 * - IRTOUCHSYSTEMS/UNITOP 12 * - IRTOUCHSYSTEMS/UNITOP
13 * - IdealTEK URTC1000 13 * - IdealTEK URTC1000
14 * - GoTop Super_Q2/GogoPen/PenPower tablets
14 * 15 *
15 * Copyright (C) 2004-2006 by Daniel Ritz <daniel.ritz@gmx.ch> 16 * Copyright (C) 2004-2007 by Daniel Ritz <daniel.ritz@gmx.ch>
16 * Copyright (C) by Todd E. Johnson (mtouchusb.c) 17 * Copyright (C) by Todd E. Johnson (mtouchusb.c)
17 * 18 *
18 * This program is free software; you can redistribute it and/or 19 * This program is free software; you can redistribute it and/or
@@ -115,6 +116,7 @@ enum {
115 DEVTYPE_IRTOUCH, 116 DEVTYPE_IRTOUCH,
116 DEVTYPE_IDEALTEK, 117 DEVTYPE_IDEALTEK,
117 DEVTYPE_GENERAL_TOUCH, 118 DEVTYPE_GENERAL_TOUCH,
119 DEVTYPE_GOTOP,
118}; 120};
119 121
120static struct usb_device_id usbtouch_devices[] = { 122static struct usb_device_id usbtouch_devices[] = {
@@ -168,6 +170,12 @@ static struct usb_device_id usbtouch_devices[] = {
168 {USB_DEVICE(0x0dfc, 0x0001), .driver_info = DEVTYPE_GENERAL_TOUCH}, 170 {USB_DEVICE(0x0dfc, 0x0001), .driver_info = DEVTYPE_GENERAL_TOUCH},
169#endif 171#endif
170 172
173#ifdef CONFIG_TOUCHSCREEN_USB_GOTOP
174 {USB_DEVICE(0x08f2, 0x007f), .driver_info = DEVTYPE_GOTOP},
175 {USB_DEVICE(0x08f2, 0x00ce), .driver_info = DEVTYPE_GOTOP},
176 {USB_DEVICE(0x08f2, 0x00f4), .driver_info = DEVTYPE_GOTOP},
177#endif
178
171 {} 179 {}
172}; 180};
173 181
@@ -501,6 +509,20 @@ static int general_touch_read_data(struct usbtouch_usb *dev, unsigned char *pkt)
501#endif 509#endif
502 510
503/***************************************************************************** 511/*****************************************************************************
512 * GoTop Part
513 */
514#ifdef CONFIG_TOUCHSCREEN_USB_GOTOP
515static int gotop_read_data(struct usbtouch_usb *dev, unsigned char *pkt)
516{
517 dev->x = ((pkt[1] & 0x38) << 4) | pkt[2];
518 dev->y = ((pkt[1] & 0x07) << 7) | pkt[3];
519 dev->touch = pkt[0] & 0x01;
520 return 1;
521}
522#endif
523
524
525/*****************************************************************************
504 * the different device descriptors 526 * the different device descriptors
505 */ 527 */
506static struct usbtouch_device_info usbtouch_dev_info[] = { 528static struct usbtouch_device_info usbtouch_dev_info[] = {
@@ -623,9 +645,19 @@ static struct usbtouch_device_info usbtouch_dev_info[] = {
623 .max_yc = 0x0500, 645 .max_yc = 0x0500,
624 .rept_size = 7, 646 .rept_size = 7,
625 .read_data = general_touch_read_data, 647 .read_data = general_touch_read_data,
626 } 648 },
627#endif 649#endif
628 650
651#ifdef CONFIG_TOUCHSCREEN_USB_GOTOP
652 [DEVTYPE_GOTOP] = {
653 .min_xc = 0x0,
654 .max_xc = 0x03ff,
655 .min_yc = 0x0,
656 .max_yc = 0x03ff,
657 .rept_size = 4,
658 .read_data = gotop_read_data,
659 },
660#endif
629}; 661};
630 662
631 663
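
gotop_read_data() above splits each 10-bit coordinate across two bytes: pkt[1] carries the top three bits of X (bits 5..3) and of Y (bits 2..0), pkt[2] and pkt[3] carry the low seven bits (consistent with the 0x03ff axis maxima in the device table), and bit 0 of pkt[0] is the touch flag. A standalone C illustration of the unpacking (the sample packet is invented):

    #include <stdio.h>

    /* Standalone illustration of the GoTop packet decode shown above;
     * the packet contents here are invented. */
    int main(void)
    {
            unsigned char pkt[4] = { 0x01, 0x2a, 0x55, 0x33 };
            int x = ((pkt[1] & 0x38) << 4) | pkt[2];
            int y = ((pkt[1] & 0x07) << 7) | pkt[3];

            printf("touch=%d x=%d y=%d\n", pkt[0] & 0x01, x, y);
            return 0;
    }
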
diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
index af2d288c881d..07ae280e8fe5 100644
--- a/drivers/kvm/kvm_main.c
+++ b/drivers/kvm/kvm_main.c
@@ -198,21 +198,15 @@ static void vcpu_put(struct kvm_vcpu *vcpu)
198 198
199static void ack_flush(void *_completed) 199static void ack_flush(void *_completed)
200{ 200{
201 atomic_t *completed = _completed;
202
203 atomic_inc(completed);
204} 201}
205 202
206void kvm_flush_remote_tlbs(struct kvm *kvm) 203void kvm_flush_remote_tlbs(struct kvm *kvm)
207{ 204{
208 int i, cpu, needed; 205 int i, cpu;
209 cpumask_t cpus; 206 cpumask_t cpus;
210 struct kvm_vcpu *vcpu; 207 struct kvm_vcpu *vcpu;
211 atomic_t completed;
212 208
213 atomic_set(&completed, 0);
214 cpus_clear(cpus); 209 cpus_clear(cpus);
215 needed = 0;
216 for (i = 0; i < KVM_MAX_VCPUS; ++i) { 210 for (i = 0; i < KVM_MAX_VCPUS; ++i) {
217 vcpu = kvm->vcpus[i]; 211 vcpu = kvm->vcpus[i];
218 if (!vcpu) 212 if (!vcpu)
@@ -221,23 +215,9 @@ void kvm_flush_remote_tlbs(struct kvm *kvm)
221 continue; 215 continue;
222 cpu = vcpu->cpu; 216 cpu = vcpu->cpu;
223 if (cpu != -1 && cpu != raw_smp_processor_id()) 217 if (cpu != -1 && cpu != raw_smp_processor_id())
224 if (!cpu_isset(cpu, cpus)) { 218 cpu_set(cpu, cpus);
225 cpu_set(cpu, cpus);
226 ++needed;
227 }
228 }
229
230 /*
231 * We really want smp_call_function_mask() here. But that's not
232 * available, so ipi all cpus in parallel and wait for them
233 * to complete.
234 */
235 for (cpu = first_cpu(cpus); cpu != NR_CPUS; cpu = next_cpu(cpu, cpus))
236 smp_call_function_single(cpu, ack_flush, &completed, 1, 0);
237 while (atomic_read(&completed) != needed) {
238 cpu_relax();
239 barrier();
240 } 219 }
220 smp_call_function_mask(cpus, ack_flush, NULL, 1);
241} 221}
242 222
243int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id) 223int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
@@ -2054,12 +2034,21 @@ again:
2054 2034
2055 kvm_x86_ops->run(vcpu, kvm_run); 2035 kvm_x86_ops->run(vcpu, kvm_run);
2056 2036
2057 kvm_guest_exit();
2058 vcpu->guest_mode = 0; 2037 vcpu->guest_mode = 0;
2059 local_irq_enable(); 2038 local_irq_enable();
2060 2039
2061 ++vcpu->stat.exits; 2040 ++vcpu->stat.exits;
2062 2041
2042 /*
2043 * We must have an instruction between local_irq_enable() and
2044 * kvm_guest_exit(), so the timer interrupt isn't delayed by
2045 * the interrupt shadow. The stat.exits increment will do nicely.
2046 * But we need to prevent reordering, hence this barrier():
2047 */
2048 barrier();
2049
2050 kvm_guest_exit();
2051
2063 preempt_enable(); 2052 preempt_enable();
2064 2053
2065 /* 2054 /*
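
Two independent fixes in kvm_main.c: kvm_flush_remote_tlbs() drops the hand-rolled "IPI each CPU and spin on an atomic counter" loop in favour of smp_call_function_mask(), and kvm_guest_exit() moves after local_irq_enable() with a barrier() so a pending timer interrupt is not held back by the interrupt shadow. A sketch of the first pattern with placeholder names:

    /* Sketch: run a function on every CPU in a mask and wait for all of
     * them, instead of issuing per-CPU IPIs and counting completions. */
    static void remote_noop(void *unused)
    {
            /* the IPI itself kicks the CPU out of guest mode */
    }

    static void kick_cpus(cpumask_t cpus)
    {
            smp_call_function_mask(cpus, remote_noop, NULL, 1 /* wait */);
    }
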
diff --git a/drivers/kvm/lapic.c b/drivers/kvm/lapic.c
index a190587cf6a5..238fcad3cece 100644
--- a/drivers/kvm/lapic.c
+++ b/drivers/kvm/lapic.c
@@ -494,12 +494,19 @@ static void apic_send_ipi(struct kvm_lapic *apic)
494 494
495static u32 apic_get_tmcct(struct kvm_lapic *apic) 495static u32 apic_get_tmcct(struct kvm_lapic *apic)
496{ 496{
497 u32 counter_passed; 497 u64 counter_passed;
498 ktime_t passed, now = apic->timer.dev.base->get_time(); 498 ktime_t passed, now;
499 u32 tmcct = apic_get_reg(apic, APIC_TMICT); 499 u32 tmcct;
500 500
501 ASSERT(apic != NULL); 501 ASSERT(apic != NULL);
502 502
503 now = apic->timer.dev.base->get_time();
504 tmcct = apic_get_reg(apic, APIC_TMICT);
505
506 /* if initial count is 0, current count should also be 0 */
507 if (tmcct == 0)
508 return 0;
509
503 if (unlikely(ktime_to_ns(now) <= 510 if (unlikely(ktime_to_ns(now) <=
504 ktime_to_ns(apic->timer.last_update))) { 511 ktime_to_ns(apic->timer.last_update))) {
505 /* Wrap around */ 512 /* Wrap around */
@@ -514,15 +521,24 @@ static u32 apic_get_tmcct(struct kvm_lapic *apic)
514 521
515 counter_passed = div64_64(ktime_to_ns(passed), 522 counter_passed = div64_64(ktime_to_ns(passed),
516 (APIC_BUS_CYCLE_NS * apic->timer.divide_count)); 523 (APIC_BUS_CYCLE_NS * apic->timer.divide_count));
517 tmcct -= counter_passed;
518 524
519 if (tmcct <= 0) { 525 if (counter_passed > tmcct) {
520 if (unlikely(!apic_lvtt_period(apic))) 526 if (unlikely(!apic_lvtt_period(apic))) {
527 /* one-shot timers stick at 0 until reset */
521 tmcct = 0; 528 tmcct = 0;
522 else 529 } else {
523 do { 530 /*
524 tmcct += apic_get_reg(apic, APIC_TMICT); 531 * periodic timers reset to APIC_TMICT when they
525 } while (tmcct <= 0); 532 * hit 0. The while loop simulates this happening N
533 * times. (counter_passed %= tmcct) would also work,
534 * but might be slower or not work on 32-bit??
535 */
536 while (counter_passed > tmcct)
537 counter_passed -= tmcct;
538 tmcct -= counter_passed;
539 }
540 } else {
541 tmcct -= counter_passed;
526 } 542 }
527 543
528 return tmcct; 544 return tmcct;
@@ -853,7 +869,7 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu)
853 apic_set_reg(apic, APIC_ISR + 0x10 * i, 0); 869 apic_set_reg(apic, APIC_ISR + 0x10 * i, 0);
854 apic_set_reg(apic, APIC_TMR + 0x10 * i, 0); 870 apic_set_reg(apic, APIC_TMR + 0x10 * i, 0);
855 } 871 }
856 apic->timer.divide_count = 0; 872 update_divide_count(apic);
857 atomic_set(&apic->timer.pending, 0); 873 atomic_set(&apic->timer.pending, 0);
858 if (vcpu->vcpu_id == 0) 874 if (vcpu->vcpu_id == 0)
859 vcpu->apic_base |= MSR_IA32_APICBASE_BSP; 875 vcpu->apic_base |= MSR_IA32_APICBASE_BSP;
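
The apic_get_tmcct() rewrite widens counter_passed to u64, bails out early when the initial count is zero, and, for periodic timers, reduces the elapsed ticks modulo the initial count instead of repeatedly adding APIC_TMICT to an already-truncated value. A user-space illustration of the arithmetic (values invented):

    #include <stdio.h>
    #include <stdint.h>

    /* Current-count math as in the hunk above: one-shot timers stick at 0,
     * periodic timers wrap modulo the initial count (the patch uses a
     * subtraction loop; '%' gives the same result). */
    static uint32_t current_count(uint32_t tmict, uint64_t passed, int periodic)
    {
            if (tmict == 0)
                    return 0;
            if (passed > tmict) {
                    if (!periodic)
                            return 0;
                    passed %= tmict;
            }
            return tmict - (uint32_t)passed;
    }

    int main(void)
    {
            printf("%u\n", current_count(1000, 2500, 1));   /* 500 */
            printf("%u\n", current_count(1000, 2500, 0));   /* 0 */
            return 0;
    }
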
diff --git a/drivers/kvm/mmu.c b/drivers/kvm/mmu.c
index 6d84d30f5ed0..feb5ac986c5d 100644
--- a/drivers/kvm/mmu.c
+++ b/drivers/kvm/mmu.c
@@ -1049,6 +1049,7 @@ int kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
1049 destroy_kvm_mmu(vcpu); 1049 destroy_kvm_mmu(vcpu);
1050 return init_kvm_mmu(vcpu); 1050 return init_kvm_mmu(vcpu);
1051} 1051}
1052EXPORT_SYMBOL_GPL(kvm_mmu_reset_context);
1052 1053
1053int kvm_mmu_load(struct kvm_vcpu *vcpu) 1054int kvm_mmu_load(struct kvm_vcpu *vcpu)
1054{ 1055{
@@ -1088,7 +1089,7 @@ static void mmu_pte_write_zap_pte(struct kvm_vcpu *vcpu,
1088 mmu_page_remove_parent_pte(child, spte); 1089 mmu_page_remove_parent_pte(child, spte);
1089 } 1090 }
1090 } 1091 }
1091 *spte = 0; 1092 set_shadow_pte(spte, 0);
1092 kvm_flush_remote_tlbs(vcpu->kvm); 1093 kvm_flush_remote_tlbs(vcpu->kvm);
1093} 1094}
1094 1095
diff --git a/drivers/kvm/vmx.c b/drivers/kvm/vmx.c
index 4f115a8e45ef..bb56ae3f89b6 100644
--- a/drivers/kvm/vmx.c
+++ b/drivers/kvm/vmx.c
@@ -523,6 +523,8 @@ static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
523 523
524static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags) 524static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
525{ 525{
526 if (vcpu->rmode.active)
527 rflags |= IOPL_MASK | X86_EFLAGS_VM;
526 vmcs_writel(GUEST_RFLAGS, rflags); 528 vmcs_writel(GUEST_RFLAGS, rflags);
527} 529}
528 530
@@ -1128,6 +1130,7 @@ static void enter_rmode(struct kvm_vcpu *vcpu)
1128 fix_rmode_seg(VCPU_SREG_GS, &vcpu->rmode.gs); 1130 fix_rmode_seg(VCPU_SREG_GS, &vcpu->rmode.gs);
1129 fix_rmode_seg(VCPU_SREG_FS, &vcpu->rmode.fs); 1131 fix_rmode_seg(VCPU_SREG_FS, &vcpu->rmode.fs);
1130 1132
1133 kvm_mmu_reset_context(vcpu);
1131 init_rmode_tss(vcpu->kvm); 1134 init_rmode_tss(vcpu->kvm);
1132} 1135}
1133 1136
@@ -1760,10 +1763,8 @@ static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1760 set_bit(irq / BITS_PER_LONG, &vcpu->irq_summary); 1763 set_bit(irq / BITS_PER_LONG, &vcpu->irq_summary);
1761 } 1764 }
1762 1765
1763 if ((intr_info & INTR_INFO_INTR_TYPE_MASK) == 0x200) { /* nmi */ 1766 if ((intr_info & INTR_INFO_INTR_TYPE_MASK) == 0x200) /* nmi */
1764 asm ("int $2"); 1767 return 1; /* already handled by vmx_vcpu_run() */
1765 return 1;
1766 }
1767 1768
1768 if (is_no_device(intr_info)) { 1769 if (is_no_device(intr_info)) {
1769 vmx_fpu_activate(vcpu); 1770 vmx_fpu_activate(vcpu);
@@ -2196,6 +2197,7 @@ static void vmx_intr_assist(struct kvm_vcpu *vcpu)
2196static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) 2197static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2197{ 2198{
2198 struct vcpu_vmx *vmx = to_vmx(vcpu); 2199 struct vcpu_vmx *vmx = to_vmx(vcpu);
2200 u32 intr_info;
2199 2201
2200 /* 2202 /*
2201 * Loading guest fpu may have cleared host cr0.ts 2203 * Loading guest fpu may have cleared host cr0.ts
@@ -2322,6 +2324,12 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2322 2324
2323 asm ("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS)); 2325 asm ("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
2324 vmx->launched = 1; 2326 vmx->launched = 1;
2327
2328 intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
2329
2330 /* We need to handle NMIs before interrupts are enabled */
2331 if ((intr_info & INTR_INFO_INTR_TYPE_MASK) == 0x200) /* nmi */
2332 asm("int $2");
2325} 2333}
2326 2334
2327static void vmx_inject_page_fault(struct kvm_vcpu *vcpu, 2335static void vmx_inject_page_fault(struct kvm_vcpu *vcpu,
diff --git a/drivers/kvm/x86_emulate.c b/drivers/kvm/x86_emulate.c
index 9737c3b2f48c..a6ace302e0cd 100644
--- a/drivers/kvm/x86_emulate.c
+++ b/drivers/kvm/x86_emulate.c
@@ -212,7 +212,8 @@ static u16 twobyte_table[256] = {
212 0, 0, ByteOp | DstReg | SrcMem | ModRM | Mov, 212 0, 0, ByteOp | DstReg | SrcMem | ModRM | Mov,
213 DstReg | SrcMem16 | ModRM | Mov, 213 DstReg | SrcMem16 | ModRM | Mov,
214 /* 0xC0 - 0xCF */ 214 /* 0xC0 - 0xCF */
215 0, 0, 0, 0, 0, 0, 0, ImplicitOps | ModRM, 0, 0, 0, 0, 0, 0, 0, 0, 215 0, 0, 0, DstMem | SrcReg | ModRM | Mov, 0, 0, 0, ImplicitOps | ModRM,
216 0, 0, 0, 0, 0, 0, 0, 0,
216 /* 0xD0 - 0xDF */ 217 /* 0xD0 - 0xDF */
217 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 218 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
218 /* 0xE0 - 0xEF */ 219 /* 0xE0 - 0xEF */
@@ -596,11 +597,10 @@ x86_emulate_memop(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
596 case 0xf0: /* LOCK */ 597 case 0xf0: /* LOCK */
597 lock_prefix = 1; 598 lock_prefix = 1;
598 break; 599 break;
600 case 0xf2: /* REPNE/REPNZ */
599 case 0xf3: /* REP/REPE/REPZ */ 601 case 0xf3: /* REP/REPE/REPZ */
600 rep_prefix = 1; 602 rep_prefix = 1;
601 break; 603 break;
602 case 0xf2: /* REPNE/REPNZ */
603 break;
604 default: 604 default:
605 goto done_prefixes; 605 goto done_prefixes;
606 } 606 }
@@ -825,6 +825,14 @@ done_prefixes:
825 if (twobyte && b == 0x01 && modrm_reg == 7) 825 if (twobyte && b == 0x01 && modrm_reg == 7)
826 break; 826 break;
827 srcmem_common: 827 srcmem_common:
828 /*
829 * For instructions with a ModR/M byte, switch to register
830 * access if Mod = 3.
831 */
832 if ((d & ModRM) && modrm_mod == 3) {
833 src.type = OP_REG;
834 break;
835 }
828 src.type = OP_MEM; 836 src.type = OP_MEM;
829 src.ptr = (unsigned long *)cr2; 837 src.ptr = (unsigned long *)cr2;
830 src.val = 0; 838 src.val = 0;
@@ -893,6 +901,14 @@ done_prefixes:
893 dst.ptr = (unsigned long *)cr2; 901 dst.ptr = (unsigned long *)cr2;
894 dst.bytes = (d & ByteOp) ? 1 : op_bytes; 902 dst.bytes = (d & ByteOp) ? 1 : op_bytes;
895 dst.val = 0; 903 dst.val = 0;
904 /*
905 * For instructions with a ModR/M byte, switch to register
906 * access if Mod = 3.
907 */
908 if ((d & ModRM) && modrm_mod == 3) {
909 dst.type = OP_REG;
910 break;
911 }
896 if (d & BitOp) { 912 if (d & BitOp) {
897 unsigned long mask = ~(dst.bytes * 8 - 1); 913 unsigned long mask = ~(dst.bytes * 8 - 1);
898 914
@@ -1083,31 +1099,6 @@ push:
1083 case 0xd2 ... 0xd3: /* Grp2 */ 1099 case 0xd2 ... 0xd3: /* Grp2 */
1084 src.val = _regs[VCPU_REGS_RCX]; 1100 src.val = _regs[VCPU_REGS_RCX];
1085 goto grp2; 1101 goto grp2;
1086 case 0xe8: /* call (near) */ {
1087 long int rel;
1088 switch (op_bytes) {
1089 case 2:
1090 rel = insn_fetch(s16, 2, _eip);
1091 break;
1092 case 4:
1093 rel = insn_fetch(s32, 4, _eip);
1094 break;
1095 case 8:
1096 rel = insn_fetch(s64, 8, _eip);
1097 break;
1098 default:
1099 DPRINTF("Call: Invalid op_bytes\n");
1100 goto cannot_emulate;
1101 }
1102 src.val = (unsigned long) _eip;
1103 JMP_REL(rel);
1104 goto push;
1105 }
1106 case 0xe9: /* jmp rel */
1107 case 0xeb: /* jmp rel short */
1108 JMP_REL(src.val);
1109 no_wb = 1; /* Disable writeback. */
1110 break;
1111 case 0xf6 ... 0xf7: /* Grp3 */ 1102 case 0xf6 ... 0xf7: /* Grp3 */
1112 switch (modrm_reg) { 1103 switch (modrm_reg) {
1113 case 0 ... 1: /* test */ 1104 case 0 ... 1: /* test */
@@ -1350,6 +1341,32 @@ special_insn:
1350 case 0xae ... 0xaf: /* scas */ 1341 case 0xae ... 0xaf: /* scas */
1351 DPRINTF("Urk! I don't handle SCAS.\n"); 1342 DPRINTF("Urk! I don't handle SCAS.\n");
1352 goto cannot_emulate; 1343 goto cannot_emulate;
1344 case 0xe8: /* call (near) */ {
1345 long int rel;
1346 switch (op_bytes) {
1347 case 2:
1348 rel = insn_fetch(s16, 2, _eip);
1349 break;
1350 case 4:
1351 rel = insn_fetch(s32, 4, _eip);
1352 break;
1353 case 8:
1354 rel = insn_fetch(s64, 8, _eip);
1355 break;
1356 default:
1357 DPRINTF("Call: Invalid op_bytes\n");
1358 goto cannot_emulate;
1359 }
1360 src.val = (unsigned long) _eip;
1361 JMP_REL(rel);
1362 goto push;
1363 }
1364 case 0xe9: /* jmp rel */
1365 case 0xeb: /* jmp rel short */
1366 JMP_REL(src.val);
1367 no_wb = 1; /* Disable writeback. */
1368 break;
1369
1353 1370
1354 } 1371 }
1355 goto writeback; 1372 goto writeback;
@@ -1501,6 +1518,10 @@ twobyte_insn:
1501 dst.bytes = op_bytes; 1518 dst.bytes = op_bytes;
1502 dst.val = (d & ByteOp) ? (s8) src.val : (s16) src.val; 1519 dst.val = (d & ByteOp) ? (s8) src.val : (s16) src.val;
1503 break; 1520 break;
1521 case 0xc3: /* movnti */
1522 dst.bytes = op_bytes;
1523 dst.val = (op_bytes == 4) ? (u32) src.val : (u64) src.val;
1524 break;
1504 } 1525 }
1505 goto writeback; 1526 goto writeback;
1506 1527
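
The x86_emulate.c changes fold REPNE into the REP handling, decode MOVNTI (0f c3), move the near call/jmp cases into the special_insn block, and make ModR/M operands use register access when the mod field is 3 rather than always treating them as memory. A standalone reminder of the ModR/M split the last point relies on (example byte invented):

    #include <stdio.h>

    int main(void)
    {
            unsigned char modrm = 0xd8;             /* 11 011 000 */
            unsigned mod = modrm >> 6;
            unsigned reg = (modrm >> 3) & 7;
            unsigned rm  = modrm & 7;

            printf("mod=%u reg=%u rm=%u -> %s operand\n", mod, reg, rm,
                   mod == 3 ? "register" : "memory");
            return 0;
    }
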
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 927cb34c4805..7c426d07a555 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -274,7 +274,7 @@ static int write_sb_page(struct bitmap *bitmap, struct page *page, int wait)
274 if (bitmap->offset < 0) { 274 if (bitmap->offset < 0) {
275 /* DATA BITMAP METADATA */ 275 /* DATA BITMAP METADATA */
276 if (bitmap->offset 276 if (bitmap->offset
277 + page->index * (PAGE_SIZE/512) 277 + (long)(page->index * (PAGE_SIZE/512))
278 + size/512 > 0) 278 + size/512 > 0)
279 /* bitmap runs in to metadata */ 279 /* bitmap runs in to metadata */
280 return -EINVAL; 280 return -EINVAL;
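
The (long) cast in write_sb_page() looks cosmetic but is most plausibly about signedness: page->index is an unsigned long, so without the cast the negative bitmap->offset is promoted to unsigned and the "> 0" overlap check can never see a negative sum. A user-space illustration of the effect (types and values invented):

    #include <stdio.h>

    int main(void)
    {
            long offset = -64;              /* bitmap sectors before the superblock */
            unsigned long index = 2, sectors_per_page = 8, size_sectors = 8;

            /* unsigned arithmetic: the negative offset wraps, "> 0" is true */
            if (offset + index * sectors_per_page + size_sectors > 0)
                    printf("unsigned math: overlap reported\n");

            /* signed arithmetic, as after the cast: the sum stays negative */
            if (offset + (long)(index * sectors_per_page) + (long)size_sectors > 0)
                    printf("signed math: overlap reported\n");
            else
                    printf("signed math: no overlap\n");
            return 0;
    }
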
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 0eb5416798bd..ac54f697c508 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -348,16 +348,17 @@ static int crypt_convert(struct crypt_config *cc,
348 ctx->idx_out < ctx->bio_out->bi_vcnt) { 348 ctx->idx_out < ctx->bio_out->bi_vcnt) {
349 struct bio_vec *bv_in = bio_iovec_idx(ctx->bio_in, ctx->idx_in); 349 struct bio_vec *bv_in = bio_iovec_idx(ctx->bio_in, ctx->idx_in);
350 struct bio_vec *bv_out = bio_iovec_idx(ctx->bio_out, ctx->idx_out); 350 struct bio_vec *bv_out = bio_iovec_idx(ctx->bio_out, ctx->idx_out);
351 struct scatterlist sg_in = { 351 struct scatterlist sg_in, sg_out;
352 .page = bv_in->bv_page, 352
353 .offset = bv_in->bv_offset + ctx->offset_in, 353 sg_init_table(&sg_in, 1);
354 .length = 1 << SECTOR_SHIFT 354 sg_set_page(&sg_in, bv_in->bv_page);
355 }; 355 sg_in.offset = bv_in->bv_offset + ctx->offset_in;
356 struct scatterlist sg_out = { 356 sg_in.length = 1 << SECTOR_SHIFT;
357 .page = bv_out->bv_page, 357
358 .offset = bv_out->bv_offset + ctx->offset_out, 358 sg_init_table(&sg_out, 1);
359 .length = 1 << SECTOR_SHIFT 359 sg_set_page(&sg_out, bv_out->bv_page);
360 }; 360 sg_out.offset = bv_out->bv_offset + ctx->offset_out;
361 sg_out.length = 1 << SECTOR_SHIFT;
361 362
362 ctx->offset_in += sg_in.length; 363 ctx->offset_in += sg_in.length;
363 if (ctx->offset_in >= bv_in->bv_len) { 364 if (ctx->offset_in >= bv_in->bv_len) {
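
The dm-crypt hunk replaces designated-initializer scatterlists on the stack with sg_init_table() plus sg_set_page(); once an end-of-list marker (and, with CONFIG_DEBUG_SG, a magic value) is stored alongside the page pointer, plain structure initialization is no longer enough. The resulting pattern for a one-entry list, with page, off and len assumed in scope:

    struct scatterlist sg;

    sg_init_table(&sg, 1);          /* mark the single entry as the end */
    sg_set_page(&sg, page);         /* two-argument form used by this series */
    sg.offset = off;
    sg.length = len;
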
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 8ee181a01f52..80a67d789b72 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -376,7 +376,12 @@ static unsigned long get_stripe_work(struct stripe_head *sh)
376 ack++; 376 ack++;
377 377
378 sh->ops.count -= ack; 378 sh->ops.count -= ack;
379 BUG_ON(sh->ops.count < 0); 379 if (unlikely(sh->ops.count < 0)) {
380 printk(KERN_ERR "pending: %#lx ops.pending: %#lx ops.ack: %#lx "
381 "ops.complete: %#lx\n", pending, sh->ops.pending,
382 sh->ops.ack, sh->ops.complete);
383 BUG();
384 }
380 385
381 return pending; 386 return pending;
382} 387}
@@ -550,8 +555,7 @@ static void ops_complete_biofill(void *stripe_head_ref)
550 } 555 }
551 } 556 }
552 } 557 }
553 clear_bit(STRIPE_OP_BIOFILL, &sh->ops.ack); 558 set_bit(STRIPE_OP_BIOFILL, &sh->ops.complete);
554 clear_bit(STRIPE_OP_BIOFILL, &sh->ops.pending);
555 559
556 return_io(return_bi); 560 return_io(return_bi);
557 561
@@ -2893,6 +2897,13 @@ static void handle_stripe6(struct stripe_head *sh, struct page *tmp_page)
2893 s.expanded = test_bit(STRIPE_EXPAND_READY, &sh->state); 2897 s.expanded = test_bit(STRIPE_EXPAND_READY, &sh->state);
2894 /* Now to look around and see what can be done */ 2898 /* Now to look around and see what can be done */
2895 2899
2900 /* clean-up completed biofill operations */
2901 if (test_bit(STRIPE_OP_BIOFILL, &sh->ops.complete)) {
2902 clear_bit(STRIPE_OP_BIOFILL, &sh->ops.pending);
2903 clear_bit(STRIPE_OP_BIOFILL, &sh->ops.ack);
2904 clear_bit(STRIPE_OP_BIOFILL, &sh->ops.complete);
2905 }
2906
2896 rcu_read_lock(); 2907 rcu_read_lock();
2897 for (i=disks; i--; ) { 2908 for (i=disks; i--; ) {
2898 mdk_rdev_t *rdev; 2909 mdk_rdev_t *rdev;
diff --git a/drivers/media/common/ir-keymaps.c b/drivers/media/common/ir-keymaps.c
index aefcf28da1ca..185e8a860c1a 100644
--- a/drivers/media/common/ir-keymaps.c
+++ b/drivers/media/common/ir-keymaps.c
@@ -1074,41 +1074,41 @@ EXPORT_SYMBOL_GPL(ir_codes_manli);
1074/* Mike Baikov <mike@baikov.com> */ 1074/* Mike Baikov <mike@baikov.com> */
1075IR_KEYTAB_TYPE ir_codes_gotview7135[IR_KEYTAB_SIZE] = { 1075IR_KEYTAB_TYPE ir_codes_gotview7135[IR_KEYTAB_SIZE] = {
1076 1076
1077 [ 0x21 ] = KEY_POWER, 1077 [ 0x11 ] = KEY_POWER,
1078 [ 0x69 ] = KEY_TV, 1078 [ 0x35 ] = KEY_TV,
1079 [ 0x33 ] = KEY_0, 1079 [ 0x1b ] = KEY_0,
1080 [ 0x51 ] = KEY_1, 1080 [ 0x29 ] = KEY_1,
1081 [ 0x31 ] = KEY_2, 1081 [ 0x19 ] = KEY_2,
1082 [ 0x71 ] = KEY_3, 1082 [ 0x39 ] = KEY_3,
1083 [ 0x3b ] = KEY_4, 1083 [ 0x1f ] = KEY_4,
1084 [ 0x58 ] = KEY_5, 1084 [ 0x2c ] = KEY_5,
1085 [ 0x41 ] = KEY_6, 1085 [ 0x21 ] = KEY_6,
1086 [ 0x48 ] = KEY_7, 1086 [ 0x24 ] = KEY_7,
1087 [ 0x30 ] = KEY_8, 1087 [ 0x18 ] = KEY_8,
1088 [ 0x53 ] = KEY_9, 1088 [ 0x2b ] = KEY_9,
1089 [ 0x73 ] = KEY_AGAIN, /* LOOP */ 1089 [ 0x3b ] = KEY_AGAIN, /* LOOP */
1090 [ 0x0a ] = KEY_AUDIO, 1090 [ 0x06 ] = KEY_AUDIO,
1091 [ 0x61 ] = KEY_PRINT, /* PREVIEW */ 1091 [ 0x31 ] = KEY_PRINT, /* PREVIEW */
1092 [ 0x7a ] = KEY_VIDEO, 1092 [ 0x3e ] = KEY_VIDEO,
1093 [ 0x20 ] = KEY_CHANNELUP, 1093 [ 0x10 ] = KEY_CHANNELUP,
1094 [ 0x40 ] = KEY_CHANNELDOWN, 1094 [ 0x20 ] = KEY_CHANNELDOWN,
1095 [ 0x18 ] = KEY_VOLUMEDOWN, 1095 [ 0x0c ] = KEY_VOLUMEDOWN,
1096 [ 0x50 ] = KEY_VOLUMEUP, 1096 [ 0x28 ] = KEY_VOLUMEUP,
1097 [ 0x10 ] = KEY_MUTE, 1097 [ 0x08 ] = KEY_MUTE,
1098 [ 0x4a ] = KEY_SEARCH, 1098 [ 0x26 ] = KEY_SEARCH, /*SCAN*/
1099 [ 0x7b ] = KEY_SHUFFLE, /* SNAPSHOT */ 1099 [ 0x3f ] = KEY_SHUFFLE, /* SNAPSHOT */
1100 [ 0x22 ] = KEY_RECORD, 1100 [ 0x12 ] = KEY_RECORD,
1101 [ 0x62 ] = KEY_STOP, 1101 [ 0x32 ] = KEY_STOP,
1102 [ 0x78 ] = KEY_PLAY, 1102 [ 0x3c ] = KEY_PLAY,
1103 [ 0x39 ] = KEY_REWIND, 1103 [ 0x1d ] = KEY_REWIND,
1104 [ 0x59 ] = KEY_PAUSE, 1104 [ 0x2d ] = KEY_PAUSE,
1105 [ 0x19 ] = KEY_FORWARD, 1105 [ 0x0d ] = KEY_FORWARD,
1106 [ 0x09 ] = KEY_ZOOM, 1106 [ 0x05 ] = KEY_ZOOM, /*FULL*/
1107 1107
1108 [ 0x52 ] = KEY_F21, /* LIVE TIMESHIFT */ 1108 [ 0x2a ] = KEY_F21, /* LIVE TIMESHIFT */
1109 [ 0x1a ] = KEY_F22, /* MIN TIMESHIFT */ 1109 [ 0x0e ] = KEY_F22, /* MIN TIMESHIFT */
1110 [ 0x3a ] = KEY_F23, /* TIMESHIFT */ 1110 [ 0x1e ] = KEY_F23, /* TIMESHIFT */
1111 [ 0x70 ] = KEY_F24, /* NORMAL TIMESHIFT */ 1111 [ 0x38 ] = KEY_F24, /* NORMAL TIMESHIFT */
1112}; 1112};
1113 1113
1114EXPORT_SYMBOL_GPL(ir_codes_gotview7135); 1114EXPORT_SYMBOL_GPL(ir_codes_gotview7135);
diff --git a/drivers/media/common/saa7146_core.c b/drivers/media/common/saa7146_core.c
index 365a22118a09..2b1f8b4be00a 100644
--- a/drivers/media/common/saa7146_core.c
+++ b/drivers/media/common/saa7146_core.c
@@ -112,12 +112,13 @@ static struct scatterlist* vmalloc_to_sg(unsigned char *virt, int nr_pages)
112 sglist = kcalloc(nr_pages, sizeof(struct scatterlist), GFP_KERNEL); 112 sglist = kcalloc(nr_pages, sizeof(struct scatterlist), GFP_KERNEL);
113 if (NULL == sglist) 113 if (NULL == sglist)
114 return NULL; 114 return NULL;
115 sg_init_table(sglist, nr_pages);
115 for (i = 0; i < nr_pages; i++, virt += PAGE_SIZE) { 116 for (i = 0; i < nr_pages; i++, virt += PAGE_SIZE) {
116 pg = vmalloc_to_page(virt); 117 pg = vmalloc_to_page(virt);
117 if (NULL == pg) 118 if (NULL == pg)
118 goto err; 119 goto err;
119 BUG_ON(PageHighMem(pg)); 120 BUG_ON(PageHighMem(pg));
120 sglist[i].page = pg; 121 sg_set_page(&sglist[i], pg);
121 sglist[i].length = PAGE_SIZE; 122 sglist[i].length = PAGE_SIZE;
122 } 123 }
123 return sglist; 124 return sglist;
diff --git a/drivers/media/dvb/cinergyT2/cinergyT2.c b/drivers/media/dvb/cinergyT2/cinergyT2.c
index a05e5c182288..db08b0a8888a 100644
--- a/drivers/media/dvb/cinergyT2/cinergyT2.c
+++ b/drivers/media/dvb/cinergyT2/cinergyT2.c
@@ -345,7 +345,9 @@ static int cinergyt2_start_feed(struct dvb_demux_feed *dvbdmxfeed)
345 struct dvb_demux *demux = dvbdmxfeed->demux; 345 struct dvb_demux *demux = dvbdmxfeed->demux;
346 struct cinergyt2 *cinergyt2 = demux->priv; 346 struct cinergyt2 *cinergyt2 = demux->priv;
347 347
348 if (cinergyt2->disconnect_pending || mutex_lock_interruptible(&cinergyt2->sem)) 348 if (cinergyt2->disconnect_pending)
349 return -EAGAIN;
350 if (mutex_lock_interruptible(&cinergyt2->sem))
349 return -ERESTARTSYS; 351 return -ERESTARTSYS;
350 352
351 if (cinergyt2->streaming == 0) 353 if (cinergyt2->streaming == 0)
@@ -361,7 +363,9 @@ static int cinergyt2_stop_feed(struct dvb_demux_feed *dvbdmxfeed)
361 struct dvb_demux *demux = dvbdmxfeed->demux; 363 struct dvb_demux *demux = dvbdmxfeed->demux;
362 struct cinergyt2 *cinergyt2 = demux->priv; 364 struct cinergyt2 *cinergyt2 = demux->priv;
363 365
364 if (cinergyt2->disconnect_pending || mutex_lock_interruptible(&cinergyt2->sem)) 366 if (cinergyt2->disconnect_pending)
367 return -EAGAIN;
368 if (mutex_lock_interruptible(&cinergyt2->sem))
365 return -ERESTARTSYS; 369 return -ERESTARTSYS;
366 370
367 if (--cinergyt2->streaming == 0) 371 if (--cinergyt2->streaming == 0)
@@ -481,12 +485,16 @@ static int cinergyt2_open (struct inode *inode, struct file *file)
481{ 485{
482 struct dvb_device *dvbdev = file->private_data; 486 struct dvb_device *dvbdev = file->private_data;
483 struct cinergyt2 *cinergyt2 = dvbdev->priv; 487 struct cinergyt2 *cinergyt2 = dvbdev->priv;
484 int err = -ERESTARTSYS; 488 int err = -EAGAIN;
485 489
486 if (cinergyt2->disconnect_pending || mutex_lock_interruptible(&cinergyt2->wq_sem)) 490 if (cinergyt2->disconnect_pending)
491 goto out;
492 err = mutex_lock_interruptible(&cinergyt2->wq_sem);
493 if (err)
487 goto out; 494 goto out;
488 495
489 if (mutex_lock_interruptible(&cinergyt2->sem)) 496 err = mutex_lock_interruptible(&cinergyt2->sem);
497 if (err)
490 goto out_unlock1; 498 goto out_unlock1;
491 499
492 if ((err = dvb_generic_open(inode, file))) 500 if ((err = dvb_generic_open(inode, file)))
@@ -550,7 +558,9 @@ static unsigned int cinergyt2_poll (struct file *file, struct poll_table_struct
550 struct cinergyt2 *cinergyt2 = dvbdev->priv; 558 struct cinergyt2 *cinergyt2 = dvbdev->priv;
551 unsigned int mask = 0; 559 unsigned int mask = 0;
552 560
553 if (cinergyt2->disconnect_pending || mutex_lock_interruptible(&cinergyt2->sem)) 561 if (cinergyt2->disconnect_pending)
562 return -EAGAIN;
563 if (mutex_lock_interruptible(&cinergyt2->sem))
554 return -ERESTARTSYS; 564 return -ERESTARTSYS;
555 565
556 poll_wait(file, &cinergyt2->poll_wq, wait); 566 poll_wait(file, &cinergyt2->poll_wq, wait);
@@ -625,7 +635,9 @@ static int cinergyt2_ioctl (struct inode *inode, struct file *file,
625 if (copy_from_user(&p, (void __user*) arg, sizeof(p))) 635 if (copy_from_user(&p, (void __user*) arg, sizeof(p)))
626 return -EFAULT; 636 return -EFAULT;
627 637
628 if (cinergyt2->disconnect_pending || mutex_lock_interruptible(&cinergyt2->sem)) 638 if (cinergyt2->disconnect_pending)
639 return -EAGAIN;
640 if (mutex_lock_interruptible(&cinergyt2->sem))
629 return -ERESTARTSYS; 641 return -ERESTARTSYS;
630 642
631 param->cmd = CINERGYT2_EP1_SET_TUNER_PARAMETERS; 643 param->cmd = CINERGYT2_EP1_SET_TUNER_PARAMETERS;
@@ -996,7 +1008,9 @@ static int cinergyt2_suspend (struct usb_interface *intf, pm_message_t state)
996{ 1008{
997 struct cinergyt2 *cinergyt2 = usb_get_intfdata (intf); 1009 struct cinergyt2 *cinergyt2 = usb_get_intfdata (intf);
998 1010
999 if (cinergyt2->disconnect_pending || mutex_lock_interruptible(&cinergyt2->wq_sem)) 1011 if (cinergyt2->disconnect_pending)
1012 return -EAGAIN;
1013 if (mutex_lock_interruptible(&cinergyt2->wq_sem))
1000 return -ERESTARTSYS; 1014 return -ERESTARTSYS;
1001 1015
1002 cinergyt2_suspend_rc(cinergyt2); 1016 cinergyt2_suspend_rc(cinergyt2);
@@ -1017,16 +1031,18 @@ static int cinergyt2_resume (struct usb_interface *intf)
1017{ 1031{
1018 struct cinergyt2 *cinergyt2 = usb_get_intfdata (intf); 1032 struct cinergyt2 *cinergyt2 = usb_get_intfdata (intf);
1019 struct dvbt_set_parameters_msg *param = &cinergyt2->param; 1033 struct dvbt_set_parameters_msg *param = &cinergyt2->param;
1020 int err = -ERESTARTSYS; 1034 int err = -EAGAIN;
1021 1035
1022 if (cinergyt2->disconnect_pending || mutex_lock_interruptible(&cinergyt2->wq_sem)) 1036 if (cinergyt2->disconnect_pending)
1037 goto out;
1038 err = mutex_lock_interruptible(&cinergyt2->wq_sem);
1039 if (err)
1023 goto out; 1040 goto out;
1024 1041
1025 if (mutex_lock_interruptible(&cinergyt2->sem)) 1042 err = mutex_lock_interruptible(&cinergyt2->sem);
1043 if (err)
1026 goto out_unlock1; 1044 goto out_unlock1;
1027 1045
1028 err = 0;
1029
1030 if (!cinergyt2->sleeping) { 1046 if (!cinergyt2->sleeping) {
1031 cinergyt2_sleep(cinergyt2, 0); 1047 cinergyt2_sleep(cinergyt2, 0);
1032 cinergyt2_command(cinergyt2, (char *) param, sizeof(*param), NULL, 0); 1048 cinergyt2_command(cinergyt2, (char *) param, sizeof(*param), NULL, 0);
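
Every cinergyT2 hunk applies the same split: the old "if (disconnect_pending || mutex_lock_interruptible(...))" collapsed two unrelated failures into one error code, while the new form returns -EAGAIN for a device that is going away and keeps -ERESTARTSYS (or the lock's own return value) for an interrupted sleep. The pattern in isolation, with a placeholder device structure:

    if (dev->disconnect_pending)
            return -EAGAIN;
    if (mutex_lock_interruptible(&dev->sem))
            return -ERESTARTSYS;
    /* ... critical section ... */
    mutex_unlock(&dev->sem);
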
diff --git a/drivers/media/dvb/dvb-core/dvb_ca_en50221.c b/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
index 084a508a03da..89437fdab8be 100644
--- a/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
+++ b/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
@@ -972,7 +972,7 @@ static int dvb_ca_en50221_thread(void *data)
972 /* main loop */ 972 /* main loop */
973 while (!kthread_should_stop()) { 973 while (!kthread_should_stop()) {
974 /* sleep for a bit */ 974 /* sleep for a bit */
975 while (!ca->wakeup) { 975 if (!ca->wakeup) {
976 set_current_state(TASK_INTERRUPTIBLE); 976 set_current_state(TASK_INTERRUPTIBLE);
977 schedule_timeout(ca->delay); 977 schedule_timeout(ca->delay);
978 if (kthread_should_stop()) 978 if (kthread_should_stop())
diff --git a/drivers/media/dvb/dvb-usb/dib0700_devices.c b/drivers/media/dvb/dvb-usb/dib0700_devices.c
index e8c4a8694532..58452b52002c 100644
--- a/drivers/media/dvb/dvb-usb/dib0700_devices.c
+++ b/drivers/media/dvb/dvb-usb/dib0700_devices.c
@@ -828,7 +828,7 @@ MODULE_DEVICE_TABLE(usb, dib0700_usb_id_table);
828#define DIB0700_DEFAULT_DEVICE_PROPERTIES \ 828#define DIB0700_DEFAULT_DEVICE_PROPERTIES \
829 .caps = DVB_USB_IS_AN_I2C_ADAPTER, \ 829 .caps = DVB_USB_IS_AN_I2C_ADAPTER, \
830 .usb_ctrl = DEVICE_SPECIFIC, \ 830 .usb_ctrl = DEVICE_SPECIFIC, \
831 .firmware = "dvb-usb-dib0700-03-pre1.fw", \ 831 .firmware = "dvb-usb-dib0700-1.10.fw", \
832 .download_firmware = dib0700_download_firmware, \ 832 .download_firmware = dib0700_download_firmware, \
833 .no_reconnect = 1, \ 833 .no_reconnect = 1, \
834 .size_of_priv = sizeof(struct dib0700_state), \ 834 .size_of_priv = sizeof(struct dib0700_state), \
diff --git a/drivers/media/radio/miropcm20-radio.c b/drivers/media/radio/miropcm20-radio.c
index c7c9d1dc0690..3ae56fef8c92 100644
--- a/drivers/media/radio/miropcm20-radio.c
+++ b/drivers/media/radio/miropcm20-radio.c
@@ -229,7 +229,6 @@ static struct video_device pcm20_radio = {
229 .owner = THIS_MODULE, 229 .owner = THIS_MODULE,
230 .name = "Miro PCM 20 radio", 230 .name = "Miro PCM 20 radio",
231 .type = VID_TYPE_TUNER, 231 .type = VID_TYPE_TUNER,
232 .hardware = VID_HARDWARE_RTRACK,
233 .fops = &pcm20_fops, 232 .fops = &pcm20_fops,
234 .priv = &pcm20_unit 233 .priv = &pcm20_unit
235}; 234};
diff --git a/drivers/media/radio/radio-gemtek.c b/drivers/media/radio/radio-gemtek.c
index 0c963db03614..5e4b9ddb23c0 100644
--- a/drivers/media/radio/radio-gemtek.c
+++ b/drivers/media/radio/radio-gemtek.c
@@ -554,7 +554,6 @@ static struct video_device gemtek_radio = {
554 .owner = THIS_MODULE, 554 .owner = THIS_MODULE,
555 .name = "GemTek Radio card", 555 .name = "GemTek Radio card",
556 .type = VID_TYPE_TUNER, 556 .type = VID_TYPE_TUNER,
557 .hardware = VID_HARDWARE_GEMTEK,
558 .fops = &gemtek_fops, 557 .fops = &gemtek_fops,
559 .vidioc_querycap = vidioc_querycap, 558 .vidioc_querycap = vidioc_querycap,
560 .vidioc_g_tuner = vidioc_g_tuner, 559 .vidioc_g_tuner = vidioc_g_tuner,
diff --git a/drivers/media/video/arv.c b/drivers/media/video/arv.c
index 19e9929ffa0f..c94a4d0f2804 100644
--- a/drivers/media/video/arv.c
+++ b/drivers/media/video/arv.c
@@ -755,7 +755,6 @@ static struct video_device ar_template = {
755 .owner = THIS_MODULE, 755 .owner = THIS_MODULE,
756 .name = "Colour AR VGA", 756 .name = "Colour AR VGA",
757 .type = VID_TYPE_CAPTURE, 757 .type = VID_TYPE_CAPTURE,
758 .hardware = VID_HARDWARE_ARV,
759 .fops = &ar_fops, 758 .fops = &ar_fops,
760 .release = ar_release, 759 .release = ar_release,
761 .minor = -1, 760 .minor = -1,
diff --git a/drivers/media/video/bt8xx/bttv-driver.c b/drivers/media/video/bt8xx/bttv-driver.c
index 7a332b3efe51..9feeb636ff9b 100644
--- a/drivers/media/video/bt8xx/bttv-driver.c
+++ b/drivers/media/video/bt8xx/bttv-driver.c
@@ -3877,7 +3877,6 @@ static struct video_device bttv_video_template =
3877 .name = "UNSET", 3877 .name = "UNSET",
3878 .type = VID_TYPE_CAPTURE|VID_TYPE_TUNER| 3878 .type = VID_TYPE_CAPTURE|VID_TYPE_TUNER|
3879 VID_TYPE_CLIPPING|VID_TYPE_SCALES, 3879 VID_TYPE_CLIPPING|VID_TYPE_SCALES,
3880 .hardware = VID_HARDWARE_BT848,
3881 .fops = &bttv_fops, 3880 .fops = &bttv_fops,
3882 .minor = -1, 3881 .minor = -1,
3883}; 3882};
@@ -3886,7 +3885,6 @@ static struct video_device bttv_vbi_template =
3886{ 3885{
3887 .name = "bt848/878 vbi", 3886 .name = "bt848/878 vbi",
3888 .type = VID_TYPE_TUNER|VID_TYPE_TELETEXT, 3887 .type = VID_TYPE_TUNER|VID_TYPE_TELETEXT,
3889 .hardware = VID_HARDWARE_BT848,
3890 .fops = &bttv_fops, 3888 .fops = &bttv_fops,
3891 .minor = -1, 3889 .minor = -1,
3892}; 3890};
@@ -4034,7 +4032,6 @@ static struct video_device radio_template =
4034{ 4032{
4035 .name = "bt848/878 radio", 4033 .name = "bt848/878 radio",
4036 .type = VID_TYPE_TUNER, 4034 .type = VID_TYPE_TUNER,
4037 .hardware = VID_HARDWARE_BT848,
4038 .fops = &radio_fops, 4035 .fops = &radio_fops,
4039 .minor = -1, 4036 .minor = -1,
4040}; 4037};
diff --git a/drivers/media/video/bw-qcam.c b/drivers/media/video/bw-qcam.c
index 7f7e3d3398d0..58423525591f 100644
--- a/drivers/media/video/bw-qcam.c
+++ b/drivers/media/video/bw-qcam.c
@@ -899,7 +899,6 @@ static struct video_device qcam_template=
899 .owner = THIS_MODULE, 899 .owner = THIS_MODULE,
900 .name = "Connectix Quickcam", 900 .name = "Connectix Quickcam",
901 .type = VID_TYPE_CAPTURE, 901 .type = VID_TYPE_CAPTURE,
902 .hardware = VID_HARDWARE_QCAM_BW,
903 .fops = &qcam_fops, 902 .fops = &qcam_fops,
904}; 903};
905 904
diff --git a/drivers/media/video/c-qcam.c b/drivers/media/video/c-qcam.c
index f76c6a6c3766..cf1546b5a7f1 100644
--- a/drivers/media/video/c-qcam.c
+++ b/drivers/media/video/c-qcam.c
@@ -699,7 +699,6 @@ static struct video_device qcam_template=
699 .owner = THIS_MODULE, 699 .owner = THIS_MODULE,
700 .name = "Colour QuickCam", 700 .name = "Colour QuickCam",
701 .type = VID_TYPE_CAPTURE, 701 .type = VID_TYPE_CAPTURE,
702 .hardware = VID_HARDWARE_QCAM_C,
703 .fops = &qcam_fops, 702 .fops = &qcam_fops,
704}; 703};
705 704
diff --git a/drivers/media/video/cpia.c b/drivers/media/video/cpia.c
index a1d02e5ce0fd..7c630f5ee725 100644
--- a/drivers/media/video/cpia.c
+++ b/drivers/media/video/cpia.c
@@ -65,10 +65,6 @@ MODULE_PARM_DESC(colorspace_conv,
65 65
66#define ABOUT "V4L-Driver for Vision CPiA based cameras" 66#define ABOUT "V4L-Driver for Vision CPiA based cameras"
67 67
68#ifndef VID_HARDWARE_CPIA
69#define VID_HARDWARE_CPIA 24 /* FIXME -> from linux/videodev.h */
70#endif
71
72#define CPIA_MODULE_CPIA (0<<5) 68#define CPIA_MODULE_CPIA (0<<5)
73#define CPIA_MODULE_SYSTEM (1<<5) 69#define CPIA_MODULE_SYSTEM (1<<5)
74#define CPIA_MODULE_VP_CTRL (5<<5) 70#define CPIA_MODULE_VP_CTRL (5<<5)
@@ -3804,7 +3800,6 @@ static struct video_device cpia_template = {
3804 .owner = THIS_MODULE, 3800 .owner = THIS_MODULE,
3805 .name = "CPiA Camera", 3801 .name = "CPiA Camera",
3806 .type = VID_TYPE_CAPTURE, 3802 .type = VID_TYPE_CAPTURE,
3807 .hardware = VID_HARDWARE_CPIA,
3808 .fops = &cpia_fops, 3803 .fops = &cpia_fops,
3809}; 3804};
3810 3805
diff --git a/drivers/media/video/cpia2/cpia2_v4l.c b/drivers/media/video/cpia2/cpia2_v4l.c
index e3aaba1e0e0a..e378abec806d 100644
--- a/drivers/media/video/cpia2/cpia2_v4l.c
+++ b/drivers/media/video/cpia2/cpia2_v4l.c
@@ -86,10 +86,6 @@ MODULE_LICENSE("GPL");
86 86
87#define ABOUT "V4L-Driver for Vision CPiA2 based cameras" 87#define ABOUT "V4L-Driver for Vision CPiA2 based cameras"
88 88
89#ifndef VID_HARDWARE_CPIA2
90#error "VID_HARDWARE_CPIA2 should have been defined in linux/videodev.h"
91#endif
92
93struct control_menu_info { 89struct control_menu_info {
94 int value; 90 int value;
95 char name[32]; 91 char name[32];
@@ -1942,7 +1938,6 @@ static struct video_device cpia2_template = {
1942 .type= VID_TYPE_CAPTURE, 1938 .type= VID_TYPE_CAPTURE,
1943 .type2 = V4L2_CAP_VIDEO_CAPTURE | 1939 .type2 = V4L2_CAP_VIDEO_CAPTURE |
1944 V4L2_CAP_STREAMING, 1940 V4L2_CAP_STREAMING,
1945 .hardware= VID_HARDWARE_CPIA2,
1946 .minor= -1, 1941 .minor= -1,
1947 .fops= &fops_template, 1942 .fops= &fops_template,
1948 .release= video_device_release, 1943 .release= video_device_release,
diff --git a/drivers/media/video/cx23885/cx23885-core.c b/drivers/media/video/cx23885/cx23885-core.c
index af16505bd2e0..3cdd136477e5 100644
--- a/drivers/media/video/cx23885/cx23885-core.c
+++ b/drivers/media/video/cx23885/cx23885-core.c
@@ -793,7 +793,7 @@ static int cx23885_dev_setup(struct cx23885_dev *dev)
793 dev->pci->subsystem_device); 793 dev->pci->subsystem_device);
794 794
795 cx23885_devcount--; 795 cx23885_devcount--;
796 goto fail_free; 796 return -ENODEV;
797 } 797 }
798 798
799 /* PCIe stuff */ 799 /* PCIe stuff */
@@ -835,10 +835,6 @@ static int cx23885_dev_setup(struct cx23885_dev *dev)
835 } 835 }
836 836
837 return 0; 837 return 0;
838
839fail_free:
840 kfree(dev);
841 return -ENODEV;
842} 838}
843 839
844void cx23885_dev_unregister(struct cx23885_dev *dev) 840void cx23885_dev_unregister(struct cx23885_dev *dev)
diff --git a/drivers/media/video/cx88/cx88-alsa.c b/drivers/media/video/cx88/cx88-alsa.c
index 141dadf7cf1b..40ffd7a5579a 100644
--- a/drivers/media/video/cx88/cx88-alsa.c
+++ b/drivers/media/video/cx88/cx88-alsa.c
@@ -39,6 +39,7 @@
39#include <sound/pcm_params.h> 39#include <sound/pcm_params.h>
40#include <sound/control.h> 40#include <sound/control.h>
41#include <sound/initval.h> 41#include <sound/initval.h>
42#include <sound/tlv.h>
42 43
43#include "cx88.h" 44#include "cx88.h"
44#include "cx88-reg.h" 45#include "cx88-reg.h"
@@ -82,6 +83,7 @@ typedef struct cx88_audio_dev snd_cx88_card_t;
82 83
83 84
84 85
86
85/**************************************************************************** 87/****************************************************************************
86 Module global static vars 88 Module global static vars
87 ****************************************************************************/ 89 ****************************************************************************/
@@ -545,8 +547,8 @@ static int __devinit snd_cx88_pcm(snd_cx88_card_t *chip, int device, char *name)
545/**************************************************************************** 547/****************************************************************************
546 CONTROL INTERFACE 548 CONTROL INTERFACE
547 ****************************************************************************/ 549 ****************************************************************************/
548static int snd_cx88_capture_volume_info(struct snd_kcontrol *kcontrol, 550static int snd_cx88_volume_info(struct snd_kcontrol *kcontrol,
549 struct snd_ctl_elem_info *info) 551 struct snd_ctl_elem_info *info)
550{ 552{
551 info->type = SNDRV_CTL_ELEM_TYPE_INTEGER; 553 info->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
552 info->count = 2; 554 info->count = 2;
@@ -556,9 +558,8 @@ static int snd_cx88_capture_volume_info(struct snd_kcontrol *kcontrol,
556 return 0; 558 return 0;
557} 559}
558 560
559/* OK - TODO: test it */ 561static int snd_cx88_volume_get(struct snd_kcontrol *kcontrol,
560static int snd_cx88_capture_volume_get(struct snd_kcontrol *kcontrol, 562 struct snd_ctl_elem_value *value)
561 struct snd_ctl_elem_value *value)
562{ 563{
563 snd_cx88_card_t *chip = snd_kcontrol_chip(kcontrol); 564 snd_cx88_card_t *chip = snd_kcontrol_chip(kcontrol);
564 struct cx88_core *core=chip->core; 565 struct cx88_core *core=chip->core;
@@ -573,8 +574,8 @@ static int snd_cx88_capture_volume_get(struct snd_kcontrol *kcontrol,
573} 574}
574 575
575/* OK - TODO: test it */ 576/* OK - TODO: test it */
576static int snd_cx88_capture_volume_put(struct snd_kcontrol *kcontrol, 577static int snd_cx88_volume_put(struct snd_kcontrol *kcontrol,
577 struct snd_ctl_elem_value *value) 578 struct snd_ctl_elem_value *value)
578{ 579{
579 snd_cx88_card_t *chip = snd_kcontrol_chip(kcontrol); 580 snd_cx88_card_t *chip = snd_kcontrol_chip(kcontrol);
580 struct cx88_core *core=chip->core; 581 struct cx88_core *core=chip->core;
@@ -605,14 +606,67 @@ static int snd_cx88_capture_volume_put(struct snd_kcontrol *kcontrol,
605 return changed; 606 return changed;
606} 607}
607 608
608static struct snd_kcontrol_new snd_cx88_capture_volume = { 609static const DECLARE_TLV_DB_SCALE(snd_cx88_db_scale, -6300, 100, 0);
610
611static struct snd_kcontrol_new snd_cx88_volume = {
612 .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
613 .access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
614 SNDRV_CTL_ELEM_ACCESS_TLV_READ,
615 .name = "Playback Volume",
616 .info = snd_cx88_volume_info,
617 .get = snd_cx88_volume_get,
618 .put = snd_cx88_volume_put,
619 .tlv.p = snd_cx88_db_scale,
620};
621
622static int snd_cx88_switch_get(struct snd_kcontrol *kcontrol,
623 struct snd_ctl_elem_value *value)
624{
625 snd_cx88_card_t *chip = snd_kcontrol_chip(kcontrol);
626 struct cx88_core *core = chip->core;
627 u32 bit = kcontrol->private_value;
628
629 value->value.integer.value[0] = !(cx_read(AUD_VOL_CTL) & bit);
630 return 0;
631}
632
633static int snd_cx88_switch_put(struct snd_kcontrol *kcontrol,
634 struct snd_ctl_elem_value *value)
635{
636 snd_cx88_card_t *chip = snd_kcontrol_chip(kcontrol);
637 struct cx88_core *core = chip->core;
638 u32 bit = kcontrol->private_value;
639 int ret = 0;
640 u32 vol;
641
642 spin_lock_irq(&chip->reg_lock);
643 vol = cx_read(AUD_VOL_CTL);
644 if (value->value.integer.value[0] != !(vol & bit)) {
645 vol ^= bit;
646 cx_write(AUD_VOL_CTL, vol);
647 ret = 1;
648 }
649 spin_unlock_irq(&chip->reg_lock);
650 return ret;
651}
652
653static struct snd_kcontrol_new snd_cx88_dac_switch = {
609 .iface = SNDRV_CTL_ELEM_IFACE_MIXER, 654 .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
610 .name = "Capture Volume", 655 .name = "Playback Switch",
611 .info = snd_cx88_capture_volume_info, 656 .info = snd_ctl_boolean_mono_info,
612 .get = snd_cx88_capture_volume_get, 657 .get = snd_cx88_switch_get,
613 .put = snd_cx88_capture_volume_put, 658 .put = snd_cx88_switch_put,
659 .private_value = (1<<8),
614}; 660};
615 661
662static struct snd_kcontrol_new snd_cx88_source_switch = {
663 .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
664 .name = "Capture Switch",
665 .info = snd_ctl_boolean_mono_info,
666 .get = snd_cx88_switch_get,
667 .put = snd_cx88_switch_put,
668 .private_value = (1<<6),
669};
616 670
617/**************************************************************************** 671/****************************************************************************
618 Basic Flow for Sound Devices 672 Basic Flow for Sound Devices
@@ -762,7 +816,13 @@ static int __devinit cx88_audio_initdev(struct pci_dev *pci,
762 if (err < 0) 816 if (err < 0)
763 goto error; 817 goto error;
764 818
765 err = snd_ctl_add(card, snd_ctl_new1(&snd_cx88_capture_volume, chip)); 819 err = snd_ctl_add(card, snd_ctl_new1(&snd_cx88_volume, chip));
820 if (err < 0)
821 goto error;
822 err = snd_ctl_add(card, snd_ctl_new1(&snd_cx88_dac_switch, chip));
823 if (err < 0)
824 goto error;
825 err = snd_ctl_add(card, snd_ctl_new1(&snd_cx88_source_switch, chip));
766 if (err < 0) 826 if (err < 0)
767 goto error; 827 goto error;
768 828
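
Note on the cx88-alsa hunks above: they attach a dB-scaled "Playback Volume" control plus two boolean switches to the sound card. As a hedged illustration of the general pattern (not the driver's exact code; the chip type, callback bodies and ranges below are invented), a TLV-annotated ALSA mixer control is typically declared and registered like this:

#include <sound/core.h>
#include <sound/control.h>
#include <sound/tlv.h>

/* -63.00 dB .. 0.00 dB in 1.00 dB steps, no explicit mute step */
static const DECLARE_TLV_DB_SCALE(example_db_scale, -6300, 100, 0);

static int example_volume_info(struct snd_kcontrol *kcontrol,
                               struct snd_ctl_elem_info *info)
{
        info->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
        info->count = 2;                 /* stereo */
        info->value.integer.min = 0;
        info->value.integer.max = 0x3f;  /* 63 steps of 1 dB */
        return 0;
}

static int example_volume_get(struct snd_kcontrol *kcontrol,
                              struct snd_ctl_elem_value *value)
{
        /* stub: a real driver reads the hardware volume register here */
        value->value.integer.value[0] = 0;
        value->value.integer.value[1] = 0;
        return 0;
}

static int example_volume_put(struct snd_kcontrol *kcontrol,
                              struct snd_ctl_elem_value *value)
{
        /* stub: a real driver writes the register and returns 1 on change */
        return 0;
}

static struct snd_kcontrol_new example_volume = {
        .iface  = SNDRV_CTL_ELEM_IFACE_MIXER,
        .name   = "Playback Volume",
        .access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
                  SNDRV_CTL_ELEM_ACCESS_TLV_READ,
        .info   = example_volume_info,
        .get    = example_volume_get,
        .put    = example_volume_put,
        .tlv.p  = example_db_scale,
};

/* Registration follows the same shape as cx88_audio_initdev() above:
 *     err = snd_ctl_add(card, snd_ctl_new1(&example_volume, chip));
 */
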
diff --git a/drivers/media/video/cx88/cx88-blackbird.c b/drivers/media/video/cx88/cx88-blackbird.c
index 6d6f5048d762..f33f0b47142c 100644
--- a/drivers/media/video/cx88/cx88-blackbird.c
+++ b/drivers/media/video/cx88/cx88-blackbird.c
@@ -527,44 +527,6 @@ static void blackbird_codec_settings(struct cx8802_dev *dev)
527 cx2341x_update(dev, blackbird_mbox_func, NULL, &dev->params); 527 cx2341x_update(dev, blackbird_mbox_func, NULL, &dev->params);
528} 528}
529 529
530static struct v4l2_mpeg_compression default_mpeg_params = {
531 .st_type = V4L2_MPEG_PS_2,
532 .st_bitrate = {
533 .mode = V4L2_BITRATE_CBR,
534 .min = 0,
535 .target = 0,
536 .max = 0
537 },
538 .ts_pid_pmt = 16,
539 .ts_pid_audio = 260,
540 .ts_pid_video = 256,
541 .ts_pid_pcr = 259,
542 .ps_size = 0,
543 .au_type = V4L2_MPEG_AU_2_II,
544 .au_bitrate = {
545 .mode = V4L2_BITRATE_CBR,
546 .min = 224,
547 .target = 224,
548 .max = 224
549 },
550 .au_sample_rate = 48000,
551 .au_pesid = 0,
552 .vi_type = V4L2_MPEG_VI_2,
553 .vi_aspect_ratio = V4L2_MPEG_ASPECT_4_3,
554 .vi_bitrate = {
555 .mode = V4L2_BITRATE_CBR,
556 .min = 4000,
557 .target = 4500,
558 .max = 6000
559 },
560 .vi_frame_rate = 25,
561 .vi_frames_per_gop = 12,
562 .vi_bframes_count = 2,
563 .vi_pesid = 0,
564 .closed_gops = 1,
565 .pulldown = 0
566};
567
568static int blackbird_initialize_codec(struct cx8802_dev *dev) 530static int blackbird_initialize_codec(struct cx8802_dev *dev)
569{ 531{
570 struct cx88_core *core = dev->core; 532 struct cx88_core *core = dev->core;
@@ -852,23 +814,6 @@ static int vidioc_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
852 return videobuf_streamoff(&fh->mpegq); 814 return videobuf_streamoff(&fh->mpegq);
853} 815}
854 816
855static int vidioc_g_mpegcomp (struct file *file, void *fh,
856 struct v4l2_mpeg_compression *f)
857{
858 printk(KERN_WARNING "VIDIOC_G_MPEGCOMP is obsolete. "
859 "Replace with VIDIOC_G_EXT_CTRLS!");
860 memcpy(f,&default_mpeg_params,sizeof(*f));
861 return 0;
862}
863
864static int vidioc_s_mpegcomp (struct file *file, void *fh,
865 struct v4l2_mpeg_compression *f)
866{
867 printk(KERN_WARNING "VIDIOC_S_MPEGCOMP is obsolete. "
868 "Replace with VIDIOC_S_EXT_CTRLS!");
869 return 0;
870}
871
872static int vidioc_g_ext_ctrls (struct file *file, void *priv, 817static int vidioc_g_ext_ctrls (struct file *file, void *priv,
873 struct v4l2_ext_controls *f) 818 struct v4l2_ext_controls *f)
874{ 819{
@@ -1216,8 +1161,6 @@ static struct video_device cx8802_mpeg_template =
1216 .vidioc_dqbuf = vidioc_dqbuf, 1161 .vidioc_dqbuf = vidioc_dqbuf,
1217 .vidioc_streamon = vidioc_streamon, 1162 .vidioc_streamon = vidioc_streamon,
1218 .vidioc_streamoff = vidioc_streamoff, 1163 .vidioc_streamoff = vidioc_streamoff,
1219 .vidioc_g_mpegcomp = vidioc_g_mpegcomp,
1220 .vidioc_s_mpegcomp = vidioc_s_mpegcomp,
1221 .vidioc_g_ext_ctrls = vidioc_g_ext_ctrls, 1164 .vidioc_g_ext_ctrls = vidioc_g_ext_ctrls,
1222 .vidioc_s_ext_ctrls = vidioc_s_ext_ctrls, 1165 .vidioc_s_ext_ctrls = vidioc_s_ext_ctrls,
1223 .vidioc_try_ext_ctrls = vidioc_try_ext_ctrls, 1166 .vidioc_try_ext_ctrls = vidioc_try_ext_ctrls,
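
The cx88-blackbird hunks above drop the obsolete vidioc_g/s_mpegcomp handlers; encoder parameters are negotiated through the extended-control API instead. A hedged userspace sketch of the replacement path (the bitrate value and an already-open encoder fd are arbitrary examples, not part of this patch):

#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

/* Set the MPEG video bitrate on an already-open encoder node. */
static int set_mpeg_bitrate(int fd, unsigned int bps)
{
        struct v4l2_ext_control ctrl;
        struct v4l2_ext_controls ctrls;

        memset(&ctrl, 0, sizeof(ctrl));
        memset(&ctrls, 0, sizeof(ctrls));
        ctrl.id = V4L2_CID_MPEG_VIDEO_BITRATE;
        ctrl.value = bps;
        ctrls.ctrl_class = V4L2_CTRL_CLASS_MPEG;
        ctrls.count = 1;
        ctrls.controls = &ctrl;

        return ioctl(fd, VIDIOC_S_EXT_CTRLS, &ctrls);  /* 0 on success */
}
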
diff --git a/drivers/media/video/cx88/cx88-dvb.c b/drivers/media/video/cx88/cx88-dvb.c
index d16e5c6d21c0..fce19caf9d04 100644
--- a/drivers/media/video/cx88/cx88-dvb.c
+++ b/drivers/media/video/cx88/cx88-dvb.c
@@ -475,8 +475,9 @@ static int dvb_register(struct cx8802_dev *dev)
475 break; 475 break;
476 case CX88_BOARD_DNTV_LIVE_DVB_T_PRO: 476 case CX88_BOARD_DNTV_LIVE_DVB_T_PRO:
477#if defined(CONFIG_VIDEO_CX88_VP3054) || (defined(CONFIG_VIDEO_CX88_VP3054_MODULE) && defined(MODULE)) 477#if defined(CONFIG_VIDEO_CX88_VP3054) || (defined(CONFIG_VIDEO_CX88_VP3054_MODULE) && defined(MODULE))
478 /* MT352 is on a secondary I2C bus made from some GPIO lines */
478 dev->dvb.frontend = dvb_attach(mt352_attach, &dntv_live_dvbt_pro_config, 479 dev->dvb.frontend = dvb_attach(mt352_attach, &dntv_live_dvbt_pro_config,
479 &((struct vp3054_i2c_state *)dev->card_priv)->adap); 480 &dev->vp3054->adap);
480 if (dev->dvb.frontend != NULL) { 481 if (dev->dvb.frontend != NULL) {
481 dvb_attach(dvb_pll_attach, dev->dvb.frontend, 0x61, 482 dvb_attach(dvb_pll_attach, dev->dvb.frontend, 0x61,
482 &dev->core->i2c_adap, DVB_PLL_FMD1216ME); 483 &dev->core->i2c_adap, DVB_PLL_FMD1216ME);
diff --git a/drivers/media/video/cx88/cx88-mpeg.c b/drivers/media/video/cx88/cx88-mpeg.c
index a652f294d23d..448c67380945 100644
--- a/drivers/media/video/cx88/cx88-mpeg.c
+++ b/drivers/media/video/cx88/cx88-mpeg.c
@@ -79,7 +79,8 @@ static int cx8802_start_dma(struct cx8802_dev *dev,
79{ 79{
80 struct cx88_core *core = dev->core; 80 struct cx88_core *core = dev->core;
81 81
82 dprintk(1, "cx8802_start_dma w: %d, h: %d, f: %d\n", dev->width, dev->height, buf->vb.field); 82 dprintk(1, "cx8802_start_dma w: %d, h: %d, f: %d\n",
83 buf->vb.width, buf->vb.height, buf->vb.field);
83 84
84 /* setup fifo + format */ 85 /* setup fifo + format */
85 cx88_sram_channel_setup(core, &cx88_sram_channels[SRAM_CH28], 86 cx88_sram_channel_setup(core, &cx88_sram_channels[SRAM_CH28],
@@ -177,7 +178,6 @@ static int cx8802_restart_queue(struct cx8802_dev *dev,
177 struct cx88_dmaqueue *q) 178 struct cx88_dmaqueue *q)
178{ 179{
179 struct cx88_buffer *buf; 180 struct cx88_buffer *buf;
180 struct list_head *item;
181 181
182 dprintk( 1, "cx8802_restart_queue\n" ); 182 dprintk( 1, "cx8802_restart_queue\n" );
183 if (list_empty(&q->active)) 183 if (list_empty(&q->active))
@@ -223,10 +223,8 @@ static int cx8802_restart_queue(struct cx8802_dev *dev,
223 dprintk(2,"restart_queue [%p/%d]: restart dma\n", 223 dprintk(2,"restart_queue [%p/%d]: restart dma\n",
224 buf, buf->vb.i); 224 buf, buf->vb.i);
225 cx8802_start_dma(dev, q, buf); 225 cx8802_start_dma(dev, q, buf);
226 list_for_each(item,&q->active) { 226 list_for_each_entry(buf, &q->active, vb.queue)
227 buf = list_entry(item, struct cx88_buffer, vb.queue);
228 buf->count = q->count++; 227 buf->count = q->count++;
229 }
230 mod_timer(&q->timeout, jiffies+BUFFER_TIMEOUT); 228 mod_timer(&q->timeout, jiffies+BUFFER_TIMEOUT);
231 return 0; 229 return 0;
232} 230}
@@ -572,42 +570,29 @@ int cx8802_resume_common(struct pci_dev *pci_dev)
572 return 0; 570 return 0;
573} 571}
574 572
573#if defined(CONFIG_VIDEO_CX88_BLACKBIRD) || \
574 defined(CONFIG_VIDEO_CX88_BLACKBIRD_MODULE)
575struct cx8802_dev * cx8802_get_device(struct inode *inode) 575struct cx8802_dev * cx8802_get_device(struct inode *inode)
576{ 576{
577 int minor = iminor(inode); 577 int minor = iminor(inode);
578 struct cx8802_dev *h = NULL; 578 struct cx8802_dev *dev;
579 struct list_head *list;
580 579
581 list_for_each(list,&cx8802_devlist) { 580 list_for_each_entry(dev, &cx8802_devlist, devlist)
582 h = list_entry(list, struct cx8802_dev, devlist); 581 if (dev->mpeg_dev && dev->mpeg_dev->minor == minor)
583 if (h->mpeg_dev && h->mpeg_dev->minor == minor) 582 return dev;
584 return h;
585 }
586 583
587 return NULL; 584 return NULL;
588} 585}
586EXPORT_SYMBOL(cx8802_get_device);
587#endif
589 588
590struct cx8802_driver * cx8802_get_driver(struct cx8802_dev *dev, enum cx88_board_type btype) 589struct cx8802_driver * cx8802_get_driver(struct cx8802_dev *dev, enum cx88_board_type btype)
591{ 590{
592 struct cx8802_dev *h = NULL; 591 struct cx8802_driver *d;
593 struct cx8802_driver *d = NULL;
594 struct list_head *list;
595 struct list_head *list2;
596
597 list_for_each(list,&cx8802_devlist) {
598 h = list_entry(list, struct cx8802_dev, devlist);
599 if (h != dev)
600 continue;
601
602 list_for_each(list2, &h->drvlist.devlist) {
603 d = list_entry(list2, struct cx8802_driver, devlist);
604 592
605 /* only unregister the correct driver type */ 593 list_for_each_entry(d, &dev->drvlist, drvlist)
606 if (d->type_id == btype) { 594 if (d->type_id == btype)
607 return d; 595 return d;
608 }
609 }
610 }
611 596
612 return NULL; 597 return NULL;
613} 598}
@@ -671,10 +656,9 @@ static int cx8802_check_driver(struct cx8802_driver *drv)
671 656
672int cx8802_register_driver(struct cx8802_driver *drv) 657int cx8802_register_driver(struct cx8802_driver *drv)
673{ 658{
674 struct cx8802_dev *h; 659 struct cx8802_dev *dev;
675 struct cx8802_driver *driver; 660 struct cx8802_driver *driver;
676 struct list_head *list; 661 int err, i = 0;
677 int err = 0, i = 0;
678 662
679 printk(KERN_INFO 663 printk(KERN_INFO
680 "cx88/2: registering cx8802 driver, type: %s access: %s\n", 664 "cx88/2: registering cx8802 driver, type: %s access: %s\n",
@@ -686,14 +670,12 @@ int cx8802_register_driver(struct cx8802_driver *drv)
686 return err; 670 return err;
687 } 671 }
688 672
689 list_for_each(list,&cx8802_devlist) { 673 list_for_each_entry(dev, &cx8802_devlist, devlist) {
690 h = list_entry(list, struct cx8802_dev, devlist);
691
692 printk(KERN_INFO 674 printk(KERN_INFO
693 "%s/2: subsystem: %04x:%04x, board: %s [card=%d]\n", 675 "%s/2: subsystem: %04x:%04x, board: %s [card=%d]\n",
694 h->core->name, h->pci->subsystem_vendor, 676 dev->core->name, dev->pci->subsystem_vendor,
695 h->pci->subsystem_device, h->core->board.name, 677 dev->pci->subsystem_device, dev->core->board.name,
696 h->core->boardnr); 678 dev->core->boardnr);
697 679
698 /* Bring up a new struct for each driver instance */ 680 /* Bring up a new struct for each driver instance */
699 driver = kzalloc(sizeof(*drv),GFP_KERNEL); 681 driver = kzalloc(sizeof(*drv),GFP_KERNEL);
@@ -701,7 +683,7 @@ int cx8802_register_driver(struct cx8802_driver *drv)
701 return -ENOMEM; 683 return -ENOMEM;
702 684
703 /* Snapshot of the driver registration data */ 685 /* Snapshot of the driver registration data */
704 drv->core = h->core; 686 drv->core = dev->core;
705 drv->suspend = cx8802_suspend_common; 687 drv->suspend = cx8802_suspend_common;
706 drv->resume = cx8802_resume_common; 688 drv->resume = cx8802_resume_common;
707 drv->request_acquire = cx8802_request_acquire; 689 drv->request_acquire = cx8802_request_acquire;
@@ -712,49 +694,38 @@ int cx8802_register_driver(struct cx8802_driver *drv)
712 if (err == 0) { 694 if (err == 0) {
713 i++; 695 i++;
714 mutex_lock(&drv->core->lock); 696 mutex_lock(&drv->core->lock);
715 list_add_tail(&driver->devlist,&h->drvlist.devlist); 697 list_add_tail(&driver->drvlist, &dev->drvlist);
716 mutex_unlock(&drv->core->lock); 698 mutex_unlock(&drv->core->lock);
717 } else { 699 } else {
718 printk(KERN_ERR 700 printk(KERN_ERR
719 "%s/2: cx8802 probe failed, err = %d\n", 701 "%s/2: cx8802 probe failed, err = %d\n",
720 h->core->name, err); 702 dev->core->name, err);
721 } 703 }
722 704
723 } 705 }
724 if (i == 0)
725 err = -ENODEV;
726 else
727 err = 0;
728 706
729 return err; 707 return i ? 0 : -ENODEV;
730} 708}
731 709
732int cx8802_unregister_driver(struct cx8802_driver *drv) 710int cx8802_unregister_driver(struct cx8802_driver *drv)
733{ 711{
734 struct cx8802_dev *h; 712 struct cx8802_dev *dev;
735 struct cx8802_driver *d; 713 struct cx8802_driver *d, *dtmp;
736 struct list_head *list; 714 int err = 0;
737 struct list_head *list2, *q;
738 int err = 0, i = 0;
739 715
740 printk(KERN_INFO 716 printk(KERN_INFO
741 "cx88/2: unregistering cx8802 driver, type: %s access: %s\n", 717 "cx88/2: unregistering cx8802 driver, type: %s access: %s\n",
742 drv->type_id == CX88_MPEG_DVB ? "dvb" : "blackbird", 718 drv->type_id == CX88_MPEG_DVB ? "dvb" : "blackbird",
743 drv->hw_access == CX8802_DRVCTL_SHARED ? "shared" : "exclusive"); 719 drv->hw_access == CX8802_DRVCTL_SHARED ? "shared" : "exclusive");
744 720
745 list_for_each(list,&cx8802_devlist) { 721 list_for_each_entry(dev, &cx8802_devlist, devlist) {
746 i++;
747 h = list_entry(list, struct cx8802_dev, devlist);
748
749 printk(KERN_INFO 722 printk(KERN_INFO
750 "%s/2: subsystem: %04x:%04x, board: %s [card=%d]\n", 723 "%s/2: subsystem: %04x:%04x, board: %s [card=%d]\n",
751 h->core->name, h->pci->subsystem_vendor, 724 dev->core->name, dev->pci->subsystem_vendor,
752 h->pci->subsystem_device, h->core->board.name, 725 dev->pci->subsystem_device, dev->core->board.name,
753 h->core->boardnr); 726 dev->core->boardnr);
754
755 list_for_each_safe(list2, q, &h->drvlist.devlist) {
756 d = list_entry(list2, struct cx8802_driver, devlist);
757 727
728 list_for_each_entry_safe(d, dtmp, &dev->drvlist, drvlist) {
758 /* only unregister the correct driver type */ 729 /* only unregister the correct driver type */
759 if (d->type_id != drv->type_id) 730 if (d->type_id != drv->type_id)
760 continue; 731 continue;
@@ -762,12 +733,12 @@ int cx8802_unregister_driver(struct cx8802_driver *drv)
762 err = d->remove(d); 733 err = d->remove(d);
763 if (err == 0) { 734 if (err == 0) {
764 mutex_lock(&drv->core->lock); 735 mutex_lock(&drv->core->lock);
765 list_del(list2); 736 list_del(&d->drvlist);
766 mutex_unlock(&drv->core->lock); 737 mutex_unlock(&drv->core->lock);
738 kfree(d);
767 } else 739 } else
768 printk(KERN_ERR "%s/2: cx8802 driver remove " 740 printk(KERN_ERR "%s/2: cx8802 driver remove "
769 "failed (%d)\n", h->core->name, err); 741 "failed (%d)\n", dev->core->name, err);
770
771 } 742 }
772 743
773 } 744 }
@@ -805,7 +776,7 @@ static int __devinit cx8802_probe(struct pci_dev *pci_dev,
805 if (err != 0) 776 if (err != 0)
806 goto fail_free; 777 goto fail_free;
807 778
808 INIT_LIST_HEAD(&dev->drvlist.devlist); 779 INIT_LIST_HEAD(&dev->drvlist);
809 list_add_tail(&dev->devlist,&cx8802_devlist); 780 list_add_tail(&dev->devlist,&cx8802_devlist);
810 781
811 /* Maintain a reference so cx88-video can query the 8802 device. */ 782 /* Maintain a reference so cx88-video can query the 8802 device. */
@@ -825,23 +796,30 @@ static int __devinit cx8802_probe(struct pci_dev *pci_dev,
825static void __devexit cx8802_remove(struct pci_dev *pci_dev) 796static void __devexit cx8802_remove(struct pci_dev *pci_dev)
826{ 797{
827 struct cx8802_dev *dev; 798 struct cx8802_dev *dev;
828 struct cx8802_driver *h;
829 struct list_head *list;
830 799
831 dev = pci_get_drvdata(pci_dev); 800 dev = pci_get_drvdata(pci_dev);
832 801
833 dprintk( 1, "%s\n", __FUNCTION__); 802 dprintk( 1, "%s\n", __FUNCTION__);
834 803
835 list_for_each(list,&dev->drvlist.devlist) { 804 if (!list_empty(&dev->drvlist)) {
836 h = list_entry(list, struct cx8802_driver, devlist); 805 struct cx8802_driver *drv, *tmp;
837 dprintk( 1, " ->driver\n"); 806 int err;
838 if (h->remove == NULL) { 807
839 printk(KERN_ERR "%s .. skipping driver, no probe function\n", __FUNCTION__); 808 printk(KERN_WARNING "%s/2: Trying to remove cx8802 driver "
840 continue; 809 "while cx8802 sub-drivers still loaded?!\n",
810 dev->core->name);
811
812 list_for_each_entry_safe(drv, tmp, &dev->drvlist, drvlist) {
813 err = drv->remove(drv);
814 if (err == 0) {
815 mutex_lock(&drv->core->lock);
816 list_del(&drv->drvlist);
817 mutex_unlock(&drv->core->lock);
818 } else
819 printk(KERN_ERR "%s/2: cx8802 driver remove "
820 "failed (%d)\n", dev->core->name, err);
821 kfree(drv);
841 } 822 }
842 printk(KERN_INFO "%s .. Removing driver type %d\n", __FUNCTION__, h->type_id);
843 cx8802_unregister_driver(h);
844 list_del(&dev->drvlist.devlist);
845 } 823 }
846 824
847 /* Destroy any 8802 reference. */ 825 /* Destroy any 8802 reference. */
@@ -901,7 +879,6 @@ EXPORT_SYMBOL(cx8802_fini_common);
901 879
902EXPORT_SYMBOL(cx8802_register_driver); 880EXPORT_SYMBOL(cx8802_register_driver);
903EXPORT_SYMBOL(cx8802_unregister_driver); 881EXPORT_SYMBOL(cx8802_unregister_driver);
904EXPORT_SYMBOL(cx8802_get_device);
905EXPORT_SYMBOL(cx8802_get_driver); 882EXPORT_SYMBOL(cx8802_get_driver);
906/* ----------------------------------------------------------- */ 883/* ----------------------------------------------------------- */
907/* 884/*
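
Most of the cx88-mpeg.c churn above is a mechanical conversion from list_for_each() plus list_entry() to list_for_each_entry(), with the _safe variant wherever the loop deletes the node it is visiting. A minimal self-contained sketch of that idiom (struct item and the list are invented for illustration):

#include <linux/list.h>
#include <linux/slab.h>

struct item {
        struct list_head link;
        int type_id;
};

static LIST_HEAD(item_list);

/* Old style: list_for_each(pos, &item_list) + list_entry(pos, ...).
 * New style: the entry loop below; the _safe form caches the next node
 * so list_del()/kfree() of the current one is allowed while iterating. */
static void drop_items_of_type(int type_id)
{
        struct item *it, *tmp;

        list_for_each_entry_safe(it, tmp, &item_list, link) {
                if (it->type_id != type_id)
                        continue;
                list_del(&it->link);
                kfree(it);
        }
}
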
diff --git a/drivers/media/video/cx88/cx88-video.c b/drivers/media/video/cx88/cx88-video.c
index 231ae6c4dd22..5ee05f8f3fad 100644
--- a/drivers/media/video/cx88/cx88-video.c
+++ b/drivers/media/video/cx88/cx88-video.c
@@ -1675,7 +1675,6 @@ static struct video_device cx8800_radio_template =
1675{ 1675{
1676 .name = "cx8800-radio", 1676 .name = "cx8800-radio",
1677 .type = VID_TYPE_TUNER, 1677 .type = VID_TYPE_TUNER,
1678 .hardware = 0,
1679 .fops = &radio_fops, 1678 .fops = &radio_fops,
1680 .minor = -1, 1679 .minor = -1,
1681 .vidioc_querycap = radio_querycap, 1680 .vidioc_querycap = radio_querycap,
diff --git a/drivers/media/video/cx88/cx88-vp3054-i2c.c b/drivers/media/video/cx88/cx88-vp3054-i2c.c
index 77c37889232b..6ce5af488471 100644
--- a/drivers/media/video/cx88/cx88-vp3054-i2c.c
+++ b/drivers/media/video/cx88/cx88-vp3054-i2c.c
@@ -41,7 +41,7 @@ static void vp3054_bit_setscl(void *data, int state)
41{ 41{
42 struct cx8802_dev *dev = data; 42 struct cx8802_dev *dev = data;
43 struct cx88_core *core = dev->core; 43 struct cx88_core *core = dev->core;
44 struct vp3054_i2c_state *vp3054_i2c = dev->card_priv; 44 struct vp3054_i2c_state *vp3054_i2c = dev->vp3054;
45 45
46 if (state) { 46 if (state) {
47 vp3054_i2c->state |= 0x0001; /* SCL high */ 47 vp3054_i2c->state |= 0x0001; /* SCL high */
@@ -58,7 +58,7 @@ static void vp3054_bit_setsda(void *data, int state)
58{ 58{
59 struct cx8802_dev *dev = data; 59 struct cx8802_dev *dev = data;
60 struct cx88_core *core = dev->core; 60 struct cx88_core *core = dev->core;
61 struct vp3054_i2c_state *vp3054_i2c = dev->card_priv; 61 struct vp3054_i2c_state *vp3054_i2c = dev->vp3054;
62 62
63 if (state) { 63 if (state) {
64 vp3054_i2c->state |= 0x0002; /* SDA high */ 64 vp3054_i2c->state |= 0x0002; /* SDA high */
@@ -113,10 +113,10 @@ int vp3054_i2c_probe(struct cx8802_dev *dev)
113 if (core->boardnr != CX88_BOARD_DNTV_LIVE_DVB_T_PRO) 113 if (core->boardnr != CX88_BOARD_DNTV_LIVE_DVB_T_PRO)
114 return 0; 114 return 0;
115 115
116 dev->card_priv = kzalloc(sizeof(*vp3054_i2c), GFP_KERNEL); 116 vp3054_i2c = kzalloc(sizeof(*vp3054_i2c), GFP_KERNEL);
117 if (dev->card_priv == NULL) 117 if (vp3054_i2c == NULL)
118 return -ENOMEM; 118 return -ENOMEM;
119 vp3054_i2c = dev->card_priv; 119 dev->vp3054 = vp3054_i2c;
120 120
121 memcpy(&vp3054_i2c->algo, &vp3054_i2c_algo_template, 121 memcpy(&vp3054_i2c->algo, &vp3054_i2c_algo_template,
122 sizeof(vp3054_i2c->algo)); 122 sizeof(vp3054_i2c->algo));
@@ -139,8 +139,8 @@ int vp3054_i2c_probe(struct cx8802_dev *dev)
139 if (0 != rc) { 139 if (0 != rc) {
140 printk("%s: vp3054_i2c register FAILED\n", core->name); 140 printk("%s: vp3054_i2c register FAILED\n", core->name);
141 141
142 kfree(dev->card_priv); 142 kfree(dev->vp3054);
143 dev->card_priv = NULL; 143 dev->vp3054 = NULL;
144 } 144 }
145 145
146 return rc; 146 return rc;
@@ -148,7 +148,7 @@ int vp3054_i2c_probe(struct cx8802_dev *dev)
148 148
149void vp3054_i2c_remove(struct cx8802_dev *dev) 149void vp3054_i2c_remove(struct cx8802_dev *dev)
150{ 150{
151 struct vp3054_i2c_state *vp3054_i2c = dev->card_priv; 151 struct vp3054_i2c_state *vp3054_i2c = dev->vp3054;
152 152
153 if (vp3054_i2c == NULL || 153 if (vp3054_i2c == NULL ||
154 dev->core->boardnr != CX88_BOARD_DNTV_LIVE_DVB_T_PRO) 154 dev->core->boardnr != CX88_BOARD_DNTV_LIVE_DVB_T_PRO)
diff --git a/drivers/media/video/cx88/cx88.h b/drivers/media/video/cx88/cx88.h
index 42e0a9b8c550..eb296bdecb1e 100644
--- a/drivers/media/video/cx88/cx88.h
+++ b/drivers/media/video/cx88/cx88.h
@@ -412,7 +412,9 @@ struct cx8802_suspend_state {
412 412
413struct cx8802_driver { 413struct cx8802_driver {
414 struct cx88_core *core; 414 struct cx88_core *core;
415 struct list_head devlist; 415
416 /* List of drivers attached to device */
417 struct list_head drvlist;
416 418
417 /* Type of driver and access required */ 419 /* Type of driver and access required */
418 enum cx88_board_type type_id; 420 enum cx88_board_type type_id;
@@ -453,27 +455,33 @@ struct cx8802_dev {
453 455
454 /* for blackbird only */ 456 /* for blackbird only */
455 struct list_head devlist; 457 struct list_head devlist;
458#if defined(CONFIG_VIDEO_CX88_BLACKBIRD) || \
459 defined(CONFIG_VIDEO_CX88_BLACKBIRD_MODULE)
456 struct video_device *mpeg_dev; 460 struct video_device *mpeg_dev;
457 u32 mailbox; 461 u32 mailbox;
458 int width; 462 int width;
459 int height; 463 int height;
460 464
465 /* mpeg params */
466 struct cx2341x_mpeg_params params;
467#endif
468
461#if defined(CONFIG_VIDEO_CX88_DVB) || defined(CONFIG_VIDEO_CX88_DVB_MODULE) 469#if defined(CONFIG_VIDEO_CX88_DVB) || defined(CONFIG_VIDEO_CX88_DVB_MODULE)
462 /* for dvb only */ 470 /* for dvb only */
463 struct videobuf_dvb dvb; 471 struct videobuf_dvb dvb;
472#endif
464 473
465 void *card_priv; 474#if defined(CONFIG_VIDEO_CX88_VP3054) || \
475 defined(CONFIG_VIDEO_CX88_VP3054_MODULE)
 476 /* For VP3054 secondary I2C bus support */
	/* For VP3054 secondary I2C bus support */
477 struct vp3054_i2c_state *vp3054;
466#endif 478#endif
467 /* for switching modulation types */ 479 /* for switching modulation types */
468 unsigned char ts_gen_cntrl; 480 unsigned char ts_gen_cntrl;
469 481
470 /* mpeg params */
471 struct cx2341x_mpeg_params params;
472
473 /* List of attached drivers */ 482 /* List of attached drivers */
474 struct cx8802_driver drvlist; 483 struct list_head drvlist;
475 struct work_struct request_module_wk; 484 struct work_struct request_module_wk;
476
477}; 485};
478 486
479/* ----------------------------------------------------------- */ 487/* ----------------------------------------------------------- */
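
The cx88.h hunk above replaces the odd embedded "struct cx8802_driver drvlist" member with a plain list_head and makes the blackbird- and VP3054-only state conditional on the corresponding Kconfig options. A hedged sketch of both patterns, with invented names:

#include <linux/list.h>

struct my_subdriver {
        struct list_head drvlist;       /* node on my_dev::drvlist */
        int type_id;
};

struct my_dev {
        struct list_head devlist;       /* node on the global device list */
        struct list_head drvlist;       /* head: sub-drivers attached to us */
#if defined(CONFIG_EXAMPLE_FEATURE) || defined(CONFIG_EXAMPLE_FEATURE_MODULE)
        void *feature_state;            /* only present when the feature is built */
#endif
};

static void my_dev_init(struct my_dev *dev)
{
        INIT_LIST_HEAD(&dev->drvlist);
}

static void my_dev_attach(struct my_dev *dev, struct my_subdriver *drv)
{
        list_add_tail(&drv->drvlist, &dev->drvlist);
}
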
diff --git a/drivers/media/video/em28xx/em28xx-core.c b/drivers/media/video/em28xx/em28xx-core.c
index d3282ec62c5b..d56484f20467 100644
--- a/drivers/media/video/em28xx/em28xx-core.c
+++ b/drivers/media/video/em28xx/em28xx-core.c
@@ -648,7 +648,7 @@ void em28xx_uninit_isoc(struct em28xx *dev)
648 */ 648 */
649int em28xx_init_isoc(struct em28xx *dev) 649int em28xx_init_isoc(struct em28xx *dev)
650{ 650{
651 /* change interface to 3 which allowes the biggest packet sizes */ 651 /* change interface to 3 which allows the biggest packet sizes */
652 int i, errCode; 652 int i, errCode;
653 const int sb_size = EM28XX_NUM_PACKETS * dev->max_pkt_size; 653 const int sb_size = EM28XX_NUM_PACKETS * dev->max_pkt_size;
654 654
@@ -673,6 +673,7 @@ int em28xx_init_isoc(struct em28xx *dev)
673 ("unable to allocate %i bytes for transfer buffer %i\n", 673 ("unable to allocate %i bytes for transfer buffer %i\n",
674 sb_size, i); 674 sb_size, i);
675 em28xx_uninit_isoc(dev); 675 em28xx_uninit_isoc(dev);
676 usb_free_urb(urb);
676 return -ENOMEM; 677 return -ENOMEM;
677 } 678 }
678 memset(dev->transfer_buffer[i], 0, sb_size); 679 memset(dev->transfer_buffer[i], 0, sb_size);
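
The one-line em28xx fix above plugs a leak: when the transfer-buffer allocation fails, the URB allocated just before it must be released as well. A hedged, self-contained sketch of the corrected error path (packet counts and sizes are arbitrary, not em28xx values):

#include <linux/usb.h>
#include <linux/slab.h>
#include <linux/errno.h>

static int alloc_one_iso_urb(struct urb **out, int packets, size_t buf_size)
{
        struct urb *urb;
        void *buf;

        urb = usb_alloc_urb(packets, GFP_KERNEL);
        if (!urb)
                return -ENOMEM;

        buf = kzalloc(buf_size, GFP_KERNEL);
        if (!buf) {
                usb_free_urb(urb);      /* the fix: don't leak the urb itself */
                return -ENOMEM;
        }

        urb->transfer_buffer = buf;
        urb->transfer_buffer_length = buf_size;
        *out = urb;
        return 0;
}
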
diff --git a/drivers/media/video/em28xx/em28xx-video.c b/drivers/media/video/em28xx/em28xx-video.c
index e467682aabd7..a4c2a907124a 100644
--- a/drivers/media/video/em28xx/em28xx-video.c
+++ b/drivers/media/video/em28xx/em28xx-video.c
@@ -1617,7 +1617,6 @@ static int em28xx_init_dev(struct em28xx **devhandle, struct usb_device *udev,
1617 1617
1618 /* Fills VBI device info */ 1618 /* Fills VBI device info */
1619 dev->vbi_dev->type = VFL_TYPE_VBI; 1619 dev->vbi_dev->type = VFL_TYPE_VBI;
1620 dev->vbi_dev->hardware = 0;
1621 dev->vbi_dev->fops = &em28xx_v4l_fops; 1620 dev->vbi_dev->fops = &em28xx_v4l_fops;
1622 dev->vbi_dev->minor = -1; 1621 dev->vbi_dev->minor = -1;
1623 dev->vbi_dev->dev = &dev->udev->dev; 1622 dev->vbi_dev->dev = &dev->udev->dev;
@@ -1629,7 +1628,6 @@ static int em28xx_init_dev(struct em28xx **devhandle, struct usb_device *udev,
1629 dev->vdev->type = VID_TYPE_CAPTURE; 1628 dev->vdev->type = VID_TYPE_CAPTURE;
1630 if (dev->has_tuner) 1629 if (dev->has_tuner)
1631 dev->vdev->type |= VID_TYPE_TUNER; 1630 dev->vdev->type |= VID_TYPE_TUNER;
1632 dev->vdev->hardware = 0;
1633 dev->vdev->fops = &em28xx_v4l_fops; 1631 dev->vdev->fops = &em28xx_v4l_fops;
1634 dev->vdev->minor = -1; 1632 dev->vdev->minor = -1;
1635 dev->vdev->dev = &dev->udev->dev; 1633 dev->vdev->dev = &dev->udev->dev;
diff --git a/drivers/media/video/et61x251/et61x251_core.c b/drivers/media/video/et61x251/et61x251_core.c
index d5fef4c01c87..d19d73b81ede 100644
--- a/drivers/media/video/et61x251/et61x251_core.c
+++ b/drivers/media/video/et61x251/et61x251_core.c
@@ -2585,7 +2585,6 @@ et61x251_usb_probe(struct usb_interface* intf, const struct usb_device_id* id)
2585 strcpy(cam->v4ldev->name, "ET61X[12]51 PC Camera"); 2585 strcpy(cam->v4ldev->name, "ET61X[12]51 PC Camera");
2586 cam->v4ldev->owner = THIS_MODULE; 2586 cam->v4ldev->owner = THIS_MODULE;
2587 cam->v4ldev->type = VID_TYPE_CAPTURE | VID_TYPE_SCALES; 2587 cam->v4ldev->type = VID_TYPE_CAPTURE | VID_TYPE_SCALES;
2588 cam->v4ldev->hardware = 0;
2589 cam->v4ldev->fops = &et61x251_fops; 2588 cam->v4ldev->fops = &et61x251_fops;
2590 cam->v4ldev->minor = video_nr[dev_nr]; 2589 cam->v4ldev->minor = video_nr[dev_nr];
2591 cam->v4ldev->release = video_device_release; 2590 cam->v4ldev->release = video_device_release;
diff --git a/drivers/media/video/ir-kbd-i2c.c b/drivers/media/video/ir-kbd-i2c.c
index d98dd0d1e373..29779d8bf7fb 100644
--- a/drivers/media/video/ir-kbd-i2c.c
+++ b/drivers/media/video/ir-kbd-i2c.c
@@ -528,6 +528,7 @@ static int ir_probe(struct i2c_adapter *adap)
528 break; 528 break;
529 case I2C_HW_B_CX2388x: 529 case I2C_HW_B_CX2388x:
530 probe = probe_cx88; 530 probe = probe_cx88;
531 break;
531 case I2C_HW_B_CX23885: 532 case I2C_HW_B_CX23885:
532 probe = probe_cx23885; 533 probe = probe_cx23885;
533 break; 534 break;
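
The one-line ir-kbd-i2c change adds the break that was missing after the CX2388x case; without it the switch fell through and overwrote the just-chosen probe table with the CX23885 one. A trivial self-contained illustration of the hazard (ids and values invented):

static int pick_table(int bus_id)
{
        int table = 0;

        switch (bus_id) {
        case 1:
                table = 10;
                break;  /* without this, case 1 also executes case 2 */
        case 2:
                table = 20;
                break;
        }
        return table;
}
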
diff --git a/drivers/media/video/ivtv/ivtv-driver.c b/drivers/media/video/ivtv/ivtv-driver.c
index fd7a932e1d33..6d2dd8764f81 100644
--- a/drivers/media/video/ivtv/ivtv-driver.c
+++ b/drivers/media/video/ivtv/ivtv-driver.c
@@ -1003,8 +1003,6 @@ static int __devinit ivtv_probe(struct pci_dev *dev,
1003 1003
1004 IVTV_DEBUG_INFO("base addr: 0x%08x\n", itv->base_addr); 1004 IVTV_DEBUG_INFO("base addr: 0x%08x\n", itv->base_addr);
1005 1005
1006 mutex_lock(&itv->serialize_lock);
1007
1008 /* PCI Device Setup */ 1006 /* PCI Device Setup */
1009 if ((retval = ivtv_setup_pci(itv, dev, pci_id)) != 0) { 1007 if ((retval = ivtv_setup_pci(itv, dev, pci_id)) != 0) {
1010 if (retval == -EIO) 1008 if (retval == -EIO)
@@ -1064,7 +1062,7 @@ static int __devinit ivtv_probe(struct pci_dev *dev,
1064 IVTV_DEBUG_INFO("activating i2c...\n"); 1062 IVTV_DEBUG_INFO("activating i2c...\n");
1065 if (init_ivtv_i2c(itv)) { 1063 if (init_ivtv_i2c(itv)) {
1066 IVTV_ERR("Could not initialize i2c\n"); 1064 IVTV_ERR("Could not initialize i2c\n");
1067 goto free_irq; 1065 goto free_io;
1068 } 1066 }
1069 1067
1070 IVTV_DEBUG_INFO("Active card count: %d.\n", ivtv_cards_active); 1068 IVTV_DEBUG_INFO("Active card count: %d.\n", ivtv_cards_active);
@@ -1176,7 +1174,11 @@ static int __devinit ivtv_probe(struct pci_dev *dev,
1176 IVTV_ERR("Failed to register irq %d\n", retval); 1174 IVTV_ERR("Failed to register irq %d\n", retval);
1177 goto free_streams; 1175 goto free_streams;
1178 } 1176 }
1179 mutex_unlock(&itv->serialize_lock); 1177 retval = ivtv_streams_register(itv);
1178 if (retval) {
1179 IVTV_ERR("Error %d registering devices\n", retval);
1180 goto free_irq;
1181 }
1180 IVTV_INFO("Initialized card #%d: %s\n", itv->num, itv->card_name); 1182 IVTV_INFO("Initialized card #%d: %s\n", itv->num, itv->card_name);
1181 return 0; 1183 return 0;
1182 1184
@@ -1195,7 +1197,6 @@ static int __devinit ivtv_probe(struct pci_dev *dev,
1195 release_mem_region(itv->base_addr + IVTV_DECODER_OFFSET, IVTV_DECODER_SIZE); 1197 release_mem_region(itv->base_addr + IVTV_DECODER_OFFSET, IVTV_DECODER_SIZE);
1196 free_workqueue: 1198 free_workqueue:
1197 destroy_workqueue(itv->irq_work_queues); 1199 destroy_workqueue(itv->irq_work_queues);
1198 mutex_unlock(&itv->serialize_lock);
1199 err: 1200 err:
1200 if (retval == 0) 1201 if (retval == 0)
1201 retval = -ENODEV; 1202 retval = -ENODEV;
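
The ivtv probe changes above reorder the bring-up (device nodes are now registered via ivtv_streams_register() only after the IRQ handler is installed) and adjust the unwind labels to match, dropping the serialize_lock juggling. As a hedged reminder of the unwind-by-goto pattern this relies on, with purely illustrative stubs rather than ivtv APIs:

static int step_setup(void)    { return 0; }
static void undo_setup(void)   { }
static int step_irq(void)      { return 0; }
static void undo_irq(void)     { }
static int step_register(void) { return 0; }

static int example_probe(void)
{
        int err;

        err = step_setup();
        if (err)
                goto out;
        err = step_irq();
        if (err)
                goto err_setup;
        err = step_register();          /* last step: makes the device visible */
        if (err)
                goto err_irq;
        return 0;

err_irq:
        undo_irq();
err_setup:
        undo_setup();
out:
        return err;
}
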
diff --git a/drivers/media/video/ivtv/ivtv-fileops.c b/drivers/media/video/ivtv/ivtv-fileops.c
index da50fa4a72a5..a200a8a95a2d 100644
--- a/drivers/media/video/ivtv/ivtv-fileops.c
+++ b/drivers/media/video/ivtv/ivtv-fileops.c
@@ -822,6 +822,11 @@ int ivtv_v4l2_close(struct inode *inode, struct file *filp)
822 crystal_freq.flags = 0; 822 crystal_freq.flags = 0;
823 ivtv_saa7115(itv, VIDIOC_INT_S_CRYSTAL_FREQ, &crystal_freq); 823 ivtv_saa7115(itv, VIDIOC_INT_S_CRYSTAL_FREQ, &crystal_freq);
824 } 824 }
825 if (atomic_read(&itv->capturing) > 0) {
826 /* Undo video mute */
827 ivtv_vapi(itv, CX2341X_ENC_MUTE_VIDEO, 1,
828 itv->params.video_mute | (itv->params.video_mute_yuv << 8));
829 }
825 /* Done! Unmute and continue. */ 830 /* Done! Unmute and continue. */
826 ivtv_unmute(itv); 831 ivtv_unmute(itv);
827 ivtv_release_stream(s); 832 ivtv_release_stream(s);
@@ -892,6 +897,7 @@ static int ivtv_serialized_open(struct ivtv_stream *s, struct file *filp)
892 if (atomic_read(&itv->capturing) > 0) { 897 if (atomic_read(&itv->capturing) > 0) {
893 /* switching to radio while capture is 898 /* switching to radio while capture is
894 in progress is not polite */ 899 in progress is not polite */
900 ivtv_release_stream(s);
895 kfree(item); 901 kfree(item);
896 return -EBUSY; 902 return -EBUSY;
897 } 903 }
@@ -947,7 +953,7 @@ int ivtv_v4l2_open(struct inode *inode, struct file *filp)
947 if (itv == NULL) { 953 if (itv == NULL) {
948 /* Couldn't find a device registered 954 /* Couldn't find a device registered
949 on that minor, shouldn't happen! */ 955 on that minor, shouldn't happen! */
950 IVTV_WARN("No ivtv device found on minor %d\n", minor); 956 printk(KERN_WARNING "No ivtv device found on minor %d\n", minor);
951 return -ENXIO; 957 return -ENXIO;
952 } 958 }
953 959
diff --git a/drivers/media/video/ivtv/ivtv-ioctl.c b/drivers/media/video/ivtv/ivtv-ioctl.c
index 206eee7542db..fd6826f472e3 100644
--- a/drivers/media/video/ivtv/ivtv-ioctl.c
+++ b/drivers/media/video/ivtv/ivtv-ioctl.c
@@ -555,6 +555,7 @@ static int ivtv_try_or_set_fmt(struct ivtv *itv, int streamtype,
555 555
556 /* set window size */ 556 /* set window size */
557 if (fmt->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) { 557 if (fmt->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
558 struct cx2341x_mpeg_params *p = &itv->params;
558 int w = fmt->fmt.pix.width; 559 int w = fmt->fmt.pix.width;
559 int h = fmt->fmt.pix.height; 560 int h = fmt->fmt.pix.height;
560 561
@@ -566,17 +567,19 @@ static int ivtv_try_or_set_fmt(struct ivtv *itv, int streamtype,
566 fmt->fmt.pix.width = w; 567 fmt->fmt.pix.width = w;
567 fmt->fmt.pix.height = h; 568 fmt->fmt.pix.height = h;
568 569
569 if (!set_fmt || (itv->params.width == w && itv->params.height == h)) 570 if (!set_fmt || (p->width == w && p->height == h))
570 return 0; 571 return 0;
571 if (atomic_read(&itv->capturing) > 0) 572 if (atomic_read(&itv->capturing) > 0)
572 return -EBUSY; 573 return -EBUSY;
573 574
574 itv->params.width = w; 575 p->width = w;
575 itv->params.height = h; 576 p->height = h;
576 if (w != 720 || h != (itv->is_50hz ? 576 : 480)) 577 if (w != 720 || h != (itv->is_50hz ? 576 : 480))
577 itv->params.video_temporal_filter = 0; 578 p->video_temporal_filter = 0;
578 else 579 else
579 itv->params.video_temporal_filter = 8; 580 p->video_temporal_filter = 8;
581 if (p->video_encoding == V4L2_MPEG_VIDEO_ENCODING_MPEG_1)
582 fmt->fmt.pix.width /= 2;
580 itv->video_dec_func(itv, VIDIOC_S_FMT, fmt); 583 itv->video_dec_func(itv, VIDIOC_S_FMT, fmt);
581 return ivtv_get_fmt(itv, streamtype, fmt); 584 return ivtv_get_fmt(itv, streamtype, fmt);
582 } 585 }
diff --git a/drivers/media/video/ivtv/ivtv-streams.c b/drivers/media/video/ivtv/ivtv-streams.c
index fd135985e70f..aa03e61ef310 100644
--- a/drivers/media/video/ivtv/ivtv-streams.c
+++ b/drivers/media/video/ivtv/ivtv-streams.c
@@ -166,10 +166,9 @@ static void ivtv_stream_init(struct ivtv *itv, int type)
166 ivtv_queue_init(&s->q_io); 166 ivtv_queue_init(&s->q_io);
167} 167}
168 168
169static int ivtv_reg_dev(struct ivtv *itv, int type) 169static int ivtv_prep_dev(struct ivtv *itv, int type)
170{ 170{
171 struct ivtv_stream *s = &itv->streams[type]; 171 struct ivtv_stream *s = &itv->streams[type];
172 int vfl_type = ivtv_stream_info[type].vfl_type;
173 int minor_offset = ivtv_stream_info[type].minor_offset; 172 int minor_offset = ivtv_stream_info[type].minor_offset;
174 int minor; 173 int minor;
175 174
@@ -187,15 +186,12 @@ static int ivtv_reg_dev(struct ivtv *itv, int type)
187 if (type >= IVTV_DEC_STREAM_TYPE_MPG && !(itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT)) 186 if (type >= IVTV_DEC_STREAM_TYPE_MPG && !(itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT))
188 return 0; 187 return 0;
189 188
190 if (minor_offset >= 0) 189 /* card number + user defined offset + device offset */
191 /* card number + user defined offset + device offset */ 190 minor = itv->num + ivtv_first_minor + minor_offset;
192 minor = itv->num + ivtv_first_minor + minor_offset;
193 else
194 minor = -1;
195 191
196 /* User explicitly selected 0 buffers for these streams, so don't 192 /* User explicitly selected 0 buffers for these streams, so don't
197 create them. */ 193 create them. */
198 if (minor >= 0 && ivtv_stream_info[type].dma != PCI_DMA_NONE && 194 if (ivtv_stream_info[type].dma != PCI_DMA_NONE &&
199 itv->options.kilobytes[type] == 0) { 195 itv->options.kilobytes[type] == 0) {
200 IVTV_INFO("Disabled %s device\n", ivtv_stream_info[type].name); 196 IVTV_INFO("Disabled %s device\n", ivtv_stream_info[type].name);
201 return 0; 197 return 0;
@@ -223,21 +219,53 @@ static int ivtv_reg_dev(struct ivtv *itv, int type)
223 s->v4l2dev->fops = ivtv_stream_info[type].fops; 219 s->v4l2dev->fops = ivtv_stream_info[type].fops;
224 s->v4l2dev->release = video_device_release; 220 s->v4l2dev->release = video_device_release;
225 221
226 if (minor >= 0) { 222 return 0;
227 /* Register device. First try the desired minor, then any free one. */ 223}
228 if (video_register_device(s->v4l2dev, vfl_type, minor) && 224
229 video_register_device(s->v4l2dev, vfl_type, -1)) { 225/* Initialize v4l2 variables and prepare v4l2 devices */
230 IVTV_ERR("Couldn't register v4l2 device for %s minor %d\n", 226int ivtv_streams_setup(struct ivtv *itv)
231 s->name, minor); 227{
232 video_device_release(s->v4l2dev); 228 int type;
233 s->v4l2dev = NULL; 229
234 return -ENOMEM; 230 /* Setup V4L2 Devices */
235 } 231 for (type = 0; type < IVTV_MAX_STREAMS; type++) {
232 /* Prepare device */
233 if (ivtv_prep_dev(itv, type))
234 break;
235
236 if (itv->streams[type].v4l2dev == NULL)
237 continue;
238
239 /* Allocate Stream */
240 if (ivtv_stream_alloc(&itv->streams[type]))
241 break;
236 } 242 }
237 else { 243 if (type == IVTV_MAX_STREAMS)
238 /* Don't register a 'hidden' stream (OSD) */
239 IVTV_INFO("Created framebuffer stream for %s\n", s->name);
240 return 0; 244 return 0;
245
246 /* One or more streams could not be initialized. Clean 'em all up. */
247 ivtv_streams_cleanup(itv);
248 return -ENOMEM;
249}
250
251static int ivtv_reg_dev(struct ivtv *itv, int type)
252{
253 struct ivtv_stream *s = &itv->streams[type];
254 int vfl_type = ivtv_stream_info[type].vfl_type;
255 int minor;
256
257 if (s->v4l2dev == NULL)
258 return 0;
259
260 minor = s->v4l2dev->minor;
261 /* Register device. First try the desired minor, then any free one. */
262 if (video_register_device(s->v4l2dev, vfl_type, minor) &&
263 video_register_device(s->v4l2dev, vfl_type, -1)) {
264 IVTV_ERR("Couldn't register v4l2 device for %s minor %d\n",
265 s->name, minor);
266 video_device_release(s->v4l2dev);
267 s->v4l2dev = NULL;
268 return -ENOMEM;
241 } 269 }
242 270
243 switch (vfl_type) { 271 switch (vfl_type) {
@@ -262,27 +290,18 @@ static int ivtv_reg_dev(struct ivtv *itv, int type)
262 return 0; 290 return 0;
263} 291}
264 292
265/* Initialize v4l2 variables and register v4l2 devices */ 293/* Register v4l2 devices */
266int ivtv_streams_setup(struct ivtv *itv) 294int ivtv_streams_register(struct ivtv *itv)
267{ 295{
268 int type; 296 int type;
297 int err = 0;
269 298
270 /* Setup V4L2 Devices */ 299 /* Register V4L2 devices */
271 for (type = 0; type < IVTV_MAX_STREAMS; type++) { 300 for (type = 0; type < IVTV_MAX_STREAMS; type++)
272 /* Register Device */ 301 err |= ivtv_reg_dev(itv, type);
273 if (ivtv_reg_dev(itv, type))
274 break;
275
276 if (itv->streams[type].v4l2dev == NULL)
277 continue;
278 302
279 /* Allocate Stream */ 303 if (err == 0)
280 if (ivtv_stream_alloc(&itv->streams[type]))
281 break;
282 }
283 if (type == IVTV_MAX_STREAMS) {
284 return 0; 304 return 0;
285 }
286 305
287 /* One or more streams could not be initialized. Clean 'em all up. */ 306 /* One or more streams could not be initialized. Clean 'em all up. */
288 ivtv_streams_cleanup(itv); 307 ivtv_streams_cleanup(itv);
@@ -303,11 +322,8 @@ void ivtv_streams_cleanup(struct ivtv *itv)
303 continue; 322 continue;
304 323
305 ivtv_stream_free(&itv->streams[type]); 324 ivtv_stream_free(&itv->streams[type]);
306 /* Free Device */ 325 /* Unregister device */
307 if (vdev->minor == -1) /* 'Hidden' never registered stream (OSD) */ 326 video_unregister_device(vdev);
308 video_device_release(vdev);
309 else /* All others, just unregister. */
310 video_unregister_device(vdev);
311 } 327 }
312} 328}
313 329
@@ -425,6 +441,7 @@ int ivtv_start_v4l2_encode_stream(struct ivtv_stream *s)
425{ 441{
426 u32 data[CX2341X_MBOX_MAX_DATA]; 442 u32 data[CX2341X_MBOX_MAX_DATA];
427 struct ivtv *itv = s->itv; 443 struct ivtv *itv = s->itv;
444 struct cx2341x_mpeg_params *p = &itv->params;
428 int captype = 0, subtype = 0; 445 int captype = 0, subtype = 0;
429 int enable_passthrough = 0; 446 int enable_passthrough = 0;
430 447
@@ -445,7 +462,7 @@ int ivtv_start_v4l2_encode_stream(struct ivtv_stream *s)
445 } 462 }
446 itv->mpg_data_received = itv->vbi_data_inserted = 0; 463 itv->mpg_data_received = itv->vbi_data_inserted = 0;
447 itv->dualwatch_jiffies = jiffies; 464 itv->dualwatch_jiffies = jiffies;
448 itv->dualwatch_stereo_mode = itv->params.audio_properties & 0x0300; 465 itv->dualwatch_stereo_mode = p->audio_properties & 0x0300;
449 itv->search_pack_header = 0; 466 itv->search_pack_header = 0;
450 break; 467 break;
451 468
@@ -477,9 +494,6 @@ int ivtv_start_v4l2_encode_stream(struct ivtv_stream *s)
477 s->subtype = subtype; 494 s->subtype = subtype;
478 s->buffers_stolen = 0; 495 s->buffers_stolen = 0;
479 496
480 /* mute/unmute video */
481 ivtv_vapi(itv, CX2341X_ENC_MUTE_VIDEO, 1, test_bit(IVTV_F_I_RADIO_USER, &itv->i_flags) ? 1 : 0);
482
483 /* Clear Streamoff flags in case left from last capture */ 497 /* Clear Streamoff flags in case left from last capture */
484 clear_bit(IVTV_F_S_STREAMOFF, &s->s_flags); 498 clear_bit(IVTV_F_S_STREAMOFF, &s->s_flags);
485 499
@@ -536,7 +550,12 @@ int ivtv_start_v4l2_encode_stream(struct ivtv_stream *s)
536 itv->pgm_info_offset, itv->pgm_info_num); 550 itv->pgm_info_offset, itv->pgm_info_num);
537 551
538 /* Setup API for Stream */ 552 /* Setup API for Stream */
539 cx2341x_update(itv, ivtv_api_func, NULL, &itv->params); 553 cx2341x_update(itv, ivtv_api_func, NULL, p);
554
555 /* mute if capturing radio */
556 if (test_bit(IVTV_F_I_RADIO_USER, &itv->i_flags))
557 ivtv_vapi(itv, CX2341X_ENC_MUTE_VIDEO, 1,
558 1 | (p->video_mute_yuv << 8));
540 } 559 }
541 560
542 /* Vsync Setup */ 561 /* Vsync Setup */
@@ -585,6 +604,7 @@ static int ivtv_setup_v4l2_decode_stream(struct ivtv_stream *s)
585{ 604{
586 u32 data[CX2341X_MBOX_MAX_DATA]; 605 u32 data[CX2341X_MBOX_MAX_DATA];
587 struct ivtv *itv = s->itv; 606 struct ivtv *itv = s->itv;
607 struct cx2341x_mpeg_params *p = &itv->params;
588 int datatype; 608 int datatype;
589 609
590 if (s->v4l2dev == NULL) 610 if (s->v4l2dev == NULL)
@@ -623,7 +643,7 @@ static int ivtv_setup_v4l2_decode_stream(struct ivtv_stream *s)
623 break; 643 break;
624 } 644 }
625 if (ivtv_vapi(itv, CX2341X_DEC_SET_DECODER_SOURCE, 4, datatype, 645 if (ivtv_vapi(itv, CX2341X_DEC_SET_DECODER_SOURCE, 4, datatype,
626 itv->params.width, itv->params.height, itv->params.audio_properties)) { 646 p->width, p->height, p->audio_properties)) {
627 IVTV_DEBUG_WARN("Couldn't initialize decoder source\n"); 647 IVTV_DEBUG_WARN("Couldn't initialize decoder source\n");
628 } 648 }
629 return 0; 649 return 0;
diff --git a/drivers/media/video/ivtv/ivtv-streams.h b/drivers/media/video/ivtv/ivtv-streams.h
index 8f5f5b1c7c89..3d76a415fbd8 100644
--- a/drivers/media/video/ivtv/ivtv-streams.h
+++ b/drivers/media/video/ivtv/ivtv-streams.h
@@ -22,6 +22,7 @@
22#define IVTV_STREAMS_H 22#define IVTV_STREAMS_H
23 23
24int ivtv_streams_setup(struct ivtv *itv); 24int ivtv_streams_setup(struct ivtv *itv);
25int ivtv_streams_register(struct ivtv *itv);
25void ivtv_streams_cleanup(struct ivtv *itv); 26void ivtv_streams_cleanup(struct ivtv *itv);
26 27
27/* Capture related */ 28/* Capture related */
diff --git a/drivers/media/video/ivtv/ivtv-udma.c b/drivers/media/video/ivtv/ivtv-udma.c
index c4626d1cdf41..912b424e5204 100644
--- a/drivers/media/video/ivtv/ivtv-udma.c
+++ b/drivers/media/video/ivtv/ivtv-udma.c
@@ -63,10 +63,10 @@ int ivtv_udma_fill_sg_list (struct ivtv_user_dma *dma, struct ivtv_dma_page_info
63 memcpy(page_address(dma->bouncemap[map_offset]) + offset, src, len); 63 memcpy(page_address(dma->bouncemap[map_offset]) + offset, src, len);
64 kunmap_atomic(src, KM_BOUNCE_READ); 64 kunmap_atomic(src, KM_BOUNCE_READ);
65 local_irq_restore(flags); 65 local_irq_restore(flags);
66 dma->SGlist[map_offset].page = dma->bouncemap[map_offset]; 66 sg_set_page(&dma->SGlist[map_offset], dma->bouncemap[map_offset]);
67 } 67 }
68 else { 68 else {
69 dma->SGlist[map_offset].page = dma->map[map_offset]; 69 sg_set_page(&dma->SGlist[map_offset], dma->map[map_offset]);
70 } 70 }
71 offset = 0; 71 offset = 0;
72 map_offset++; 72 map_offset++;
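
The ivtv-udma hunk above stops storing the page pointer in the scatterlist directly: with scatterlist chaining, the low bits of sg->page_link double as chain/terminator markers, so the accessor must be used. A hedged sketch using the two-argument sg_set_page() this patch targets (later kernels pass length and offset in the same call):

#include <linux/scatterlist.h>

static void fill_sg_entry(struct scatterlist *sg, struct page *page,
                          unsigned int len, unsigned int offset)
{
        sg_set_page(sg, page);          /* instead of: sg->page = page; */
        sg->offset = offset;
        sg->length = len;
}
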
diff --git a/drivers/media/video/ivtv/ivtv-yuv.c b/drivers/media/video/ivtv/ivtv-yuv.c
index e2288f224ab6..9091c4837bbc 100644
--- a/drivers/media/video/ivtv/ivtv-yuv.c
+++ b/drivers/media/video/ivtv/ivtv-yuv.c
@@ -710,7 +710,7 @@ static u32 ivtv_yuv_window_setup (struct ivtv *itv, struct yuv_frame_info *windo
710 710
 711 /* If there's nothing safe to display, we may as well stop now */ 711 /* If there's nothing safe to display, we may as well stop now */
712 if ((int)window->dst_w <= 2 || (int)window->dst_h <= 2 || (int)window->src_w <= 2 || (int)window->src_h <= 2) { 712 if ((int)window->dst_w <= 2 || (int)window->dst_h <= 2 || (int)window->src_w <= 2 || (int)window->src_h <= 2) {
713 return 0; 713 return IVTV_YUV_UPDATE_INVALID;
714 } 714 }
715 715
716 /* Ensure video remains inside OSD area */ 716 /* Ensure video remains inside OSD area */
@@ -791,7 +791,7 @@ static u32 ivtv_yuv_window_setup (struct ivtv *itv, struct yuv_frame_info *windo
791 791
 792 /* Check again. If there's nothing safe to display, stop now */ 792 /* Check again. If there's nothing safe to display, stop now */
793 if ((int)window->dst_w <= 2 || (int)window->dst_h <= 2 || (int)window->src_w <= 2 || (int)window->src_h <= 2) { 793 if ((int)window->dst_w <= 2 || (int)window->dst_h <= 2 || (int)window->src_w <= 2 || (int)window->src_h <= 2) {
794 return 0; 794 return IVTV_YUV_UPDATE_INVALID;
795 } 795 }
796 796
797 /* Both x offset & width are linked, so they have to be done together */ 797 /* Both x offset & width are linked, so they have to be done together */
@@ -840,110 +840,118 @@ void ivtv_yuv_work_handler (struct ivtv *itv)
840 if (!(yuv_update = ivtv_yuv_window_setup (itv, &window))) 840 if (!(yuv_update = ivtv_yuv_window_setup (itv, &window)))
841 return; 841 return;
842 842
843 /* Update horizontal settings */ 843 if (yuv_update & IVTV_YUV_UPDATE_INVALID) {
844 if (yuv_update & IVTV_YUV_UPDATE_HORIZONTAL) 844 write_reg(0x01008080, 0x2898);
845 ivtv_yuv_handle_horizontal(itv, &window); 845 } else if (yuv_update) {
846 write_reg(0x00108080, 0x2898);
846 847
847 if (yuv_update & IVTV_YUV_UPDATE_VERTICAL) 848 if (yuv_update & IVTV_YUV_UPDATE_HORIZONTAL)
848 ivtv_yuv_handle_vertical(itv, &window); 849 ivtv_yuv_handle_horizontal(itv, &window);
850
851 if (yuv_update & IVTV_YUV_UPDATE_VERTICAL)
852 ivtv_yuv_handle_vertical(itv, &window);
853 }
849 854
850 memcpy(&itv->yuv_info.old_frame_info, &window, sizeof (itv->yuv_info.old_frame_info)); 855 memcpy(&itv->yuv_info.old_frame_info, &window, sizeof (itv->yuv_info.old_frame_info));
851} 856}
852 857
853static void ivtv_yuv_init (struct ivtv *itv) 858static void ivtv_yuv_init (struct ivtv *itv)
854{ 859{
860 struct yuv_playback_info *yi = &itv->yuv_info;
861
855 IVTV_DEBUG_YUV("ivtv_yuv_init\n"); 862 IVTV_DEBUG_YUV("ivtv_yuv_init\n");
856 863
857 /* Take a snapshot of the current register settings */ 864 /* Take a snapshot of the current register settings */
858 itv->yuv_info.reg_2834 = read_reg(0x02834); 865 yi->reg_2834 = read_reg(0x02834);
859 itv->yuv_info.reg_2838 = read_reg(0x02838); 866 yi->reg_2838 = read_reg(0x02838);
860 itv->yuv_info.reg_283c = read_reg(0x0283c); 867 yi->reg_283c = read_reg(0x0283c);
861 itv->yuv_info.reg_2840 = read_reg(0x02840); 868 yi->reg_2840 = read_reg(0x02840);
862 itv->yuv_info.reg_2844 = read_reg(0x02844); 869 yi->reg_2844 = read_reg(0x02844);
863 itv->yuv_info.reg_2848 = read_reg(0x02848); 870 yi->reg_2848 = read_reg(0x02848);
864 itv->yuv_info.reg_2854 = read_reg(0x02854); 871 yi->reg_2854 = read_reg(0x02854);
865 itv->yuv_info.reg_285c = read_reg(0x0285c); 872 yi->reg_285c = read_reg(0x0285c);
866 itv->yuv_info.reg_2864 = read_reg(0x02864); 873 yi->reg_2864 = read_reg(0x02864);
867 itv->yuv_info.reg_2870 = read_reg(0x02870); 874 yi->reg_2870 = read_reg(0x02870);
868 itv->yuv_info.reg_2874 = read_reg(0x02874); 875 yi->reg_2874 = read_reg(0x02874);
869 itv->yuv_info.reg_2898 = read_reg(0x02898); 876 yi->reg_2898 = read_reg(0x02898);
870 itv->yuv_info.reg_2890 = read_reg(0x02890); 877 yi->reg_2890 = read_reg(0x02890);
871 878
872 itv->yuv_info.reg_289c = read_reg(0x0289c); 879 yi->reg_289c = read_reg(0x0289c);
873 itv->yuv_info.reg_2918 = read_reg(0x02918); 880 yi->reg_2918 = read_reg(0x02918);
874 itv->yuv_info.reg_291c = read_reg(0x0291c); 881 yi->reg_291c = read_reg(0x0291c);
875 itv->yuv_info.reg_2920 = read_reg(0x02920); 882 yi->reg_2920 = read_reg(0x02920);
876 itv->yuv_info.reg_2924 = read_reg(0x02924); 883 yi->reg_2924 = read_reg(0x02924);
877 itv->yuv_info.reg_2928 = read_reg(0x02928); 884 yi->reg_2928 = read_reg(0x02928);
878 itv->yuv_info.reg_292c = read_reg(0x0292c); 885 yi->reg_292c = read_reg(0x0292c);
879 itv->yuv_info.reg_2930 = read_reg(0x02930); 886 yi->reg_2930 = read_reg(0x02930);
880 itv->yuv_info.reg_2934 = read_reg(0x02934); 887 yi->reg_2934 = read_reg(0x02934);
881 itv->yuv_info.reg_2938 = read_reg(0x02938); 888 yi->reg_2938 = read_reg(0x02938);
882 itv->yuv_info.reg_293c = read_reg(0x0293c); 889 yi->reg_293c = read_reg(0x0293c);
883 itv->yuv_info.reg_2940 = read_reg(0x02940); 890 yi->reg_2940 = read_reg(0x02940);
884 itv->yuv_info.reg_2944 = read_reg(0x02944); 891 yi->reg_2944 = read_reg(0x02944);
885 itv->yuv_info.reg_2948 = read_reg(0x02948); 892 yi->reg_2948 = read_reg(0x02948);
886 itv->yuv_info.reg_294c = read_reg(0x0294c); 893 yi->reg_294c = read_reg(0x0294c);
887 itv->yuv_info.reg_2950 = read_reg(0x02950); 894 yi->reg_2950 = read_reg(0x02950);
888 itv->yuv_info.reg_2954 = read_reg(0x02954); 895 yi->reg_2954 = read_reg(0x02954);
889 itv->yuv_info.reg_2958 = read_reg(0x02958); 896 yi->reg_2958 = read_reg(0x02958);
890 itv->yuv_info.reg_295c = read_reg(0x0295c); 897 yi->reg_295c = read_reg(0x0295c);
891 itv->yuv_info.reg_2960 = read_reg(0x02960); 898 yi->reg_2960 = read_reg(0x02960);
892 itv->yuv_info.reg_2964 = read_reg(0x02964); 899 yi->reg_2964 = read_reg(0x02964);
893 itv->yuv_info.reg_2968 = read_reg(0x02968); 900 yi->reg_2968 = read_reg(0x02968);
894 itv->yuv_info.reg_296c = read_reg(0x0296c); 901 yi->reg_296c = read_reg(0x0296c);
895 itv->yuv_info.reg_2970 = read_reg(0x02970); 902 yi->reg_2970 = read_reg(0x02970);
896 903
897 itv->yuv_info.v_filter_1 = -1; 904 yi->v_filter_1 = -1;
898 itv->yuv_info.v_filter_2 = -1; 905 yi->v_filter_2 = -1;
899 itv->yuv_info.h_filter = -1; 906 yi->h_filter = -1;
900 907
901 /* Set some valid size info */ 908 /* Set some valid size info */
902 itv->yuv_info.osd_x_offset = read_reg(0x02a04) & 0x00000FFF; 909 yi->osd_x_offset = read_reg(0x02a04) & 0x00000FFF;
903 itv->yuv_info.osd_y_offset = (read_reg(0x02a04) >> 16) & 0x00000FFF; 910 yi->osd_y_offset = (read_reg(0x02a04) >> 16) & 0x00000FFF;
904 911
905 /* Bit 2 of reg 2878 indicates current decoder output format 912 /* Bit 2 of reg 2878 indicates current decoder output format
906 0 : NTSC 1 : PAL */ 913 0 : NTSC 1 : PAL */
907 if (read_reg(0x2878) & 4) 914 if (read_reg(0x2878) & 4)
908 itv->yuv_info.decode_height = 576; 915 yi->decode_height = 576;
909 else 916 else
910 itv->yuv_info.decode_height = 480; 917 yi->decode_height = 480;
911 918
912 /* If no visible size set, assume full size */ 919 if (!itv->osd_info) {
913 if (!itv->yuv_info.osd_vis_w) 920 yi->osd_vis_w = 720 - yi->osd_x_offset;
914 itv->yuv_info.osd_vis_w = 720 - itv->yuv_info.osd_x_offset; 921 yi->osd_vis_h = yi->decode_height - yi->osd_y_offset;
915
916 if (!itv->yuv_info.osd_vis_h) {
917 itv->yuv_info.osd_vis_h = itv->yuv_info.decode_height - itv->yuv_info.osd_y_offset;
918 } else { 922 } else {
919 /* If output video standard has changed, requested height may 923 /* If no visible size set, assume full size */
920 not be legal */ 924 if (!yi->osd_vis_w)
921 if (itv->yuv_info.osd_vis_h + itv->yuv_info.osd_y_offset > itv->yuv_info.decode_height) { 925 yi->osd_vis_w = 720 - yi->osd_x_offset;
922 IVTV_DEBUG_WARN("Clipping yuv output - fb size (%d) exceeds video standard limit (%d)\n", 926
923 itv->yuv_info.osd_vis_h + itv->yuv_info.osd_y_offset, 927 if (!yi->osd_vis_h)
924 itv->yuv_info.decode_height); 928 yi->osd_vis_h = yi->decode_height - yi->osd_y_offset;
925 itv->yuv_info.osd_vis_h = itv->yuv_info.decode_height - itv->yuv_info.osd_y_offset; 929 else {
930 /* If output video standard has changed, requested height may
931 not be legal */
932 if (yi->osd_vis_h + yi->osd_y_offset > yi->decode_height) {
933 IVTV_DEBUG_WARN("Clipping yuv output - fb size (%d) exceeds video standard limit (%d)\n",
934 yi->osd_vis_h + yi->osd_y_offset,
935 yi->decode_height);
936 yi->osd_vis_h = yi->decode_height - yi->osd_y_offset;
937 }
926 } 938 }
927 } 939 }
928 940
929 /* We need a buffer for blanking when Y plane is offset - non-fatal if we can't get one */ 941 /* We need a buffer for blanking when Y plane is offset - non-fatal if we can't get one */
930 itv->yuv_info.blanking_ptr = kzalloc(720*16,GFP_KERNEL); 942 yi->blanking_ptr = kzalloc(720*16, GFP_KERNEL);
931 if (itv->yuv_info.blanking_ptr) { 943 if (yi->blanking_ptr)
932 itv->yuv_info.blanking_dmaptr = pci_map_single(itv->dev, itv->yuv_info.blanking_ptr, 720*16, PCI_DMA_TODEVICE); 944 yi->blanking_dmaptr = pci_map_single(itv->dev, yi->blanking_ptr, 720*16, PCI_DMA_TODEVICE);
933 }
934 else { 945 else {
935 itv->yuv_info.blanking_dmaptr = 0; 946 yi->blanking_dmaptr = 0;
936 IVTV_DEBUG_WARN ("Failed to allocate yuv blanking buffer\n"); 947 IVTV_DEBUG_WARN("Failed to allocate yuv blanking buffer\n");
937 } 948 }
938 949
939 IVTV_DEBUG_WARN("Enable video output\n");
940 write_reg_sync(0x00108080, 0x2898);
941
942 /* Enable YUV decoder output */ 950 /* Enable YUV decoder output */
943 write_reg_sync(0x01, IVTV_REG_VDM); 951 write_reg_sync(0x01, IVTV_REG_VDM);
944 952
945 set_bit(IVTV_F_I_DECODING_YUV, &itv->i_flags); 953 set_bit(IVTV_F_I_DECODING_YUV, &itv->i_flags);
946 atomic_set(&itv->yuv_info.next_dma_frame,0); 954 atomic_set(&yi->next_dma_frame, 0);
947} 955}
948 956
949int ivtv_yuv_prep_frame(struct ivtv *itv, struct ivtv_dma_frame *args) 957int ivtv_yuv_prep_frame(struct ivtv *itv, struct ivtv_dma_frame *args)
diff --git a/drivers/media/video/ivtv/ivtv-yuv.h b/drivers/media/video/ivtv/ivtv-yuv.h
index f7215eeca018..3b966f0a204a 100644
--- a/drivers/media/video/ivtv/ivtv-yuv.h
+++ b/drivers/media/video/ivtv/ivtv-yuv.h
@@ -34,6 +34,7 @@
34 34
35#define IVTV_YUV_UPDATE_HORIZONTAL 0x01 35#define IVTV_YUV_UPDATE_HORIZONTAL 0x01
36#define IVTV_YUV_UPDATE_VERTICAL 0x02 36#define IVTV_YUV_UPDATE_VERTICAL 0x02
37#define IVTV_YUV_UPDATE_INVALID 0x04
37 38
38extern const u32 yuv_offset[4]; 39extern const u32 yuv_offset[4];
39 40
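
The new IVTV_YUV_UPDATE_INVALID bit above lets ivtv_yuv_window_setup() distinguish "nothing changed" (return 0) from "window too small to display safely", so the work handler can blank the output instead of silently doing nothing. A small self-contained sketch of that flag-return pattern (names, thresholds and register writes are illustrative only):

#define UPDATE_HORIZONTAL 0x01
#define UPDATE_VERTICAL   0x02
#define UPDATE_INVALID    0x04

static unsigned int window_setup(int dst_w, int dst_h, int changed)
{
        if (dst_w <= 2 || dst_h <= 2)
                return UPDATE_INVALID;          /* nothing safe to display */
        if (!changed)
                return 0;                       /* nothing to do */
        return UPDATE_HORIZONTAL | UPDATE_VERTICAL;
}

static void work_handler(unsigned int flags)
{
        if (flags & UPDATE_INVALID) {
                /* hide/blank the video output */
        } else if (flags) {
                /* enable output, then reprogram only what changed */
                if (flags & UPDATE_HORIZONTAL) {
                        /* reprogram horizontal scaler here */
                }
                if (flags & UPDATE_VERTICAL) {
                        /* reprogram vertical scaler here */
                }
        }
}
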
diff --git a/drivers/media/video/ivtv/ivtvfb.c b/drivers/media/video/ivtv/ivtvfb.c
index 9684048fe56c..52ffd154a3d8 100644
--- a/drivers/media/video/ivtv/ivtvfb.c
+++ b/drivers/media/video/ivtv/ivtvfb.c
@@ -55,7 +55,6 @@
55static int ivtvfb_card_id = -1; 55static int ivtvfb_card_id = -1;
56static int ivtvfb_debug = 0; 56static int ivtvfb_debug = 0;
57static int osd_laced; 57static int osd_laced;
58static int osd_compat;
59static int osd_depth; 58static int osd_depth;
60static int osd_upper; 59static int osd_upper;
61static int osd_left; 60static int osd_left;
@@ -65,7 +64,6 @@ static int osd_xres;
65module_param(ivtvfb_card_id, int, 0444); 64module_param(ivtvfb_card_id, int, 0444);
66module_param_named(debug,ivtvfb_debug, int, 0644); 65module_param_named(debug,ivtvfb_debug, int, 0644);
67module_param(osd_laced, bool, 0444); 66module_param(osd_laced, bool, 0444);
68module_param(osd_compat, bool, 0444);
69module_param(osd_depth, int, 0444); 67module_param(osd_depth, int, 0444);
70module_param(osd_upper, int, 0444); 68module_param(osd_upper, int, 0444);
71module_param(osd_left, int, 0444); 69module_param(osd_left, int, 0444);
@@ -80,12 +78,6 @@ MODULE_PARM_DESC(debug,
80 "Debug level (bitmask). Default: errors only\n" 78 "Debug level (bitmask). Default: errors only\n"
81 "\t\t\t(debug = 3 gives full debugging)"); 79 "\t\t\t(debug = 3 gives full debugging)");
82 80
83MODULE_PARM_DESC(osd_compat,
84 "Compatibility mode - Display size is locked (use for old X drivers)\n"
85 "\t\t\t0=off\n"
86 "\t\t\t1=on\n"
87 "\t\t\tdefault off");
88
89/* Why upper, left, xres, yres, depth, laced ? To match terminology used 81/* Why upper, left, xres, yres, depth, laced ? To match terminology used
90 by fbset. 82 by fbset.
91 Why start at 1 for left & upper coordinate ? Because X doesn't allow 0 */ 83 Why start at 1 for left & upper coordinate ? Because X doesn't allow 0 */
@@ -166,9 +158,6 @@ struct osd_info {
166 unsigned long fb_end_aligned_physaddr; 158 unsigned long fb_end_aligned_physaddr;
167#endif 159#endif
168 160
169 /* Current osd mode */
170 int osd_mode;
171
172 /* Store the buffer offset */ 161 /* Store the buffer offset */
173 int set_osd_coords_x; 162 int set_osd_coords_x;
174 int set_osd_coords_y; 163 int set_osd_coords_y;
@@ -470,13 +459,11 @@ static int ivtvfb_set_var(struct ivtv *itv, struct fb_var_screeninfo *var)
470 IVTVFB_DEBUG_WARN("ivtvfb_set_var - Invalid bpp\n"); 459 IVTVFB_DEBUG_WARN("ivtvfb_set_var - Invalid bpp\n");
471 } 460 }
472 461
473 /* Change osd mode if needed. 462 /* Set video mode. Although rare, the display can become scrambled even
474 Although rare, things can go wrong. The extra mode 463 if we don't change mode. Always 'bounce' to osd_mode via mode 0 */
475 change seems to help... */ 464 if (osd_mode != -1) {
476 if (osd_mode != -1 && osd_mode != oi->osd_mode) {
477 ivtv_vapi(itv, CX2341X_OSD_SET_PIXEL_FORMAT, 1, 0); 465 ivtv_vapi(itv, CX2341X_OSD_SET_PIXEL_FORMAT, 1, 0);
478 ivtv_vapi(itv, CX2341X_OSD_SET_PIXEL_FORMAT, 1, osd_mode); 466 ivtv_vapi(itv, CX2341X_OSD_SET_PIXEL_FORMAT, 1, osd_mode);
479 oi->osd_mode = osd_mode;
480 } 467 }
481 468
482 oi->bits_per_pixel = var->bits_per_pixel; 469 oi->bits_per_pixel = var->bits_per_pixel;
@@ -579,14 +566,6 @@ static int _ivtvfb_check_var(struct fb_var_screeninfo *var, struct ivtv *itv)
579 osd_height_limit = 480; 566 osd_height_limit = 480;
580 } 567 }
581 568
582 /* Check the bits per pixel */
583 if (osd_compat) {
584 if (var->bits_per_pixel != 32) {
585 IVTVFB_DEBUG_WARN("Invalid colour mode: %d\n", var->bits_per_pixel);
586 return -EINVAL;
587 }
588 }
589
590 if (var->bits_per_pixel == 8 || var->bits_per_pixel == 32) { 569 if (var->bits_per_pixel == 8 || var->bits_per_pixel == 32) {
591 var->transp.offset = 24; 570 var->transp.offset = 24;
592 var->transp.length = 8; 571 var->transp.length = 8;
@@ -638,32 +617,20 @@ static int _ivtvfb_check_var(struct fb_var_screeninfo *var, struct ivtv *itv)
638 } 617 }
639 618
640 /* Check the resolution */ 619 /* Check the resolution */
641 if (osd_compat) { 620 if (var->xres > IVTV_OSD_MAX_WIDTH || var->yres > osd_height_limit) {
642 if (var->xres != oi->ivtvfb_defined.xres || 621 IVTVFB_DEBUG_WARN("Invalid resolution: %dx%d\n",
643 var->yres != oi->ivtvfb_defined.yres || 622 var->xres, var->yres);
644 var->xres_virtual != oi->ivtvfb_defined.xres_virtual || 623 return -EINVAL;
645 var->yres_virtual != oi->ivtvfb_defined.yres_virtual) {
646 IVTVFB_DEBUG_WARN("Invalid resolution: %dx%d (virtual %dx%d)\n",
647 var->xres, var->yres, var->xres_virtual, var->yres_virtual);
648 return -EINVAL;
649 }
650 } 624 }
651 else {
652 if (var->xres > IVTV_OSD_MAX_WIDTH || var->yres > osd_height_limit) {
653 IVTVFB_DEBUG_WARN("Invalid resolution: %dx%d\n",
654 var->xres, var->yres);
655 return -EINVAL;
656 }
657 625
658 /* Max horizontal size is 1023 @ 32bpp, 2046 & 16bpp, 4092 @ 8bpp */ 626 /* Max horizontal size is 1023 @ 32bpp, 2046 & 16bpp, 4092 @ 8bpp */
659 if (var->xres_virtual > 4095 / (var->bits_per_pixel / 8) || 627 if (var->xres_virtual > 4095 / (var->bits_per_pixel / 8) ||
660 var->xres_virtual * var->yres_virtual * (var->bits_per_pixel / 8) > oi->video_buffer_size || 628 var->xres_virtual * var->yres_virtual * (var->bits_per_pixel / 8) > oi->video_buffer_size ||
661 var->xres_virtual < var->xres || 629 var->xres_virtual < var->xres ||
662 var->yres_virtual < var->yres) { 630 var->yres_virtual < var->yres) {
663 IVTVFB_DEBUG_WARN("Invalid virtual resolution: %dx%d\n", 631 IVTVFB_DEBUG_WARN("Invalid virtual resolution: %dx%d\n",
664 var->xres_virtual, var->yres_virtual); 632 var->xres_virtual, var->yres_virtual);
665 return -EINVAL; 633 return -EINVAL;
666 }
667 } 634 }
668 635
669 /* Some extra checks if in 8 bit mode */ 636 /* Some extra checks if in 8 bit mode */
@@ -877,17 +844,15 @@ static int ivtvfb_init_vidmode(struct ivtv *itv)
877 844
878 /* Color mode */ 845 /* Color mode */
879 846
880 if (osd_compat) osd_depth = 32; 847 if (osd_depth != 8 && osd_depth != 16 && osd_depth != 32)
881 if (osd_depth != 8 && osd_depth != 16 && osd_depth != 32) osd_depth = 8; 848 osd_depth = 8;
882 oi->bits_per_pixel = osd_depth; 849 oi->bits_per_pixel = osd_depth;
883 oi->bytes_per_pixel = oi->bits_per_pixel / 8; 850 oi->bytes_per_pixel = oi->bits_per_pixel / 8;
884 851
885 /* Invalidate current osd mode to force a mode switch later */
886 oi->osd_mode = -1;
887
888 /* Horizontal size & position */ 852 /* Horizontal size & position */
889 853
890 if (osd_xres > 720) osd_xres = 720; 854 if (osd_xres > 720)
855 osd_xres = 720;
891 856
892 /* Must be a multiple of 4 for 8bpp & 2 for 16bpp */ 857 /* Must be a multiple of 4 for 8bpp & 2 for 16bpp */
893 if (osd_depth == 8) 858 if (osd_depth == 8)
@@ -895,10 +860,7 @@ static int ivtvfb_init_vidmode(struct ivtv *itv)
895 else if (osd_depth == 16) 860 else if (osd_depth == 16)
896 osd_xres &= ~1; 861 osd_xres &= ~1;
897 862
898 if (osd_xres) 863 start_window.width = osd_xres ? osd_xres : 640;
899 start_window.width = osd_xres;
900 else
901 start_window.width = osd_compat ? 720: 640;
902 864
903 /* Check horizontal start (osd_left). */ 865 /* Check horizontal start (osd_left). */
904 if (osd_left && osd_left + start_window.width > 721) { 866 if (osd_left && osd_left + start_window.width > 721) {
@@ -921,10 +883,7 @@ static int ivtvfb_init_vidmode(struct ivtv *itv)
921 if (osd_yres > max_height) 883 if (osd_yres > max_height)
922 osd_yres = max_height; 884 osd_yres = max_height;
923 885
924 if (osd_yres) 886 start_window.height = osd_yres ? osd_yres : itv->is_50hz ? 480 : 400;
925 start_window.height = osd_yres;
926 else
927 start_window.height = osd_compat ? max_height : (itv->is_50hz ? 480 : 400);
928 887
929 /* Check vertical start (osd_upper). */ 888 /* Check vertical start (osd_upper). */
930 if (osd_upper + start_window.height > max_height + 1) { 889 if (osd_upper + start_window.height > max_height + 1) {
@@ -1127,10 +1086,6 @@ static int ivtvfb_init_card(struct ivtv *itv)
1127 /* Enable the osd */ 1086 /* Enable the osd */
1128 ivtvfb_blank(FB_BLANK_UNBLANK, &itv->osd_info->ivtvfb_info); 1087 ivtvfb_blank(FB_BLANK_UNBLANK, &itv->osd_info->ivtvfb_info);
1129 1088
1130 /* Note if we're running in compatibility mode */
1131 if (osd_compat)
1132 IVTVFB_INFO("Running in compatibility mode. Display resize & mode change disabled\n");
1133
1134 /* Allocate DMA */ 1089 /* Allocate DMA */
1135 ivtv_udma_alloc(itv); 1090 ivtv_udma_alloc(itv);
1136 return 0; 1091 return 0;
@@ -1177,9 +1132,12 @@ static void ivtvfb_cleanup(void)
1177 for (i = 0; i < ivtv_cards_active; i++) { 1132 for (i = 0; i < ivtv_cards_active; i++) {
1178 itv = ivtv_cards[i]; 1133 itv = ivtv_cards[i];
1179 if (itv && (itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT) && itv->osd_info) { 1134 if (itv && (itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT) && itv->osd_info) {
1135 if (unregister_framebuffer(&itv->osd_info->ivtvfb_info)) {
1136 IVTVFB_WARN("Framebuffer %d is in use, cannot unload\n", i);
1137 return;
1138 }
1180 IVTVFB_DEBUG_INFO("Unregister framebuffer %d\n", i); 1139 IVTVFB_DEBUG_INFO("Unregister framebuffer %d\n", i);
1181 ivtvfb_blank(FB_BLANK_POWERDOWN, &itv->osd_info->ivtvfb_info); 1140 ivtvfb_blank(FB_BLANK_POWERDOWN, &itv->osd_info->ivtvfb_info);
1182 unregister_framebuffer(&itv->osd_info->ivtvfb_info);
1183 ivtvfb_release_buffers(itv); 1141 ivtvfb_release_buffers(itv);
1184 itv->osd_video_pbase = 0; 1142 itv->osd_video_pbase = 0;
1185 } 1143 }
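
The ivtvfb cleanup hunk above reorders unloading so that the framebuffer is torn down only if unregister_framebuffer() actually succeeds. A minimal sketch of that ordering follows; it assumes a registered struct fb_info, and my_blank_display()/my_release_buffers() stand in for driver-specific helpers, not ivtv code.

#include <linux/fb.h>
#include <linux/kernel.h>

static void my_blank_display(struct fb_info *info) { /* driver specific */ }
static void my_release_buffers(struct fb_info *info) { /* driver specific */ }

static void my_fb_cleanup(struct fb_info *info)
{
	/* unregister_framebuffer() can fail, e.g. while userspace still
	 * has the node open, so check it before touching anything else. */
	if (unregister_framebuffer(info)) {
		printk(KERN_WARNING "framebuffer in use, cannot unload\n");
		return;
	}
	/* Only a successfully unregistered device gets blanked and freed. */
	my_blank_display(info);
	my_release_buffers(info);
}
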
diff --git a/drivers/media/video/meye.c b/drivers/media/video/meye.c
index 69283926a8dc..c31163290432 100644
--- a/drivers/media/video/meye.c
+++ b/drivers/media/video/meye.c
@@ -1762,7 +1762,6 @@ static struct video_device meye_template = {
1762 .owner = THIS_MODULE, 1762 .owner = THIS_MODULE,
1763 .name = "meye", 1763 .name = "meye",
1764 .type = VID_TYPE_CAPTURE, 1764 .type = VID_TYPE_CAPTURE,
1765 .hardware = VID_HARDWARE_MEYE,
1766 .fops = &meye_fops, 1765 .fops = &meye_fops,
1767 .release = video_device_release, 1766 .release = video_device_release,
1768 .minor = -1, 1767 .minor = -1,
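
The meye hunk above is one of many identical changes in this series: the V4L1-era .hardware field and its VID_HARDWARE_* constants are dropped from struct video_device templates. A slimmed-down template of that era looks roughly like the sketch below; my_fops is a placeholder for the driver's file_operations.

#include <linux/module.h>
#include <linux/fs.h>
#include <media/v4l2-dev.h>

static const struct file_operations my_fops;	/* normally filled with open/read/ioctl */

static struct video_device my_template = {
	.owner   = THIS_MODULE,
	.name    = "example capture device",
	.type    = VID_TYPE_CAPTURE,
	/* no .hardware = VID_HARDWARE_xxx initializer any more */
	.fops    = &my_fops,
	.release = video_device_release,
	.minor   = -1,
};
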
diff --git a/drivers/media/video/ov511.c b/drivers/media/video/ov511.c
index b8d4ac0d938e..d55d5800efb4 100644
--- a/drivers/media/video/ov511.c
+++ b/drivers/media/video/ov511.c
@@ -4668,7 +4668,6 @@ static struct video_device vdev_template = {
4668 .owner = THIS_MODULE, 4668 .owner = THIS_MODULE,
4669 .name = "OV511 USB Camera", 4669 .name = "OV511 USB Camera",
4670 .type = VID_TYPE_CAPTURE, 4670 .type = VID_TYPE_CAPTURE,
4671 .hardware = VID_HARDWARE_OV511,
4672 .fops = &ov511_fops, 4671 .fops = &ov511_fops,
4673 .release = video_device_release, 4672 .release = video_device_release,
4674 .minor = -1, 4673 .minor = -1,
diff --git a/drivers/media/video/planb.c b/drivers/media/video/planb.c
index 0ef73d9d5848..ce4b2f9791ee 100644
--- a/drivers/media/video/planb.c
+++ b/drivers/media/video/planb.c
@@ -2013,7 +2013,6 @@ static struct video_device planb_template=
2013 .owner = THIS_MODULE, 2013 .owner = THIS_MODULE,
2014 .name = PLANB_DEVICE_NAME, 2014 .name = PLANB_DEVICE_NAME,
2015 .type = VID_TYPE_OVERLAY, 2015 .type = VID_TYPE_OVERLAY,
2016 .hardware = VID_HARDWARE_PLANB,
2017 .open = planb_open, 2016 .open = planb_open,
2018 .close = planb_close, 2017 .close = planb_close,
2019 .read = planb_read, 2018 .read = planb_read,
diff --git a/drivers/media/video/pms.c b/drivers/media/video/pms.c
index b5a67f0dd19f..6820c2aabd30 100644
--- a/drivers/media/video/pms.c
+++ b/drivers/media/video/pms.c
@@ -895,7 +895,6 @@ static struct video_device pms_template=
895 .owner = THIS_MODULE, 895 .owner = THIS_MODULE,
896 .name = "Mediavision PMS", 896 .name = "Mediavision PMS",
897 .type = VID_TYPE_CAPTURE, 897 .type = VID_TYPE_CAPTURE,
898 .hardware = VID_HARDWARE_PMS,
899 .fops = &pms_fops, 898 .fops = &pms_fops,
900}; 899};
901 900
diff --git a/drivers/media/video/pvrusb2/pvrusb2-encoder.c b/drivers/media/video/pvrusb2/pvrusb2-encoder.c
index 20b614436d2c..205087a3e136 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-encoder.c
+++ b/drivers/media/video/pvrusb2/pvrusb2-encoder.c
@@ -209,6 +209,11 @@ static int pvr2_encoder_cmd(void *ctxt,
209 209
210 LOCK_TAKE(hdw->ctl_lock); do { 210 LOCK_TAKE(hdw->ctl_lock); do {
211 211
212 if (!hdw->flag_encoder_ok) {
213 ret = -EIO;
214 break;
215 }
216
212 retry_flag = 0; 217 retry_flag = 0;
213 try_count++; 218 try_count++;
214 ret = 0; 219 ret = 0;
@@ -273,6 +278,7 @@ static int pvr2_encoder_cmd(void *ctxt,
273 ret = -EBUSY; 278 ret = -EBUSY;
274 } 279 }
275 if (ret) { 280 if (ret) {
281 hdw->flag_encoder_ok = 0;
276 pvr2_trace( 282 pvr2_trace(
277 PVR2_TRACE_ERROR_LEGS, 283 PVR2_TRACE_ERROR_LEGS,
278 "Giving up on command." 284 "Giving up on command."
diff --git a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
index 985d9ae7f5ee..f873994b088c 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
+++ b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
@@ -225,11 +225,12 @@ struct pvr2_hdw {
225 unsigned int cmd_debug_write_len; // 225 unsigned int cmd_debug_write_len; //
226 unsigned int cmd_debug_read_len; // 226 unsigned int cmd_debug_read_len; //
227 227
228 int flag_ok; // device in known good state 228 int flag_ok; /* device in known good state */
229 int flag_disconnected; // flag_ok == 0 due to disconnect 229 int flag_disconnected; /* flag_ok == 0 due to disconnect */
230 int flag_init_ok; // true if structure is fully initialized 230 int flag_init_ok; /* true if structure is fully initialized */
231 int flag_streaming_enabled; // true if streaming should be on 231 int flag_streaming_enabled; /* true if streaming should be on */
232 int fw1_state; // current situation with fw1 232 int fw1_state; /* current situation with fw1 */
233 int flag_encoder_ok; /* True if encoder is healthy */
233 234
234 int flag_decoder_is_tuned; 235 int flag_decoder_is_tuned;
235 236
diff --git a/drivers/media/video/pvrusb2/pvrusb2-hdw.c b/drivers/media/video/pvrusb2/pvrusb2-hdw.c
index 27b12b4b5c88..402c59488253 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-hdw.c
+++ b/drivers/media/video/pvrusb2/pvrusb2-hdw.c
@@ -1248,6 +1248,8 @@ int pvr2_upload_firmware2(struct pvr2_hdw *hdw)
1248 time we configure the encoder, then we'll fully configure it. */ 1248 time we configure the encoder, then we'll fully configure it. */
1249 hdw->enc_cur_valid = 0; 1249 hdw->enc_cur_valid = 0;
1250 1250
1251 hdw->flag_encoder_ok = 0;
1252
1251 /* First prepare firmware loading */ 1253 /* First prepare firmware loading */
1252 ret |= pvr2_write_register(hdw, 0x0048, 0xffffffff); /*interrupt mask*/ 1254 ret |= pvr2_write_register(hdw, 0x0048, 0xffffffff); /*interrupt mask*/
1253 ret |= pvr2_hdw_gpio_chg_dir(hdw,0xffffffff,0x00000088); /*gpio dir*/ 1255 ret |= pvr2_hdw_gpio_chg_dir(hdw,0xffffffff,0x00000088); /*gpio dir*/
@@ -1346,6 +1348,7 @@ int pvr2_upload_firmware2(struct pvr2_hdw *hdw)
1346 pvr2_trace(PVR2_TRACE_ERROR_LEGS, 1348 pvr2_trace(PVR2_TRACE_ERROR_LEGS,
1347 "firmware2 upload post-proc failure"); 1349 "firmware2 upload post-proc failure");
1348 } else { 1350 } else {
1351 hdw->flag_encoder_ok = !0;
1349 hdw->subsys_enabled_mask |= (1<<PVR2_SUBSYS_B_ENC_FIRMWARE); 1352 hdw->subsys_enabled_mask |= (1<<PVR2_SUBSYS_B_ENC_FIRMWARE);
1350 } 1353 }
1351 return ret; 1354 return ret;
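
The three pvrusb2 hunks above add a simple health flag: it is cleared before the encoder firmware upload, set once the upload succeeds, and every encoder command refuses to run (and re-poisons the flag on failure) while it is clear. A condensed sketch of that pattern, with my_* names as stand-ins for the real pvrusb2 helpers:

#include <linux/mutex.h>
#include <linux/errno.h>

struct my_hw {
	struct mutex ctl_lock;
	int encoder_ok;			/* non-zero only while known good */
};

int my_send_cmd(struct my_hw *hw);	/* low-level I/O, defined elsewhere */
int my_do_upload(struct my_hw *hw);	/* firmware upload, defined elsewhere */

static int my_encoder_cmd(struct my_hw *hw)
{
	int ret = 0;

	mutex_lock(&hw->ctl_lock);
	if (!hw->encoder_ok) {
		ret = -EIO;		/* refuse to talk to a sick encoder */
	} else if (my_send_cmd(hw)) {
		hw->encoder_ok = 0;	/* one failure poisons later calls */
		ret = -EIO;
	}
	mutex_unlock(&hw->ctl_lock);
	return ret;
}

static int my_upload_firmware(struct my_hw *hw)
{
	int ret;

	hw->encoder_ok = 0;		/* pessimistic until proven otherwise */
	ret = my_do_upload(hw);
	if (ret == 0)
		hw->encoder_ok = 1;
	return ret;
}
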
diff --git a/drivers/media/video/pvrusb2/pvrusb2-v4l2.c b/drivers/media/video/pvrusb2/pvrusb2-v4l2.c
index 4563b3df8a0d..7a596ea7cfe6 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-v4l2.c
+++ b/drivers/media/video/pvrusb2/pvrusb2-v4l2.c
@@ -1121,15 +1121,12 @@ static const struct file_operations vdev_fops = {
1121}; 1121};
1122 1122
1123 1123
1124#define VID_HARDWARE_PVRUSB2 38 /* FIXME : need a good value */
1125
1126static struct video_device vdev_template = { 1124static struct video_device vdev_template = {
1127 .owner = THIS_MODULE, 1125 .owner = THIS_MODULE,
1128 .type = VID_TYPE_CAPTURE | VID_TYPE_TUNER, 1126 .type = VID_TYPE_CAPTURE | VID_TYPE_TUNER,
1129 .type2 = (V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VBI_CAPTURE 1127 .type2 = (V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VBI_CAPTURE
1130 | V4L2_CAP_TUNER | V4L2_CAP_AUDIO 1128 | V4L2_CAP_TUNER | V4L2_CAP_AUDIO
1131 | V4L2_CAP_READWRITE), 1129 | V4L2_CAP_READWRITE),
1132 .hardware = VID_HARDWARE_PVRUSB2,
1133 .fops = &vdev_fops, 1130 .fops = &vdev_fops,
1134}; 1131};
1135 1132
diff --git a/drivers/media/video/pwc/pwc-if.c b/drivers/media/video/pwc/pwc-if.c
index 950da2542148..7300ace8f44e 100644
--- a/drivers/media/video/pwc/pwc-if.c
+++ b/drivers/media/video/pwc/pwc-if.c
@@ -166,7 +166,6 @@ static struct video_device pwc_template = {
166 .owner = THIS_MODULE, 166 .owner = THIS_MODULE,
167 .name = "Philips Webcam", /* Filled in later */ 167 .name = "Philips Webcam", /* Filled in later */
168 .type = VID_TYPE_CAPTURE, 168 .type = VID_TYPE_CAPTURE,
169 .hardware = VID_HARDWARE_PWC,
170 .release = video_device_release, 169 .release = video_device_release,
171 .fops = &pwc_fops, 170 .fops = &pwc_fops,
172 .minor = -1, 171 .minor = -1,
diff --git a/drivers/media/video/saa7134/saa6752hs.c b/drivers/media/video/saa7134/saa6752hs.c
index 57f1f5d409e0..002e70a33a4f 100644
--- a/drivers/media/video/saa7134/saa6752hs.c
+++ b/drivers/media/video/saa7134/saa6752hs.c
@@ -71,7 +71,6 @@ static const struct v4l2_format v4l2_format_table[] =
71 71
72struct saa6752hs_state { 72struct saa6752hs_state {
73 struct i2c_client client; 73 struct i2c_client client;
74 struct v4l2_mpeg_compression old_params;
75 struct saa6752hs_mpeg_params params; 74 struct saa6752hs_mpeg_params params;
76 enum saa6752hs_videoformat video_format; 75 enum saa6752hs_videoformat video_format;
77 v4l2_std_id standard; 76 v4l2_std_id standard;
@@ -161,35 +160,6 @@ static struct saa6752hs_mpeg_params param_defaults =
161 .au_l2_bitrate = V4L2_MPEG_AUDIO_L2_BITRATE_256K, 160 .au_l2_bitrate = V4L2_MPEG_AUDIO_L2_BITRATE_256K,
162}; 161};
163 162
164static struct v4l2_mpeg_compression old_param_defaults =
165{
166 .st_type = V4L2_MPEG_TS_2,
167 .st_bitrate = {
168 .mode = V4L2_BITRATE_CBR,
169 .target = 7000,
170 },
171
172 .ts_pid_pmt = 16,
173 .ts_pid_video = 260,
174 .ts_pid_audio = 256,
175 .ts_pid_pcr = 259,
176
177 .vi_type = V4L2_MPEG_VI_2,
178 .vi_aspect_ratio = V4L2_MPEG_ASPECT_4_3,
179 .vi_bitrate = {
180 .mode = V4L2_BITRATE_VBR,
181 .target = 4000,
182 .max = 6000,
183 },
184
185 .au_type = V4L2_MPEG_AU_2_II,
186 .au_bitrate = {
187 .mode = V4L2_BITRATE_CBR,
188 .target = 256,
189 },
190
191};
192
193/* ---------------------------------------------------------------------- */ 163/* ---------------------------------------------------------------------- */
194 164
195static int saa6752hs_chip_command(struct i2c_client* client, 165static int saa6752hs_chip_command(struct i2c_client* client,
@@ -362,74 +332,6 @@ static void saa6752hs_set_subsampling(struct i2c_client* client,
362} 332}
363 333
364 334
365static void saa6752hs_old_set_params(struct i2c_client* client,
366 struct v4l2_mpeg_compression* params)
367{
368 struct saa6752hs_state *h = i2c_get_clientdata(client);
369
370 /* check PIDs */
371 if (params->ts_pid_pmt <= MPEG_PID_MAX) {
372 h->old_params.ts_pid_pmt = params->ts_pid_pmt;
373 h->params.ts_pid_pmt = params->ts_pid_pmt;
374 }
375 if (params->ts_pid_pcr <= MPEG_PID_MAX) {
376 h->old_params.ts_pid_pcr = params->ts_pid_pcr;
377 h->params.ts_pid_pcr = params->ts_pid_pcr;
378 }
379 if (params->ts_pid_video <= MPEG_PID_MAX) {
380 h->old_params.ts_pid_video = params->ts_pid_video;
381 h->params.ts_pid_video = params->ts_pid_video;
382 }
383 if (params->ts_pid_audio <= MPEG_PID_MAX) {
384 h->old_params.ts_pid_audio = params->ts_pid_audio;
385 h->params.ts_pid_audio = params->ts_pid_audio;
386 }
387
388 /* check bitrate parameters */
389 if ((params->vi_bitrate.mode == V4L2_BITRATE_CBR) ||
390 (params->vi_bitrate.mode == V4L2_BITRATE_VBR)) {
391 h->old_params.vi_bitrate.mode = params->vi_bitrate.mode;
392 h->params.vi_bitrate_mode = (params->vi_bitrate.mode == V4L2_BITRATE_VBR) ?
393 V4L2_MPEG_VIDEO_BITRATE_MODE_VBR : V4L2_MPEG_VIDEO_BITRATE_MODE_CBR;
394 }
395 if (params->vi_bitrate.mode != V4L2_BITRATE_NONE)
396 h->old_params.st_bitrate.target = params->st_bitrate.target;
397 if (params->vi_bitrate.mode != V4L2_BITRATE_NONE)
398 h->old_params.vi_bitrate.target = params->vi_bitrate.target;
399 if (params->vi_bitrate.mode == V4L2_BITRATE_VBR)
400 h->old_params.vi_bitrate.max = params->vi_bitrate.max;
401 if (params->au_bitrate.mode != V4L2_BITRATE_NONE)
402 h->old_params.au_bitrate.target = params->au_bitrate.target;
403
404 /* aspect ratio */
405 if (params->vi_aspect_ratio == V4L2_MPEG_ASPECT_4_3 ||
406 params->vi_aspect_ratio == V4L2_MPEG_ASPECT_16_9) {
407 h->old_params.vi_aspect_ratio = params->vi_aspect_ratio;
408 if (params->vi_aspect_ratio == V4L2_MPEG_ASPECT_4_3)
409 h->params.vi_aspect = V4L2_MPEG_VIDEO_ASPECT_4x3;
410 else
411 h->params.vi_aspect = V4L2_MPEG_VIDEO_ASPECT_16x9;
412 }
413
414 /* range checks */
415 if (h->old_params.st_bitrate.target > MPEG_TOTAL_TARGET_BITRATE_MAX)
416 h->old_params.st_bitrate.target = MPEG_TOTAL_TARGET_BITRATE_MAX;
417 if (h->old_params.vi_bitrate.target > MPEG_VIDEO_TARGET_BITRATE_MAX)
418 h->old_params.vi_bitrate.target = MPEG_VIDEO_TARGET_BITRATE_MAX;
419 if (h->old_params.vi_bitrate.max > MPEG_VIDEO_MAX_BITRATE_MAX)
420 h->old_params.vi_bitrate.max = MPEG_VIDEO_MAX_BITRATE_MAX;
421 h->params.vi_bitrate = params->vi_bitrate.target;
422 h->params.vi_bitrate_peak = params->vi_bitrate.max;
423 if (h->old_params.au_bitrate.target <= 256) {
424 h->old_params.au_bitrate.target = 256;
425 h->params.au_l2_bitrate = V4L2_MPEG_AUDIO_L2_BITRATE_256K;
426 }
427 else {
428 h->old_params.au_bitrate.target = 384;
429 h->params.au_l2_bitrate = V4L2_MPEG_AUDIO_L2_BITRATE_384K;
430 }
431}
432
433static int handle_ctrl(struct saa6752hs_mpeg_params *params, 335static int handle_ctrl(struct saa6752hs_mpeg_params *params,
434 struct v4l2_ext_control *ctrl, unsigned int cmd) 336 struct v4l2_ext_control *ctrl, unsigned int cmd)
435{ 337{
@@ -697,7 +599,6 @@ static int saa6752hs_attach(struct i2c_adapter *adap, int addr, int kind)
697 return -ENOMEM; 599 return -ENOMEM;
698 h->client = client_template; 600 h->client = client_template;
699 h->params = param_defaults; 601 h->params = param_defaults;
700 h->old_params = old_param_defaults;
701 h->client.adapter = adap; 602 h->client.adapter = adap;
702 h->client.addr = addr; 603 h->client.addr = addr;
703 604
@@ -734,23 +635,11 @@ saa6752hs_command(struct i2c_client *client, unsigned int cmd, void *arg)
734{ 635{
735 struct saa6752hs_state *h = i2c_get_clientdata(client); 636 struct saa6752hs_state *h = i2c_get_clientdata(client);
736 struct v4l2_ext_controls *ctrls = arg; 637 struct v4l2_ext_controls *ctrls = arg;
737 struct v4l2_mpeg_compression *old_params = arg;
738 struct saa6752hs_mpeg_params params; 638 struct saa6752hs_mpeg_params params;
739 int err = 0; 639 int err = 0;
740 int i; 640 int i;
741 641
742 switch (cmd) { 642 switch (cmd) {
743 case VIDIOC_S_MPEGCOMP:
744 if (NULL == old_params) {
745 /* apply settings and start encoder */
746 saa6752hs_init(client);
747 break;
748 }
749 saa6752hs_old_set_params(client, old_params);
750 /* fall through */
751 case VIDIOC_G_MPEGCOMP:
752 *old_params = h->old_params;
753 break;
754 case VIDIOC_S_EXT_CTRLS: 643 case VIDIOC_S_EXT_CTRLS:
755 if (ctrls->ctrl_class != V4L2_CTRL_CLASS_MPEG) 644 if (ctrls->ctrl_class != V4L2_CTRL_CLASS_MPEG)
756 return -EINVAL; 645 return -EINVAL;
diff --git a/drivers/media/video/saa7134/saa7134-core.c b/drivers/media/video/saa7134/saa7134-core.c
index 1a4a24471f20..a499eea379e6 100644
--- a/drivers/media/video/saa7134/saa7134-core.c
+++ b/drivers/media/video/saa7134/saa7134-core.c
@@ -429,7 +429,7 @@ int saa7134_set_dmabits(struct saa7134_dev *dev)
429 429
430 assert_spin_locked(&dev->slock); 430 assert_spin_locked(&dev->slock);
431 431
432 if (dev->inresume) 432 if (dev->insuspend)
433 return 0; 433 return 0;
434 434
435 /* video capture -- dma 0 + video task A */ 435 /* video capture -- dma 0 + video task A */
@@ -563,6 +563,9 @@ static irqreturn_t saa7134_irq(int irq, void *dev_id)
563 unsigned long report,status; 563 unsigned long report,status;
564 int loop, handled = 0; 564 int loop, handled = 0;
565 565
566 if (dev->insuspend)
567 goto out;
568
566 for (loop = 0; loop < 10; loop++) { 569 for (loop = 0; loop < 10; loop++) {
567 report = saa_readl(SAA7134_IRQ_REPORT); 570 report = saa_readl(SAA7134_IRQ_REPORT);
568 status = saa_readl(SAA7134_IRQ_STATUS); 571 status = saa_readl(SAA7134_IRQ_STATUS);
@@ -1163,6 +1166,7 @@ static void __devexit saa7134_finidev(struct pci_dev *pci_dev)
1163 kfree(dev); 1166 kfree(dev);
1164} 1167}
1165 1168
1169#ifdef CONFIG_PM
1166static int saa7134_suspend(struct pci_dev *pci_dev , pm_message_t state) 1170static int saa7134_suspend(struct pci_dev *pci_dev , pm_message_t state)
1167{ 1171{
1168 1172
@@ -1176,6 +1180,19 @@ static int saa7134_suspend(struct pci_dev *pci_dev , pm_message_t state)
1176 saa_writel(SAA7134_IRQ2, 0); 1180 saa_writel(SAA7134_IRQ2, 0);
1177 saa_writel(SAA7134_MAIN_CTRL, 0); 1181 saa_writel(SAA7134_MAIN_CTRL, 0);
1178 1182
1183 synchronize_irq(pci_dev->irq);
1184 dev->insuspend = 1;
1185
1186 /* Disable timeout timers - if we have active buffers, we will
1187 fill them on resume*/
1188
1189 del_timer(&dev->video_q.timeout);
1190 del_timer(&dev->vbi_q.timeout);
1191 del_timer(&dev->ts_q.timeout);
1192
1193 if (dev->remote)
1194 saa7134_ir_stop(dev);
1195
1179 pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state)); 1196 pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state));
1180 pci_save_state(pci_dev); 1197 pci_save_state(pci_dev);
1181 1198
@@ -1194,24 +1211,27 @@ static int saa7134_resume(struct pci_dev *pci_dev)
1194 /* Do things that are done in saa7134_initdev , 1211 /* Do things that are done in saa7134_initdev ,
1195 except of initializing memory structures.*/ 1212 except of initializing memory structures.*/
1196 1213
1197 dev->inresume = 1;
1198 saa7134_board_init1(dev); 1214 saa7134_board_init1(dev);
1199 1215
1216 /* saa7134_hwinit1 */
1200 if (saa7134_boards[dev->board].video_out) 1217 if (saa7134_boards[dev->board].video_out)
1201 saa7134_videoport_init(dev); 1218 saa7134_videoport_init(dev);
1202
1203 if (card_has_mpeg(dev)) 1219 if (card_has_mpeg(dev))
1204 saa7134_ts_init_hw(dev); 1220 saa7134_ts_init_hw(dev);
1205 1221 if (dev->remote)
1222 saa7134_ir_start(dev, dev->remote);
1206 saa7134_hw_enable1(dev); 1223 saa7134_hw_enable1(dev);
1207 saa7134_set_decoder(dev); 1224
1208 saa7134_i2c_call_clients(dev, VIDIOC_S_STD, &dev->tvnorm->id); 1225
1209 saa7134_board_init2(dev); 1226 saa7134_board_init2(dev);
1210 saa7134_hw_enable2(dev);
1211 1227
1228 /*saa7134_hwinit2*/
1229 saa7134_set_tvnorm_hw(dev);
1212 saa7134_tvaudio_setmute(dev); 1230 saa7134_tvaudio_setmute(dev);
1213 saa7134_tvaudio_setvolume(dev, dev->ctl_volume); 1231 saa7134_tvaudio_setvolume(dev, dev->ctl_volume);
1232 saa7134_tvaudio_do_scan(dev);
1214 saa7134_enable_i2s(dev); 1233 saa7134_enable_i2s(dev);
1234 saa7134_hw_enable2(dev);
1215 1235
1216 /*resume unfinished buffer(s)*/ 1236 /*resume unfinished buffer(s)*/
1217 spin_lock_irqsave(&dev->slock, flags); 1237 spin_lock_irqsave(&dev->slock, flags);
@@ -1219,13 +1239,19 @@ static int saa7134_resume(struct pci_dev *pci_dev)
1219 saa7134_buffer_requeue(dev, &dev->vbi_q); 1239 saa7134_buffer_requeue(dev, &dev->vbi_q);
1220 saa7134_buffer_requeue(dev, &dev->ts_q); 1240 saa7134_buffer_requeue(dev, &dev->ts_q);
1221 1241
1242 /* FIXME: Disable DMA audio sound - temporary till proper support
1243 is implemented*/
1244
1245 dev->dmasound.dma_running = 0;
1246
1222 /* start DMA now*/ 1247 /* start DMA now*/
1223 dev->inresume = 0; 1248 dev->insuspend = 0;
1224 saa7134_set_dmabits(dev); 1249 saa7134_set_dmabits(dev);
1225 spin_unlock_irqrestore(&dev->slock, flags); 1250 spin_unlock_irqrestore(&dev->slock, flags);
1226 1251
1227 return 0; 1252 return 0;
1228} 1253}
1254#endif
1229 1255
1230/* ----------------------------------------------------------- */ 1256/* ----------------------------------------------------------- */
1231 1257
@@ -1262,8 +1288,10 @@ static struct pci_driver saa7134_pci_driver = {
1262 .id_table = saa7134_pci_tbl, 1288 .id_table = saa7134_pci_tbl,
1263 .probe = saa7134_initdev, 1289 .probe = saa7134_initdev,
1264 .remove = __devexit_p(saa7134_finidev), 1290 .remove = __devexit_p(saa7134_finidev),
1291#ifdef CONFIG_PM
1265 .suspend = saa7134_suspend, 1292 .suspend = saa7134_suspend,
1266 .resume = saa7134_resume 1293 .resume = saa7134_resume
1294#endif
1267}; 1295};
1268 1296
1269static int saa7134_init(void) 1297static int saa7134_init(void)
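
The saa7134-core changes above follow the usual conditional power-management shape: quiesce interrupts, record an "in suspend" flag that the IRQ handler and saa7134_set_dmabits() test, and only hook suspend/resume into the pci_driver when CONFIG_PM is set. A generic, hedged sketch of that shape; all my_* names and the insuspend field are placeholders, not saa7134 code.

#include <linux/pci.h>
#include <linux/interrupt.h>

struct my_dev {
	int insuspend;			/* tested by IRQ and DMA paths */
};

void my_disable_irqs(struct my_dev *dev);	/* hardware specific, elsewhere */
void my_reinit_hw(struct my_dev *dev);		/* hardware specific, elsewhere */

#ifdef CONFIG_PM
static int my_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct my_dev *dev = pci_get_drvdata(pdev);

	my_disable_irqs(dev);
	synchronize_irq(pdev->irq);	/* no handler still running... */
	dev->insuspend = 1;		/* ...and none will touch the chip */

	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	pci_save_state(pdev);
	return 0;
}

static int my_resume(struct pci_dev *pdev)
{
	struct my_dev *dev = pci_get_drvdata(pdev);

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	my_reinit_hw(dev);		/* reprogram everything lost in D3 */
	dev->insuspend = 0;		/* DMA may be switched on again */
	return 0;
}
#endif

static struct pci_driver my_pci_driver = {
	.name     = "example",
	/* .id_table, .probe and .remove as usual */
#ifdef CONFIG_PM
	.suspend  = my_suspend,
	.resume   = my_resume,
#endif
};
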
diff --git a/drivers/media/video/saa7134/saa7134-empress.c b/drivers/media/video/saa7134/saa7134-empress.c
index 34ca874dd7fe..75d0c5bf46d2 100644
--- a/drivers/media/video/saa7134/saa7134-empress.c
+++ b/drivers/media/video/saa7134/saa7134-empress.c
@@ -284,17 +284,6 @@ static int ts_do_ioctl(struct inode *inode, struct file *file,
284 case VIDIOC_S_CTRL: 284 case VIDIOC_S_CTRL:
285 return saa7134_common_ioctl(dev, cmd, arg); 285 return saa7134_common_ioctl(dev, cmd, arg);
286 286
287 case VIDIOC_S_MPEGCOMP:
288 printk(KERN_WARNING "VIDIOC_S_MPEGCOMP is obsolete. "
289 "Replace with VIDIOC_S_EXT_CTRLS!");
290 saa7134_i2c_call_clients(dev, VIDIOC_S_MPEGCOMP, arg);
291 ts_init_encoder(dev);
292 return 0;
293 case VIDIOC_G_MPEGCOMP:
294 printk(KERN_WARNING "VIDIOC_G_MPEGCOMP is obsolete. "
295 "Replace with VIDIOC_G_EXT_CTRLS!");
296 saa7134_i2c_call_clients(dev, VIDIOC_G_MPEGCOMP, arg);
297 return 0;
298 case VIDIOC_S_EXT_CTRLS: 287 case VIDIOC_S_EXT_CTRLS:
299 /* count == 0 is abused in saa6752hs.c, so that special 288 /* count == 0 is abused in saa6752hs.c, so that special
300 case is handled here explicitly. */ 289 case is handled here explicitly. */
@@ -342,7 +331,6 @@ static struct video_device saa7134_empress_template =
342 .name = "saa7134-empress", 331 .name = "saa7134-empress",
343 .type = 0 /* FIXME */, 332 .type = 0 /* FIXME */,
344 .type2 = 0 /* FIXME */, 333 .type2 = 0 /* FIXME */,
345 .hardware = 0,
346 .fops = &ts_fops, 334 .fops = &ts_fops,
347 .minor = -1, 335 .minor = -1,
348}; 336};
diff --git a/drivers/media/video/saa7134/saa7134-input.c b/drivers/media/video/saa7134/saa7134-input.c
index 80d2644f765a..3abaa1b8ac9d 100644
--- a/drivers/media/video/saa7134/saa7134-input.c
+++ b/drivers/media/video/saa7134/saa7134-input.c
@@ -44,6 +44,14 @@ module_param(ir_rc5_remote_gap, int, 0644);
44static int ir_rc5_key_timeout = 115; 44static int ir_rc5_key_timeout = 115;
45module_param(ir_rc5_key_timeout, int, 0644); 45module_param(ir_rc5_key_timeout, int, 0644);
46 46
47static int repeat_delay = 500;
48module_param(repeat_delay, int, 0644);
49MODULE_PARM_DESC(repeat_delay, "delay before key repeat started");
50static int repeat_period = 33;
51module_param(repeat_period, int, 0644);
 52MODULE_PARM_DESC(repeat_period, "repeat period between "
 53		"keypresses when key is down");
54
47#define dprintk(fmt, arg...) if (ir_debug) \ 55#define dprintk(fmt, arg...) if (ir_debug) \
48 printk(KERN_DEBUG "%s/ir: " fmt, dev->name , ## arg) 56 printk(KERN_DEBUG "%s/ir: " fmt, dev->name , ## arg)
49#define i2cdprintk(fmt, arg...) if (ir_debug) \ 57#define i2cdprintk(fmt, arg...) if (ir_debug) \
@@ -59,6 +67,13 @@ static int build_key(struct saa7134_dev *dev)
59 struct card_ir *ir = dev->remote; 67 struct card_ir *ir = dev->remote;
60 u32 gpio, data; 68 u32 gpio, data;
61 69
70 /* here comes the additional handshake steps for some cards */
71 switch (dev->board) {
72 case SAA7134_BOARD_GOTVIEW_7135:
73 saa_setb(SAA7134_GPIO_GPSTATUS1, 0x80);
74 saa_clearb(SAA7134_GPIO_GPSTATUS1, 0x80);
75 break;
76 }
62 /* rising SAA7134_GPIO_GPRESCAN reads the status */ 77 /* rising SAA7134_GPIO_GPRESCAN reads the status */
63 saa_clearb(SAA7134_GPIO_GPMODE3,SAA7134_GPIO_GPRESCAN); 78 saa_clearb(SAA7134_GPIO_GPMODE3,SAA7134_GPIO_GPRESCAN);
64 saa_setb(SAA7134_GPIO_GPMODE3,SAA7134_GPIO_GPRESCAN); 79 saa_setb(SAA7134_GPIO_GPMODE3,SAA7134_GPIO_GPRESCAN);
@@ -159,7 +174,7 @@ static void saa7134_input_timer(unsigned long data)
159 mod_timer(&ir->timer, jiffies + msecs_to_jiffies(ir->polling)); 174 mod_timer(&ir->timer, jiffies + msecs_to_jiffies(ir->polling));
160} 175}
161 176
162static void saa7134_ir_start(struct saa7134_dev *dev, struct card_ir *ir) 177void saa7134_ir_start(struct saa7134_dev *dev, struct card_ir *ir)
163{ 178{
164 if (ir->polling) { 179 if (ir->polling) {
165 setup_timer(&ir->timer, saa7134_input_timer, 180 setup_timer(&ir->timer, saa7134_input_timer,
@@ -182,7 +197,7 @@ static void saa7134_ir_start(struct saa7134_dev *dev, struct card_ir *ir)
182 } 197 }
183} 198}
184 199
185static void saa7134_ir_stop(struct saa7134_dev *dev) 200void saa7134_ir_stop(struct saa7134_dev *dev)
186{ 201{
187 if (dev->remote->polling) 202 if (dev->remote->polling)
188 del_timer_sync(&dev->remote->timer); 203 del_timer_sync(&dev->remote->timer);
@@ -285,10 +300,10 @@ int saa7134_input_init1(struct saa7134_dev *dev)
285 break; 300 break;
286 case SAA7134_BOARD_GOTVIEW_7135: 301 case SAA7134_BOARD_GOTVIEW_7135:
287 ir_codes = ir_codes_gotview7135; 302 ir_codes = ir_codes_gotview7135;
288 mask_keycode = 0x0003EC; 303 mask_keycode = 0x0003CC;
289 mask_keyup = 0x008000;
290 mask_keydown = 0x000010; 304 mask_keydown = 0x000010;
291 polling = 50; // ms 305 polling = 5; /* ms */
306 saa_setb(SAA7134_GPIO_GPMODE1, 0x80);
292 break; 307 break;
293 case SAA7134_BOARD_VIDEOMATE_TV_PVR: 308 case SAA7134_BOARD_VIDEOMATE_TV_PVR:
294 case SAA7134_BOARD_VIDEOMATE_GOLD_PLUS: 309 case SAA7134_BOARD_VIDEOMATE_GOLD_PLUS:
@@ -386,6 +401,10 @@ int saa7134_input_init1(struct saa7134_dev *dev)
386 if (err) 401 if (err)
387 goto err_out_stop; 402 goto err_out_stop;
388 403
404 /* the remote isn't as bouncy as a keyboard */
405 ir->dev->rep[REP_DELAY] = repeat_delay;
406 ir->dev->rep[REP_PERIOD] = repeat_period;
407
389 return 0; 408 return 0;
390 409
391 err_out_stop: 410 err_out_stop:
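
The repeat_delay/repeat_period parameters added above simply program the input core's autorepeat for the remote's input device, instead of leaving it at keyboard-oriented defaults. A small sketch of the same idea for a generic IR input device; the names are illustrative, not saa7134 code.

#include <linux/module.h>
#include <linux/input.h>

static int repeat_delay = 500;		/* ms before autorepeat kicks in */
module_param(repeat_delay, int, 0644);
static int repeat_period = 33;		/* ms between repeated keypresses */
module_param(repeat_period, int, 0644);

static int my_register_remote(struct input_dev *input)
{
	int err = input_register_device(input);

	if (err)
		return err;
	/* a remote is far less bouncy than a keyboard, so override the
	 * defaults the input core installed at registration time */
	input->rep[REP_DELAY]  = repeat_delay;
	input->rep[REP_PERIOD] = repeat_period;
	return 0;
}
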
diff --git a/drivers/media/video/saa7134/saa7134-tvaudio.c b/drivers/media/video/saa7134/saa7134-tvaudio.c
index 1b9e39a5ea47..f8e304c76232 100644
--- a/drivers/media/video/saa7134/saa7134-tvaudio.c
+++ b/drivers/media/video/saa7134/saa7134-tvaudio.c
@@ -27,6 +27,7 @@
27#include <linux/kthread.h> 27#include <linux/kthread.h>
28#include <linux/slab.h> 28#include <linux/slab.h>
29#include <linux/delay.h> 29#include <linux/delay.h>
30#include <linux/freezer.h>
30#include <asm/div64.h> 31#include <asm/div64.h>
31 32
32#include "saa7134-reg.h" 33#include "saa7134-reg.h"
@@ -231,7 +232,7 @@ static void mute_input_7134(struct saa7134_dev *dev)
231 } 232 }
232 233
233 if (dev->hw_mute == mute && 234 if (dev->hw_mute == mute &&
234 dev->hw_input == in && !dev->inresume) { 235 dev->hw_input == in && !dev->insuspend) {
235 dprintk("mute/input: nothing to do [mute=%d,input=%s]\n", 236 dprintk("mute/input: nothing to do [mute=%d,input=%s]\n",
236 mute,in->name); 237 mute,in->name);
237 return; 238 return;
@@ -502,13 +503,17 @@ static int tvaudio_thread(void *data)
502 unsigned int i, audio, nscan; 503 unsigned int i, audio, nscan;
503 int max1,max2,carrier,rx,mode,lastmode,default_carrier; 504 int max1,max2,carrier,rx,mode,lastmode,default_carrier;
504 505
505 allow_signal(SIGTERM); 506
507 set_freezable();
508
506 for (;;) { 509 for (;;) {
507 tvaudio_sleep(dev,-1); 510 tvaudio_sleep(dev,-1);
508 if (kthread_should_stop() || signal_pending(current)) 511 if (kthread_should_stop())
509 goto done; 512 goto done;
510 513
511 restart: 514 restart:
515 try_to_freeze();
516
512 dev->thread.scan1 = dev->thread.scan2; 517 dev->thread.scan1 = dev->thread.scan2;
513 dprintk("tvaudio thread scan start [%d]\n",dev->thread.scan1); 518 dprintk("tvaudio thread scan start [%d]\n",dev->thread.scan1);
514 dev->tvaudio = NULL; 519 dev->tvaudio = NULL;
@@ -612,9 +617,12 @@ static int tvaudio_thread(void *data)
612 617
613 lastmode = 42; 618 lastmode = 42;
614 for (;;) { 619 for (;;) {
620
621 try_to_freeze();
622
615 if (tvaudio_sleep(dev,5000)) 623 if (tvaudio_sleep(dev,5000))
616 goto restart; 624 goto restart;
617 if (kthread_should_stop() || signal_pending(current)) 625 if (kthread_should_stop())
618 break; 626 break;
619 if (UNSET == dev->thread.mode) { 627 if (UNSET == dev->thread.mode) {
620 rx = tvaudio_getstereo(dev,&tvaudio[i]); 628 rx = tvaudio_getstereo(dev,&tvaudio[i]);
@@ -630,6 +638,7 @@ static int tvaudio_thread(void *data)
630 } 638 }
631 639
632 done: 640 done:
641 dev->thread.stopped = 1;
633 return 0; 642 return 0;
634} 643}
635 644
@@ -777,7 +786,8 @@ static int tvaudio_thread_ddep(void *data)
777 struct saa7134_dev *dev = data; 786 struct saa7134_dev *dev = data;
778 u32 value, norms, clock; 787 u32 value, norms, clock;
779 788
780 allow_signal(SIGTERM); 789
790 set_freezable();
781 791
782 clock = saa7134_boards[dev->board].audio_clock; 792 clock = saa7134_boards[dev->board].audio_clock;
783 if (UNSET != audio_clock_override) 793 if (UNSET != audio_clock_override)
@@ -790,10 +800,13 @@ static int tvaudio_thread_ddep(void *data)
790 800
791 for (;;) { 801 for (;;) {
792 tvaudio_sleep(dev,-1); 802 tvaudio_sleep(dev,-1);
793 if (kthread_should_stop() || signal_pending(current)) 803 if (kthread_should_stop())
794 goto done; 804 goto done;
795 805
796 restart: 806 restart:
807
808 try_to_freeze();
809
797 dev->thread.scan1 = dev->thread.scan2; 810 dev->thread.scan1 = dev->thread.scan2;
798 dprintk("tvaudio thread scan start [%d]\n",dev->thread.scan1); 811 dprintk("tvaudio thread scan start [%d]\n",dev->thread.scan1);
799 812
@@ -870,6 +883,7 @@ static int tvaudio_thread_ddep(void *data)
870 } 883 }
871 884
872 done: 885 done:
886 dev->thread.stopped = 1;
873 return 0; 887 return 0;
874} 888}
875 889
@@ -997,7 +1011,7 @@ int saa7134_tvaudio_init2(struct saa7134_dev *dev)
997int saa7134_tvaudio_fini(struct saa7134_dev *dev) 1011int saa7134_tvaudio_fini(struct saa7134_dev *dev)
998{ 1012{
999 /* shutdown tvaudio thread */ 1013 /* shutdown tvaudio thread */
1000 if (dev->thread.thread) 1014 if (dev->thread.thread && !dev->thread.stopped)
1001 kthread_stop(dev->thread.thread); 1015 kthread_stop(dev->thread.thread);
1002 1016
1003 saa_andorb(SAA7134_ANALOG_IO_SELECT, 0x07, 0x00); /* LINE1 */ 1017 saa_andorb(SAA7134_ANALOG_IO_SELECT, 0x07, 0x00); /* LINE1 */
@@ -1013,7 +1027,9 @@ int saa7134_tvaudio_do_scan(struct saa7134_dev *dev)
1013 } else if (dev->thread.thread) { 1027 } else if (dev->thread.thread) {
1014 dev->thread.mode = UNSET; 1028 dev->thread.mode = UNSET;
1015 dev->thread.scan2++; 1029 dev->thread.scan2++;
1016 wake_up_process(dev->thread.thread); 1030
1031 if (!dev->insuspend && !dev->thread.stopped)
1032 wake_up_process(dev->thread.thread);
1017 } else { 1033 } else {
1018 dev->automute = 0; 1034 dev->automute = 0;
1019 saa7134_tvaudio_setmute(dev); 1035 saa7134_tvaudio_setmute(dev);
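
Both tvaudio threads above are converted from the old allow_signal(SIGTERM)/signal_pending() idiom to the freezer API, so that suspend freezes them instead of racing with them, and a thread.stopped flag keeps the fini path from calling kthread_stop() on a thread that already exited. The freezable loop itself reduces to the following sketch; the workload is left as a comment.

#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/sched.h>
#include <linux/jiffies.h>

static int my_scan_thread(void *data)
{
	set_freezable();		/* opt in: the freezer may park us */

	while (!kthread_should_stop()) {
		try_to_freeze();	/* sits here across suspend/resume */
		/* ... one scan pass over the hardware goes here ... */
		schedule_timeout_interruptible(msecs_to_jiffies(5000));
	}
	return 0;			/* kthread_stop() collects this */
}
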
diff --git a/drivers/media/video/saa7134/saa7134-video.c b/drivers/media/video/saa7134/saa7134-video.c
index 471b92793c12..3b9ffb4b648a 100644
--- a/drivers/media/video/saa7134/saa7134-video.c
+++ b/drivers/media/video/saa7134/saa7134-video.c
@@ -560,15 +560,8 @@ void set_tvnorm(struct saa7134_dev *dev, struct saa7134_tvnorm *norm)
560 560
561 dev->crop_current = dev->crop_defrect; 561 dev->crop_current = dev->crop_defrect;
562 562
563 saa7134_set_decoder(dev); 563 saa7134_set_tvnorm_hw(dev);
564 564
565 if (card_in(dev, dev->ctl_input).tv) {
566 if ((card(dev).tuner_type == TUNER_PHILIPS_TDA8290)
567 && ((card(dev).tuner_config == 1)
568 || (card(dev).tuner_config == 2)))
569 saa7134_set_gpio(dev, 22, 5);
570 saa7134_i2c_call_clients(dev, VIDIOC_S_STD, &norm->id);
571 }
572} 565}
573 566
574static void video_mux(struct saa7134_dev *dev, int input) 567static void video_mux(struct saa7134_dev *dev, int input)
@@ -579,7 +572,8 @@ static void video_mux(struct saa7134_dev *dev, int input)
579 saa7134_tvaudio_setinput(dev, &card_in(dev, input)); 572 saa7134_tvaudio_setinput(dev, &card_in(dev, input));
580} 573}
581 574
582void saa7134_set_decoder(struct saa7134_dev *dev) 575
576static void saa7134_set_decoder(struct saa7134_dev *dev)
583{ 577{
584 int luma_control, sync_control, mux; 578 int luma_control, sync_control, mux;
585 579
@@ -630,6 +624,19 @@ void saa7134_set_decoder(struct saa7134_dev *dev)
630 saa_writeb(SAA7134_RAW_DATA_OFFSET, 0x80); 624 saa_writeb(SAA7134_RAW_DATA_OFFSET, 0x80);
631} 625}
632 626
627void saa7134_set_tvnorm_hw(struct saa7134_dev *dev)
628{
629 saa7134_set_decoder(dev);
630
631 if (card_in(dev, dev->ctl_input).tv) {
632 if ((card(dev).tuner_type == TUNER_PHILIPS_TDA8290)
633 && ((card(dev).tuner_config == 1)
634 || (card(dev).tuner_config == 2)))
635 saa7134_set_gpio(dev, 22, 5);
636 saa7134_i2c_call_clients(dev, VIDIOC_S_STD, &dev->tvnorm->id);
637 }
638}
639
633static void set_h_prescale(struct saa7134_dev *dev, int task, int prescale) 640static void set_h_prescale(struct saa7134_dev *dev, int task, int prescale)
634{ 641{
635 static const struct { 642 static const struct {
@@ -2352,7 +2359,6 @@ struct video_device saa7134_video_template =
2352 .name = "saa7134-video", 2359 .name = "saa7134-video",
2353 .type = VID_TYPE_CAPTURE|VID_TYPE_TUNER| 2360 .type = VID_TYPE_CAPTURE|VID_TYPE_TUNER|
2354 VID_TYPE_CLIPPING|VID_TYPE_SCALES, 2361 VID_TYPE_CLIPPING|VID_TYPE_SCALES,
2355 .hardware = 0,
2356 .fops = &video_fops, 2362 .fops = &video_fops,
2357 .minor = -1, 2363 .minor = -1,
2358}; 2364};
@@ -2361,7 +2367,6 @@ struct video_device saa7134_vbi_template =
2361{ 2367{
2362 .name = "saa7134-vbi", 2368 .name = "saa7134-vbi",
2363 .type = VID_TYPE_TUNER|VID_TYPE_TELETEXT, 2369 .type = VID_TYPE_TUNER|VID_TYPE_TELETEXT,
2364 .hardware = 0,
2365 .fops = &video_fops, 2370 .fops = &video_fops,
2366 .minor = -1, 2371 .minor = -1,
2367}; 2372};
@@ -2370,7 +2375,6 @@ struct video_device saa7134_radio_template =
2370{ 2375{
2371 .name = "saa7134-radio", 2376 .name = "saa7134-radio",
2372 .type = VID_TYPE_TUNER, 2377 .type = VID_TYPE_TUNER,
2373 .hardware = 0,
2374 .fops = &radio_fops, 2378 .fops = &radio_fops,
2375 .minor = -1, 2379 .minor = -1,
2376}; 2380};
diff --git a/drivers/media/video/saa7134/saa7134.h b/drivers/media/video/saa7134/saa7134.h
index 28ec6804bd5d..66a390c321a7 100644
--- a/drivers/media/video/saa7134/saa7134.h
+++ b/drivers/media/video/saa7134/saa7134.h
@@ -333,6 +333,7 @@ struct saa7134_thread {
333 unsigned int scan1; 333 unsigned int scan1;
334 unsigned int scan2; 334 unsigned int scan2;
335 unsigned int mode; 335 unsigned int mode;
336 unsigned int stopped;
336}; 337};
337 338
338/* buffer for one video/vbi/ts frame */ 339/* buffer for one video/vbi/ts frame */
@@ -524,7 +525,7 @@ struct saa7134_dev {
524 unsigned int hw_mute; 525 unsigned int hw_mute;
525 int last_carrier; 526 int last_carrier;
526 int nosignal; 527 int nosignal;
527 unsigned int inresume; 528 unsigned int insuspend;
528 529
529 /* SAA7134_MPEG_* */ 530 /* SAA7134_MPEG_* */
530 struct saa7134_ts ts; 531 struct saa7134_ts ts;
@@ -632,7 +633,7 @@ extern struct video_device saa7134_radio_template;
632 633
633void set_tvnorm(struct saa7134_dev *dev, struct saa7134_tvnorm *norm); 634void set_tvnorm(struct saa7134_dev *dev, struct saa7134_tvnorm *norm);
634int saa7134_videoport_init(struct saa7134_dev *dev); 635int saa7134_videoport_init(struct saa7134_dev *dev);
635void saa7134_set_decoder(struct saa7134_dev *dev); 636void saa7134_set_tvnorm_hw(struct saa7134_dev *dev);
636 637
637int saa7134_common_ioctl(struct saa7134_dev *dev, 638int saa7134_common_ioctl(struct saa7134_dev *dev,
638 unsigned int cmd, void *arg); 639 unsigned int cmd, void *arg);
@@ -706,6 +707,8 @@ int saa7134_input_init1(struct saa7134_dev *dev);
706void saa7134_input_fini(struct saa7134_dev *dev); 707void saa7134_input_fini(struct saa7134_dev *dev);
707void saa7134_input_irq(struct saa7134_dev *dev); 708void saa7134_input_irq(struct saa7134_dev *dev);
708void saa7134_set_i2c_ir(struct saa7134_dev *dev, struct IR_i2c *ir); 709void saa7134_set_i2c_ir(struct saa7134_dev *dev, struct IR_i2c *ir);
710void saa7134_ir_start(struct saa7134_dev *dev, struct card_ir *ir);
711void saa7134_ir_stop(struct saa7134_dev *dev);
709 712
710 713
711/* 714/*
diff --git a/drivers/media/video/se401.c b/drivers/media/video/se401.c
index 93fb04ed99a0..d5d7d6cf734a 100644
--- a/drivers/media/video/se401.c
+++ b/drivers/media/video/se401.c
@@ -1231,7 +1231,6 @@ static struct video_device se401_template = {
1231 .owner = THIS_MODULE, 1231 .owner = THIS_MODULE,
1232 .name = "se401 USB camera", 1232 .name = "se401 USB camera",
1233 .type = VID_TYPE_CAPTURE, 1233 .type = VID_TYPE_CAPTURE,
1234 .hardware = VID_HARDWARE_SE401,
1235 .fops = &se401_fops, 1234 .fops = &se401_fops,
1236}; 1235};
1237 1236
diff --git a/drivers/media/video/sn9c102/sn9c102_core.c b/drivers/media/video/sn9c102/sn9c102_core.c
index 6991e06f7651..511847912c48 100644
--- a/drivers/media/video/sn9c102/sn9c102_core.c
+++ b/drivers/media/video/sn9c102/sn9c102_core.c
@@ -3319,7 +3319,6 @@ sn9c102_usb_probe(struct usb_interface* intf, const struct usb_device_id* id)
3319 strcpy(cam->v4ldev->name, "SN9C1xx PC Camera"); 3319 strcpy(cam->v4ldev->name, "SN9C1xx PC Camera");
3320 cam->v4ldev->owner = THIS_MODULE; 3320 cam->v4ldev->owner = THIS_MODULE;
3321 cam->v4ldev->type = VID_TYPE_CAPTURE | VID_TYPE_SCALES; 3321 cam->v4ldev->type = VID_TYPE_CAPTURE | VID_TYPE_SCALES;
3322 cam->v4ldev->hardware = 0;
3323 cam->v4ldev->fops = &sn9c102_fops; 3322 cam->v4ldev->fops = &sn9c102_fops;
3324 cam->v4ldev->minor = video_nr[dev_nr]; 3323 cam->v4ldev->minor = video_nr[dev_nr];
3325 cam->v4ldev->release = video_device_release; 3324 cam->v4ldev->release = video_device_release;
diff --git a/drivers/media/video/stradis.c b/drivers/media/video/stradis.c
index eb220461ac77..3fb85af5d1f2 100644
--- a/drivers/media/video/stradis.c
+++ b/drivers/media/video/stradis.c
@@ -1917,7 +1917,6 @@ static const struct file_operations saa_fops = {
1917static struct video_device saa_template = { 1917static struct video_device saa_template = {
1918 .name = "SAA7146A", 1918 .name = "SAA7146A",
1919 .type = VID_TYPE_CAPTURE | VID_TYPE_OVERLAY, 1919 .type = VID_TYPE_CAPTURE | VID_TYPE_OVERLAY,
1920 .hardware = VID_HARDWARE_SAA7146,
1921 .fops = &saa_fops, 1920 .fops = &saa_fops,
1922 .minor = -1, 1921 .minor = -1,
1923}; 1922};
diff --git a/drivers/media/video/stv680.c b/drivers/media/video/stv680.c
index 9e009a7ab863..afc32aa56fde 100644
--- a/drivers/media/video/stv680.c
+++ b/drivers/media/video/stv680.c
@@ -1398,7 +1398,6 @@ static struct video_device stv680_template = {
1398 .owner = THIS_MODULE, 1398 .owner = THIS_MODULE,
1399 .name = "STV0680 USB camera", 1399 .name = "STV0680 USB camera",
1400 .type = VID_TYPE_CAPTURE, 1400 .type = VID_TYPE_CAPTURE,
1401 .hardware = VID_HARDWARE_SE401,
1402 .fops = &stv680_fops, 1401 .fops = &stv680_fops,
1403 .release = video_device_release, 1402 .release = video_device_release,
1404 .minor = -1, 1403 .minor = -1,
diff --git a/drivers/media/video/tuner-core.c b/drivers/media/video/tuner-core.c
index 94843086cda9..6a777604f070 100644
--- a/drivers/media/video/tuner-core.c
+++ b/drivers/media/video/tuner-core.c
@@ -113,7 +113,7 @@ static void fe_standby(struct tuner *t)
113static int fe_has_signal(struct tuner *t) 113static int fe_has_signal(struct tuner *t)
114{ 114{
115 struct dvb_tuner_ops *fe_tuner_ops = &t->fe.ops.tuner_ops; 115 struct dvb_tuner_ops *fe_tuner_ops = &t->fe.ops.tuner_ops;
116 u16 strength; 116 u16 strength = 0;
117 117
118 if (fe_tuner_ops->get_rf_strength) 118 if (fe_tuner_ops->get_rf_strength)
119 fe_tuner_ops->get_rf_strength(&t->fe, &strength); 119 fe_tuner_ops->get_rf_strength(&t->fe, &strength);
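
The one-character tuner-core fix above matters because get_rf_strength is optional: when the tuner does not provide it, the old code returned whatever happened to be on the stack. The defensive shape, as a generic sketch:

#include "dvb_frontend.h"	/* dvb-core header, as included by media drivers */

static int my_has_signal(struct dvb_frontend *fe)
{
	u16 strength = 0;	/* stays 0 if the tuner cannot report RF level */

	if (fe->ops.tuner_ops.get_rf_strength)
		fe->ops.tuner_ops.get_rf_strength(fe, &strength);
	return strength;
}
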
diff --git a/drivers/media/video/usbvideo/usbvideo.c b/drivers/media/video/usbvideo/usbvideo.c
index 37ce36b9e587..fb434b5602a3 100644
--- a/drivers/media/video/usbvideo/usbvideo.c
+++ b/drivers/media/video/usbvideo/usbvideo.c
@@ -952,7 +952,6 @@ static const struct file_operations usbvideo_fops = {
952static const struct video_device usbvideo_template = { 952static const struct video_device usbvideo_template = {
953 .owner = THIS_MODULE, 953 .owner = THIS_MODULE,
954 .type = VID_TYPE_CAPTURE, 954 .type = VID_TYPE_CAPTURE,
955 .hardware = VID_HARDWARE_CPIA,
956 .fops = &usbvideo_fops, 955 .fops = &usbvideo_fops,
957}; 956};
958 957
diff --git a/drivers/media/video/usbvideo/vicam.c b/drivers/media/video/usbvideo/vicam.c
index db3c9e3deb26..da1ba0211108 100644
--- a/drivers/media/video/usbvideo/vicam.c
+++ b/drivers/media/video/usbvideo/vicam.c
@@ -1074,7 +1074,6 @@ static struct video_device vicam_template = {
1074 .owner = THIS_MODULE, 1074 .owner = THIS_MODULE,
1075 .name = "ViCam-based USB Camera", 1075 .name = "ViCam-based USB Camera",
1076 .type = VID_TYPE_CAPTURE, 1076 .type = VID_TYPE_CAPTURE,
1077 .hardware = VID_HARDWARE_VICAM,
1078 .fops = &vicam_fops, 1077 .fops = &vicam_fops,
1079 .minor = -1, 1078 .minor = -1,
1080}; 1079};
diff --git a/drivers/media/video/usbvision/usbvision-video.c b/drivers/media/video/usbvision/usbvision-video.c
index e2f3c01cfa13..36e689fa16c0 100644
--- a/drivers/media/video/usbvision/usbvision-video.c
+++ b/drivers/media/video/usbvision/usbvision-video.c
@@ -1400,7 +1400,6 @@ static const struct file_operations usbvision_fops = {
1400static struct video_device usbvision_video_template = { 1400static struct video_device usbvision_video_template = {
1401 .owner = THIS_MODULE, 1401 .owner = THIS_MODULE,
1402 .type = VID_TYPE_TUNER | VID_TYPE_CAPTURE, 1402 .type = VID_TYPE_TUNER | VID_TYPE_CAPTURE,
1403 .hardware = VID_HARDWARE_USBVISION,
1404 .fops = &usbvision_fops, 1403 .fops = &usbvision_fops,
1405 .name = "usbvision-video", 1404 .name = "usbvision-video",
1406 .release = video_device_release, 1405 .release = video_device_release,
@@ -1455,7 +1454,6 @@ static struct video_device usbvision_radio_template=
1455{ 1454{
1456 .owner = THIS_MODULE, 1455 .owner = THIS_MODULE,
1457 .type = VID_TYPE_TUNER, 1456 .type = VID_TYPE_TUNER,
1458 .hardware = VID_HARDWARE_USBVISION,
1459 .fops = &usbvision_radio_fops, 1457 .fops = &usbvision_radio_fops,
1460 .name = "usbvision-radio", 1458 .name = "usbvision-radio",
1461 .release = video_device_release, 1459 .release = video_device_release,
@@ -1492,7 +1490,6 @@ static struct video_device usbvision_vbi_template=
1492{ 1490{
1493 .owner = THIS_MODULE, 1491 .owner = THIS_MODULE,
1494 .type = VID_TYPE_TUNER, 1492 .type = VID_TYPE_TUNER,
1495 .hardware = VID_HARDWARE_USBVISION,
1496 .fops = &usbvision_vbi_fops, 1493 .fops = &usbvision_vbi_fops,
1497 .release = video_device_release, 1494 .release = video_device_release,
1498 .name = "usbvision-vbi", 1495 .name = "usbvision-vbi",
diff --git a/drivers/media/video/v4l2-common.c b/drivers/media/video/v4l2-common.c
index 321249240d05..1141b4bf41ce 100644
--- a/drivers/media/video/v4l2-common.c
+++ b/drivers/media/video/v4l2-common.c
@@ -317,8 +317,6 @@ static const char *v4l2_ioctls[] = {
317 [_IOC_NR(VIDIOC_ENUM_FMT)] = "VIDIOC_ENUM_FMT", 317 [_IOC_NR(VIDIOC_ENUM_FMT)] = "VIDIOC_ENUM_FMT",
318 [_IOC_NR(VIDIOC_G_FMT)] = "VIDIOC_G_FMT", 318 [_IOC_NR(VIDIOC_G_FMT)] = "VIDIOC_G_FMT",
319 [_IOC_NR(VIDIOC_S_FMT)] = "VIDIOC_S_FMT", 319 [_IOC_NR(VIDIOC_S_FMT)] = "VIDIOC_S_FMT",
320 [_IOC_NR(VIDIOC_G_MPEGCOMP)] = "VIDIOC_G_MPEGCOMP",
321 [_IOC_NR(VIDIOC_S_MPEGCOMP)] = "VIDIOC_S_MPEGCOMP",
322 [_IOC_NR(VIDIOC_REQBUFS)] = "VIDIOC_REQBUFS", 320 [_IOC_NR(VIDIOC_REQBUFS)] = "VIDIOC_REQBUFS",
323 [_IOC_NR(VIDIOC_QUERYBUF)] = "VIDIOC_QUERYBUF", 321 [_IOC_NR(VIDIOC_QUERYBUF)] = "VIDIOC_QUERYBUF",
324 [_IOC_NR(VIDIOC_G_FBUF)] = "VIDIOC_G_FBUF", 322 [_IOC_NR(VIDIOC_G_FBUF)] = "VIDIOC_G_FBUF",
diff --git a/drivers/media/video/videobuf-core.c b/drivers/media/video/videobuf-core.c
index 5599a36490fc..89a44f16f0ba 100644
--- a/drivers/media/video/videobuf-core.c
+++ b/drivers/media/video/videobuf-core.c
@@ -967,6 +967,7 @@ int videobuf_cgmbuf(struct videobuf_queue *q,
967 967
968 return 0; 968 return 0;
969} 969}
970EXPORT_SYMBOL_GPL(videobuf_cgmbuf);
970#endif 971#endif
971 972
972/* --------------------------------------------------------------------- */ 973/* --------------------------------------------------------------------- */
@@ -985,7 +986,6 @@ EXPORT_SYMBOL_GPL(videobuf_reqbufs);
985EXPORT_SYMBOL_GPL(videobuf_querybuf); 986EXPORT_SYMBOL_GPL(videobuf_querybuf);
986EXPORT_SYMBOL_GPL(videobuf_qbuf); 987EXPORT_SYMBOL_GPL(videobuf_qbuf);
987EXPORT_SYMBOL_GPL(videobuf_dqbuf); 988EXPORT_SYMBOL_GPL(videobuf_dqbuf);
988EXPORT_SYMBOL_GPL(videobuf_cgmbuf);
989EXPORT_SYMBOL_GPL(videobuf_streamon); 989EXPORT_SYMBOL_GPL(videobuf_streamon);
990EXPORT_SYMBOL_GPL(videobuf_streamoff); 990EXPORT_SYMBOL_GPL(videobuf_streamoff);
991 991
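
The videobuf-core hunks above move EXPORT_SYMBOL_GPL(videobuf_cgmbuf) up so it sits inside the same #ifdef as the function itself rather than in the unconditional export list at the bottom of the file. Schematically it looks like the fragment below; the config symbol is assumed to be the V4L1 compat option (only the closing #endif is visible in the hunk) and the argument list past the first line is paraphrased, not copied.

#ifdef CONFIG_VIDEO_V4L1_COMPAT
int videobuf_cgmbuf(struct videobuf_queue *q,
		    struct video_mbuf *mbuf, int count)
{
	/* ... V4L1 VIDIOCGMBUF compatibility helper ... */
	return 0;
}
EXPORT_SYMBOL_GPL(videobuf_cgmbuf);	/* export lives and dies with the code */
#endif
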
diff --git a/drivers/media/video/videobuf-dma-sg.c b/drivers/media/video/videobuf-dma-sg.c
index 3eb6123227b2..0a18286279d3 100644
--- a/drivers/media/video/videobuf-dma-sg.c
+++ b/drivers/media/video/videobuf-dma-sg.c
@@ -60,12 +60,13 @@ videobuf_vmalloc_to_sg(unsigned char *virt, int nr_pages)
60 sglist = kcalloc(nr_pages, sizeof(struct scatterlist), GFP_KERNEL); 60 sglist = kcalloc(nr_pages, sizeof(struct scatterlist), GFP_KERNEL);
61 if (NULL == sglist) 61 if (NULL == sglist)
62 return NULL; 62 return NULL;
63 sg_init_table(sglist, nr_pages);
63 for (i = 0; i < nr_pages; i++, virt += PAGE_SIZE) { 64 for (i = 0; i < nr_pages; i++, virt += PAGE_SIZE) {
64 pg = vmalloc_to_page(virt); 65 pg = vmalloc_to_page(virt);
65 if (NULL == pg) 66 if (NULL == pg)
66 goto err; 67 goto err;
67 BUG_ON(PageHighMem(pg)); 68 BUG_ON(PageHighMem(pg));
68 sglist[i].page = pg; 69 sg_set_page(&sglist[i], pg);
69 sglist[i].length = PAGE_SIZE; 70 sglist[i].length = PAGE_SIZE;
70 } 71 }
71 return sglist; 72 return sglist;
@@ -86,13 +87,14 @@ videobuf_pages_to_sg(struct page **pages, int nr_pages, int offset)
86 sglist = kcalloc(nr_pages, sizeof(*sglist), GFP_KERNEL); 87 sglist = kcalloc(nr_pages, sizeof(*sglist), GFP_KERNEL);
87 if (NULL == sglist) 88 if (NULL == sglist)
88 return NULL; 89 return NULL;
90 sg_init_table(sglist, nr_pages);
89 91
90 if (NULL == pages[0]) 92 if (NULL == pages[0])
91 goto nopage; 93 goto nopage;
92 if (PageHighMem(pages[0])) 94 if (PageHighMem(pages[0]))
93 /* DMA to highmem pages might not work */ 95 /* DMA to highmem pages might not work */
94 goto highmem; 96 goto highmem;
95 sglist[0].page = pages[0]; 97 sg_set_page(&sglist[0], pages[0]);
96 sglist[0].offset = offset; 98 sglist[0].offset = offset;
97 sglist[0].length = PAGE_SIZE - offset; 99 sglist[0].length = PAGE_SIZE - offset;
98 for (i = 1; i < nr_pages; i++) { 100 for (i = 1; i < nr_pages; i++) {
@@ -100,7 +102,7 @@ videobuf_pages_to_sg(struct page **pages, int nr_pages, int offset)
100 goto nopage; 102 goto nopage;
101 if (PageHighMem(pages[i])) 103 if (PageHighMem(pages[i]))
102 goto highmem; 104 goto highmem;
103 sglist[i].page = pages[i]; 105 sg_set_page(&sglist[i], pages[i]);
104 sglist[i].length = PAGE_SIZE; 106 sglist[i].length = PAGE_SIZE;
105 } 107 }
106 return sglist; 108 return sglist;
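
The videobuf-dma-sg hunks above track the then-new scatterlist API: entries must be initialized with sg_init_table() and pages attached through sg_set_page() rather than by writing sglist[i].page directly, which keeps the end marker and debug checks consistent. The same construction for a vmalloc() buffer, as a hedged sketch with error handling trimmed:

#include <linux/scatterlist.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/mm.h>

/* Build a scatterlist covering nr_pages of a vmalloc()ed buffer. */
static struct scatterlist *my_vmalloc_to_sg(unsigned char *virt, int nr_pages)
{
	struct scatterlist *sglist;
	struct page *pg;
	int i;

	sglist = kcalloc(nr_pages, sizeof(*sglist), GFP_KERNEL);
	if (!sglist)
		return NULL;
	sg_init_table(sglist, nr_pages);	/* sets the end marker */

	for (i = 0; i < nr_pages; i++, virt += PAGE_SIZE) {
		pg = vmalloc_to_page(virt);
		if (!pg)
			goto err;
		/* two-argument sg_set_page(), as used by this series */
		sg_set_page(&sglist[i], pg);
		sglist[i].length = PAGE_SIZE;
	}
	return sglist;
err:
	kfree(sglist);
	return NULL;
}
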
diff --git a/drivers/media/video/videocodec.c b/drivers/media/video/videocodec.c
index f2bbd7a4d562..87951ec8254f 100644
--- a/drivers/media/video/videocodec.c
+++ b/drivers/media/video/videocodec.c
@@ -86,8 +86,8 @@ videocodec_attach (struct videocodec_master *master)
86 } 86 }
87 87
88 dprintk(2, 88 dprintk(2,
89 "videocodec_attach: '%s', type: %x, flags %lx, magic %lx\n", 89 "videocodec_attach: '%s', flags %lx, magic %lx\n",
90 master->name, master->type, master->flags, master->magic); 90 master->name, master->flags, master->magic);
91 91
92 if (!h) { 92 if (!h) {
93 dprintk(1, 93 dprintk(1,
diff --git a/drivers/media/video/videodev.c b/drivers/media/video/videodev.c
index 8d8e517b344f..9611c3990285 100644
--- a/drivers/media/video/videodev.c
+++ b/drivers/media/video/videodev.c
@@ -1313,48 +1313,6 @@ static int __video_do_ioctl(struct inode *inode, struct file *file,
1313 ret=vfd->vidioc_cropcap(file, fh, p); 1313 ret=vfd->vidioc_cropcap(file, fh, p);
1314 break; 1314 break;
1315 } 1315 }
1316 case VIDIOC_G_MPEGCOMP:
1317 {
1318 struct v4l2_mpeg_compression *p=arg;
1319
1320 /*FIXME: Several fields not shown */
1321 if (!vfd->vidioc_g_mpegcomp)
1322 break;
1323 ret=vfd->vidioc_g_mpegcomp(file, fh, p);
1324 if (!ret)
1325 dbgarg (cmd, "ts_pid_pmt=%d, ts_pid_audio=%d,"
1326 " ts_pid_video=%d, ts_pid_pcr=%d, "
1327 "ps_size=%d, au_sample_rate=%d, "
1328 "au_pesid=%c, vi_frame_rate=%d, "
1329 "vi_frames_per_gop=%d, "
1330 "vi_bframes_count=%d, vi_pesid=%c\n",
1331 p->ts_pid_pmt,p->ts_pid_audio,
1332 p->ts_pid_video,p->ts_pid_pcr,
1333 p->ps_size, p->au_sample_rate,
1334 p->au_pesid, p->vi_frame_rate,
1335 p->vi_frames_per_gop,
1336 p->vi_bframes_count, p->vi_pesid);
1337 break;
1338 }
1339 case VIDIOC_S_MPEGCOMP:
1340 {
1341 struct v4l2_mpeg_compression *p=arg;
1342 /*FIXME: Several fields not shown */
1343 if (!vfd->vidioc_s_mpegcomp)
1344 break;
1345 dbgarg (cmd, "ts_pid_pmt=%d, ts_pid_audio=%d, "
1346 "ts_pid_video=%d, ts_pid_pcr=%d, ps_size=%d, "
1347 "au_sample_rate=%d, au_pesid=%c, "
1348 "vi_frame_rate=%d, vi_frames_per_gop=%d, "
1349 "vi_bframes_count=%d, vi_pesid=%c\n",
1350 p->ts_pid_pmt,p->ts_pid_audio, p->ts_pid_video,
1351 p->ts_pid_pcr, p->ps_size, p->au_sample_rate,
1352 p->au_pesid, p->vi_frame_rate,
1353 p->vi_frames_per_gop, p->vi_bframes_count,
1354 p->vi_pesid);
1355 ret=vfd->vidioc_s_mpegcomp(file, fh, p);
1356 break;
1357 }
1358 case VIDIOC_G_JPEGCOMP: 1316 case VIDIOC_G_JPEGCOMP:
1359 { 1317 {
1360 struct v4l2_jpegcompression *p=arg; 1318 struct v4l2_jpegcompression *p=arg;
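
With VIDIOC_G_MPEGCOMP/VIDIOC_S_MPEGCOMP removed from the core ioctl switch above (and from saa6752hs and saa7134-empress earlier in the series), MPEG encoder parameters are expected to travel through the extended control API instead. A minimal hedged example of the userspace side; the control ID comes from videodev2.h and the bitrate value is illustrative.

#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

/* Set the MPEG video bitrate via VIDIOC_S_EXT_CTRLS on an open node 'fd',
 * replacing what VIDIOC_S_MPEGCOMP used to carry. */
static int set_video_bitrate(int fd, __s32 bitrate)
{
	struct v4l2_ext_control ctrl;
	struct v4l2_ext_controls ctrls;

	memset(&ctrl, 0, sizeof(ctrl));
	ctrl.id = V4L2_CID_MPEG_VIDEO_BITRATE;
	ctrl.value = bitrate;			/* e.g. 4000000 for 4 Mbit/s */

	memset(&ctrls, 0, sizeof(ctrls));
	ctrls.ctrl_class = V4L2_CTRL_CLASS_MPEG;
	ctrls.count = 1;
	ctrls.controls = &ctrl;

	return ioctl(fd, VIDIOC_S_EXT_CTRLS, &ctrls);
}
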
diff --git a/drivers/media/video/vivi.c b/drivers/media/video/vivi.c
index b532aa280a1b..ee73dc75131c 100644
--- a/drivers/media/video/vivi.c
+++ b/drivers/media/video/vivi.c
@@ -1119,7 +1119,6 @@ static const struct file_operations vivi_fops = {
1119static struct video_device vivi = { 1119static struct video_device vivi = {
1120 .name = "vivi", 1120 .name = "vivi",
1121 .type = VID_TYPE_CAPTURE, 1121 .type = VID_TYPE_CAPTURE,
1122 .hardware = 0,
1123 .fops = &vivi_fops, 1122 .fops = &vivi_fops,
1124 .minor = -1, 1123 .minor = -1,
1125// .release = video_device_release, 1124// .release = video_device_release,
diff --git a/drivers/media/video/w9966.c b/drivers/media/video/w9966.c
index 47366408637c..08aaae07c7e0 100644
--- a/drivers/media/video/w9966.c
+++ b/drivers/media/video/w9966.c
@@ -196,7 +196,6 @@ static struct video_device w9966_template = {
196 .owner = THIS_MODULE, 196 .owner = THIS_MODULE,
197 .name = W9966_DRIVERNAME, 197 .name = W9966_DRIVERNAME,
198 .type = VID_TYPE_CAPTURE | VID_TYPE_SCALES, 198 .type = VID_TYPE_CAPTURE | VID_TYPE_SCALES,
199 .hardware = VID_HARDWARE_W9966,
200 .fops = &w9966_fops, 199 .fops = &w9966_fops,
201}; 200};
202 201
diff --git a/drivers/media/video/w9968cf.c b/drivers/media/video/w9968cf.c
index 9e7f3e685d73..2ae1430f5f7d 100644
--- a/drivers/media/video/w9968cf.c
+++ b/drivers/media/video/w9968cf.c
@@ -3549,7 +3549,6 @@ w9968cf_usb_probe(struct usb_interface* intf, const struct usb_device_id* id)
3549 strcpy(cam->v4ldev->name, symbolic(camlist, mod_id)); 3549 strcpy(cam->v4ldev->name, symbolic(camlist, mod_id));
3550 cam->v4ldev->owner = THIS_MODULE; 3550 cam->v4ldev->owner = THIS_MODULE;
3551 cam->v4ldev->type = VID_TYPE_CAPTURE | VID_TYPE_SCALES; 3551 cam->v4ldev->type = VID_TYPE_CAPTURE | VID_TYPE_SCALES;
3552 cam->v4ldev->hardware = VID_HARDWARE_W9968CF;
3553 cam->v4ldev->fops = &w9968cf_fops; 3552 cam->v4ldev->fops = &w9968cf_fops;
3554 cam->v4ldev->minor = video_nr[dev_nr]; 3553 cam->v4ldev->minor = video_nr[dev_nr];
3555 cam->v4ldev->release = video_device_release; 3554 cam->v4ldev->release = video_device_release;
diff --git a/drivers/media/video/zc0301/zc0301_core.c b/drivers/media/video/zc0301/zc0301_core.c
index 08a93c31c0a0..2c5665c82442 100644
--- a/drivers/media/video/zc0301/zc0301_core.c
+++ b/drivers/media/video/zc0301/zc0301_core.c
@@ -1985,7 +1985,6 @@ zc0301_usb_probe(struct usb_interface* intf, const struct usb_device_id* id)
1985 strcpy(cam->v4ldev->name, "ZC0301[P] PC Camera"); 1985 strcpy(cam->v4ldev->name, "ZC0301[P] PC Camera");
1986 cam->v4ldev->owner = THIS_MODULE; 1986 cam->v4ldev->owner = THIS_MODULE;
1987 cam->v4ldev->type = VID_TYPE_CAPTURE | VID_TYPE_SCALES; 1987 cam->v4ldev->type = VID_TYPE_CAPTURE | VID_TYPE_SCALES;
1988 cam->v4ldev->hardware = 0;
1989 cam->v4ldev->fops = &zc0301_fops; 1988 cam->v4ldev->fops = &zc0301_fops;
1990 cam->v4ldev->minor = video_nr[dev_nr]; 1989 cam->v4ldev->minor = video_nr[dev_nr];
1991 cam->v4ldev->release = video_device_release; 1990 cam->v4ldev->release = video_device_release;
diff --git a/drivers/media/video/zoran_card.c b/drivers/media/video/zoran_card.c
index 48da36a15fca..6e0ac4c5c379 100644
--- a/drivers/media/video/zoran_card.c
+++ b/drivers/media/video/zoran_card.c
@@ -1235,8 +1235,14 @@ zoran_setup_videocodec (struct zoran *zr,
1235 return m; 1235 return m;
1236 } 1236 }
1237 1237
1238 m->magic = 0L; /* magic not used */ 1238 /* magic and type are unused for the master struct; they make sense only for
1239 m->type = VID_HARDWARE_ZR36067; 1239 codec structs.
1240 In the past, .type was initialized to the old V4L1 .hardware
1241 value, VID_HARDWARE_ZR36067.
1242 */
1243 m->magic = 0L;
1244 m->type = 0;
1245
1240 m->flags = CODEC_FLAG_ENCODER | CODEC_FLAG_DECODER; 1246 m->flags = CODEC_FLAG_ENCODER | CODEC_FLAG_DECODER;
1241 strncpy(m->name, ZR_DEVNAME(zr), sizeof(m->name)); 1247 strncpy(m->name, ZR_DEVNAME(zr), sizeof(m->name));
1242 m->data = zr; 1248 m->data = zr;
diff --git a/drivers/media/video/zoran_driver.c b/drivers/media/video/zoran_driver.c
index 419e5af78533..dd3d7d2c8b0e 100644
--- a/drivers/media/video/zoran_driver.c
+++ b/drivers/media/video/zoran_driver.c
@@ -60,7 +60,6 @@
60 60
61#include <linux/spinlock.h> 61#include <linux/spinlock.h>
62#define MAP_NR(x) virt_to_page(x) 62#define MAP_NR(x) virt_to_page(x)
63#define ZORAN_HARDWARE VID_HARDWARE_ZR36067
64#define ZORAN_VID_TYPE ( \ 63#define ZORAN_VID_TYPE ( \
65 VID_TYPE_CAPTURE | \ 64 VID_TYPE_CAPTURE | \
66 VID_TYPE_OVERLAY | \ 65 VID_TYPE_OVERLAY | \
@@ -4659,7 +4658,6 @@ struct video_device zoran_template __devinitdata = {
4659#ifdef CONFIG_VIDEO_V4L2 4658#ifdef CONFIG_VIDEO_V4L2
4660 .type2 = ZORAN_V4L2_VID_FLAGS, 4659 .type2 = ZORAN_V4L2_VID_FLAGS,
4661#endif 4660#endif
4662 .hardware = ZORAN_HARDWARE,
4663 .fops = &zoran_fops, 4661 .fops = &zoran_fops,
4664 .release = &zoran_vdev_release, 4662 .release = &zoran_vdev_release,
4665 .minor = -1 4663 .minor = -1
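
The video_device hunks above (vivi, w9966, w9968cf, zc0301, zoran) all drop the obsolete V4L1 .hardware field and its VID_HARDWARE_* constants. As a rough illustration of what a template looks like after the change, here is a hedged sketch only; "foo" and foo_fops are hypothetical names, not taken from these patches:

#include <linux/module.h>
#include <linux/videodev.h>

static const struct file_operations foo_fops = {
	.owner = THIS_MODULE,		/* driver-specific handlers would go here */
};

static struct video_device foo_template = {
	.owner   = THIS_MODULE,
	.name    = "foo",		/* devices are identified by name/minor, not .hardware */
	.type    = VID_TYPE_CAPTURE,	/* the V4L1 capability flags remain for now */
	.fops    = &foo_fops,
	.minor   = -1,			/* let the V4L core pick a free minor */
	.release = video_device_release,
};
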
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index a5d0354bbbda..9203a0b221b3 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -13,6 +13,7 @@
13#include <linux/blkdev.h> 13#include <linux/blkdev.h>
14#include <linux/freezer.h> 14#include <linux/freezer.h>
15#include <linux/kthread.h> 15#include <linux/kthread.h>
16#include <linux/scatterlist.h>
16 17
17#include <linux/mmc/card.h> 18#include <linux/mmc/card.h>
18#include <linux/mmc/host.h> 19#include <linux/mmc/host.h>
@@ -153,19 +154,21 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock
153 blk_queue_max_hw_segments(mq->queue, bouncesz / 512); 154 blk_queue_max_hw_segments(mq->queue, bouncesz / 512);
154 blk_queue_max_segment_size(mq->queue, bouncesz); 155 blk_queue_max_segment_size(mq->queue, bouncesz);
155 156
156 mq->sg = kzalloc(sizeof(struct scatterlist), 157 mq->sg = kmalloc(sizeof(struct scatterlist),
157 GFP_KERNEL); 158 GFP_KERNEL);
158 if (!mq->sg) { 159 if (!mq->sg) {
159 ret = -ENOMEM; 160 ret = -ENOMEM;
160 goto cleanup_queue; 161 goto cleanup_queue;
161 } 162 }
163 sg_init_table(mq->sg, 1);
162 164
163 mq->bounce_sg = kzalloc(sizeof(struct scatterlist) * 165 mq->bounce_sg = kmalloc(sizeof(struct scatterlist) *
164 bouncesz / 512, GFP_KERNEL); 166 bouncesz / 512, GFP_KERNEL);
165 if (!mq->bounce_sg) { 167 if (!mq->bounce_sg) {
166 ret = -ENOMEM; 168 ret = -ENOMEM;
167 goto cleanup_queue; 169 goto cleanup_queue;
168 } 170 }
171 sg_init_table(mq->bounce_sg, bouncesz / 512);
169 } 172 }
170 } 173 }
171#endif 174#endif
@@ -302,12 +305,12 @@ static void copy_sg(struct scatterlist *dst, unsigned int dst_len,
302 BUG_ON(dst_len == 0); 305 BUG_ON(dst_len == 0);
303 306
304 if (dst_size == 0) { 307 if (dst_size == 0) {
305 dst_buf = page_address(dst->page) + dst->offset; 308 dst_buf = sg_virt(dst);
306 dst_size = dst->length; 309 dst_size = dst->length;
307 } 310 }
308 311
309 if (src_size == 0) { 312 if (src_size == 0) {
310 src_buf = page_address(src->page) + src->offset; 313 src_buf = sg_virt(src);
311 src_size = src->length; 314 src_size = src->length;
312 } 315 }
313 316
@@ -353,9 +356,7 @@ unsigned int mmc_queue_map_sg(struct mmc_queue *mq)
353 return 1; 356 return 1;
354 } 357 }
355 358
356 mq->sg[0].page = virt_to_page(mq->bounce_buf); 359 sg_init_one(mq->sg, mq->bounce_buf, 0);
357 mq->sg[0].offset = offset_in_page(mq->bounce_buf);
358 mq->sg[0].length = 0;
359 360
360 while (sg_len) { 361 while (sg_len) {
361 mq->sg[0].length += mq->bounce_sg[sg_len - 1].length; 362 mq->sg[0].length += mq->bounce_sg[sg_len - 1].length;
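
The queue.c hunk swaps kzalloc() for kmalloc() and adds explicit sg_init_table()/sg_init_one() calls: with the chained-scatterlist rework, each entry carries termination (and, when scatterlist debugging is enabled, magic) markers that plain zeroed memory does not provide. A minimal sketch of the allocation pattern, assuming a caller-supplied entry count (nents and alloc_sg_list() are illustrative names, not from the patch):

#include <linux/scatterlist.h>
#include <linux/slab.h>

static struct scatterlist *alloc_sg_list(unsigned int nents)
{
	struct scatterlist *sg;

	/* kmalloc() is enough now: sg_init_table() initializes every entry */
	sg = kmalloc(nents * sizeof(*sg), GFP_KERNEL);
	if (!sg)
		return NULL;

	/* clears the table, sets the debug magic and marks the last entry */
	sg_init_table(sg, nents);
	return sg;
}

For the single bounce-buffer entry, sg_init_one(mq->sg, mq->bounce_buf, 0) performs the same initialization and points the entry at the buffer in one step, which is what the hunk in mmc_queue_map_sg() uses.
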
diff --git a/drivers/mmc/host/at91_mci.c b/drivers/mmc/host/at91_mci.c
index 7a452c2ad1f9..b1edcefdd4f9 100644
--- a/drivers/mmc/host/at91_mci.c
+++ b/drivers/mmc/host/at91_mci.c
@@ -149,7 +149,7 @@ static inline void at91_mci_sg_to_dma(struct at91mci_host *host, struct mmc_data
149 149
150 sg = &data->sg[i]; 150 sg = &data->sg[i];
151 151
152 sgbuffer = kmap_atomic(sg->page, KM_BIO_SRC_IRQ) + sg->offset; 152 sgbuffer = kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset;
153 amount = min(size, sg->length); 153 amount = min(size, sg->length);
154 size -= amount; 154 size -= amount;
155 155
@@ -226,7 +226,7 @@ static void at91_mci_pre_dma_read(struct at91mci_host *host)
226 sg = &data->sg[host->transfer_index++]; 226 sg = &data->sg[host->transfer_index++];
227 pr_debug("sg = %p\n", sg); 227 pr_debug("sg = %p\n", sg);
228 228
229 sg->dma_address = dma_map_page(NULL, sg->page, sg->offset, sg->length, DMA_FROM_DEVICE); 229 sg->dma_address = dma_map_page(NULL, sg_page(sg), sg->offset, sg->length, DMA_FROM_DEVICE);
230 230
231 pr_debug("dma address = %08X, length = %d\n", sg->dma_address, sg->length); 231 pr_debug("dma address = %08X, length = %d\n", sg->dma_address, sg->length);
232 232
@@ -283,7 +283,7 @@ static void at91_mci_post_dma_read(struct at91mci_host *host)
283 int index; 283 int index;
284 284
285 /* Swap the contents of the buffer */ 285 /* Swap the contents of the buffer */
286 buffer = kmap_atomic(sg->page, KM_BIO_SRC_IRQ) + sg->offset; 286 buffer = kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset;
287 pr_debug("buffer = %p, length = %d\n", buffer, sg->length); 287 pr_debug("buffer = %p, length = %d\n", buffer, sg->length);
288 288
289 for (index = 0; index < (sg->length / 4); index++) 289 for (index = 0; index < (sg->length / 4); index++)
@@ -292,7 +292,7 @@ static void at91_mci_post_dma_read(struct at91mci_host *host)
292 kunmap_atomic(buffer, KM_BIO_SRC_IRQ); 292 kunmap_atomic(buffer, KM_BIO_SRC_IRQ);
293 } 293 }
294 294
295 flush_dcache_page(sg->page); 295 flush_dcache_page(sg_page(sg));
296 } 296 }
297 297
298 /* Is there another transfer to trigger? */ 298 /* Is there another transfer to trigger? */
diff --git a/drivers/mmc/host/au1xmmc.c b/drivers/mmc/host/au1xmmc.c
index 92c4d0dfee43..bcbb6d247bf7 100644
--- a/drivers/mmc/host/au1xmmc.c
+++ b/drivers/mmc/host/au1xmmc.c
@@ -340,7 +340,7 @@ static void au1xmmc_send_pio(struct au1xmmc_host *host)
340 340
341 /* This is the pointer to the data buffer */ 341 /* This is the pointer to the data buffer */
342 sg = &data->sg[host->pio.index]; 342 sg = &data->sg[host->pio.index];
343 sg_ptr = page_address(sg->page) + sg->offset + host->pio.offset; 343 sg_ptr = sg_virt(sg) + host->pio.offset;
344 344
345 /* This is the space left inside the buffer */ 345 /* This is the space left inside the buffer */
346 sg_len = data->sg[host->pio.index].length - host->pio.offset; 346 sg_len = data->sg[host->pio.index].length - host->pio.offset;
@@ -400,7 +400,7 @@ static void au1xmmc_receive_pio(struct au1xmmc_host *host)
400 400
401 if (host->pio.index < host->dma.len) { 401 if (host->pio.index < host->dma.len) {
402 sg = &data->sg[host->pio.index]; 402 sg = &data->sg[host->pio.index];
403 sg_ptr = page_address(sg->page) + sg->offset + host->pio.offset; 403 sg_ptr = sg_virt(sg) + host->pio.offset;
404 404
405 /* This is the space left inside the buffer */ 405 /* This is the space left inside the buffer */
406 sg_len = sg_dma_len(&data->sg[host->pio.index]) - host->pio.offset; 406 sg_len = sg_dma_len(&data->sg[host->pio.index]) - host->pio.offset;
@@ -613,14 +613,11 @@ au1xmmc_prepare_data(struct au1xmmc_host *host, struct mmc_data *data)
613 613
614 if (host->flags & HOST_F_XMIT){ 614 if (host->flags & HOST_F_XMIT){
615 ret = au1xxx_dbdma_put_source_flags(channel, 615 ret = au1xxx_dbdma_put_source_flags(channel,
616 (void *) (page_address(sg->page) + 616 (void *) sg_virt(sg), len, flags);
617 sg->offset),
618 len, flags);
619 } 617 }
620 else { 618 else {
621 ret = au1xxx_dbdma_put_dest_flags(channel, 619 ret = au1xxx_dbdma_put_dest_flags(channel,
622 (void *) (page_address(sg->page) + 620 (void *) sg_virt(sg),
623 sg->offset),
624 len, flags); 621 len, flags);
625 } 622 }
626 623
diff --git a/drivers/mmc/host/imxmmc.c b/drivers/mmc/host/imxmmc.c
index 6ebc41e7592c..fc72e1fadb6a 100644
--- a/drivers/mmc/host/imxmmc.c
+++ b/drivers/mmc/host/imxmmc.c
@@ -262,7 +262,7 @@ static void imxmci_setup_data(struct imxmci_host *host, struct mmc_data *data)
262 } 262 }
263 263
264 /* Convert back to virtual address */ 264 /* Convert back to virtual address */
265 host->data_ptr = (u16*)(page_address(data->sg->page) + data->sg->offset); 265 host->data_ptr = (u16*)sg_virt(sg);
266 host->data_cnt = 0; 266 host->data_cnt = 0;
267 267
268 clear_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events); 268 clear_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events);
diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c
index 7ae18eaed6c5..12c2d807c145 100644
--- a/drivers/mmc/host/mmc_spi.c
+++ b/drivers/mmc/host/mmc_spi.c
@@ -813,7 +813,7 @@ mmc_spi_data_do(struct mmc_spi_host *host, struct mmc_command *cmd,
813 && dir == DMA_FROM_DEVICE) 813 && dir == DMA_FROM_DEVICE)
814 dir = DMA_BIDIRECTIONAL; 814 dir = DMA_BIDIRECTIONAL;
815 815
816 dma_addr = dma_map_page(dma_dev, sg->page, 0, 816 dma_addr = dma_map_page(dma_dev, sg_page(sg), 0,
817 PAGE_SIZE, dir); 817 PAGE_SIZE, dir);
818 if (direction == DMA_TO_DEVICE) 818 if (direction == DMA_TO_DEVICE)
819 t->tx_dma = dma_addr + sg->offset; 819 t->tx_dma = dma_addr + sg->offset;
@@ -822,7 +822,7 @@ mmc_spi_data_do(struct mmc_spi_host *host, struct mmc_command *cmd,
822 } 822 }
823 823
824 /* allow pio too; we don't allow highmem */ 824 /* allow pio too; we don't allow highmem */
825 kmap_addr = kmap(sg->page); 825 kmap_addr = kmap(sg_page(sg));
826 if (direction == DMA_TO_DEVICE) 826 if (direction == DMA_TO_DEVICE)
827 t->tx_buf = kmap_addr + sg->offset; 827 t->tx_buf = kmap_addr + sg->offset;
828 else 828 else
@@ -855,8 +855,8 @@ mmc_spi_data_do(struct mmc_spi_host *host, struct mmc_command *cmd,
855 855
856 /* discard mappings */ 856 /* discard mappings */
857 if (direction == DMA_FROM_DEVICE) 857 if (direction == DMA_FROM_DEVICE)
858 flush_kernel_dcache_page(sg->page); 858 flush_kernel_dcache_page(sg_page(sg));
859 kunmap(sg->page); 859 kunmap(sg_page(sg));
860 if (dma_dev) 860 if (dma_dev)
861 dma_unmap_page(dma_dev, dma_addr, PAGE_SIZE, dir); 861 dma_unmap_page(dma_dev, dma_addr, PAGE_SIZE, dir);
862 862
diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c
index 60a67dfcda6a..971e18b91f4a 100644
--- a/drivers/mmc/host/omap.c
+++ b/drivers/mmc/host/omap.c
@@ -24,10 +24,10 @@
24#include <linux/mmc/host.h> 24#include <linux/mmc/host.h>
25#include <linux/mmc/card.h> 25#include <linux/mmc/card.h>
26#include <linux/clk.h> 26#include <linux/clk.h>
27#include <linux/scatterlist.h>
27 28
28#include <asm/io.h> 29#include <asm/io.h>
29#include <asm/irq.h> 30#include <asm/irq.h>
30#include <asm/scatterlist.h>
31#include <asm/mach-types.h> 31#include <asm/mach-types.h>
32 32
33#include <asm/arch/board.h> 33#include <asm/arch/board.h>
@@ -383,7 +383,7 @@ mmc_omap_sg_to_buf(struct mmc_omap_host *host)
383 383
384 sg = host->data->sg + host->sg_idx; 384 sg = host->data->sg + host->sg_idx;
385 host->buffer_bytes_left = sg->length; 385 host->buffer_bytes_left = sg->length;
386 host->buffer = page_address(sg->page) + sg->offset; 386 host->buffer = sg_virt(sg);
387 if (host->buffer_bytes_left > host->total_bytes_left) 387 if (host->buffer_bytes_left > host->total_bytes_left)
388 host->buffer_bytes_left = host->total_bytes_left; 388 host->buffer_bytes_left = host->total_bytes_left;
389} 389}
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index b397121b947d..0db837e44b77 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -231,7 +231,7 @@ static void sdhci_deactivate_led(struct sdhci_host *host)
231 231
232static inline char* sdhci_sg_to_buffer(struct sdhci_host* host) 232static inline char* sdhci_sg_to_buffer(struct sdhci_host* host)
233{ 233{
234 return page_address(host->cur_sg->page) + host->cur_sg->offset; 234 return sg_virt(host->cur_sg);
235} 235}
236 236
237static inline int sdhci_next_sg(struct sdhci_host* host) 237static inline int sdhci_next_sg(struct sdhci_host* host)
diff --git a/drivers/mmc/host/tifm_sd.c b/drivers/mmc/host/tifm_sd.c
index 9b904795eb77..c11a3d256051 100644
--- a/drivers/mmc/host/tifm_sd.c
+++ b/drivers/mmc/host/tifm_sd.c
@@ -192,7 +192,7 @@ static void tifm_sd_transfer_data(struct tifm_sd *host)
192 } 192 }
193 off = sg[host->sg_pos].offset + host->block_pos; 193 off = sg[host->sg_pos].offset + host->block_pos;
194 194
195 pg = nth_page(sg[host->sg_pos].page, off >> PAGE_SHIFT); 195 pg = nth_page(sg_page(&sg[host->sg_pos]), off >> PAGE_SHIFT);
196 p_off = offset_in_page(off); 196 p_off = offset_in_page(off);
197 p_cnt = PAGE_SIZE - p_off; 197 p_cnt = PAGE_SIZE - p_off;
198 p_cnt = min(p_cnt, cnt); 198 p_cnt = min(p_cnt, cnt);
@@ -241,18 +241,18 @@ static void tifm_sd_bounce_block(struct tifm_sd *host, struct mmc_data *r_data)
241 } 241 }
242 off = sg[host->sg_pos].offset + host->block_pos; 242 off = sg[host->sg_pos].offset + host->block_pos;
243 243
244 pg = nth_page(sg[host->sg_pos].page, off >> PAGE_SHIFT); 244 pg = nth_page(sg_page(&sg[host->sg_pos]), off >> PAGE_SHIFT);
245 p_off = offset_in_page(off); 245 p_off = offset_in_page(off);
246 p_cnt = PAGE_SIZE - p_off; 246 p_cnt = PAGE_SIZE - p_off;
247 p_cnt = min(p_cnt, cnt); 247 p_cnt = min(p_cnt, cnt);
248 p_cnt = min(p_cnt, t_size); 248 p_cnt = min(p_cnt, t_size);
249 249
250 if (r_data->flags & MMC_DATA_WRITE) 250 if (r_data->flags & MMC_DATA_WRITE)
251 tifm_sd_copy_page(host->bounce_buf.page, 251 tifm_sd_copy_page(sg_page(&host->bounce_buf),
252 r_data->blksz - t_size, 252 r_data->blksz - t_size,
253 pg, p_off, p_cnt); 253 pg, p_off, p_cnt);
254 else if (r_data->flags & MMC_DATA_READ) 254 else if (r_data->flags & MMC_DATA_READ)
255 tifm_sd_copy_page(pg, p_off, host->bounce_buf.page, 255 tifm_sd_copy_page(pg, p_off, sg_page(&host->bounce_buf),
256 r_data->blksz - t_size, p_cnt); 256 r_data->blksz - t_size, p_cnt);
257 257
258 t_size -= p_cnt; 258 t_size -= p_cnt;
diff --git a/drivers/mmc/host/wbsd.c b/drivers/mmc/host/wbsd.c
index 80db11c05f2a..fa4c8c53cc7a 100644
--- a/drivers/mmc/host/wbsd.c
+++ b/drivers/mmc/host/wbsd.c
@@ -269,7 +269,7 @@ static inline int wbsd_next_sg(struct wbsd_host *host)
269 269
270static inline char *wbsd_sg_to_buffer(struct wbsd_host *host) 270static inline char *wbsd_sg_to_buffer(struct wbsd_host *host)
271{ 271{
272 return page_address(host->cur_sg->page) + host->cur_sg->offset; 272 return sg_virt(host->cur_sg);
273} 273}
274 274
275static inline void wbsd_sg_to_dma(struct wbsd_host *host, struct mmc_data *data) 275static inline void wbsd_sg_to_dma(struct wbsd_host *host, struct mmc_data *data)
@@ -283,7 +283,7 @@ static inline void wbsd_sg_to_dma(struct wbsd_host *host, struct mmc_data *data)
283 len = data->sg_len; 283 len = data->sg_len;
284 284
285 for (i = 0; i < len; i++) { 285 for (i = 0; i < len; i++) {
286 sgbuf = page_address(sg[i].page) + sg[i].offset; 286 sgbuf = sg_virt(&sg[i]);
287 memcpy(dmabuf, sgbuf, sg[i].length); 287 memcpy(dmabuf, sgbuf, sg[i].length);
288 dmabuf += sg[i].length; 288 dmabuf += sg[i].length;
289 } 289 }
@@ -300,7 +300,7 @@ static inline void wbsd_dma_to_sg(struct wbsd_host *host, struct mmc_data *data)
300 len = data->sg_len; 300 len = data->sg_len;
301 301
302 for (i = 0; i < len; i++) { 302 for (i = 0; i < len; i++) {
303 sgbuf = page_address(sg[i].page) + sg[i].offset; 303 sgbuf = sg_virt(&sg[i]);
304 memcpy(sgbuf, dmabuf, sg[i].length); 304 memcpy(sgbuf, dmabuf, sg[i].length);
305 dmabuf += sg[i].length; 305 dmabuf += sg[i].length;
306 } 306 }
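
All of the MMC host hunks above are the same mechanical conversion: sg->page dereferences become sg_page(sg), and the open-coded page_address(sg->page) + sg->offset becomes sg_virt(sg), since the page pointer is now folded into page_link together with chain/termination bits. Two hypothetical helpers showing the replacement forms (the names are illustrative, not from the patches):

#include <linux/scatterlist.h>
#include <linux/highmem.h>

/* lowmem mapping, equivalent to the old page_address(sg->page) + sg->offset */
static void *sg_pio_addr(struct scatterlist *sg)
{
	return sg_virt(sg);
}

/* highmem-safe mapping, equivalent to the old kmap(sg->page) + sg->offset */
static void *sg_pio_kmap(struct scatterlist *sg)
{
	return kmap(sg_page(sg)) + sg->offset;
}
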
diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
index 3aa3dca56ae6..a9eb1c516247 100644
--- a/drivers/mtd/chips/cfi_cmdset_0001.c
+++ b/drivers/mtd/chips/cfi_cmdset_0001.c
@@ -85,6 +85,7 @@ static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len,
85static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from, 85static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from,
86 size_t len); 86 size_t len);
87 87
88static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
88static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode); 89static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
89static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr); 90static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
90#include "fwh_lock.h" 91#include "fwh_lock.h"
@@ -641,73 +642,13 @@ static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
641/* 642/*
642 * *********** CHIP ACCESS FUNCTIONS *********** 643 * *********** CHIP ACCESS FUNCTIONS ***********
643 */ 644 */
644 645static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
645static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
646{ 646{
647 DECLARE_WAITQUEUE(wait, current); 647 DECLARE_WAITQUEUE(wait, current);
648 struct cfi_private *cfi = map->fldrv_priv; 648 struct cfi_private *cfi = map->fldrv_priv;
649 map_word status, status_OK = CMD(0x80), status_PWS = CMD(0x01); 649 map_word status, status_OK = CMD(0x80), status_PWS = CMD(0x01);
650 unsigned long timeo;
651 struct cfi_pri_intelext *cfip = cfi->cmdset_priv; 650 struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
652 651 unsigned long timeo = jiffies + HZ;
653 resettime:
654 timeo = jiffies + HZ;
655 retry:
656 if (chip->priv && (mode == FL_WRITING || mode == FL_ERASING || mode == FL_OTP_WRITE || mode == FL_SHUTDOWN)) {
657 /*
658 * OK. We have possibility for contension on the write/erase
659 * operations which are global to the real chip and not per
660 * partition. So let's fight it over in the partition which
661 * currently has authority on the operation.
662 *
663 * The rules are as follows:
664 *
665 * - any write operation must own shared->writing.
666 *
667 * - any erase operation must own _both_ shared->writing and
668 * shared->erasing.
669 *
670 * - contension arbitration is handled in the owner's context.
671 *
672 * The 'shared' struct can be read and/or written only when
673 * its lock is taken.
674 */
675 struct flchip_shared *shared = chip->priv;
676 struct flchip *contender;
677 spin_lock(&shared->lock);
678 contender = shared->writing;
679 if (contender && contender != chip) {
680 /*
681 * The engine to perform desired operation on this
682 * partition is already in use by someone else.
683 * Let's fight over it in the context of the chip
684 * currently using it. If it is possible to suspend,
685 * that other partition will do just that, otherwise
686 * it'll happily send us to sleep. In any case, when
687 * get_chip returns success we're clear to go ahead.
688 */
689 int ret = spin_trylock(contender->mutex);
690 spin_unlock(&shared->lock);
691 if (!ret)
692 goto retry;
693 spin_unlock(chip->mutex);
694 ret = get_chip(map, contender, contender->start, mode);
695 spin_lock(chip->mutex);
696 if (ret) {
697 spin_unlock(contender->mutex);
698 return ret;
699 }
700 timeo = jiffies + HZ;
701 spin_lock(&shared->lock);
702 spin_unlock(contender->mutex);
703 }
704
705 /* We now own it */
706 shared->writing = chip;
707 if (mode == FL_ERASING)
708 shared->erasing = chip;
709 spin_unlock(&shared->lock);
710 }
711 652
712 switch (chip->state) { 653 switch (chip->state) {
713 654
@@ -722,16 +663,11 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
722 if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS)) 663 if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS))
723 break; 664 break;
724 665
725 if (time_after(jiffies, timeo)) {
726 printk(KERN_ERR "%s: Waiting for chip to be ready timed out. Status %lx\n",
727 map->name, status.x[0]);
728 return -EIO;
729 }
730 spin_unlock(chip->mutex); 666 spin_unlock(chip->mutex);
731 cfi_udelay(1); 667 cfi_udelay(1);
732 spin_lock(chip->mutex); 668 spin_lock(chip->mutex);
733 /* Someone else might have been playing with it. */ 669 /* Someone else might have been playing with it. */
734 goto retry; 670 return -EAGAIN;
735 } 671 }
736 672
737 case FL_READY: 673 case FL_READY:
@@ -809,10 +745,82 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
809 schedule(); 745 schedule();
810 remove_wait_queue(&chip->wq, &wait); 746 remove_wait_queue(&chip->wq, &wait);
811 spin_lock(chip->mutex); 747 spin_lock(chip->mutex);
812 goto resettime; 748 return -EAGAIN;
813 } 749 }
814} 750}
815 751
752static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
753{
754 int ret;
755
756 retry:
757 if (chip->priv && (mode == FL_WRITING || mode == FL_ERASING
758 || mode == FL_OTP_WRITE || mode == FL_SHUTDOWN)) {
759 /*
760 * OK. We have possibility for contention on the write/erase
761 * operations which are global to the real chip and not per
762 * partition. So let's fight it over in the partition which
763 * currently has authority on the operation.
764 *
765 * The rules are as follows:
766 *
767 * - any write operation must own shared->writing.
768 *
769 * - any erase operation must own _both_ shared->writing and
770 * shared->erasing.
771 *
772 * - contention arbitration is handled in the owner's context.
773 *
774 * The 'shared' struct can be read and/or written only when
775 * its lock is taken.
776 */
777 struct flchip_shared *shared = chip->priv;
778 struct flchip *contender;
779 spin_lock(&shared->lock);
780 contender = shared->writing;
781 if (contender && contender != chip) {
782 /*
783 * The engine to perform desired operation on this
784 * partition is already in use by someone else.
785 * Let's fight over it in the context of the chip
786 * currently using it. If it is possible to suspend,
787 * that other partition will do just that, otherwise
788 * it'll happily send us to sleep. In any case, when
789 * get_chip returns success we're clear to go ahead.
790 */
791 ret = spin_trylock(contender->mutex);
792 spin_unlock(&shared->lock);
793 if (!ret)
794 goto retry;
795 spin_unlock(chip->mutex);
796 ret = chip_ready(map, contender, contender->start, mode);
797 spin_lock(chip->mutex);
798
799 if (ret == -EAGAIN) {
800 spin_unlock(contender->mutex);
801 goto retry;
802 }
803 if (ret) {
804 spin_unlock(contender->mutex);
805 return ret;
806 }
807 spin_lock(&shared->lock);
808 spin_unlock(contender->mutex);
809 }
810
811 /* We now own it */
812 shared->writing = chip;
813 if (mode == FL_ERASING)
814 shared->erasing = chip;
815 spin_unlock(&shared->lock);
816 }
817 ret = chip_ready(map, chip, adr, mode);
818 if (ret == -EAGAIN)
819 goto retry;
820
821 return ret;
822}
823
816static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr) 824static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
817{ 825{
818 struct cfi_private *cfi = map->fldrv_priv; 826 struct cfi_private *cfi = map->fldrv_priv;
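
The cfi_cmdset_0001 rework pulls the wait-for-ready state machine out of get_chip() into a new chip_ready() that returns -EAGAIN when the caller should re-evaluate from the top, while get_chip() keeps the partition-contention arbitration and owns the retry loop. A stripped-down sketch of that calling convention; wait_until_ready() is a stub standing in for chip_ready(), not a real kernel function:

#include <linux/errno.h>
#include <linux/mtd/flashchip.h>

/*
 * Stand-in for chip_ready(): 0 on success, -EAGAIN when the caller must
 * re-evaluate the chip state from scratch (it may have slept and the
 * state changed), any other negative errno on a hard failure.
 */
static int wait_until_ready(struct flchip *chip)
{
	return 0;	/* stub: the real logic lives in chip_ready() above */
}

static int acquire_chip(struct flchip *chip)
{
	int ret;

retry:
	ret = wait_until_ready(chip);	/* may drop and retake chip->mutex */
	if (ret == -EAGAIN)
		goto retry;		/* start over rather than loop inside */
	return ret;
}
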
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 8f9c3baeb38e..246d4512f64b 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -300,7 +300,7 @@ config MTD_NAND_PLATFORM
300 via platform_data. 300 via platform_data.
301 301
302config MTD_ALAUDA 302config MTD_ALAUDA
303 tristate "MTD driver for Olympus MAUSB-10 and Fijufilm DPC-R1" 303 tristate "MTD driver for Olympus MAUSB-10 and Fujifilm DPC-R1"
304 depends on MTD_NAND && USB 304 depends on MTD_NAND && USB
305 help 305 help
306 These two (and possibly other) Alauda-based cardreaders for 306 These two (and possibly other) Alauda-based cardreaders for
diff --git a/drivers/mtd/nand/diskonchip.c b/drivers/mtd/nand/diskonchip.c
index ab9f5c5db38d..0e72153b3297 100644
--- a/drivers/mtd/nand/diskonchip.c
+++ b/drivers/mtd/nand/diskonchip.c
@@ -220,7 +220,7 @@ static int doc_ecc_decode(struct rs_control *rs, uint8_t *data, uint8_t *ecc)
220 } 220 }
221 } 221 }
222 /* If the parity is wrong, no rescue possible */ 222 /* If the parity is wrong, no rescue possible */
223 return parity ? -1 : nerr; 223 return parity ? -EBADMSG : nerr;
224} 224}
225 225
226static void DoC_Delay(struct doc_priv *doc, unsigned short cycles) 226static void DoC_Delay(struct doc_priv *doc, unsigned short cycles)
@@ -1034,7 +1034,7 @@ static int doc200x_correct_data(struct mtd_info *mtd, u_char *dat,
1034 WriteDOC(DOC_ECC_DIS, docptr, Mplus_ECCConf); 1034 WriteDOC(DOC_ECC_DIS, docptr, Mplus_ECCConf);
1035 else 1035 else
1036 WriteDOC(DOC_ECC_DIS, docptr, ECCConf); 1036 WriteDOC(DOC_ECC_DIS, docptr, ECCConf);
1037 if (no_ecc_failures && (ret == -1)) { 1037 if (no_ecc_failures && (ret == -EBADMSG)) {
1038 printk(KERN_ERR "suppressing ECC failure\n"); 1038 printk(KERN_ERR "suppressing ECC failure\n");
1039 ret = 0; 1039 ret = 0;
1040 } 1040 }
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index b4e0e7723894..e29c1da7f56e 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -789,7 +789,7 @@ static int nand_read_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
789 int stat; 789 int stat;
790 790
791 stat = chip->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]); 791 stat = chip->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]);
792 if (stat == -1) 792 if (stat < 0)
793 mtd->ecc_stats.failed++; 793 mtd->ecc_stats.failed++;
794 else 794 else
795 mtd->ecc_stats.corrected += stat; 795 mtd->ecc_stats.corrected += stat;
@@ -833,7 +833,7 @@ static int nand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
833 int stat; 833 int stat;
834 834
835 stat = chip->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]); 835 stat = chip->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]);
836 if (stat == -1) 836 if (stat < 0)
837 mtd->ecc_stats.failed++; 837 mtd->ecc_stats.failed++;
838 else 838 else
839 mtd->ecc_stats.corrected += stat; 839 mtd->ecc_stats.corrected += stat;
@@ -874,7 +874,7 @@ static int nand_read_page_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
874 chip->read_buf(mtd, oob, eccbytes); 874 chip->read_buf(mtd, oob, eccbytes);
875 stat = chip->ecc.correct(mtd, p, oob, NULL); 875 stat = chip->ecc.correct(mtd, p, oob, NULL);
876 876
877 if (stat == -1) 877 if (stat < 0)
878 mtd->ecc_stats.failed++; 878 mtd->ecc_stats.failed++;
879 else 879 else
880 mtd->ecc_stats.corrected += stat; 880 mtd->ecc_stats.corrected += stat;
diff --git a/drivers/mtd/nand/nand_ecc.c b/drivers/mtd/nand/nand_ecc.c
index fde593e5e634..9003a135e050 100644
--- a/drivers/mtd/nand/nand_ecc.c
+++ b/drivers/mtd/nand/nand_ecc.c
@@ -189,7 +189,7 @@ int nand_correct_data(struct mtd_info *mtd, u_char *dat,
189 if(countbits(s0 | ((uint32_t)s1 << 8) | ((uint32_t)s2 <<16)) == 1) 189 if(countbits(s0 | ((uint32_t)s1 << 8) | ((uint32_t)s2 <<16)) == 1)
190 return 1; 190 return 1;
191 191
192 return -1; 192 return -EBADMSG;
193} 193}
194EXPORT_SYMBOL(nand_correct_data); 194EXPORT_SYMBOL(nand_correct_data);
195 195
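
With nand_correct_data() (and the DiskOnChip Reed-Solomon decoder above) returning -EBADMSG rather than a bare -1, the nand_base.c read paths can treat any negative return from chip->ecc.correct() as an uncorrectable error and any non-negative value as the count of corrected bitflips. A condensed sketch of that caller convention; correct_step() is a stub with the same contract, not code copied from nand_base.c:

#include <linux/errno.h>
#include <linux/mtd/mtd.h>

/*
 * Stand-in for chip->ecc.correct(): returns the number of corrected
 * bitflips (>= 0) or a negative errno such as -EBADMSG when the data
 * cannot be repaired.
 */
static int correct_step(u_char *dat, u_char *read_ecc, u_char *calc_ecc)
{
	return -EBADMSG;	/* stub: pretend this step was uncorrectable */
}

static void account_ecc_result(struct mtd_info *mtd, u_char *dat,
			       u_char *read_ecc, u_char *calc_ecc)
{
	int stat = correct_step(dat, read_ecc, calc_ecc);

	if (stat < 0)				/* any errno, not only -1 */
		mtd->ecc_stats.failed++;
	else
		mtd->ecc_stats.corrected += stat;
}
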
diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c
index a7574807dc46..10490b48d9f7 100644
--- a/drivers/mtd/nand/nandsim.c
+++ b/drivers/mtd/nand/nandsim.c
@@ -511,7 +511,7 @@ static int init_nandsim(struct mtd_info *mtd)
511 } 511 }
512 512
513 if (ns->options & OPT_SMALLPAGE) { 513 if (ns->options & OPT_SMALLPAGE) {
514 if (ns->geom.totsz < (64 << 20)) { 514 if (ns->geom.totsz < (32 << 20)) {
515 ns->geom.pgaddrbytes = 3; 515 ns->geom.pgaddrbytes = 3;
516 ns->geom.secaddrbytes = 2; 516 ns->geom.secaddrbytes = 2;
517 } else { 517 } else {
diff --git a/drivers/mtd/nand/s3c2410.c b/drivers/mtd/nand/s3c2410.c
index 21b921dd6aab..66f76e9618dd 100644
--- a/drivers/mtd/nand/s3c2410.c
+++ b/drivers/mtd/nand/s3c2410.c
@@ -488,12 +488,24 @@ static void s3c2410_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len)
488 readsb(this->IO_ADDR_R, buf, len); 488 readsb(this->IO_ADDR_R, buf, len);
489} 489}
490 490
491static void s3c2440_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len)
492{
493 struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
494 readsl(info->regs + S3C2440_NFDATA, buf, len / 4);
495}
496
491static void s3c2410_nand_write_buf(struct mtd_info *mtd, const u_char *buf, int len) 497static void s3c2410_nand_write_buf(struct mtd_info *mtd, const u_char *buf, int len)
492{ 498{
493 struct nand_chip *this = mtd->priv; 499 struct nand_chip *this = mtd->priv;
494 writesb(this->IO_ADDR_W, buf, len); 500 writesb(this->IO_ADDR_W, buf, len);
495} 501}
496 502
503static void s3c2440_nand_write_buf(struct mtd_info *mtd, const u_char *buf, int len)
504{
505 struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
506 writesl(info->regs + S3C2440_NFDATA, buf, len / 4);
507}
508
497/* device management functions */ 509/* device management functions */
498 510
499static int s3c2410_nand_remove(struct platform_device *pdev) 511static int s3c2410_nand_remove(struct platform_device *pdev)
@@ -604,6 +616,8 @@ static void s3c2410_nand_init_chip(struct s3c2410_nand_info *info,
604 info->sel_bit = S3C2440_NFCONT_nFCE; 616 info->sel_bit = S3C2440_NFCONT_nFCE;
605 chip->cmd_ctrl = s3c2440_nand_hwcontrol; 617 chip->cmd_ctrl = s3c2440_nand_hwcontrol;
606 chip->dev_ready = s3c2440_nand_devready; 618 chip->dev_ready = s3c2440_nand_devready;
619 chip->read_buf = s3c2440_nand_read_buf;
620 chip->write_buf = s3c2440_nand_write_buf;
607 break; 621 break;
608 622
609 case TYPE_S3C2412: 623 case TYPE_S3C2412:
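
The new s3c2440_nand_{read,write}_buf() helpers burst data through the S3C2440_NFDATA register with readsl()/writesl(), transferring len / 4 words; any remainder below a word is silently dropped, which is presumably fine for the page and OOB lengths this driver issues. A defensive variant that finishes the tail with byte accesses could look like the sketch below (an illustration of the idea, not code from the patch):

#include <linux/types.h>
#include <asm/io.h>

static void s3c2440_read_buf_with_tail(void __iomem *nfdata, u_char *buf, int len)
{
	/* bulk of the transfer as 32-bit reads from the data register */
	readsl(nfdata, buf, len >> 2);

	/* mop up a non-word-aligned tail with byte reads */
	if (len & 3)
		readsb(nfdata, buf + (len & ~3), len & 3);
}
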
diff --git a/drivers/mtd/onenand/onenand_sim.c b/drivers/mtd/onenand/onenand_sim.c
index 0d89ad5776fa..d64200b7c94b 100644
--- a/drivers/mtd/onenand/onenand_sim.c
+++ b/drivers/mtd/onenand/onenand_sim.c
@@ -88,11 +88,11 @@ do { \
88 88
89/** 89/**
90 * onenand_lock_handle - Handle Lock scheme 90 * onenand_lock_handle - Handle Lock scheme
91 * @param this OneNAND device structure 91 * @this: OneNAND device structure
92 * @param cmd The command to be sent 92 * @cmd: The command to be sent
93 * 93 *
94 * Send lock command to OneNAND device. 94 * Send lock command to OneNAND device.
95 * The lock scheme is depends on chip type. 95 * The lock scheme depends on chip type.
96 */ 96 */
97static void onenand_lock_handle(struct onenand_chip *this, int cmd) 97static void onenand_lock_handle(struct onenand_chip *this, int cmd)
98{ 98{
@@ -131,8 +131,8 @@ static void onenand_lock_handle(struct onenand_chip *this, int cmd)
131 131
132/** 132/**
133 * onenand_bootram_handle - Handle BootRAM area 133 * onenand_bootram_handle - Handle BootRAM area
134 * @param this OneNAND device structure 134 * @this: OneNAND device structure
135 * @param cmd The command to be sent 135 * @cmd: The command to be sent
136 * 136 *
137 * Emulate BootRAM area. It is possible to do basic operation using BootRAM. 137 * Emulate BootRAM area. It is possible to do basic operation using BootRAM.
138 */ 138 */
@@ -153,10 +153,10 @@ static void onenand_bootram_handle(struct onenand_chip *this, int cmd)
153 153
154/** 154/**
155 * onenand_update_interrupt - Set interrupt register 155 * onenand_update_interrupt - Set interrupt register
156 * @param this OneNAND device structure 156 * @this: OneNAND device structure
157 * @param cmd The command to be sent 157 * @cmd: The command to be sent
158 * 158 *
159 * Update interrupt register. The status is depends on command. 159 * Update interrupt register. The status depends on command.
160 */ 160 */
161static void onenand_update_interrupt(struct onenand_chip *this, int cmd) 161static void onenand_update_interrupt(struct onenand_chip *this, int cmd)
162{ 162{
@@ -189,11 +189,12 @@ static void onenand_update_interrupt(struct onenand_chip *this, int cmd)
189} 189}
190 190
191/** 191/**
192 * onenand_check_overwrite - Check over-write if happend 192 * onenand_check_overwrite - Check if over-write happened
193 * @param dest The destination pointer 193 * @dest: The destination pointer
194 * @param src The source pointer 194 * @src: The source pointer
195 * @param count The length to be check 195 * @count: The length to be checked
196 * @return 0 on same, otherwise 1 196 *
197 * Returns: 0 on same, otherwise 1
197 * 198 *
198 * Compare the source with destination 199 * Compare the source with destination
199 */ 200 */
@@ -213,10 +214,10 @@ static int onenand_check_overwrite(void *dest, void *src, size_t count)
213 214
214/** 215/**
215 * onenand_data_handle - Handle OneNAND Core and DataRAM 216 * onenand_data_handle - Handle OneNAND Core and DataRAM
216 * @param this OneNAND device structure 217 * @this: OneNAND device structure
217 * @param cmd The command to be sent 218 * @cmd: The command to be sent
218 * @param dataram Which dataram used 219 * @dataram: Which dataram used
219 * @param offset The offset to OneNAND Core 220 * @offset: The offset to OneNAND Core
220 * 221 *
221 * Copy data from OneNAND Core to DataRAM (read) 222 * Copy data from OneNAND Core to DataRAM (read)
222 * Copy data from DataRAM to OneNAND Core (write) 223 * Copy data from DataRAM to OneNAND Core (write)
@@ -295,8 +296,8 @@ static void onenand_data_handle(struct onenand_chip *this, int cmd,
295 296
296/** 297/**
297 * onenand_command_handle - Handle command 298 * onenand_command_handle - Handle command
298 * @param this OneNAND device structure 299 * @this: OneNAND device structure
299 * @param cmd The command to be sent 300 * @cmd: The command to be sent
300 * 301 *
301 * Emulate OneNAND command. 302 * Emulate OneNAND command.
302 */ 303 */
@@ -350,8 +351,8 @@ static void onenand_command_handle(struct onenand_chip *this, int cmd)
350 351
351/** 352/**
352 * onenand_writew - [OneNAND Interface] Emulate write operation 353 * onenand_writew - [OneNAND Interface] Emulate write operation
353 * @param value value to write 354 * @value: value to write
354 * @param addr address to write 355 * @addr: address to write
355 * 356 *
356 * Write OneNAND register with value 357 * Write OneNAND register with value
357 */ 358 */
@@ -373,7 +374,7 @@ static void onenand_writew(unsigned short value, void __iomem * addr)
373 374
374/** 375/**
375 * flash_init - Initialize OneNAND simulator 376 * flash_init - Initialize OneNAND simulator
376 * @param flash OneNAND simulaotr data strucutres 377 * @flash: OneNAND simulator data structures
377 * 378 *
378 * Initialize OneNAND simulator. 379 * Initialize OneNAND simulator.
379 */ 380 */
@@ -416,7 +417,7 @@ static int __init flash_init(struct onenand_flash *flash)
416 417
417/** 418/**
418 * flash_exit - Clean up OneNAND simulator 419 * flash_exit - Clean up OneNAND simulator
419 * @param flash OneNAND simulaotr data strucutres 420 * @flash: OneNAND simulator data structures
420 * 421 *
421 * Clean up OneNAND simulator. 422 * Clean up OneNAND simulator.
422 */ 423 */
@@ -424,7 +425,6 @@ static void flash_exit(struct onenand_flash *flash)
424{ 425{
425 vfree(ONENAND_CORE(flash)); 426 vfree(ONENAND_CORE(flash));
426 kfree(flash->base); 427 kfree(flash->base);
427 kfree(flash);
428} 428}
429 429
430static int __init onenand_sim_init(void) 430static int __init onenand_sim_init(void)
@@ -449,7 +449,7 @@ static int __init onenand_sim_init(void)
449 info->onenand.write_word = onenand_writew; 449 info->onenand.write_word = onenand_writew;
450 450
451 if (flash_init(&info->flash)) { 451 if (flash_init(&info->flash)) {
452 printk(KERN_ERR "Unable to allocat flash.\n"); 452 printk(KERN_ERR "Unable to allocate flash.\n");
453 kfree(ffchars); 453 kfree(ffchars);
454 kfree(info); 454 kfree(info);
455 return -ENOMEM; 455 return -ENOMEM;
diff --git a/drivers/net/cpmac.c b/drivers/net/cpmac.c
index ed53aaab4c02..ae419736158e 100644
--- a/drivers/net/cpmac.c
+++ b/drivers/net/cpmac.c
@@ -471,7 +471,7 @@ static int cpmac_start_xmit(struct sk_buff *skb, struct net_device *dev)
471 } 471 }
472 472
473 len = max(skb->len, ETH_ZLEN); 473 len = max(skb->len, ETH_ZLEN);
474 queue = skb->queue_mapping; 474 queue = skb_get_queue_mapping(skb);
475#ifdef CONFIG_NETDEVICES_MULTIQUEUE 475#ifdef CONFIG_NETDEVICES_MULTIQUEUE
476 netif_stop_subqueue(dev, queue); 476 netif_stop_subqueue(dev, queue);
477#else 477#else
diff --git a/drivers/net/fec.c b/drivers/net/fec.c
index 2b5782056dda..0fbf1bbbaee9 100644
--- a/drivers/net/fec.c
+++ b/drivers/net/fec.c
@@ -751,13 +751,11 @@ mii_queue(struct net_device *dev, int regval, void (*func)(uint, struct net_devi
751 if (mii_head) { 751 if (mii_head) {
752 mii_tail->mii_next = mip; 752 mii_tail->mii_next = mip;
753 mii_tail = mip; 753 mii_tail = mip;
754 } 754 } else {
755 else {
756 mii_head = mii_tail = mip; 755 mii_head = mii_tail = mip;
757 fep->hwp->fec_mii_data = regval; 756 fep->hwp->fec_mii_data = regval;
758 } 757 }
759 } 758 } else {
760 else {
761 retval = 1; 759 retval = 1;
762 } 760 }
763 761
@@ -768,14 +766,11 @@ mii_queue(struct net_device *dev, int regval, void (*func)(uint, struct net_devi
768 766
769static void mii_do_cmd(struct net_device *dev, const phy_cmd_t *c) 767static void mii_do_cmd(struct net_device *dev, const phy_cmd_t *c)
770{ 768{
771 int k;
772
773 if(!c) 769 if(!c)
774 return; 770 return;
775 771
776 for(k = 0; (c+k)->mii_data != mk_mii_end; k++) { 772 for (; c->mii_data != mk_mii_end; c++)
777 mii_queue(dev, (c+k)->mii_data, (c+k)->funct); 773 mii_queue(dev, c->mii_data, c->funct);
778 }
779} 774}
780 775
781static void mii_parse_sr(uint mii_reg, struct net_device *dev) 776static void mii_parse_sr(uint mii_reg, struct net_device *dev)
@@ -792,7 +787,6 @@ static void mii_parse_sr(uint mii_reg, struct net_device *dev)
792 status |= PHY_STAT_FAULT; 787 status |= PHY_STAT_FAULT;
793 if (mii_reg & 0x0020) 788 if (mii_reg & 0x0020)
794 status |= PHY_STAT_ANC; 789 status |= PHY_STAT_ANC;
795
796 *s = status; 790 *s = status;
797} 791}
798 792
@@ -1239,7 +1233,6 @@ mii_link_interrupt(int irq, void * dev_id);
1239#endif 1233#endif
1240 1234
1241#if defined(CONFIG_M5272) 1235#if defined(CONFIG_M5272)
1242
1243/* 1236/*
1244 * Code specific to Coldfire 5272 setup. 1237 * Code specific to Coldfire 5272 setup.
1245 */ 1238 */
@@ -2020,8 +2013,7 @@ static void mii_relink(struct work_struct *work)
2020 & (PHY_STAT_100FDX | PHY_STAT_10FDX)) 2013 & (PHY_STAT_100FDX | PHY_STAT_10FDX))
2021 duplex = 1; 2014 duplex = 1;
2022 fec_restart(dev, duplex); 2015 fec_restart(dev, duplex);
2023 } 2016 } else
2024 else
2025 fec_stop(dev); 2017 fec_stop(dev);
2026 2018
2027#if 0 2019#if 0
@@ -2119,8 +2111,7 @@ mii_discover_phy(uint mii_reg, struct net_device *dev)
2119 fep->phy_id = phytype << 16; 2111 fep->phy_id = phytype << 16;
2120 mii_queue(dev, mk_mii_read(MII_REG_PHYIR2), 2112 mii_queue(dev, mk_mii_read(MII_REG_PHYIR2),
2121 mii_discover_phy3); 2113 mii_discover_phy3);
2122 } 2114 } else {
2123 else {
2124 fep->phy_addr++; 2115 fep->phy_addr++;
2125 mii_queue(dev, mk_mii_read(MII_REG_PHYIR1), 2116 mii_queue(dev, mk_mii_read(MII_REG_PHYIR1),
2126 mii_discover_phy); 2117 mii_discover_phy);
@@ -2574,8 +2565,7 @@ fec_restart(struct net_device *dev, int duplex)
2574 if (duplex) { 2565 if (duplex) {
2575 fecp->fec_r_cntrl = OPT_FRAME_SIZE | 0x04;/* MII enable */ 2566 fecp->fec_r_cntrl = OPT_FRAME_SIZE | 0x04;/* MII enable */
2576 fecp->fec_x_cntrl = 0x04; /* FD enable */ 2567 fecp->fec_x_cntrl = 0x04; /* FD enable */
2577 } 2568 } else {
2578 else {
2579 /* MII enable|No Rcv on Xmit */ 2569 /* MII enable|No Rcv on Xmit */
2580 fecp->fec_r_cntrl = OPT_FRAME_SIZE | 0x06; 2570 fecp->fec_r_cntrl = OPT_FRAME_SIZE | 0x06;
2581 fecp->fec_x_cntrl = 0x00; 2571 fecp->fec_x_cntrl = 0x00;
diff --git a/drivers/net/mlx4/icm.c b/drivers/net/mlx4/icm.c
index 4b3c109d5eae..887633b207d9 100644
--- a/drivers/net/mlx4/icm.c
+++ b/drivers/net/mlx4/icm.c
@@ -60,7 +60,7 @@ static void mlx4_free_icm_pages(struct mlx4_dev *dev, struct mlx4_icm_chunk *chu
60 PCI_DMA_BIDIRECTIONAL); 60 PCI_DMA_BIDIRECTIONAL);
61 61
62 for (i = 0; i < chunk->npages; ++i) 62 for (i = 0; i < chunk->npages; ++i)
63 __free_pages(chunk->mem[i].page, 63 __free_pages(sg_page(&chunk->mem[i]),
64 get_order(chunk->mem[i].length)); 64 get_order(chunk->mem[i].length));
65} 65}
66 66
@@ -70,7 +70,7 @@ static void mlx4_free_icm_coherent(struct mlx4_dev *dev, struct mlx4_icm_chunk *
70 70
71 for (i = 0; i < chunk->npages; ++i) 71 for (i = 0; i < chunk->npages; ++i)
72 dma_free_coherent(&dev->pdev->dev, chunk->mem[i].length, 72 dma_free_coherent(&dev->pdev->dev, chunk->mem[i].length,
73 lowmem_page_address(chunk->mem[i].page), 73 lowmem_page_address(sg_page(&chunk->mem[i])),
74 sg_dma_address(&chunk->mem[i])); 74 sg_dma_address(&chunk->mem[i]));
75} 75}
76 76
@@ -95,10 +95,13 @@ void mlx4_free_icm(struct mlx4_dev *dev, struct mlx4_icm *icm, int coherent)
95 95
96static int mlx4_alloc_icm_pages(struct scatterlist *mem, int order, gfp_t gfp_mask) 96static int mlx4_alloc_icm_pages(struct scatterlist *mem, int order, gfp_t gfp_mask)
97{ 97{
98 mem->page = alloc_pages(gfp_mask, order); 98 struct page *page;
99 if (!mem->page) 99
100 page = alloc_pages(gfp_mask, order);
101 if (!page)
100 return -ENOMEM; 102 return -ENOMEM;
101 103
104 sg_set_page(mem, page);
102 mem->length = PAGE_SIZE << order; 105 mem->length = PAGE_SIZE << order;
103 mem->offset = 0; 106 mem->offset = 0;
104 return 0; 107 return 0;
@@ -145,6 +148,7 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
145 if (!chunk) 148 if (!chunk)
146 goto fail; 149 goto fail;
147 150
151 sg_init_table(chunk->mem, MLX4_ICM_CHUNK_LEN);
148 chunk->npages = 0; 152 chunk->npages = 0;
149 chunk->nsg = 0; 153 chunk->nsg = 0;
150 list_add_tail(&chunk->list, &icm->chunk_list); 154 list_add_tail(&chunk->list, &icm->chunk_list);
@@ -334,7 +338,7 @@ void *mlx4_table_find(struct mlx4_icm_table *table, int obj, dma_addr_t *dma_han
334 * been assigned to. 338 * been assigned to.
335 */ 339 */
336 if (chunk->mem[i].length > offset) { 340 if (chunk->mem[i].length > offset) {
337 page = chunk->mem[i].page; 341 page = sg_page(&chunk->mem[i]);
338 goto out; 342 goto out;
339 } 343 }
340 offset -= chunk->mem[i].length; 344 offset -= chunk->mem[i].length;
diff --git a/drivers/net/niu.c b/drivers/net/niu.c
index ed1f9bbb2a32..112ab079ce7d 100644
--- a/drivers/net/niu.c
+++ b/drivers/net/niu.c
@@ -3103,31 +3103,12 @@ static int niu_alloc_tx_ring_info(struct niu *np,
3103 3103
3104static void niu_size_rbr(struct niu *np, struct rx_ring_info *rp) 3104static void niu_size_rbr(struct niu *np, struct rx_ring_info *rp)
3105{ 3105{
3106 u16 bs; 3106 u16 bss;
3107 3107
3108 switch (PAGE_SIZE) { 3108 bss = min(PAGE_SHIFT, 15);
3109 case 4 * 1024:
3110 case 8 * 1024:
3111 case 16 * 1024:
3112 case 32 * 1024:
3113 rp->rbr_block_size = PAGE_SIZE;
3114 rp->rbr_blocks_per_page = 1;
3115 break;
3116 3109
3117 default: 3110 rp->rbr_block_size = 1 << bss;
3118 if (PAGE_SIZE % (32 * 1024) == 0) 3111 rp->rbr_blocks_per_page = 1 << (PAGE_SHIFT-bss);
3119 bs = 32 * 1024;
3120 else if (PAGE_SIZE % (16 * 1024) == 0)
3121 bs = 16 * 1024;
3122 else if (PAGE_SIZE % (8 * 1024) == 0)
3123 bs = 8 * 1024;
3124 else if (PAGE_SIZE % (4 * 1024) == 0)
3125 bs = 4 * 1024;
3126 else
3127 BUG();
3128 rp->rbr_block_size = bs;
3129 rp->rbr_blocks_per_page = PAGE_SIZE / bs;
3130 }
3131 3112
3132 rp->rbr_sizes[0] = 256; 3113 rp->rbr_sizes[0] = 256;
3133 rp->rbr_sizes[1] = 1024; 3114 rp->rbr_sizes[1] = 1024;
@@ -7902,12 +7883,7 @@ static int __init niu_init(void)
7902{ 7883{
7903 int err = 0; 7884 int err = 0;
7904 7885
7905 BUILD_BUG_ON((PAGE_SIZE < 4 * 1024) || 7886 BUILD_BUG_ON(PAGE_SIZE < 4 * 1024);
7906 ((PAGE_SIZE > 32 * 1024) &&
7907 ((PAGE_SIZE % (32 * 1024)) != 0 &&
7908 (PAGE_SIZE % (16 * 1024)) != 0 &&
7909 (PAGE_SIZE % (8 * 1024)) != 0 &&
7910 (PAGE_SIZE % (4 * 1024)) != 0)));
7911 7887
7912 niu_debug = netif_msg_init(debug, NIU_MSG_DEFAULT); 7888 niu_debug = netif_msg_init(debug, NIU_MSG_DEFAULT);
7913 7889
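
The niu_size_rbr() rewrite replaces the switch over PAGE_SIZE with shift arithmetic: bss = min(PAGE_SHIFT, 15) caps the receive block at 32 KB, rbr_block_size = 1 << bss, and rbr_blocks_per_page = 1 << (PAGE_SHIFT - bss). With 4 KB pages that yields 4 KB blocks, one per page; with 64 KB pages it yields 32 KB blocks, two per page; and the BUILD_BUG_ON() now only has to rule out pages smaller than 4 KB. A standalone check of the arithmetic (plain userspace C, just to verify the numbers):

#include <stdio.h>

int main(void)
{
	/* mirror the driver's computation for a few common page sizes */
	const int page_shifts[] = { 12, 13, 14, 16 };	/* 4K, 8K, 16K, 64K */

	for (int i = 0; i < 4; i++) {
		int page_shift = page_shifts[i];
		int bss = page_shift < 15 ? page_shift : 15;	/* min(PAGE_SHIFT, 15) */

		printf("PAGE_SIZE=%2dK  rbr_block_size=%2dK  blocks_per_page=%d\n",
		       1 << (page_shift - 10), 1 << (bss - 10),
		       1 << (page_shift - bss));
	}
	return 0;
}
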
diff --git a/drivers/net/ppp_mppe.c b/drivers/net/ppp_mppe.c
index c0b6d19d1457..bcb0885011c8 100644
--- a/drivers/net/ppp_mppe.c
+++ b/drivers/net/ppp_mppe.c
@@ -55,7 +55,7 @@
55#include <linux/mm.h> 55#include <linux/mm.h>
56#include <linux/ppp_defs.h> 56#include <linux/ppp_defs.h>
57#include <linux/ppp-comp.h> 57#include <linux/ppp-comp.h>
58#include <asm/scatterlist.h> 58#include <linux/scatterlist.h>
59 59
60#include "ppp_mppe.h" 60#include "ppp_mppe.h"
61 61
@@ -68,9 +68,7 @@ MODULE_VERSION("1.0.2");
68static unsigned int 68static unsigned int
69setup_sg(struct scatterlist *sg, const void *address, unsigned int length) 69setup_sg(struct scatterlist *sg, const void *address, unsigned int length)
70{ 70{
71 sg[0].page = virt_to_page(address); 71 sg_init_one(sg, address, length);
72 sg[0].offset = offset_in_page(address);
73 sg[0].length = length;
74 return length; 72 return length;
75} 73}
76 74
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 014dc2cfe4d6..09440d783e65 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -64,8 +64,8 @@
64 64
65#define DRV_MODULE_NAME "tg3" 65#define DRV_MODULE_NAME "tg3"
66#define PFX DRV_MODULE_NAME ": " 66#define PFX DRV_MODULE_NAME ": "
67#define DRV_MODULE_VERSION "3.84" 67#define DRV_MODULE_VERSION "3.85"
68#define DRV_MODULE_RELDATE "October 12, 2007" 68#define DRV_MODULE_RELDATE "October 18, 2007"
69 69
70#define TG3_DEF_MAC_MODE 0 70#define TG3_DEF_MAC_MODE 0
71#define TG3_DEF_RX_MODE 0 71#define TG3_DEF_RX_MODE 0
@@ -200,6 +200,7 @@ static struct pci_device_id tg3_pci_tbl[] = {
200 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)}, 200 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
201 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)}, 201 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
202 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)}, 202 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
203 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
203 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)}, 204 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
204 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)}, 205 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
205 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)}, 206 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
@@ -5028,10 +5029,7 @@ static int tg3_poll_fw(struct tg3 *tp)
5028/* Save PCI command register before chip reset */ 5029/* Save PCI command register before chip reset */
5029static void tg3_save_pci_state(struct tg3 *tp) 5030static void tg3_save_pci_state(struct tg3 *tp)
5030{ 5031{
5031 u32 val; 5032 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
5032
5033 pci_read_config_dword(tp->pdev, TG3PCI_COMMAND, &val);
5034 tp->pci_cmd = val;
5035} 5033}
5036 5034
5037/* Restore PCI state after chip reset */ 5035/* Restore PCI state after chip reset */
@@ -5054,7 +5052,7 @@ static void tg3_restore_pci_state(struct tg3 *tp)
5054 PCISTATE_ALLOW_APE_SHMEM_WR; 5052 PCISTATE_ALLOW_APE_SHMEM_WR;
5055 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val); 5053 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
5056 5054
5057 pci_write_config_dword(tp->pdev, TG3PCI_COMMAND, tp->pci_cmd); 5055 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
5058 5056
5059 if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) { 5057 if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
5060 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, 5058 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
@@ -10820,9 +10818,24 @@ out_not_found:
10820 strcpy(tp->board_part_number, "none"); 10818 strcpy(tp->board_part_number, "none");
10821} 10819}
10822 10820
10821static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
10822{
10823 u32 val;
10824
10825 if (tg3_nvram_read_swab(tp, offset, &val) ||
10826 (val & 0xfc000000) != 0x0c000000 ||
10827 tg3_nvram_read_swab(tp, offset + 4, &val) ||
10828 val != 0)
10829 return 0;
10830
10831 return 1;
10832}
10833
10823static void __devinit tg3_read_fw_ver(struct tg3 *tp) 10834static void __devinit tg3_read_fw_ver(struct tg3 *tp)
10824{ 10835{
10825 u32 val, offset, start; 10836 u32 val, offset, start;
10837 u32 ver_offset;
10838 int i, bcnt;
10826 10839
10827 if (tg3_nvram_read_swab(tp, 0, &val)) 10840 if (tg3_nvram_read_swab(tp, 0, &val))
10828 return; 10841 return;
@@ -10835,29 +10848,71 @@ static void __devinit tg3_read_fw_ver(struct tg3 *tp)
10835 return; 10848 return;
10836 10849
10837 offset = tg3_nvram_logical_addr(tp, offset); 10850 offset = tg3_nvram_logical_addr(tp, offset);
10838 if (tg3_nvram_read_swab(tp, offset, &val)) 10851
10852 if (!tg3_fw_img_is_valid(tp, offset) ||
10853 tg3_nvram_read_swab(tp, offset + 8, &ver_offset))
10839 return; 10854 return;
10840 10855
10841 if ((val & 0xfc000000) == 0x0c000000) { 10856 offset = offset + ver_offset - start;
10842 u32 ver_offset, addr; 10857 for (i = 0; i < 16; i += 4) {
10843 int i; 10858 if (tg3_nvram_read(tp, offset + i, &val))
10859 return;
10844 10860
10845 if (tg3_nvram_read_swab(tp, offset + 4, &val) || 10861 val = le32_to_cpu(val);
10846 tg3_nvram_read_swab(tp, offset + 8, &ver_offset)) 10862 memcpy(tp->fw_ver + i, &val, 4);
10863 }
10864
10865 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
10866 (tp->tg3_flags & TG3_FLG3_ENABLE_APE))
10867 return;
10868
10869 for (offset = TG3_NVM_DIR_START;
10870 offset < TG3_NVM_DIR_END;
10871 offset += TG3_NVM_DIRENT_SIZE) {
10872 if (tg3_nvram_read_swab(tp, offset, &val))
10847 return; 10873 return;
10848 10874
10849 if (val != 0) 10875 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
10876 break;
10877 }
10878
10879 if (offset == TG3_NVM_DIR_END)
10880 return;
10881
10882 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
10883 start = 0x08000000;
10884 else if (tg3_nvram_read_swab(tp, offset - 4, &start))
10885 return;
10886
10887 if (tg3_nvram_read_swab(tp, offset + 4, &offset) ||
10888 !tg3_fw_img_is_valid(tp, offset) ||
10889 tg3_nvram_read_swab(tp, offset + 8, &val))
10890 return;
10891
10892 offset += val - start;
10893
10894 bcnt = strlen(tp->fw_ver);
10895
10896 tp->fw_ver[bcnt++] = ',';
10897 tp->fw_ver[bcnt++] = ' ';
10898
10899 for (i = 0; i < 4; i++) {
10900 if (tg3_nvram_read(tp, offset, &val))
10850 return; 10901 return;
10851 10902
10852 addr = offset + ver_offset - start; 10903 val = le32_to_cpu(val);
10853 for (i = 0; i < 16; i += 4) { 10904 offset += sizeof(val);
10854 if (tg3_nvram_read(tp, addr + i, &val))
10855 return;
10856 10905
10857 val = cpu_to_le32(val); 10906 if (bcnt > TG3_VER_SIZE - sizeof(val)) {
10858 memcpy(tp->fw_ver + i, &val, 4); 10907 memcpy(&tp->fw_ver[bcnt], &val, TG3_VER_SIZE - bcnt);
10908 break;
10859 } 10909 }
10910
10911 memcpy(&tp->fw_ver[bcnt], &val, sizeof(val));
10912 bcnt += sizeof(val);
10860 } 10913 }
10914
10915 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
10861} 10916}
10862 10917
10863static struct pci_dev * __devinit tg3_find_peer(struct tg3 *); 10918static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index 6dbdad2b8f88..1d5b2a3dd29d 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -1540,6 +1540,12 @@
1540#define TG3_EEPROM_MAGIC_HW 0xabcd 1540#define TG3_EEPROM_MAGIC_HW 0xabcd
1541#define TG3_EEPROM_MAGIC_HW_MSK 0xffff 1541#define TG3_EEPROM_MAGIC_HW_MSK 0xffff
1542 1542
1543#define TG3_NVM_DIR_START 0x18
1544#define TG3_NVM_DIR_END 0x78
1545#define TG3_NVM_DIRENT_SIZE 0xc
1546#define TG3_NVM_DIRTYPE_SHIFT 24
1547#define TG3_NVM_DIRTYPE_ASFINI 1
1548
1543/* 32K Window into NIC internal memory */ 1549/* 32K Window into NIC internal memory */
1544#define NIC_SRAM_WIN_BASE 0x00008000 1550#define NIC_SRAM_WIN_BASE 0x00008000
1545 1551
@@ -2415,10 +2421,11 @@ struct tg3 {
2415#define PHY_REV_BCM5411_X0 0x1 /* Found on Netgear GA302T */ 2421#define PHY_REV_BCM5411_X0 0x1 /* Found on Netgear GA302T */
2416 2422
2417 u32 led_ctrl; 2423 u32 led_ctrl;
2418 u32 pci_cmd; 2424 u16 pci_cmd;
2419 2425
2420 char board_part_number[24]; 2426 char board_part_number[24];
2421 char fw_ver[16]; 2427#define TG3_VER_SIZE 32
2428 char fw_ver[TG3_VER_SIZE];
2422 u32 nic_sram_data_cfg; 2429 u32 nic_sram_data_cfg;
2423 u32 pci_clock_ctrl; 2430 u32 pci_clock_ctrl;
2424 struct pci_dev *pdev_peer; 2431 struct pci_dev *pdev_peer;
diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c
index b3c4dbff26b8..7c60cbd85dc8 100644
--- a/drivers/parisc/ccio-dma.c
+++ b/drivers/parisc/ccio-dma.c
@@ -42,6 +42,7 @@
42#include <linux/reboot.h> 42#include <linux/reboot.h>
43#include <linux/proc_fs.h> 43#include <linux/proc_fs.h>
44#include <linux/seq_file.h> 44#include <linux/seq_file.h>
45#include <linux/scatterlist.h>
45 46
46#include <asm/byteorder.h> 47#include <asm/byteorder.h>
47#include <asm/cache.h> /* for L1_CACHE_BYTES */ 48#include <asm/cache.h> /* for L1_CACHE_BYTES */
diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c
index e5c323936eae..e527a0e1d6c0 100644
--- a/drivers/parisc/sba_iommu.c
+++ b/drivers/parisc/sba_iommu.c
@@ -28,6 +28,7 @@
28#include <linux/mm.h> 28#include <linux/mm.h>
29#include <linux/string.h> 29#include <linux/string.h>
30#include <linux/pci.h> 30#include <linux/pci.h>
31#include <linux/scatterlist.h>
31 32
32#include <asm/byteorder.h> 33#include <asm/byteorder.h>
33#include <asm/io.h> 34#include <asm/io.h>
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index 006054a40995..555055650733 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -20,6 +20,9 @@ obj-$(CONFIG_PCI_MSI) += msi.o
20# Build the Hypertransport interrupt support 20# Build the Hypertransport interrupt support
21obj-$(CONFIG_HT_IRQ) += htirq.o 21obj-$(CONFIG_HT_IRQ) += htirq.o
22 22
23# Build Intel IOMMU support
24obj-$(CONFIG_DMAR) += dmar.o iova.o intel-iommu.o
25
23# 26#
24# Some architectures use the generic PCI setup functions 27# Some architectures use the generic PCI setup functions
25# 28#
diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c
new file mode 100644
index 000000000000..5dfdfdac92e1
--- /dev/null
+++ b/drivers/pci/dmar.c
@@ -0,0 +1,329 @@
1/*
2 * Copyright (c) 2006, Intel Corporation.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
15 * Place - Suite 330, Boston, MA 02111-1307 USA.
16 *
17 * Copyright (C) Ashok Raj <ashok.raj@intel.com>
18 * Copyright (C) Shaohua Li <shaohua.li@intel.com>
19 * Copyright (C) Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
20 *
21 * This file implements early detection/parsing of DMA Remapping Devices
22 * reported to OS through BIOS via DMA remapping reporting (DMAR) ACPI
23 * tables.
24 */
25
26#include <linux/pci.h>
27#include <linux/dmar.h>
28
29#undef PREFIX
30#define PREFIX "DMAR:"
31
32/* No locks are needed as DMA remapping hardware unit
33 * list is constructed at boot time and hotplug of
34 * these units is not supported by the architecture.
35 */
36LIST_HEAD(dmar_drhd_units);
37LIST_HEAD(dmar_rmrr_units);
38
39static struct acpi_table_header * __initdata dmar_tbl;
40
41static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
42{
43 /*
44 * add INCLUDE_ALL at the tail, so a scan of the list will
45 * find it at the very end.
46 */
47 if (drhd->include_all)
48 list_add_tail(&drhd->list, &dmar_drhd_units);
49 else
50 list_add(&drhd->list, &dmar_drhd_units);
51}
52
53static void __init dmar_register_rmrr_unit(struct dmar_rmrr_unit *rmrr)
54{
55 list_add(&rmrr->list, &dmar_rmrr_units);
56}
57
58static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope,
59 struct pci_dev **dev, u16 segment)
60{
61 struct pci_bus *bus;
62 struct pci_dev *pdev = NULL;
63 struct acpi_dmar_pci_path *path;
64 int count;
65
66 bus = pci_find_bus(segment, scope->bus);
67 path = (struct acpi_dmar_pci_path *)(scope + 1);
68 count = (scope->length - sizeof(struct acpi_dmar_device_scope))
69 / sizeof(struct acpi_dmar_pci_path);
70
71 while (count) {
72 if (pdev)
73 pci_dev_put(pdev);
74 /*
75 * Some BIOSes list non-existent devices in the DMAR
76 * table; just ignore them
77 */
78 if (!bus) {
79 printk(KERN_WARNING
80 PREFIX "Device scope bus [%d] not found\n",
81 scope->bus);
82 break;
83 }
84 pdev = pci_get_slot(bus, PCI_DEVFN(path->dev, path->fn));
85 if (!pdev) {
86 printk(KERN_WARNING PREFIX
87 "Device scope device [%04x:%02x:%02x.%02x] not found\n",
88 segment, bus->number, path->dev, path->fn);
89 break;
90 }
91 path++;
92 count--;
93 bus = pdev->subordinate;
94 }
95 if (!pdev) {
96 printk(KERN_WARNING PREFIX
97 "Device scope device [%04x:%02x:%02x.%02x] not found\n",
98 segment, scope->bus, path->dev, path->fn);
99 *dev = NULL;
100 return 0;
101 }
102 if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT && \
103 pdev->subordinate) || (scope->entry_type == \
104 ACPI_DMAR_SCOPE_TYPE_BRIDGE && !pdev->subordinate)) {
105 pci_dev_put(pdev);
106 printk(KERN_WARNING PREFIX
107 "Device scope type does not match for %s\n",
108 pci_name(pdev));
109 return -EINVAL;
110 }
111 *dev = pdev;
112 return 0;
113}
114
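/*
 * Illustrative note (not part of the original patch): a device scope entry
 * is variable-length, so the number of PCI path elements is derived from
 * its length field.  For example, an endpoint sitting two bridges deep
 * would be described with
 *
 *     scope->length = sizeof(struct acpi_dmar_device_scope)
 *                     + 3 * sizeof(struct acpi_dmar_pci_path)
 *
 * giving count = 3 above; the walk then starts at scope->bus and descends
 * one subordinate bus per (dev, fn) path element until the target device
 * itself is reached.
 */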
115static int __init dmar_parse_dev_scope(void *start, void *end, int *cnt,
116 struct pci_dev ***devices, u16 segment)
117{
118 struct acpi_dmar_device_scope *scope;
119 void * tmp = start;
120 int index;
121 int ret;
122
123 *cnt = 0;
124 while (start < end) {
125 scope = start;
126 if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
127 scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
128 (*cnt)++;
129 else
130 printk(KERN_WARNING PREFIX
131 "Unsupported device scope\n");
132 start += scope->length;
133 }
134 if (*cnt == 0)
135 return 0;
136
137 *devices = kcalloc(*cnt, sizeof(struct pci_dev *), GFP_KERNEL);
138 if (!*devices)
139 return -ENOMEM;
140
141 start = tmp;
142 index = 0;
143 while (start < end) {
144 scope = start;
145 if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
146 scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE) {
147 ret = dmar_parse_one_dev_scope(scope,
148 &(*devices)[index], segment);
149 if (ret) {
150 kfree(*devices);
151 return ret;
152 }
153 index ++;
154 }
155 start += scope->length;
156 }
157
158 return 0;
159}
160
161/**
162 * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
163 * structure, which uniquely represents one DMA remapping hardware unit
164 * present in the platform
165 */
166static int __init
167dmar_parse_one_drhd(struct acpi_dmar_header *header)
168{
169 struct acpi_dmar_hardware_unit *drhd;
170 struct dmar_drhd_unit *dmaru;
171 int ret = 0;
172 static int include_all;
173
174 dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL);
175 if (!dmaru)
176 return -ENOMEM;
177
178 drhd = (struct acpi_dmar_hardware_unit *)header;
179 dmaru->reg_base_addr = drhd->address;
180 dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */
181
182 if (!dmaru->include_all)
183 ret = dmar_parse_dev_scope((void *)(drhd + 1),
184 ((void *)drhd) + header->length,
185 &dmaru->devices_cnt, &dmaru->devices,
186 drhd->segment);
187 else {
188 /* Only allow one INCLUDE_ALL */
189 if (include_all) {
190 printk(KERN_WARNING PREFIX "Only one INCLUDE_ALL "
191 "device scope is allowed\n");
192 ret = -EINVAL;
193 }
194 include_all = 1;
195 }
196
197 if (ret || (dmaru->devices_cnt == 0 && !dmaru->include_all))
198 kfree(dmaru);
199 else
200 dmar_register_drhd_unit(dmaru);
201 return ret;
202}
203
204static int __init
205dmar_parse_one_rmrr(struct acpi_dmar_header *header)
206{
207 struct acpi_dmar_reserved_memory *rmrr;
208 struct dmar_rmrr_unit *rmrru;
209 int ret = 0;
210
211 rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
212 if (!rmrru)
213 return -ENOMEM;
214
215 rmrr = (struct acpi_dmar_reserved_memory *)header;
216 rmrru->base_address = rmrr->base_address;
217 rmrru->end_address = rmrr->end_address;
218 ret = dmar_parse_dev_scope((void *)(rmrr + 1),
219 ((void *)rmrr) + header->length,
220 &rmrru->devices_cnt, &rmrru->devices, rmrr->segment);
221
222 if (ret || (rmrru->devices_cnt == 0))
223 kfree(rmrru);
224 else
225 dmar_register_rmrr_unit(rmrru);
226 return ret;
227}
228
229static void __init
230dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
231{
232 struct acpi_dmar_hardware_unit *drhd;
233 struct acpi_dmar_reserved_memory *rmrr;
234
235 switch (header->type) {
236 case ACPI_DMAR_TYPE_HARDWARE_UNIT:
237 drhd = (struct acpi_dmar_hardware_unit *)header;
238 printk (KERN_INFO PREFIX
239 "DRHD (flags: 0x%08x) base: 0x%016Lx\n",
240 drhd->flags, drhd->address);
241 break;
242 case ACPI_DMAR_TYPE_RESERVED_MEMORY:
243 rmrr = (struct acpi_dmar_reserved_memory *)header;
244
245 printk (KERN_INFO PREFIX
246 "RMRR base: 0x%016Lx end: 0x%016Lx\n",
247 rmrr->base_address, rmrr->end_address);
248 break;
249 }
250}
251
252/**
253 * parse_dmar_table - parses the DMA remapping (DMAR) reporting table
254 */
255static int __init
256parse_dmar_table(void)
257{
258 struct acpi_table_dmar *dmar;
259 struct acpi_dmar_header *entry_header;
260 int ret = 0;
261
262 dmar = (struct acpi_table_dmar *)dmar_tbl;
263 if (!dmar)
264 return -ENODEV;
265
266 if (!dmar->width) {
267 printk(KERN_WARNING PREFIX "Invalid DMAR: host address width is zero\n");
268 return -EINVAL;
269 }
270
271 printk (KERN_INFO PREFIX "Host address width %d\n",
272 dmar->width + 1);
273
274 entry_header = (struct acpi_dmar_header *)(dmar + 1);
275 while (((unsigned long)entry_header) <
276 (((unsigned long)dmar) + dmar_tbl->length)) {
277 dmar_table_print_dmar_entry(entry_header);
278
279 switch (entry_header->type) {
280 case ACPI_DMAR_TYPE_HARDWARE_UNIT:
281 ret = dmar_parse_one_drhd(entry_header);
282 break;
283 case ACPI_DMAR_TYPE_RESERVED_MEMORY:
284 ret = dmar_parse_one_rmrr(entry_header);
285 break;
286 default:
287 printk(KERN_WARNING PREFIX
288 "Unknown DMAR structure type\n");
289 ret = 0; /* for forward compatibility */
290 break;
291 }
292 if (ret)
293 break;
294
295 entry_header = ((void *)entry_header + entry_header->length);
296 }
297 return ret;
298}
299
300
301int __init dmar_table_init(void)
302{
303
304 parse_dmar_table();
305 if (list_empty(&dmar_drhd_units)) {
306 printk(KERN_INFO PREFIX "No DMAR devices found\n");
307 return -ENODEV;
308 }
309 return 0;
310}
311
312/**
313 * early_dmar_detect - checks to see if the platform supports DMAR devices
314 */
315int __init early_dmar_detect(void)
316{
317 acpi_status status = AE_OK;
318
319 /* if we can find the DMAR table, then there are DMAR devices */
320 status = acpi_get_table(ACPI_SIG_DMAR, 0,
321 (struct acpi_table_header **)&dmar_tbl);
322
323 if (ACPI_SUCCESS(status) && !dmar_tbl) {
324 printk (KERN_WARNING PREFIX "Unable to map DMAR\n");
325 status = AE_NOT_FOUND;
326 }
327
328 return (ACPI_SUCCESS(status) ? 1 : 0);
329}
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
new file mode 100644
index 000000000000..0c4ab3b07274
--- /dev/null
+++ b/drivers/pci/intel-iommu.c
@@ -0,0 +1,2271 @@
1/*
2 * Copyright (c) 2006, Intel Corporation.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
15 * Place - Suite 330, Boston, MA 02111-1307 USA.
16 *
17 * Copyright (C) Ashok Raj <ashok.raj@intel.com>
18 * Copyright (C) Shaohua Li <shaohua.li@intel.com>
19 * Copyright (C) Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
20 */
21
22#include <linux/init.h>
23#include <linux/bitmap.h>
24#include <linux/slab.h>
25#include <linux/irq.h>
26#include <linux/interrupt.h>
27#include <linux/sysdev.h>
28#include <linux/spinlock.h>
29#include <linux/pci.h>
30#include <linux/dmar.h>
31#include <linux/dma-mapping.h>
32#include <linux/mempool.h>
33#include "iova.h"
34#include "intel-iommu.h"
35#include <asm/proto.h> /* force_iommu in this header in x86-64*/
36#include <asm/cacheflush.h>
37#include <asm/iommu.h>
38#include "pci.h"
39
40#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
41#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
42
43#define IOAPIC_RANGE_START (0xfee00000)
44#define IOAPIC_RANGE_END (0xfeefffff)
45#define IOVA_START_ADDR (0x1000)
46
47#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48
48
49#define DMAR_OPERATION_TIMEOUT (HZ*60) /* 1m */
50
51#define DOMAIN_MAX_ADDR(gaw) ((((u64)1) << gaw) - 1)
52
53static void domain_remove_dev_info(struct dmar_domain *domain);
54
55static int dmar_disabled;
56static int __initdata dmar_map_gfx = 1;
57static int dmar_forcedac;
58
59#define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
60static DEFINE_SPINLOCK(device_domain_lock);
61static LIST_HEAD(device_domain_list);
62
63static int __init intel_iommu_setup(char *str)
64{
65 if (!str)
66 return -EINVAL;
67 while (*str) {
68 if (!strncmp(str, "off", 3)) {
69 dmar_disabled = 1;
70 printk(KERN_INFO"Intel-IOMMU: disabled\n");
71 } else if (!strncmp(str, "igfx_off", 8)) {
72 dmar_map_gfx = 0;
73 printk(KERN_INFO
74 "Intel-IOMMU: disable GFX device mapping\n");
75 } else if (!strncmp(str, "forcedac", 8)) {
76 printk (KERN_INFO
77 "Intel-IOMMU: Forcing DAC for PCI devices\n");
78 dmar_forcedac = 1;
79 }
80
81 str += strcspn(str, ",");
82 while (*str == ',')
83 str++;
84 }
85 return 0;
86}
87__setup("intel_iommu=", intel_iommu_setup);
88
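/*
 * Usage sketch (illustrative, not part of the patch): the parser above
 * accepts a comma-separated option string on the kernel command line, e.g.
 *
 *     intel_iommu=off                disable DMA remapping entirely
 *     intel_iommu=igfx_off           leave graphics devices unmapped
 *     intel_iommu=igfx_off,forcedac  additionally skip the 32-bit IOVA
 *                                    preference for 64-bit capable devices
 *
 * Unrecognised tokens are skipped silently by the strcspn()-based scan.
 */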
89static struct kmem_cache *iommu_domain_cache;
90static struct kmem_cache *iommu_devinfo_cache;
91static struct kmem_cache *iommu_iova_cache;
92
93static inline void *iommu_kmem_cache_alloc(struct kmem_cache *cachep)
94{
95 unsigned int flags;
96 void *vaddr;
97
98 /* trying to avoid low memory issues */
99 flags = current->flags & PF_MEMALLOC;
100 current->flags |= PF_MEMALLOC;
101 vaddr = kmem_cache_alloc(cachep, GFP_ATOMIC);
102 current->flags &= (~PF_MEMALLOC | flags);
103 return vaddr;
104}
105
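/*
 * Worked example of the PF_MEMALLOC save/restore above (illustrative):
 * 'flags' holds only the caller's original PF_MEMALLOC bit.  After the
 * allocation, (~PF_MEMALLOC | flags) is all-ones when the bit was already
 * set (so the flags are left unchanged) and ~PF_MEMALLOC when it was not
 * (so the temporarily-set bit is cleared again); no other flag is touched.
 */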
106
107static inline void *alloc_pgtable_page(void)
108{
109 unsigned int flags;
110 void *vaddr;
111
112 /* trying to avoid low memory issues */
113 flags = current->flags & PF_MEMALLOC;
114 current->flags |= PF_MEMALLOC;
115 vaddr = (void *)get_zeroed_page(GFP_ATOMIC);
116 current->flags &= (~PF_MEMALLOC | flags);
117 return vaddr;
118}
119
120static inline void free_pgtable_page(void *vaddr)
121{
122 free_page((unsigned long)vaddr);
123}
124
125static inline void *alloc_domain_mem(void)
126{
127 return iommu_kmem_cache_alloc(iommu_domain_cache);
128}
129
130static inline void free_domain_mem(void *vaddr)
131{
132 kmem_cache_free(iommu_domain_cache, vaddr);
133}
134
135static inline void * alloc_devinfo_mem(void)
136{
137 return iommu_kmem_cache_alloc(iommu_devinfo_cache);
138}
139
140static inline void free_devinfo_mem(void *vaddr)
141{
142 kmem_cache_free(iommu_devinfo_cache, vaddr);
143}
144
145struct iova *alloc_iova_mem(void)
146{
147 return iommu_kmem_cache_alloc(iommu_iova_cache);
148}
149
150void free_iova_mem(struct iova *iova)
151{
152 kmem_cache_free(iommu_iova_cache, iova);
153}
154
155static inline void __iommu_flush_cache(
156 struct intel_iommu *iommu, void *addr, int size)
157{
158 if (!ecap_coherent(iommu->ecap))
159 clflush_cache_range(addr, size);
160}
161
162/* Gets context entry for a given bus and devfn */
163static struct context_entry * device_to_context_entry(struct intel_iommu *iommu,
164 u8 bus, u8 devfn)
165{
166 struct root_entry *root;
167 struct context_entry *context;
168 unsigned long phy_addr;
169 unsigned long flags;
170
171 spin_lock_irqsave(&iommu->lock, flags);
172 root = &iommu->root_entry[bus];
173 context = get_context_addr_from_root(root);
174 if (!context) {
175 context = (struct context_entry *)alloc_pgtable_page();
176 if (!context) {
177 spin_unlock_irqrestore(&iommu->lock, flags);
178 return NULL;
179 }
180 __iommu_flush_cache(iommu, (void *)context, PAGE_SIZE_4K);
181 phy_addr = virt_to_phys((void *)context);
182 set_root_value(root, phy_addr);
183 set_root_present(root);
184 __iommu_flush_cache(iommu, root, sizeof(*root));
185 }
186 spin_unlock_irqrestore(&iommu->lock, flags);
187 return &context[devfn];
188}
189
190static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
191{
192 struct root_entry *root;
193 struct context_entry *context;
194 int ret;
195 unsigned long flags;
196
197 spin_lock_irqsave(&iommu->lock, flags);
198 root = &iommu->root_entry[bus];
199 context = get_context_addr_from_root(root);
200 if (!context) {
201 ret = 0;
202 goto out;
203 }
204 ret = context_present(context[devfn]);
205out:
206 spin_unlock_irqrestore(&iommu->lock, flags);
207 return ret;
208}
209
210static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
211{
212 struct root_entry *root;
213 struct context_entry *context;
214 unsigned long flags;
215
216 spin_lock_irqsave(&iommu->lock, flags);
217 root = &iommu->root_entry[bus];
218 context = get_context_addr_from_root(root);
219 if (context) {
220 context_clear_entry(context[devfn]);
221 __iommu_flush_cache(iommu, &context[devfn], \
222 sizeof(*context));
223 }
224 spin_unlock_irqrestore(&iommu->lock, flags);
225}
226
227static void free_context_table(struct intel_iommu *iommu)
228{
229 struct root_entry *root;
230 int i;
231 unsigned long flags;
232 struct context_entry *context;
233
234 spin_lock_irqsave(&iommu->lock, flags);
235 if (!iommu->root_entry) {
236 goto out;
237 }
238 for (i = 0; i < ROOT_ENTRY_NR; i++) {
239 root = &iommu->root_entry[i];
240 context = get_context_addr_from_root(root);
241 if (context)
242 free_pgtable_page(context);
243 }
244 free_pgtable_page(iommu->root_entry);
245 iommu->root_entry = NULL;
246out:
247 spin_unlock_irqrestore(&iommu->lock, flags);
248}
249
250/* page table handling */
251#define LEVEL_STRIDE (9)
252#define LEVEL_MASK (((u64)1 << LEVEL_STRIDE) - 1)
253
254static inline int agaw_to_level(int agaw)
255{
256 return agaw + 2;
257}
258
259static inline int agaw_to_width(int agaw)
260{
261 return 30 + agaw * LEVEL_STRIDE;
262
263}
264
265static inline int width_to_agaw(int width)
266{
267 return (width - 30) / LEVEL_STRIDE;
268}
269
270static inline unsigned int level_to_offset_bits(int level)
271{
272 return (12 + (level - 1) * LEVEL_STRIDE);
273}
274
275static inline int address_level_offset(u64 addr, int level)
276{
277 return ((addr >> level_to_offset_bits(level)) & LEVEL_MASK);
278}
279
280static inline u64 level_mask(int level)
281{
282 return ((u64)-1 << level_to_offset_bits(level));
283}
284
285static inline u64 level_size(int level)
286{
287 return ((u64)1 << level_to_offset_bits(level));
288}
289
290static inline u64 align_to_level(u64 addr, int level)
291{
292 return ((addr + level_size(level) - 1) & level_mask(level));
293}
294
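/*
 * Worked example for the AGAW helpers above (illustrative): with a 48-bit
 * adjusted guest address width,
 *
 *     width_to_agaw(48)          = (48 - 30) / 9 = 2
 *     agaw_to_level(2)           = 4               (4-level page table)
 *     level_to_offset_bits(1..4) = 12, 21, 30, 39
 *
 * so address_level_offset() extracts one 9-bit index per level above the
 * 4K page offset, and agaw_to_width(2) = 30 + 2 * 9 = 48 recovers the width.
 */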
295static struct dma_pte * addr_to_dma_pte(struct dmar_domain *domain, u64 addr)
296{
297 int addr_width = agaw_to_width(domain->agaw);
298 struct dma_pte *parent, *pte = NULL;
299 int level = agaw_to_level(domain->agaw);
300 int offset;
301 unsigned long flags;
302
303 BUG_ON(!domain->pgd);
304
305 addr &= (((u64)1) << addr_width) - 1;
306 parent = domain->pgd;
307
308 spin_lock_irqsave(&domain->mapping_lock, flags);
309 while (level > 0) {
310 void *tmp_page;
311
312 offset = address_level_offset(addr, level);
313 pte = &parent[offset];
314 if (level == 1)
315 break;
316
317 if (!dma_pte_present(*pte)) {
318 tmp_page = alloc_pgtable_page();
319
320 if (!tmp_page) {
321 spin_unlock_irqrestore(&domain->mapping_lock,
322 flags);
323 return NULL;
324 }
325 __iommu_flush_cache(domain->iommu, tmp_page,
326 PAGE_SIZE_4K);
327 dma_set_pte_addr(*pte, virt_to_phys(tmp_page));
328 /*
329 * higher-level tables always set r/w; the last-level page
330 * table controls read/write
331 */
332 dma_set_pte_readable(*pte);
333 dma_set_pte_writable(*pte);
334 __iommu_flush_cache(domain->iommu, pte, sizeof(*pte));
335 }
336 parent = phys_to_virt(dma_pte_addr(*pte));
337 level--;
338 }
339
340 spin_unlock_irqrestore(&domain->mapping_lock, flags);
341 return pte;
342}
343
344/* return address's pte at specific level */
345static struct dma_pte *dma_addr_level_pte(struct dmar_domain *domain, u64 addr,
346 int level)
347{
348 struct dma_pte *parent, *pte = NULL;
349 int total = agaw_to_level(domain->agaw);
350 int offset;
351
352 parent = domain->pgd;
353 while (level <= total) {
354 offset = address_level_offset(addr, total);
355 pte = &parent[offset];
356 if (level == total)
357 return pte;
358
359 if (!dma_pte_present(*pte))
360 break;
361 parent = phys_to_virt(dma_pte_addr(*pte));
362 total--;
363 }
364 return NULL;
365}
366
367/* clear one page's page table */
368static void dma_pte_clear_one(struct dmar_domain *domain, u64 addr)
369{
370 struct dma_pte *pte = NULL;
371
372 /* get last level pte */
373 pte = dma_addr_level_pte(domain, addr, 1);
374
375 if (pte) {
376 dma_clear_pte(*pte);
377 __iommu_flush_cache(domain->iommu, pte, sizeof(*pte));
378 }
379}
380
381/* clear last level pte, a tlb flush should be followed */
382static void dma_pte_clear_range(struct dmar_domain *domain, u64 start, u64 end)
383{
384 int addr_width = agaw_to_width(domain->agaw);
385
386 start &= (((u64)1) << addr_width) - 1;
387 end &= (((u64)1) << addr_width) - 1;
388 /* in case it's partial page */
389 start = PAGE_ALIGN_4K(start);
390 end &= PAGE_MASK_4K;
391
392 /* we don't need lock here, nobody else touches the iova range */
393 while (start < end) {
394 dma_pte_clear_one(domain, start);
395 start += PAGE_SIZE_4K;
396 }
397}
398
399/* free page table pages. last level pte should already be cleared */
400static void dma_pte_free_pagetable(struct dmar_domain *domain,
401 u64 start, u64 end)
402{
403 int addr_width = agaw_to_width(domain->agaw);
404 struct dma_pte *pte;
405 int total = agaw_to_level(domain->agaw);
406 int level;
407 u64 tmp;
408
409 start &= (((u64)1) << addr_width) - 1;
410 end &= (((u64)1) << addr_width) - 1;
411
412 /* we don't need lock here, nobody else touches the iova range */
413 level = 2;
414 while (level <= total) {
415 tmp = align_to_level(start, level);
416 if (tmp >= end || (tmp + level_size(level) > end))
417 return;
418
419 while (tmp < end) {
420 pte = dma_addr_level_pte(domain, tmp, level);
421 if (pte) {
422 free_pgtable_page(
423 phys_to_virt(dma_pte_addr(*pte)));
424 dma_clear_pte(*pte);
425 __iommu_flush_cache(domain->iommu,
426 pte, sizeof(*pte));
427 }
428 tmp += level_size(level);
429 }
430 level++;
431 }
432 /* free pgd */
433 if (start == 0 && end >= ((((u64)1) << addr_width) - 1)) {
434 free_pgtable_page(domain->pgd);
435 domain->pgd = NULL;
436 }
437}
438
439/* iommu handling */
440static int iommu_alloc_root_entry(struct intel_iommu *iommu)
441{
442 struct root_entry *root;
443 unsigned long flags;
444
445 root = (struct root_entry *)alloc_pgtable_page();
446 if (!root)
447 return -ENOMEM;
448
449 __iommu_flush_cache(iommu, root, PAGE_SIZE_4K);
450
451 spin_lock_irqsave(&iommu->lock, flags);
452 iommu->root_entry = root;
453 spin_unlock_irqrestore(&iommu->lock, flags);
454
455 return 0;
456}
457
458#define IOMMU_WAIT_OP(iommu, offset, op, cond, sts) \
459{\
460 unsigned long start_time = jiffies;\
461 while (1) {\
462 sts = op (iommu->reg + offset);\
463 if (cond)\
464 break;\
465 if (time_after(jiffies, start_time + DMAR_OPERATION_TIMEOUT))\
466 panic("DMAR hardware is malfunctioning\n");\
467 cpu_relax();\
468 }\
469}
470
471static void iommu_set_root_entry(struct intel_iommu *iommu)
472{
473 void *addr;
474 u32 cmd, sts;
475 unsigned long flag;
476
477 addr = iommu->root_entry;
478
479 spin_lock_irqsave(&iommu->register_lock, flag);
480 dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr));
481
482 cmd = iommu->gcmd | DMA_GCMD_SRTP;
483 writel(cmd, iommu->reg + DMAR_GCMD_REG);
484
485 /* Make sure the hardware completes it */
486 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
487 readl, (sts & DMA_GSTS_RTPS), sts);
488
489 spin_unlock_irqrestore(&iommu->register_lock, flag);
490}
491
492static void iommu_flush_write_buffer(struct intel_iommu *iommu)
493{
494 u32 val;
495 unsigned long flag;
496
497 if (!cap_rwbf(iommu->cap))
498 return;
499 val = iommu->gcmd | DMA_GCMD_WBF;
500
501 spin_lock_irqsave(&iommu->register_lock, flag);
502 writel(val, iommu->reg + DMAR_GCMD_REG);
503
504 /* Make sure the hardware completes it */
505 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
506 readl, (!(val & DMA_GSTS_WBFS)), val);
507
508 spin_unlock_irqrestore(&iommu->register_lock, flag);
509}
510
511/* return value determines whether we need a write buffer flush */
512static int __iommu_flush_context(struct intel_iommu *iommu,
513 u16 did, u16 source_id, u8 function_mask, u64 type,
514 int non_present_entry_flush)
515{
516 u64 val = 0;
517 unsigned long flag;
518
519 /*
520 * In the non-present entry flush case: if the hardware doesn't cache
521 * non-present entries we do nothing; if it does cache them, we flush
522 * the entries of domain 0 (the domain id used to cache any
523 * non-present entries)
524 */
525 if (non_present_entry_flush) {
526 if (!cap_caching_mode(iommu->cap))
527 return 1;
528 else
529 did = 0;
530 }
531
532 switch (type) {
533 case DMA_CCMD_GLOBAL_INVL:
534 val = DMA_CCMD_GLOBAL_INVL;
535 break;
536 case DMA_CCMD_DOMAIN_INVL:
537 val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
538 break;
539 case DMA_CCMD_DEVICE_INVL:
540 val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
541 | DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
542 break;
543 default:
544 BUG();
545 }
546 val |= DMA_CCMD_ICC;
547
548 spin_lock_irqsave(&iommu->register_lock, flag);
549 dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);
550
551 /* Make sure the hardware completes it */
552 IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
553 dmar_readq, (!(val & DMA_CCMD_ICC)), val);
554
555 spin_unlock_irqrestore(&iommu->register_lock, flag);
556
557 /* flushing the context entry will implicitly flush the write buffer */
558 return 0;
559}
560
561static int inline iommu_flush_context_global(struct intel_iommu *iommu,
562 int non_present_entry_flush)
563{
564 return __iommu_flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL,
565 non_present_entry_flush);
566}
567
568static int inline iommu_flush_context_domain(struct intel_iommu *iommu, u16 did,
569 int non_present_entry_flush)
570{
571 return __iommu_flush_context(iommu, did, 0, 0, DMA_CCMD_DOMAIN_INVL,
572 non_present_entry_flush);
573}
574
575static int inline iommu_flush_context_device(struct intel_iommu *iommu,
576 u16 did, u16 source_id, u8 function_mask, int non_present_entry_flush)
577{
578 return __iommu_flush_context(iommu, did, source_id, function_mask,
579 DMA_CCMD_DEVICE_INVL, non_present_entry_flush);
580}
581
582/* return value determines whether we need a write buffer flush */
583static int __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
584 u64 addr, unsigned int size_order, u64 type,
585 int non_present_entry_flush)
586{
587 int tlb_offset = ecap_iotlb_offset(iommu->ecap);
588 u64 val = 0, val_iva = 0;
589 unsigned long flag;
590
591 /*
592 * In the non-present entry flush case: if the hardware doesn't cache
593 * non-present entries we do nothing; if it does cache them, we flush
594 * the entries of domain 0 (the domain id used to cache any
595 * non-present entries)
596 */
597 if (non_present_entry_flush) {
598 if (!cap_caching_mode(iommu->cap))
599 return 1;
600 else
601 did = 0;
602 }
603
604 switch (type) {
605 case DMA_TLB_GLOBAL_FLUSH:
606 /* a global flush doesn't need to set IVA_REG */
607 val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
608 break;
609 case DMA_TLB_DSI_FLUSH:
610 val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
611 break;
612 case DMA_TLB_PSI_FLUSH:
613 val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
614 /* Note: always flush non-leaf currently */
615 val_iva = size_order | addr;
616 break;
617 default:
618 BUG();
619 }
620 /* Note: set drain read/write */
621#if 0
622 /*
623 * This is probably to be super secure.. Looks like we can
624 * ignore it without any impact.
625 */
626 if (cap_read_drain(iommu->cap))
627 val |= DMA_TLB_READ_DRAIN;
628#endif
629 if (cap_write_drain(iommu->cap))
630 val |= DMA_TLB_WRITE_DRAIN;
631
632 spin_lock_irqsave(&iommu->register_lock, flag);
633 /* Note: Only uses first TLB reg currently */
634 if (val_iva)
635 dmar_writeq(iommu->reg + tlb_offset, val_iva);
636 dmar_writeq(iommu->reg + tlb_offset + 8, val);
637
638 /* Make sure the hardware completes it */
639 IOMMU_WAIT_OP(iommu, tlb_offset + 8,
640 dmar_readq, (!(val & DMA_TLB_IVT)), val);
641
642 spin_unlock_irqrestore(&iommu->register_lock, flag);
643
644 /* check IOTLB invalidation granularity */
645 if (DMA_TLB_IAIG(val) == 0)
646 printk(KERN_ERR"IOMMU: flush IOTLB failed\n");
647 if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
648 pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n",
649 DMA_TLB_IIRG(type), DMA_TLB_IAIG(val));
650 /* flushing the IOTLB will implicitly flush the write buffer */
651 return 0;
652}
653
654static int inline iommu_flush_iotlb_global(struct intel_iommu *iommu,
655 int non_present_entry_flush)
656{
657 return __iommu_flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH,
658 non_present_entry_flush);
659}
660
661static int inline iommu_flush_iotlb_dsi(struct intel_iommu *iommu, u16 did,
662 int non_present_entry_flush)
663{
664 return __iommu_flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH,
665 non_present_entry_flush);
666}
667
668static int iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
669 u64 addr, unsigned int pages, int non_present_entry_flush)
670{
671 unsigned int mask;
672
673 BUG_ON(addr & (~PAGE_MASK_4K));
674 BUG_ON(pages == 0);
675
676 /* Fallback to domain selective flush if no PSI support */
677 if (!cap_pgsel_inv(iommu->cap))
678 return iommu_flush_iotlb_dsi(iommu, did,
679 non_present_entry_flush);
680
681 /*
682 * PSI requires page size to be 2 ^ x, and the base address is naturally
683 * aligned to the size
684 */
685 mask = ilog2(__roundup_pow_of_two(pages));
686 /* Fallback to domain selective flush if size is too big */
687 if (mask > cap_max_amask_val(iommu->cap))
688 return iommu_flush_iotlb_dsi(iommu, did,
689 non_present_entry_flush);
690
691 return __iommu_flush_iotlb(iommu, did, addr, mask,
692 DMA_TLB_PSI_FLUSH, non_present_entry_flush);
693}
694
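/*
 * Worked example for the PSI mask above (illustrative): flushing 5 pages
 * gives mask = ilog2(__roundup_pow_of_two(5)) = ilog2(8) = 3, i.e. the
 * hardware invalidates a naturally aligned 2^3 = 8-page (32K) region that
 * covers the request; requests that are too large, or hardware without
 * page-selective support, fall back to the domain-selective flush above.
 */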
695static int iommu_enable_translation(struct intel_iommu *iommu)
696{
697 u32 sts;
698 unsigned long flags;
699
700 spin_lock_irqsave(&iommu->register_lock, flags);
701 writel(iommu->gcmd|DMA_GCMD_TE, iommu->reg + DMAR_GCMD_REG);
702
703 /* Make sure the hardware completes it */
704 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
705 readl, (sts & DMA_GSTS_TES), sts);
706
707 iommu->gcmd |= DMA_GCMD_TE;
708 spin_unlock_irqrestore(&iommu->register_lock, flags);
709 return 0;
710}
711
712static int iommu_disable_translation(struct intel_iommu *iommu)
713{
714 u32 sts;
715 unsigned long flag;
716
717 spin_lock_irqsave(&iommu->register_lock, flag);
718 iommu->gcmd &= ~DMA_GCMD_TE;
719 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
720
721 /* Make sure the hardware completes it */
722 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
723 readl, (!(sts & DMA_GSTS_TES)), sts);
724
725 spin_unlock_irqrestore(&iommu->register_lock, flag);
726 return 0;
727}
728
729/* iommu interrupt handling. Most of it is MSI-like. */
730
731static char *fault_reason_strings[] =
732{
733 "Software",
734 "Present bit in root entry is clear",
735 "Present bit in context entry is clear",
736 "Invalid context entry",
737 "Access beyond MGAW",
738 "PTE Write access is not set",
739 "PTE Read access is not set",
740 "Next page table ptr is invalid",
741 "Root table address invalid",
742 "Context table ptr is invalid",
743 "non-zero reserved fields in RTP",
744 "non-zero reserved fields in CTP",
745 "non-zero reserved fields in PTE",
746 "Unknown"
747};
748#define MAX_FAULT_REASON_IDX ARRAY_SIZE(fault_reason_strings)
749
750char *dmar_get_fault_reason(u8 fault_reason)
751{
752 if (fault_reason >= MAX_FAULT_REASON_IDX)
753 return fault_reason_strings[MAX_FAULT_REASON_IDX - 1];
754 else
755 return fault_reason_strings[fault_reason];
756}
757
758void dmar_msi_unmask(unsigned int irq)
759{
760 struct intel_iommu *iommu = get_irq_data(irq);
761 unsigned long flag;
762
763 /* unmask it */
764 spin_lock_irqsave(&iommu->register_lock, flag);
765 writel(0, iommu->reg + DMAR_FECTL_REG);
766 /* Read a reg to force-flush the posted write */
767 readl(iommu->reg + DMAR_FECTL_REG);
768 spin_unlock_irqrestore(&iommu->register_lock, flag);
769}
770
771void dmar_msi_mask(unsigned int irq)
772{
773 unsigned long flag;
774 struct intel_iommu *iommu = get_irq_data(irq);
775
776 /* mask it */
777 spin_lock_irqsave(&iommu->register_lock, flag);
778 writel(DMA_FECTL_IM, iommu->reg + DMAR_FECTL_REG);
779 /* Read a reg to force-flush the posted write */
780 readl(iommu->reg + DMAR_FECTL_REG);
781 spin_unlock_irqrestore(&iommu->register_lock, flag);
782}
783
784void dmar_msi_write(int irq, struct msi_msg *msg)
785{
786 struct intel_iommu *iommu = get_irq_data(irq);
787 unsigned long flag;
788
789 spin_lock_irqsave(&iommu->register_lock, flag);
790 writel(msg->data, iommu->reg + DMAR_FEDATA_REG);
791 writel(msg->address_lo, iommu->reg + DMAR_FEADDR_REG);
792 writel(msg->address_hi, iommu->reg + DMAR_FEUADDR_REG);
793 spin_unlock_irqrestore(&iommu->register_lock, flag);
794}
795
796void dmar_msi_read(int irq, struct msi_msg *msg)
797{
798 struct intel_iommu *iommu = get_irq_data(irq);
799 unsigned long flag;
800
801 spin_lock_irqsave(&iommu->register_lock, flag);
802 msg->data = readl(iommu->reg + DMAR_FEDATA_REG);
803 msg->address_lo = readl(iommu->reg + DMAR_FEADDR_REG);
804 msg->address_hi = readl(iommu->reg + DMAR_FEUADDR_REG);
805 spin_unlock_irqrestore(&iommu->register_lock, flag);
806}
807
808static int iommu_page_fault_do_one(struct intel_iommu *iommu, int type,
809 u8 fault_reason, u16 source_id, u64 addr)
810{
811 char *reason;
812
813 reason = dmar_get_fault_reason(fault_reason);
814
815 printk(KERN_ERR
816 "DMAR:[%s] Request device [%02x:%02x.%d] "
817 "fault addr %llx \n"
818 "DMAR:[fault reason %02d] %s\n",
819 (type ? "DMA Read" : "DMA Write"),
820 (source_id >> 8), PCI_SLOT(source_id & 0xFF),
821 PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason);
822 return 0;
823}
824
825#define PRIMARY_FAULT_REG_LEN (16)
826static irqreturn_t iommu_page_fault(int irq, void *dev_id)
827{
828 struct intel_iommu *iommu = dev_id;
829 int reg, fault_index;
830 u32 fault_status;
831 unsigned long flag;
832
833 spin_lock_irqsave(&iommu->register_lock, flag);
834 fault_status = readl(iommu->reg + DMAR_FSTS_REG);
835
836 /* TBD: ignore advanced fault log currently */
837 if (!(fault_status & DMA_FSTS_PPF))
838 goto clear_overflow;
839
840 fault_index = dma_fsts_fault_record_index(fault_status);
841 reg = cap_fault_reg_offset(iommu->cap);
842 while (1) {
843 u8 fault_reason;
844 u16 source_id;
845 u64 guest_addr;
846 int type;
847 u32 data;
848
849 /* highest 32 bits */
850 data = readl(iommu->reg + reg +
851 fault_index * PRIMARY_FAULT_REG_LEN + 12);
852 if (!(data & DMA_FRCD_F))
853 break;
854
855 fault_reason = dma_frcd_fault_reason(data);
856 type = dma_frcd_type(data);
857
858 data = readl(iommu->reg + reg +
859 fault_index * PRIMARY_FAULT_REG_LEN + 8);
860 source_id = dma_frcd_source_id(data);
861
862 guest_addr = dmar_readq(iommu->reg + reg +
863 fault_index * PRIMARY_FAULT_REG_LEN);
864 guest_addr = dma_frcd_page_addr(guest_addr);
865 /* clear the fault */
866 writel(DMA_FRCD_F, iommu->reg + reg +
867 fault_index * PRIMARY_FAULT_REG_LEN + 12);
868
869 spin_unlock_irqrestore(&iommu->register_lock, flag);
870
871 iommu_page_fault_do_one(iommu, type, fault_reason,
872 source_id, guest_addr);
873
874 fault_index++;
875 if (fault_index >= cap_num_fault_regs(iommu->cap))
876 fault_index = 0;
877 spin_lock_irqsave(&iommu->register_lock, flag);
878 }
879clear_overflow:
880 /* clear primary fault overflow */
881 fault_status = readl(iommu->reg + DMAR_FSTS_REG);
882 if (fault_status & DMA_FSTS_PFO)
883 writel(DMA_FSTS_PFO, iommu->reg + DMAR_FSTS_REG);
884
885 spin_unlock_irqrestore(&iommu->register_lock, flag);
886 return IRQ_HANDLED;
887}
888
889int dmar_set_interrupt(struct intel_iommu *iommu)
890{
891 int irq, ret;
892
893 irq = create_irq();
894 if (!irq) {
895 printk(KERN_ERR "IOMMU: no free vectors\n");
896 return -EINVAL;
897 }
898
899 set_irq_data(irq, iommu);
900 iommu->irq = irq;
901
902 ret = arch_setup_dmar_msi(irq);
903 if (ret) {
904 set_irq_data(irq, NULL);
905 iommu->irq = 0;
906 destroy_irq(irq);
907 return ret;
908 }
909
910 /* process and clear any faults that are already pending */
911 iommu_page_fault(irq, iommu);
912
913 ret = request_irq(irq, iommu_page_fault, 0, iommu->name, iommu);
914 if (ret)
915 printk(KERN_ERR "IOMMU: can't request irq\n");
916 return ret;
917}
918
919static int iommu_init_domains(struct intel_iommu *iommu)
920{
921 unsigned long ndomains;
922 unsigned long nlongs;
923
924 ndomains = cap_ndoms(iommu->cap);
925 pr_debug("Number of Domains supported <%ld>\n", ndomains);
926 nlongs = BITS_TO_LONGS(ndomains);
927
928 /* TBD: there might be 64K domains,
929 * consider a different allocation scheme for future chips
930 */
931 iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
932 if (!iommu->domain_ids) {
933 printk(KERN_ERR "Allocating domain id array failed\n");
934 return -ENOMEM;
935 }
936 iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *),
937 GFP_KERNEL);
938 if (!iommu->domains) {
939 printk(KERN_ERR "Allocating domain array failed\n");
940 kfree(iommu->domain_ids);
941 return -ENOMEM;
942 }
943
944 /*
945 * if Caching mode is set, then invalid translations are tagged
946 * with domainid 0. Hence we need to pre-allocate it.
947 */
948 if (cap_caching_mode(iommu->cap))
949 set_bit(0, iommu->domain_ids);
950 return 0;
951}
952
953static struct intel_iommu *alloc_iommu(struct dmar_drhd_unit *drhd)
954{
955 struct intel_iommu *iommu;
956 int ret;
957 int map_size;
958 u32 ver;
959
960 iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
961 if (!iommu)
962 return NULL;
963 iommu->reg = ioremap(drhd->reg_base_addr, PAGE_SIZE_4K);
964 if (!iommu->reg) {
965 printk(KERN_ERR "IOMMU: can't map the region\n");
966 goto error;
967 }
968 iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
969 iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);
970
971 /* the registers might be more than one page */
972 map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
973 cap_max_fault_reg_offset(iommu->cap));
974 map_size = PAGE_ALIGN_4K(map_size);
975 if (map_size > PAGE_SIZE_4K) {
976 iounmap(iommu->reg);
977 iommu->reg = ioremap(drhd->reg_base_addr, map_size);
978 if (!iommu->reg) {
979 printk(KERN_ERR "IOMMU: can't map the region\n");
980 goto error;
981 }
982 }
983
984 ver = readl(iommu->reg + DMAR_VER_REG);
985 pr_debug("IOMMU %llx: ver %d:%d cap %llx ecap %llx\n",
986 drhd->reg_base_addr, DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
987 iommu->cap, iommu->ecap);
988 ret = iommu_init_domains(iommu);
989 if (ret)
990 goto error_unmap;
991 spin_lock_init(&iommu->lock);
992 spin_lock_init(&iommu->register_lock);
993
994 drhd->iommu = iommu;
995 return iommu;
996error_unmap:
997 iounmap(iommu->reg);
998 iommu->reg = NULL;
999error:
1000 kfree(iommu);
1001 return NULL;
1002}
1003
1004static void domain_exit(struct dmar_domain *domain);
1005static void free_iommu(struct intel_iommu *iommu)
1006{
1007 struct dmar_domain *domain;
1008 int i;
1009
1010 if (!iommu)
1011 return;
1012
1013 i = find_first_bit(iommu->domain_ids, cap_ndoms(iommu->cap));
1014 for (; i < cap_ndoms(iommu->cap); ) {
1015 domain = iommu->domains[i];
1016 clear_bit(i, iommu->domain_ids);
1017 domain_exit(domain);
1018 i = find_next_bit(iommu->domain_ids,
1019 cap_ndoms(iommu->cap), i+1);
1020 }
1021
1022 if (iommu->gcmd & DMA_GCMD_TE)
1023 iommu_disable_translation(iommu);
1024
1025 if (iommu->irq) {
1026 set_irq_data(iommu->irq, NULL);
1027 /* This will mask the irq */
1028 free_irq(iommu->irq, iommu);
1029 destroy_irq(iommu->irq);
1030 }
1031
1032 kfree(iommu->domains);
1033 kfree(iommu->domain_ids);
1034
1035 /* free context mapping */
1036 free_context_table(iommu);
1037
1038 if (iommu->reg)
1039 iounmap(iommu->reg);
1040 kfree(iommu);
1041}
1042
1043static struct dmar_domain * iommu_alloc_domain(struct intel_iommu *iommu)
1044{
1045 unsigned long num;
1046 unsigned long ndomains;
1047 struct dmar_domain *domain;
1048 unsigned long flags;
1049
1050 domain = alloc_domain_mem();
1051 if (!domain)
1052 return NULL;
1053
1054 ndomains = cap_ndoms(iommu->cap);
1055
1056 spin_lock_irqsave(&iommu->lock, flags);
1057 num = find_first_zero_bit(iommu->domain_ids, ndomains);
1058 if (num >= ndomains) {
1059 spin_unlock_irqrestore(&iommu->lock, flags);
1060 free_domain_mem(domain);
1061 printk(KERN_ERR "IOMMU: no free domain ids\n");
1062 return NULL;
1063 }
1064
1065 set_bit(num, iommu->domain_ids);
1066 domain->id = num;
1067 domain->iommu = iommu;
1068 iommu->domains[num] = domain;
1069 spin_unlock_irqrestore(&iommu->lock, flags);
1070
1071 return domain;
1072}
1073
1074static void iommu_free_domain(struct dmar_domain *domain)
1075{
1076 unsigned long flags;
1077
1078 spin_lock_irqsave(&domain->iommu->lock, flags);
1079 clear_bit(domain->id, domain->iommu->domain_ids);
1080 spin_unlock_irqrestore(&domain->iommu->lock, flags);
1081}
1082
1083static struct iova_domain reserved_iova_list;
1084
1085static void dmar_init_reserved_ranges(void)
1086{
1087 struct pci_dev *pdev = NULL;
1088 struct iova *iova;
1089 int i;
1090 u64 addr, size;
1091
1092 init_iova_domain(&reserved_iova_list);
1093
1094 /* IOAPIC ranges shouldn't be accessed by DMA */
1095 iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
1096 IOVA_PFN(IOAPIC_RANGE_END));
1097 if (!iova)
1098 printk(KERN_ERR "Reserve IOAPIC range failed\n");
1099
1100 /* Reserve all PCI MMIO to avoid peer-to-peer access */
1101 for_each_pci_dev(pdev) {
1102 struct resource *r;
1103
1104 for (i = 0; i < PCI_NUM_RESOURCES; i++) {
1105 r = &pdev->resource[i];
1106 if (!r->flags || !(r->flags & IORESOURCE_MEM))
1107 continue;
1108 addr = r->start;
1109 addr &= PAGE_MASK_4K;
1110 size = r->end - addr;
1111 size = PAGE_ALIGN_4K(size);
1112 iova = reserve_iova(&reserved_iova_list, IOVA_PFN(addr),
1113 IOVA_PFN(size + addr) - 1);
1114 if (!iova)
1115 printk(KERN_ERR "Reserve iova failed\n");
1116 }
1117 }
1118
1119}
1120
1121static void domain_reserve_special_ranges(struct dmar_domain *domain)
1122{
1123 copy_reserved_iova(&reserved_iova_list, &domain->iovad);
1124}
1125
1126static inline int guestwidth_to_adjustwidth(int gaw)
1127{
1128 int agaw;
1129 int r = (gaw - 12) % 9;
1130
1131 if (r == 0)
1132 agaw = gaw;
1133 else
1134 agaw = gaw + 9 - r;
1135 if (agaw > 64)
1136 agaw = 64;
1137 return agaw;
1138}
1139
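/*
 * Worked example for guestwidth_to_adjustwidth() (illustrative): each
 * page-table level resolves 9 bits above the 12-bit page offset, so the
 * usable widths are 12 + 9k.  A guest width of 39 or 48 is returned
 * unchanged ((gaw - 12) % 9 == 0), gaw = 40 rounds up to 48, and anything
 * that would exceed 64 is clamped to 64.
 */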
1140static int domain_init(struct dmar_domain *domain, int guest_width)
1141{
1142 struct intel_iommu *iommu;
1143 int adjust_width, agaw;
1144 unsigned long sagaw;
1145
1146 init_iova_domain(&domain->iovad);
1147 spin_lock_init(&domain->mapping_lock);
1148
1149 domain_reserve_special_ranges(domain);
1150
1151 /* calculate AGAW */
1152 iommu = domain->iommu;
1153 if (guest_width > cap_mgaw(iommu->cap))
1154 guest_width = cap_mgaw(iommu->cap);
1155 domain->gaw = guest_width;
1156 adjust_width = guestwidth_to_adjustwidth(guest_width);
1157 agaw = width_to_agaw(adjust_width);
1158 sagaw = cap_sagaw(iommu->cap);
1159 if (!test_bit(agaw, &sagaw)) {
1160 /* hardware doesn't support it, choose a bigger one */
1161 pr_debug("IOMMU: hardware doesn't support agaw %d\n", agaw);
1162 agaw = find_next_bit(&sagaw, 5, agaw);
1163 if (agaw >= 5)
1164 return -ENODEV;
1165 }
1166 domain->agaw = agaw;
1167 INIT_LIST_HEAD(&domain->devices);
1168
1169 /* always allocate the top pgd */
1170 domain->pgd = (struct dma_pte *)alloc_pgtable_page();
1171 if (!domain->pgd)
1172 return -ENOMEM;
1173 __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE_4K);
1174 return 0;
1175}
1176
1177static void domain_exit(struct dmar_domain *domain)
1178{
1179 u64 end;
1180
1181 /* Domain 0 is reserved, so don't process it */
1182 if (!domain)
1183 return;
1184
1185 domain_remove_dev_info(domain);
1186 /* destroy iovas */
1187 put_iova_domain(&domain->iovad);
1188 end = DOMAIN_MAX_ADDR(domain->gaw);
1189 end = end & (~PAGE_MASK_4K);
1190
1191 /* clear ptes */
1192 dma_pte_clear_range(domain, 0, end);
1193
1194 /* free page tables */
1195 dma_pte_free_pagetable(domain, 0, end);
1196
1197 iommu_free_domain(domain);
1198 free_domain_mem(domain);
1199}
1200
1201static int domain_context_mapping_one(struct dmar_domain *domain,
1202 u8 bus, u8 devfn)
1203{
1204 struct context_entry *context;
1205 struct intel_iommu *iommu = domain->iommu;
1206 unsigned long flags;
1207
1208 pr_debug("Set context mapping for %02x:%02x.%d\n",
1209 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
1210 BUG_ON(!domain->pgd);
1211 context = device_to_context_entry(iommu, bus, devfn);
1212 if (!context)
1213 return -ENOMEM;
1214 spin_lock_irqsave(&iommu->lock, flags);
1215 if (context_present(*context)) {
1216 spin_unlock_irqrestore(&iommu->lock, flags);
1217 return 0;
1218 }
1219
1220 context_set_domain_id(*context, domain->id);
1221 context_set_address_width(*context, domain->agaw);
1222 context_set_address_root(*context, virt_to_phys(domain->pgd));
1223 context_set_translation_type(*context, CONTEXT_TT_MULTI_LEVEL);
1224 context_set_fault_enable(*context);
1225 context_set_present(*context);
1226 __iommu_flush_cache(iommu, context, sizeof(*context));
1227
1228 /* it's a non-present to present mapping */
1229 if (iommu_flush_context_device(iommu, domain->id,
1230 (((u16)bus) << 8) | devfn, DMA_CCMD_MASK_NOBIT, 1))
1231 iommu_flush_write_buffer(iommu);
1232 else
1233 iommu_flush_iotlb_dsi(iommu, 0, 0);
1234 spin_unlock_irqrestore(&iommu->lock, flags);
1235 return 0;
1236}
1237
1238static int
1239domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev)
1240{
1241 int ret;
1242 struct pci_dev *tmp, *parent;
1243
1244 ret = domain_context_mapping_one(domain, pdev->bus->number,
1245 pdev->devfn);
1246 if (ret)
1247 return ret;
1248
1249 /* dependent device mapping */
1250 tmp = pci_find_upstream_pcie_bridge(pdev);
1251 if (!tmp)
1252 return 0;
1253 /* Secondary interface's bus number and devfn 0 */
1254 parent = pdev->bus->self;
1255 while (parent != tmp) {
1256 ret = domain_context_mapping_one(domain, parent->bus->number,
1257 parent->devfn);
1258 if (ret)
1259 return ret;
1260 parent = parent->bus->self;
1261 }
1262 if (tmp->is_pcie) /* this is a PCIE-to-PCI bridge */
1263 return domain_context_mapping_one(domain,
1264 tmp->subordinate->number, 0);
1265 else /* this is a legacy PCI bridge */
1266 return domain_context_mapping_one(domain,
1267 tmp->bus->number, tmp->devfn);
1268}
1269
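/*
 * Illustrative example of the mapping walk above: for an endpoint behind a
 * PCIe-to-PCI bridge, context entries are set up for the endpoint itself,
 * for every bridge between it and the upstream PCIe bridge, and finally for
 * the PCIe bridge's secondary bus with devfn 0 (a legacy PCI bridge uses its
 * own bus/devfn instead), so DMA arriving with any of those source-ids hits
 * the same domain.
 */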
1270static int domain_context_mapped(struct dmar_domain *domain,
1271 struct pci_dev *pdev)
1272{
1273 int ret;
1274 struct pci_dev *tmp, *parent;
1275
1276 ret = device_context_mapped(domain->iommu,
1277 pdev->bus->number, pdev->devfn);
1278 if (!ret)
1279 return ret;
1280 /* dependent device mapping */
1281 tmp = pci_find_upstream_pcie_bridge(pdev);
1282 if (!tmp)
1283 return ret;
1284 /* Secondary interface's bus number and devfn 0 */
1285 parent = pdev->bus->self;
1286 while (parent != tmp) {
1287 ret = device_context_mapped(domain->iommu, parent->bus->number,
1288 parent->devfn);
1289 if (!ret)
1290 return ret;
1291 parent = parent->bus->self;
1292 }
1293 if (tmp->is_pcie)
1294 return device_context_mapped(domain->iommu,
1295 tmp->subordinate->number, 0);
1296 else
1297 return device_context_mapped(domain->iommu,
1298 tmp->bus->number, tmp->devfn);
1299}
1300
1301static int
1302domain_page_mapping(struct dmar_domain *domain, dma_addr_t iova,
1303 u64 hpa, size_t size, int prot)
1304{
1305 u64 start_pfn, end_pfn;
1306 struct dma_pte *pte;
1307 int index;
1308
1309 if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
1310 return -EINVAL;
1311 iova &= PAGE_MASK_4K;
1312 start_pfn = ((u64)hpa) >> PAGE_SHIFT_4K;
1313 end_pfn = (PAGE_ALIGN_4K(((u64)hpa) + size)) >> PAGE_SHIFT_4K;
1314 index = 0;
1315 while (start_pfn < end_pfn) {
1316 pte = addr_to_dma_pte(domain, iova + PAGE_SIZE_4K * index);
1317 if (!pte)
1318 return -ENOMEM;
1319 /* We don't need lock here, nobody else
1320 * touches the iova range
1321 */
1322 BUG_ON(dma_pte_addr(*pte));
1323 dma_set_pte_addr(*pte, start_pfn << PAGE_SHIFT_4K);
1324 dma_set_pte_prot(*pte, prot);
1325 __iommu_flush_cache(domain->iommu, pte, sizeof(*pte));
1326 start_pfn++;
1327 index++;
1328 }
1329 return 0;
1330}
1331
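/*
 * Worked example for domain_page_mapping() above (illustrative): mapping
 * hpa = 0x12345678 with size = 0x2000 gives start_pfn = 0x12345 and
 * end_pfn = 0x12348, so three 4K PTEs are written even though the byte
 * range only partially covers its first and last page.
 */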
1332static void detach_domain_for_dev(struct dmar_domain *domain, u8 bus, u8 devfn)
1333{
1334 clear_context_table(domain->iommu, bus, devfn);
1335 iommu_flush_context_global(domain->iommu, 0);
1336 iommu_flush_iotlb_global(domain->iommu, 0);
1337}
1338
1339static void domain_remove_dev_info(struct dmar_domain *domain)
1340{
1341 struct device_domain_info *info;
1342 unsigned long flags;
1343
1344 spin_lock_irqsave(&device_domain_lock, flags);
1345 while (!list_empty(&domain->devices)) {
1346 info = list_entry(domain->devices.next,
1347 struct device_domain_info, link);
1348 list_del(&info->link);
1349 list_del(&info->global);
1350 if (info->dev)
1351 info->dev->dev.archdata.iommu = NULL;
1352 spin_unlock_irqrestore(&device_domain_lock, flags);
1353
1354 detach_domain_for_dev(info->domain, info->bus, info->devfn);
1355 free_devinfo_mem(info);
1356
1357 spin_lock_irqsave(&device_domain_lock, flags);
1358 }
1359 spin_unlock_irqrestore(&device_domain_lock, flags);
1360}
1361
1362/*
1363 * find_domain
1364 * Note: we use struct pci_dev->dev.archdata.iommu to store the info
1365 */
1366struct dmar_domain *
1367find_domain(struct pci_dev *pdev)
1368{
1369 struct device_domain_info *info;
1370
1371 /* No lock here, assumes no domain exit in normal case */
1372 info = pdev->dev.archdata.iommu;
1373 if (info)
1374 return info->domain;
1375 return NULL;
1376}
1377
1378static int dmar_pci_device_match(struct pci_dev *devices[], int cnt,
1379 struct pci_dev *dev)
1380{
1381 int index;
1382
1383 while (dev) {
1384 for (index = 0; index < cnt; index ++)
1385 if (dev == devices[index])
1386 return 1;
1387
1388 /* Check our parent */
1389 dev = dev->bus->self;
1390 }
1391
1392 return 0;
1393}
1394
1395static struct dmar_drhd_unit *
1396dmar_find_matched_drhd_unit(struct pci_dev *dev)
1397{
1398 struct dmar_drhd_unit *drhd = NULL;
1399
1400 list_for_each_entry(drhd, &dmar_drhd_units, list) {
1401 if (drhd->include_all || dmar_pci_device_match(drhd->devices,
1402 drhd->devices_cnt, dev))
1403 return drhd;
1404 }
1405
1406 return NULL;
1407}
1408
1409/* domain is initialized */
1410static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
1411{
1412 struct dmar_domain *domain, *found = NULL;
1413 struct intel_iommu *iommu;
1414 struct dmar_drhd_unit *drhd;
1415 struct device_domain_info *info, *tmp;
1416 struct pci_dev *dev_tmp;
1417 unsigned long flags;
1418 int bus = 0, devfn = 0;
1419
1420 domain = find_domain(pdev);
1421 if (domain)
1422 return domain;
1423
1424 dev_tmp = pci_find_upstream_pcie_bridge(pdev);
1425 if (dev_tmp) {
1426 if (dev_tmp->is_pcie) {
1427 bus = dev_tmp->subordinate->number;
1428 devfn = 0;
1429 } else {
1430 bus = dev_tmp->bus->number;
1431 devfn = dev_tmp->devfn;
1432 }
1433 spin_lock_irqsave(&device_domain_lock, flags);
1434 list_for_each_entry(info, &device_domain_list, global) {
1435 if (info->bus == bus && info->devfn == devfn) {
1436 found = info->domain;
1437 break;
1438 }
1439 }
1440 spin_unlock_irqrestore(&device_domain_lock, flags);
1441 /* the pcie-pci bridge already has a domain, use it */
1442 if (found) {
1443 domain = found;
1444 goto found_domain;
1445 }
1446 }
1447
1448 /* Allocate new domain for the device */
1449 drhd = dmar_find_matched_drhd_unit(pdev);
1450 if (!drhd) {
1451 printk(KERN_ERR "IOMMU: can't find DMAR for device %s\n",
1452 pci_name(pdev));
1453 return NULL;
1454 }
1455 iommu = drhd->iommu;
1456
1457 domain = iommu_alloc_domain(iommu);
1458 if (!domain)
1459 goto error;
1460
1461 if (domain_init(domain, gaw)) {
1462 domain_exit(domain);
1463 goto error;
1464 }
1465
1466 /* register pcie-to-pci device */
1467 if (dev_tmp) {
1468 info = alloc_devinfo_mem();
1469 if (!info) {
1470 domain_exit(domain);
1471 goto error;
1472 }
1473 info->bus = bus;
1474 info->devfn = devfn;
1475 info->dev = NULL;
1476 info->domain = domain;
1477 /* This domain is shared by devices under p2p bridge */
1478 domain->flags |= DOMAIN_FLAG_MULTIPLE_DEVICES;
1479
1480 /* the pcie-to-pci bridge already has a domain, use it */
1481 found = NULL;
1482 spin_lock_irqsave(&device_domain_lock, flags);
1483 list_for_each_entry(tmp, &device_domain_list, global) {
1484 if (tmp->bus == bus && tmp->devfn == devfn) {
1485 found = tmp->domain;
1486 break;
1487 }
1488 }
1489 if (found) {
1490 free_devinfo_mem(info);
1491 domain_exit(domain);
1492 domain = found;
1493 } else {
1494 list_add(&info->link, &domain->devices);
1495 list_add(&info->global, &device_domain_list);
1496 }
1497 spin_unlock_irqrestore(&device_domain_lock, flags);
1498 }
1499
1500found_domain:
1501 info = alloc_devinfo_mem();
1502 if (!info)
1503 goto error;
1504 info->bus = pdev->bus->number;
1505 info->devfn = pdev->devfn;
1506 info->dev = pdev;
1507 info->domain = domain;
1508 spin_lock_irqsave(&device_domain_lock, flags);
1509 /* another thread may have beaten us to it */
1510 found = find_domain(pdev);
1511 if (found != NULL) {
1512 spin_unlock_irqrestore(&device_domain_lock, flags);
1513 if (found != domain) {
1514 domain_exit(domain);
1515 domain = found;
1516 }
1517 free_devinfo_mem(info);
1518 return domain;
1519 }
1520 list_add(&info->link, &domain->devices);
1521 list_add(&info->global, &device_domain_list);
1522 pdev->dev.archdata.iommu = info;
1523 spin_unlock_irqrestore(&device_domain_lock, flags);
1524 return domain;
1525error:
1526 /* recheck here; another thread may have set it */
1527 return find_domain(pdev);
1528}
1529
1530static int iommu_prepare_identity_map(struct pci_dev *pdev, u64 start, u64 end)
1531{
1532 struct dmar_domain *domain;
1533 unsigned long size;
1534 u64 base;
1535 int ret;
1536
1537 printk(KERN_INFO
1538 "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
1539 pci_name(pdev), start, end);
1540 /* page table init */
1541 domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
1542 if (!domain)
1543 return -ENOMEM;
1544
1545 /* The address might not be aligned */
1546 base = start & PAGE_MASK_4K;
1547 size = end - base;
1548 size = PAGE_ALIGN_4K(size);
1549 if (!reserve_iova(&domain->iovad, IOVA_PFN(base),
1550 IOVA_PFN(base + size) - 1)) {
1551 printk(KERN_ERR "IOMMU: reserve iova failed\n");
1552 ret = -ENOMEM;
1553 goto error;
1554 }
1555
1556 pr_debug("Mapping reserved region %lx@%llx for %s\n",
1557 size, base, pci_name(pdev));
1558 /*
1559 * The RMRR range might overlap with the physical memory range;
1560 * clear it first
1561 */
1562 dma_pte_clear_range(domain, base, base + size);
1563
1564 ret = domain_page_mapping(domain, base, base, size,
1565 DMA_PTE_READ|DMA_PTE_WRITE);
1566 if (ret)
1567 goto error;
1568
1569 /* context entry init */
1570 ret = domain_context_mapping(domain, pdev);
1571 if (!ret)
1572 return 0;
1573error:
1574 domain_exit(domain);
1575 return ret;
1576
1577}
1578
1579static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
1580 struct pci_dev *pdev)
1581{
1582 if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
1583 return 0;
1584 return iommu_prepare_identity_map(pdev, rmrr->base_address,
1585 rmrr->end_address + 1);
1586}
1587
1588#ifdef CONFIG_DMAR_GFX_WA
1589extern int arch_get_ram_range(int slot, u64 *addr, u64 *size);
1590static void __init iommu_prepare_gfx_mapping(void)
1591{
1592 struct pci_dev *pdev = NULL;
1593 u64 base, size;
1594 int slot;
1595 int ret;
1596
1597 for_each_pci_dev(pdev) {
1598 if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO ||
1599 !IS_GFX_DEVICE(pdev))
1600 continue;
1601 printk(KERN_INFO "IOMMU: gfx device %s 1-1 mapping\n",
1602 pci_name(pdev));
1603 slot = arch_get_ram_range(0, &base, &size);
1604 while (slot >= 0) {
1605 ret = iommu_prepare_identity_map(pdev,
1606 base, base + size);
1607 if (ret)
1608 goto error;
1609 slot = arch_get_ram_range(slot, &base, &size);
1610 }
1611 continue;
1612error:
1613 printk(KERN_ERR "IOMMU: mapping reserved region failed\n");
1614 }
1615}
1616#endif
1617
1618#ifdef CONFIG_DMAR_FLOPPY_WA
1619static inline void iommu_prepare_isa(void)
1620{
1621 struct pci_dev *pdev;
1622 int ret;
1623
1624 pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
1625 if (!pdev)
1626 return;
1627
1628 printk(KERN_INFO "IOMMU: Prepare 0-16M unity mapping for LPC\n");
1629 ret = iommu_prepare_identity_map(pdev, 0, 16*1024*1024);
1630
1631 if (ret)
1632 printk(KERN_ERR "IOMMU: Failed to create 0-16M identity map, "
1633 "floppy might not work\n");
1634
1635}
1636#else
1637static inline void iommu_prepare_isa(void)
1638{
1639 return;
1640}
1641#endif /* CONFIG_DMAR_FLOPPY_WA */
1642
1643int __init init_dmars(void)
1644{
1645 struct dmar_drhd_unit *drhd;
1646 struct dmar_rmrr_unit *rmrr;
1647 struct pci_dev *pdev;
1648 struct intel_iommu *iommu;
1649 int ret, unit = 0;
1650
1651 /*
1652 * for each drhd
1653 * allocate root
1654 * initialize and program root entry to not present
1655 * endfor
1656 */
1657 for_each_drhd_unit(drhd) {
1658 if (drhd->ignored)
1659 continue;
1660 iommu = alloc_iommu(drhd);
1661 if (!iommu) {
1662 ret = -ENOMEM;
1663 goto error;
1664 }
1665
1666 /*
1667 * TBD:
1668 * we could share the same root & context tables
1669 * among all IOMMUs. Need to split it later.
1670 */
1671 ret = iommu_alloc_root_entry(iommu);
1672 if (ret) {
1673 printk(KERN_ERR "IOMMU: allocate root entry failed\n");
1674 goto error;
1675 }
1676 }
1677
1678 /*
1679 * For each rmrr
1680 * for each dev attached to rmrr
1681 * do
1682 * locate drhd for dev, alloc domain for dev
1683 * allocate free domain
1684 * allocate page table entries for rmrr
1685 * if context not allocated for bus
1686 * allocate and init context
1687 * set present in root table for this bus
1688 * init context with domain, translation etc
1689 * endfor
1690 * endfor
1691 */
1692 for_each_rmrr_units(rmrr) {
1693 int i;
1694 for (i = 0; i < rmrr->devices_cnt; i++) {
1695 pdev = rmrr->devices[i];
1696 /* some BIOSes list non-existent devices in the DMAR table */
1697 if (!pdev)
1698 continue;
1699 ret = iommu_prepare_rmrr_dev(rmrr, pdev);
1700 if (ret)
1701 printk(KERN_ERR
1702 "IOMMU: mapping reserved region failed\n");
1703 }
1704 }
1705
1706 iommu_prepare_gfx_mapping();
1707
1708 iommu_prepare_isa();
1709
1710 /*
1711 * for each drhd
1712 * enable fault log
1713 * global invalidate context cache
1714 * global invalidate iotlb
1715 * enable translation
1716 */
1717 for_each_drhd_unit(drhd) {
1718 if (drhd->ignored)
1719 continue;
1720 iommu = drhd->iommu;
1721 sprintf (iommu->name, "dmar%d", unit++);
1722
1723 iommu_flush_write_buffer(iommu);
1724
1725 ret = dmar_set_interrupt(iommu);
1726 if (ret)
1727 goto error;
1728
1729 iommu_set_root_entry(iommu);
1730
1731 iommu_flush_context_global(iommu, 0);
1732 iommu_flush_iotlb_global(iommu, 0);
1733
1734 ret = iommu_enable_translation(iommu);
1735 if (ret)
1736 goto error;
1737 }
1738
1739 return 0;
1740error:
1741 for_each_drhd_unit(drhd) {
1742 if (drhd->ignored)
1743 continue;
1744 iommu = drhd->iommu;
1745 free_iommu(iommu);
1746 }
1747 return ret;
1748}
1749
1750static inline u64 aligned_size(u64 host_addr, size_t size)
1751{
1752 u64 addr;
1753 addr = (host_addr & (~PAGE_MASK_4K)) + size;
1754 return PAGE_ALIGN_4K(addr);
1755}
1756
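/*
 * Worked example for aligned_size() above (illustrative): host_addr ending
 * in 0xff0 with size = 0x20 gives (0xff0 + 0x20) = 0x1010, which rounds up
 * to 0x2000, i.e. two 4K pages are needed because the buffer straddles a
 * page boundary.
 */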
1757struct iova *
1758iommu_alloc_iova(struct dmar_domain *domain, size_t size, u64 end)
1759{
1760 struct iova *piova;
1761
1762 /* Make sure it's in range */
1763 end = min_t(u64, DOMAIN_MAX_ADDR(domain->gaw), end);
1764 if (!size || (IOVA_START_ADDR + size > end))
1765 return NULL;
1766
1767 piova = alloc_iova(&domain->iovad,
1768 size >> PAGE_SHIFT_4K, IOVA_PFN(end), 1);
1769 return piova;
1770}
1771
1772static struct iova *
1773__intel_alloc_iova(struct device *dev, struct dmar_domain *domain,
1774 size_t size)
1775{
1776 struct pci_dev *pdev = to_pci_dev(dev);
1777 struct iova *iova = NULL;
1778
1779 if ((pdev->dma_mask <= DMA_32BIT_MASK) || (dmar_forcedac)) {
1780 iova = iommu_alloc_iova(domain, size, pdev->dma_mask);
1781 } else {
1782 /*
1783 * First try to allocate an io virtual address in
1784 * DMA_32BIT_MASK and if that fails then try allocating
1785		 * from the higher range
1786 */
1787 iova = iommu_alloc_iova(domain, size, DMA_32BIT_MASK);
1788 if (!iova)
1789 iova = iommu_alloc_iova(domain, size, pdev->dma_mask);
1790 }
1791
1792 if (!iova) {
1793		printk(KERN_ERR "Allocating iova for %s failed\n", pci_name(pdev));
1794 return NULL;
1795 }
1796
1797 return iova;
1798}
1799
1800static struct dmar_domain *
1801get_valid_domain_for_dev(struct pci_dev *pdev)
1802{
1803 struct dmar_domain *domain;
1804 int ret;
1805
1806 domain = get_domain_for_dev(pdev,
1807 DEFAULT_DOMAIN_ADDRESS_WIDTH);
1808 if (!domain) {
1809 printk(KERN_ERR
1810			"Allocating domain for %s failed\n", pci_name(pdev));
1811		return NULL;
1812 }
1813
1814 /* make sure context mapping is ok */
1815 if (unlikely(!domain_context_mapped(domain, pdev))) {
1816 ret = domain_context_mapping(domain, pdev);
1817 if (ret) {
1818 printk(KERN_ERR
1819				"Domain context mapping for %s failed\n",
1820				pci_name(pdev));
1821			return NULL;
1822 }
1823 }
1824
1825 return domain;
1826}
1827
1828static dma_addr_t intel_map_single(struct device *hwdev, void *addr,
1829 size_t size, int dir)
1830{
1831 struct pci_dev *pdev = to_pci_dev(hwdev);
1832 int ret;
1833 struct dmar_domain *domain;
1834 unsigned long start_addr;
1835 struct iova *iova;
1836 int prot = 0;
1837
1838 BUG_ON(dir == DMA_NONE);
1839 if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
1840 return virt_to_bus(addr);
1841
1842 domain = get_valid_domain_for_dev(pdev);
1843 if (!domain)
1844 return 0;
1845
1846 addr = (void *)virt_to_phys(addr);
1847 size = aligned_size((u64)addr, size);
1848
1849 iova = __intel_alloc_iova(hwdev, domain, size);
1850 if (!iova)
1851 goto error;
1852
1853 start_addr = iova->pfn_lo << PAGE_SHIFT_4K;
1854
1855 /*
1856	 * Check if DMAR supports zero-length reads on write-only
1857	 * mappings.
1858 */
1859 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
1860 !cap_zlr(domain->iommu->cap))
1861 prot |= DMA_PTE_READ;
1862 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
1863 prot |= DMA_PTE_WRITE;
1864 /*
1865	 * The range addr .. addr + size might cover only part of a page, so
1866	 * map the whole page. Note: if two parts of one page are mapped
1867	 * separately, two guest addresses may map to the same host address,
1868	 * but this is not a big problem.
1869 */
1870 ret = domain_page_mapping(domain, start_addr,
1871 ((u64)addr) & PAGE_MASK_4K, size, prot);
1872 if (ret)
1873 goto error;
1874
1875 pr_debug("Device %s request: %lx@%llx mapping: %lx@%llx, dir %d\n",
1876 pci_name(pdev), size, (u64)addr,
1877 size, (u64)start_addr, dir);
1878
1879 /* it's a non-present to present mapping */
1880 ret = iommu_flush_iotlb_psi(domain->iommu, domain->id,
1881 start_addr, size >> PAGE_SHIFT_4K, 1);
1882 if (ret)
1883 iommu_flush_write_buffer(domain->iommu);
1884
1885 return (start_addr + ((u64)addr & (~PAGE_MASK_4K)));
1886
1887error:
1888 if (iova)
1889 __free_iova(&domain->iovad, iova);
1890	printk(KERN_ERR "Device %s request: %lx@%llx dir %d --- failed\n",
1891 pci_name(pdev), size, (u64)addr, dir);
1892 return 0;
1893}
1894
1895static void intel_unmap_single(struct device *dev, dma_addr_t dev_addr,
1896 size_t size, int dir)
1897{
1898 struct pci_dev *pdev = to_pci_dev(dev);
1899 struct dmar_domain *domain;
1900 unsigned long start_addr;
1901 struct iova *iova;
1902
1903 if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
1904 return;
1905 domain = find_domain(pdev);
1906 BUG_ON(!domain);
1907
1908 iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
1909 if (!iova)
1910 return;
1911
1912 start_addr = iova->pfn_lo << PAGE_SHIFT_4K;
1913 size = aligned_size((u64)dev_addr, size);
1914
1915 pr_debug("Device %s unmapping: %lx@%llx\n",
1916 pci_name(pdev), size, (u64)start_addr);
1917
1918 /* clear the whole page */
1919 dma_pte_clear_range(domain, start_addr, start_addr + size);
1920 /* free page tables */
1921 dma_pte_free_pagetable(domain, start_addr, start_addr + size);
1922
1923 if (iommu_flush_iotlb_psi(domain->iommu, domain->id, start_addr,
1924 size >> PAGE_SHIFT_4K, 0))
1925 iommu_flush_write_buffer(domain->iommu);
1926
1927 /* free iova */
1928 __free_iova(&domain->iovad, iova);
1929}
1930
1931static void * intel_alloc_coherent(struct device *hwdev, size_t size,
1932 dma_addr_t *dma_handle, gfp_t flags)
1933{
1934 void *vaddr;
1935 int order;
1936
1937 size = PAGE_ALIGN_4K(size);
1938 order = get_order(size);
1939 flags &= ~(GFP_DMA | GFP_DMA32);
1940
1941 vaddr = (void *)__get_free_pages(flags, order);
1942 if (!vaddr)
1943 return NULL;
1944 memset(vaddr, 0, size);
1945
1946 *dma_handle = intel_map_single(hwdev, vaddr, size, DMA_BIDIRECTIONAL);
1947 if (*dma_handle)
1948 return vaddr;
1949 free_pages((unsigned long)vaddr, order);
1950 return NULL;
1951}
1952
1953static void intel_free_coherent(struct device *hwdev, size_t size,
1954 void *vaddr, dma_addr_t dma_handle)
1955{
1956 int order;
1957
1958 size = PAGE_ALIGN_4K(size);
1959 order = get_order(size);
1960
1961 intel_unmap_single(hwdev, dma_handle, size, DMA_BIDIRECTIONAL);
1962 free_pages((unsigned long)vaddr, order);
1963}
1964
1965#define SG_ENT_VIRT_ADDRESS(sg) (sg_virt((sg)))
1966static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
1967 int nelems, int dir)
1968{
1969 int i;
1970 struct pci_dev *pdev = to_pci_dev(hwdev);
1971 struct dmar_domain *domain;
1972 unsigned long start_addr;
1973 struct iova *iova;
1974 size_t size = 0;
1975 void *addr;
1976 struct scatterlist *sg;
1977
1978 if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
1979 return;
1980
1981 domain = find_domain(pdev);
1982
1983 iova = find_iova(&domain->iovad, IOVA_PFN(sglist[0].dma_address));
1984 if (!iova)
1985 return;
1986 for_each_sg(sglist, sg, nelems, i) {
1987 addr = SG_ENT_VIRT_ADDRESS(sg);
1988 size += aligned_size((u64)addr, sg->length);
1989 }
1990
1991 start_addr = iova->pfn_lo << PAGE_SHIFT_4K;
1992
1993 /* clear the whole page */
1994 dma_pte_clear_range(domain, start_addr, start_addr + size);
1995 /* free page tables */
1996 dma_pte_free_pagetable(domain, start_addr, start_addr + size);
1997
1998 if (iommu_flush_iotlb_psi(domain->iommu, domain->id, start_addr,
1999 size >> PAGE_SHIFT_4K, 0))
2000 iommu_flush_write_buffer(domain->iommu);
2001
2002 /* free iova */
2003 __free_iova(&domain->iovad, iova);
2004}
2005
2006static int intel_nontranslate_map_sg(struct device *hwdev,
2007 struct scatterlist *sglist, int nelems, int dir)
2008{
2009 int i;
2010 struct scatterlist *sg;
2011
2012 for_each_sg(sglist, sg, nelems, i) {
2013 BUG_ON(!sg_page(sg));
2014 sg->dma_address = virt_to_bus(SG_ENT_VIRT_ADDRESS(sg));
2015 sg->dma_length = sg->length;
2016 }
2017 return nelems;
2018}
2019
2020static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist,
2021 int nelems, int dir)
2022{
2023 void *addr;
2024 int i;
2025 struct pci_dev *pdev = to_pci_dev(hwdev);
2026 struct dmar_domain *domain;
2027 size_t size = 0;
2028 int prot = 0;
2029 size_t offset = 0;
2030 struct iova *iova = NULL;
2031 int ret;
2032 struct scatterlist *sg;
2033 unsigned long start_addr;
2034
2035 BUG_ON(dir == DMA_NONE);
2036 if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
2037 return intel_nontranslate_map_sg(hwdev, sglist, nelems, dir);
2038
2039 domain = get_valid_domain_for_dev(pdev);
2040 if (!domain)
2041 return 0;
2042
2043 for_each_sg(sglist, sg, nelems, i) {
2044 addr = SG_ENT_VIRT_ADDRESS(sg);
2045 addr = (void *)virt_to_phys(addr);
2046 size += aligned_size((u64)addr, sg->length);
2047 }
2048
2049 iova = __intel_alloc_iova(hwdev, domain, size);
2050 if (!iova) {
2051 sglist->dma_length = 0;
2052 return 0;
2053 }
2054
2055 /*
2056	 * Check if DMAR supports zero-length reads on write-only
2057	 * mappings.
2058 */
2059 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
2060 !cap_zlr(domain->iommu->cap))
2061 prot |= DMA_PTE_READ;
2062 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
2063 prot |= DMA_PTE_WRITE;
2064
2065 start_addr = iova->pfn_lo << PAGE_SHIFT_4K;
2066 offset = 0;
2067 for_each_sg(sglist, sg, nelems, i) {
2068 addr = SG_ENT_VIRT_ADDRESS(sg);
2069 addr = (void *)virt_to_phys(addr);
2070 size = aligned_size((u64)addr, sg->length);
2071 ret = domain_page_mapping(domain, start_addr + offset,
2072 ((u64)addr) & PAGE_MASK_4K,
2073 size, prot);
2074 if (ret) {
2075 /* clear the page */
2076 dma_pte_clear_range(domain, start_addr,
2077 start_addr + offset);
2078 /* free page tables */
2079 dma_pte_free_pagetable(domain, start_addr,
2080 start_addr + offset);
2081 /* free iova */
2082 __free_iova(&domain->iovad, iova);
2083 return 0;
2084 }
2085 sg->dma_address = start_addr + offset +
2086 ((u64)addr & (~PAGE_MASK_4K));
2087 sg->dma_length = sg->length;
2088 offset += size;
2089 }
2090
2091 /* it's a non-present to present mapping */
2092 if (iommu_flush_iotlb_psi(domain->iommu, domain->id,
2093 start_addr, offset >> PAGE_SHIFT_4K, 1))
2094 iommu_flush_write_buffer(domain->iommu);
2095 return nelems;
2096}
2097
2098static struct dma_mapping_ops intel_dma_ops = {
2099 .alloc_coherent = intel_alloc_coherent,
2100 .free_coherent = intel_free_coherent,
2101 .map_single = intel_map_single,
2102 .unmap_single = intel_unmap_single,
2103 .map_sg = intel_map_sg,
2104 .unmap_sg = intel_unmap_sg,
2105};
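
/*
 * Illustrative sketch, not part of this patch: once dma_ops points at
 * intel_dma_ops (see intel_iommu_init() below), an ordinary driver call
 * such as dma_map_single() lands in intel_map_single(), which allocates
 * an iova, fills the IOMMU page tables and hands back the bus address to
 * program into the device.  "example_start_dma" is a made-up driver
 * fragment, not code from this patch.
 */
static int example_start_dma(struct pci_dev *pdev, void *buf, size_t len)
{
	dma_addr_t bus_addr;

	/* routed through intel_map_single() on DMAR-enabled systems */
	bus_addr = dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);
	if (!bus_addr)		/* this implementation returns 0 on failure */
		return -ENOMEM;

	/* ... program bus_addr/len into the device and start the transfer ... */

	dma_unmap_single(&pdev->dev, bus_addr, len, DMA_TO_DEVICE);
	return 0;
}
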
2106
2107static inline int iommu_domain_cache_init(void)
2108{
2109 int ret = 0;
2110
2111 iommu_domain_cache = kmem_cache_create("iommu_domain",
2112 sizeof(struct dmar_domain),
2113 0,
2114 SLAB_HWCACHE_ALIGN,
2115
2116 NULL);
2117 if (!iommu_domain_cache) {
2118 printk(KERN_ERR "Couldn't create iommu_domain cache\n");
2119 ret = -ENOMEM;
2120 }
2121
2122 return ret;
2123}
2124
2125static inline int iommu_devinfo_cache_init(void)
2126{
2127 int ret = 0;
2128
2129 iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
2130 sizeof(struct device_domain_info),
2131 0,
2132 SLAB_HWCACHE_ALIGN,
2133
2134 NULL);
2135 if (!iommu_devinfo_cache) {
2136 printk(KERN_ERR "Couldn't create devinfo cache\n");
2137 ret = -ENOMEM;
2138 }
2139
2140 return ret;
2141}
2142
2143static inline int iommu_iova_cache_init(void)
2144{
2145 int ret = 0;
2146
2147 iommu_iova_cache = kmem_cache_create("iommu_iova",
2148 sizeof(struct iova),
2149 0,
2150 SLAB_HWCACHE_ALIGN,
2151
2152 NULL);
2153 if (!iommu_iova_cache) {
2154 printk(KERN_ERR "Couldn't create iova cache\n");
2155 ret = -ENOMEM;
2156 }
2157
2158 return ret;
2159}
2160
2161static int __init iommu_init_mempool(void)
2162{
2163 int ret;
2164 ret = iommu_iova_cache_init();
2165 if (ret)
2166 return ret;
2167
2168 ret = iommu_domain_cache_init();
2169 if (ret)
2170 goto domain_error;
2171
2172 ret = iommu_devinfo_cache_init();
2173 if (!ret)
2174 return ret;
2175
2176 kmem_cache_destroy(iommu_domain_cache);
2177domain_error:
2178 kmem_cache_destroy(iommu_iova_cache);
2179
2180 return -ENOMEM;
2181}
2182
2183static void __init iommu_exit_mempool(void)
2184{
2185 kmem_cache_destroy(iommu_devinfo_cache);
2186 kmem_cache_destroy(iommu_domain_cache);
2187 kmem_cache_destroy(iommu_iova_cache);
2188
2189}
2190
2191void __init detect_intel_iommu(void)
2192{
2193 if (swiotlb || no_iommu || iommu_detected || dmar_disabled)
2194 return;
2195 if (early_dmar_detect()) {
2196 iommu_detected = 1;
2197 }
2198}
2199
2200static void __init init_no_remapping_devices(void)
2201{
2202 struct dmar_drhd_unit *drhd;
2203
2204 for_each_drhd_unit(drhd) {
2205 if (!drhd->include_all) {
2206 int i;
2207 for (i = 0; i < drhd->devices_cnt; i++)
2208 if (drhd->devices[i] != NULL)
2209 break;
2210 /* ignore DMAR unit if no pci devices exist */
2211 if (i == drhd->devices_cnt)
2212 drhd->ignored = 1;
2213 }
2214 }
2215
2216 if (dmar_map_gfx)
2217 return;
2218
2219 for_each_drhd_unit(drhd) {
2220 int i;
2221 if (drhd->ignored || drhd->include_all)
2222 continue;
2223
2224 for (i = 0; i < drhd->devices_cnt; i++)
2225 if (drhd->devices[i] &&
2226 !IS_GFX_DEVICE(drhd->devices[i]))
2227 break;
2228
2229 if (i < drhd->devices_cnt)
2230 continue;
2231
2232 /* bypass IOMMU if it is just for gfx devices */
2233 drhd->ignored = 1;
2234 for (i = 0; i < drhd->devices_cnt; i++) {
2235 if (!drhd->devices[i])
2236 continue;
2237 drhd->devices[i]->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
2238 }
2239 }
2240}
2241
2242int __init intel_iommu_init(void)
2243{
2244 int ret = 0;
2245
2246 if (no_iommu || swiotlb || dmar_disabled)
2247 return -ENODEV;
2248
2249 if (dmar_table_init())
2250 return -ENODEV;
2251
2252 iommu_init_mempool();
2253 dmar_init_reserved_ranges();
2254
2255 init_no_remapping_devices();
2256
2257 ret = init_dmars();
2258 if (ret) {
2259 printk(KERN_ERR "IOMMU: dmar init failed\n");
2260 put_iova_domain(&reserved_iova_list);
2261 iommu_exit_mempool();
2262 return ret;
2263 }
2264 printk(KERN_INFO
2265 "PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n");
2266
2267 force_iommu = 1;
2268 dma_ops = &intel_dma_ops;
2269 return 0;
2270}
2271
diff --git a/drivers/pci/intel-iommu.h b/drivers/pci/intel-iommu.h
new file mode 100644
index 000000000000..ee88dd2400cb
--- /dev/null
+++ b/drivers/pci/intel-iommu.h
@@ -0,0 +1,325 @@
1/*
2 * Copyright (c) 2006, Intel Corporation.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
15 * Place - Suite 330, Boston, MA 02111-1307 USA.
16 *
17 * Copyright (C) Ashok Raj <ashok.raj@intel.com>
18 * Copyright (C) Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
19 */
20
21#ifndef _INTEL_IOMMU_H_
22#define _INTEL_IOMMU_H_
23
24#include <linux/types.h>
25#include <linux/msi.h>
26#include "iova.h"
27#include <linux/io.h>
28
29/*
30 * Intel IOMMU register specification per version 1.0 public spec.
31 */
32
33#define DMAR_VER_REG 0x0 /* Arch version supported by this IOMMU */
34#define DMAR_CAP_REG 0x8 /* Hardware supported capabilities */
35#define DMAR_ECAP_REG 0x10 /* Extended capabilities supported */
36#define DMAR_GCMD_REG 0x18 /* Global command register */
37#define DMAR_GSTS_REG 0x1c /* Global status register */
38#define DMAR_RTADDR_REG 0x20 /* Root entry table */
39#define DMAR_CCMD_REG 0x28 /* Context command reg */
40#define DMAR_FSTS_REG 0x34 /* Fault Status register */
41#define DMAR_FECTL_REG 0x38 /* Fault control register */
42#define DMAR_FEDATA_REG 0x3c /* Fault event interrupt data register */
43#define DMAR_FEADDR_REG 0x40 /* Fault event interrupt addr register */
44#define DMAR_FEUADDR_REG 0x44 /* Upper address register */
45#define DMAR_AFLOG_REG 0x58 /* Advanced Fault control */
46#define DMAR_PMEN_REG 0x64 /* Enable Protected Memory Region */
47#define DMAR_PLMBASE_REG 0x68 /* PMRR Low addr */
48#define DMAR_PLMLIMIT_REG 0x6c /* PMRR low limit */
49#define DMAR_PHMBASE_REG 0x70 /* pmrr high base addr */
50#define DMAR_PHMLIMIT_REG 0x78 /* pmrr high limit */
51
52#define OFFSET_STRIDE (9)
53/*
54#define dmar_readl(dmar, reg) readl(dmar + reg)
55#define dmar_readq(dmar, reg) ({ \
56 u32 lo, hi; \
57 lo = readl(dmar + reg); \
58 hi = readl(dmar + reg + 4); \
59 (((u64) hi) << 32) + lo; })
60*/
61static inline u64 dmar_readq(void __iomem *addr)
62{
63 u32 lo, hi;
64 lo = readl(addr);
65 hi = readl(addr + 4);
66 return (((u64) hi) << 32) + lo;
67}
68
69static inline void dmar_writeq(void __iomem *addr, u64 val)
70{
71 writel((u32)val, addr);
72 writel((u32)(val >> 32), addr + 4);
73}
74
75#define DMAR_VER_MAJOR(v) (((v) & 0xf0) >> 4)
76#define DMAR_VER_MINOR(v) ((v) & 0x0f)
77
78/*
79 * Decoding Capability Register
80 */
81#define cap_read_drain(c) (((c) >> 55) & 1)
82#define cap_write_drain(c) (((c) >> 54) & 1)
83#define cap_max_amask_val(c) (((c) >> 48) & 0x3f)
84#define cap_num_fault_regs(c) ((((c) >> 40) & 0xff) + 1)
85#define cap_pgsel_inv(c) (((c) >> 39) & 1)
86
87#define cap_super_page_val(c) (((c) >> 34) & 0xf)
88#define cap_super_offset(c) (((find_first_bit(&cap_super_page_val(c), 4)) \
89 * OFFSET_STRIDE) + 21)
90
91#define cap_fault_reg_offset(c) ((((c) >> 24) & 0x3ff) * 16)
92#define cap_max_fault_reg_offset(c) \
93 (cap_fault_reg_offset(c) + cap_num_fault_regs(c) * 16)
94
95#define cap_zlr(c) (((c) >> 22) & 1)
96#define cap_isoch(c) (((c) >> 23) & 1)
97#define cap_mgaw(c) ((((c) >> 16) & 0x3f) + 1)
98#define cap_sagaw(c) (((c) >> 8) & 0x1f)
99#define cap_caching_mode(c) (((c) >> 7) & 1)
100#define cap_phmr(c) (((c) >> 6) & 1)
101#define cap_plmr(c) (((c) >> 5) & 1)
102#define cap_rwbf(c) (((c) >> 4) & 1)
103#define cap_afl(c) (((c) >> 3) & 1)
104#define cap_ndoms(c) (((unsigned long)1) << (4 + 2 * ((c) & 0x7)))
105/*
106 * Extended Capability Register
107 */
108
109#define ecap_niotlb_iunits(e) ((((e) >> 24) & 0xff) + 1)
110#define ecap_iotlb_offset(e) ((((e) >> 8) & 0x3ff) * 16)
111#define ecap_max_iotlb_offset(e) \
112 (ecap_iotlb_offset(e) + ecap_niotlb_iunits(e) * 16)
113#define ecap_coherent(e) ((e) & 0x1)
114
115
116/* IOTLB_REG */
117#define DMA_TLB_GLOBAL_FLUSH (((u64)1) << 60)
118#define DMA_TLB_DSI_FLUSH (((u64)2) << 60)
119#define DMA_TLB_PSI_FLUSH (((u64)3) << 60)
120#define DMA_TLB_IIRG(type) ((type >> 60) & 7)
121#define DMA_TLB_IAIG(val) (((val) >> 57) & 7)
122#define DMA_TLB_READ_DRAIN (((u64)1) << 49)
123#define DMA_TLB_WRITE_DRAIN (((u64)1) << 48)
124#define DMA_TLB_DID(id) (((u64)((id) & 0xffff)) << 32)
125#define DMA_TLB_IVT (((u64)1) << 63)
126#define DMA_TLB_IH_NONLEAF (((u64)1) << 6)
127#define DMA_TLB_MAX_SIZE (0x3f)
128
129/* GCMD_REG */
130#define DMA_GCMD_TE (((u32)1) << 31)
131#define DMA_GCMD_SRTP (((u32)1) << 30)
132#define DMA_GCMD_SFL (((u32)1) << 29)
133#define DMA_GCMD_EAFL (((u32)1) << 28)
134#define DMA_GCMD_WBF (((u32)1) << 27)
135
136/* GSTS_REG */
137#define DMA_GSTS_TES (((u32)1) << 31)
138#define DMA_GSTS_RTPS (((u32)1) << 30)
139#define DMA_GSTS_FLS (((u32)1) << 29)
140#define DMA_GSTS_AFLS (((u32)1) << 28)
141#define DMA_GSTS_WBFS (((u32)1) << 27)
142
143/* CCMD_REG */
144#define DMA_CCMD_ICC (((u64)1) << 63)
145#define DMA_CCMD_GLOBAL_INVL (((u64)1) << 61)
146#define DMA_CCMD_DOMAIN_INVL (((u64)2) << 61)
147#define DMA_CCMD_DEVICE_INVL (((u64)3) << 61)
148#define DMA_CCMD_FM(m) (((u64)((m) & 0x3)) << 32)
149#define DMA_CCMD_MASK_NOBIT 0
150#define DMA_CCMD_MASK_1BIT 1
151#define DMA_CCMD_MASK_2BIT 2
152#define DMA_CCMD_MASK_3BIT 3
153#define DMA_CCMD_SID(s) (((u64)((s) & 0xffff)) << 16)
154#define DMA_CCMD_DID(d) ((u64)((d) & 0xffff))
155
156/* FECTL_REG */
157#define DMA_FECTL_IM (((u32)1) << 31)
158
159/* FSTS_REG */
160#define DMA_FSTS_PPF ((u32)2)
161#define DMA_FSTS_PFO ((u32)1)
162#define dma_fsts_fault_record_index(s) (((s) >> 8) & 0xff)
163
164/* FRCD_REG, 32 bits access */
165#define DMA_FRCD_F (((u32)1) << 31)
166#define dma_frcd_type(d) ((d >> 30) & 1)
167#define dma_frcd_fault_reason(c) (c & 0xff)
168#define dma_frcd_source_id(c) (c & 0xffff)
169#define dma_frcd_page_addr(d) (d & (((u64)-1) << 12)) /* low 64 bit */
170
171/*
172 * 0: Present
173 * 1-11: Reserved
174 * 12-63: Context Ptr (12 - (haw-1))
175 * 64-127: Reserved
176 */
177struct root_entry {
178 u64 val;
179 u64 rsvd1;
180};
181#define ROOT_ENTRY_NR (PAGE_SIZE_4K/sizeof(struct root_entry))
182static inline bool root_present(struct root_entry *root)
183{
184 return (root->val & 1);
185}
186static inline void set_root_present(struct root_entry *root)
187{
188 root->val |= 1;
189}
190static inline void set_root_value(struct root_entry *root, unsigned long value)
191{
192 root->val |= value & PAGE_MASK_4K;
193}
194
195struct context_entry;
196static inline struct context_entry *
197get_context_addr_from_root(struct root_entry *root)
198{
199 return (struct context_entry *)
200 (root_present(root)?phys_to_virt(
201 root->val & PAGE_MASK_4K):
202 NULL);
203}
204
205/*
206 * low 64 bits:
207 * 0: present
208 * 1: fault processing disable
209 * 2-3: translation type
210 * 12-63: address space root
211 * high 64 bits:
212 * 0-2: address width
213 * 3-6: avail
214 * 8-23: domain id
215 */
216struct context_entry {
217 u64 lo;
218 u64 hi;
219};
220#define context_present(c) ((c).lo & 1)
221#define context_fault_disable(c) (((c).lo >> 1) & 1)
222#define context_translation_type(c) (((c).lo >> 2) & 3)
223#define context_address_root(c) ((c).lo & PAGE_MASK_4K)
224#define context_address_width(c) ((c).hi & 7)
225#define context_domain_id(c) (((c).hi >> 8) & ((1 << 16) - 1))
226
227#define context_set_present(c) do {(c).lo |= 1;} while (0)
228#define context_set_fault_enable(c) \
229 do {(c).lo &= (((u64)-1) << 2) | 1;} while (0)
230#define context_set_translation_type(c, val) \
231 do { \
232 (c).lo &= (((u64)-1) << 4) | 3; \
233 (c).lo |= ((val) & 3) << 2; \
234 } while (0)
235#define CONTEXT_TT_MULTI_LEVEL 0
236#define context_set_address_root(c, val) \
237 do {(c).lo |= (val) & PAGE_MASK_4K;} while (0)
238#define context_set_address_width(c, val) do {(c).hi |= (val) & 7;} while (0)
239#define context_set_domain_id(c, val) \
240 do {(c).hi |= ((val) & ((1 << 16) - 1)) << 8;} while (0)
241#define context_clear_entry(c) do {(c).lo = 0; (c).hi = 0;} while (0)
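
/*
 * Illustrative sketch, not part of this patch: the context_set_*() helpers
 * above are applied to a zeroed entry, with the present bit set last,
 * roughly as the context-mapping path in intel-iommu.c does.  "pgd_phys",
 * "agaw" and "did" are placeholder names for this sketch.
 */
static inline void example_fill_context(struct context_entry *ce,
					u64 pgd_phys, int agaw, int did)
{
	context_clear_entry(*ce);
	context_set_domain_id(*ce, did);
	context_set_address_width(*ce, agaw);
	context_set_address_root(*ce, pgd_phys);
	context_set_translation_type(*ce, CONTEXT_TT_MULTI_LEVEL);
	context_set_fault_enable(*ce);
	context_set_present(*ce);
}
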
242
243/*
244 * 0: readable
245 * 1: writable
246 * 2-6: reserved
247 * 7: super page
248 * 8-11: available
249 * 12-63: Host physical address
250 */
251struct dma_pte {
252 u64 val;
253};
254#define dma_clear_pte(p) do {(p).val = 0;} while (0)
255
256#define DMA_PTE_READ (1)
257#define DMA_PTE_WRITE (2)
258
259#define dma_set_pte_readable(p) do {(p).val |= DMA_PTE_READ;} while (0)
260#define dma_set_pte_writable(p) do {(p).val |= DMA_PTE_WRITE;} while (0)
261#define dma_set_pte_prot(p, prot) \
262 do {(p).val = ((p).val & ~3) | ((prot) & 3); } while (0)
263#define dma_pte_addr(p) ((p).val & PAGE_MASK_4K)
264#define dma_set_pte_addr(p, addr) do {\
265 (p).val |= ((addr) & PAGE_MASK_4K); } while (0)
266#define dma_pte_present(p) (((p).val & 3) != 0)
267
268struct intel_iommu;
269
270struct dmar_domain {
271 int id; /* domain id */
272 struct intel_iommu *iommu; /* back pointer to owning iommu */
273
274 struct list_head devices; /* all devices' list */
275 struct iova_domain iovad; /* iova's that belong to this domain */
276
277 struct dma_pte *pgd; /* virtual address */
278 spinlock_t mapping_lock; /* page table lock */
279 int gaw; /* max guest address width */
280
281 /* adjusted guest address width, 0 is level 2 30-bit */
282 int agaw;
283
284#define DOMAIN_FLAG_MULTIPLE_DEVICES 1
285 int flags;
286};
287
288/* PCI domain-device relationship */
289struct device_domain_info {
290 struct list_head link; /* link to domain siblings */
291 struct list_head global; /* link to global list */
292	u8 bus; /* PCI bus number */
293	u8 devfn; /* PCI devfn number */
294	struct pci_dev *dev; /* it's NULL for a PCIe-to-PCI bridge */
295 struct dmar_domain *domain; /* pointer to domain */
296};
297
298extern int init_dmars(void);
299
300struct intel_iommu {
301 void __iomem *reg; /* Pointer to hardware regs, virtual addr */
302 u64 cap;
303 u64 ecap;
304 unsigned long *domain_ids; /* bitmap of domains */
305 struct dmar_domain **domains; /* ptr to domains */
306 int seg;
307 u32 gcmd; /* Holds TE, EAFL. Don't need SRTP, SFL, WBF */
308 spinlock_t lock; /* protect context, domain ids */
309 spinlock_t register_lock; /* protect register handling */
310 struct root_entry *root_entry; /* virtual address */
311
312 unsigned int irq;
313 unsigned char name[7]; /* Device Name */
314 struct msi_msg saved_msg;
315 struct sys_device sysdev;
316};
317
318#ifndef CONFIG_DMAR_GFX_WA
319static inline void iommu_prepare_gfx_mapping(void)
320{
321 return;
322}
323#endif /* !CONFIG_DMAR_GFX_WA */
324
325#endif
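
/*
 * Illustrative sketch, not part of this patch: the cap_*()/ecap_*() macros
 * above decode the raw 64-bit capability registers that the driver reads
 * into iommu->cap and iommu->ecap.  A hypothetical debug helper could use
 * them like this ("example_dump_caps" is a made-up name):
 */
static inline void example_dump_caps(struct intel_iommu *iommu)
{
	printk(KERN_DEBUG "%s: mgaw %d sagaw 0x%lx ndoms %lu %s\n",
		iommu->name,
		(int)cap_mgaw(iommu->cap),		/* max guest address width */
		(unsigned long)cap_sagaw(iommu->cap),	/* supported AGAW bitmask */
		cap_ndoms(iommu->cap),			/* number of domain ids */
		ecap_coherent(iommu->ecap) ? "coherent" : "non-coherent");
}
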
diff --git a/drivers/pci/iova.c b/drivers/pci/iova.c
new file mode 100644
index 000000000000..a84571c29360
--- /dev/null
+++ b/drivers/pci/iova.c
@@ -0,0 +1,394 @@
1/*
2 * Copyright (c) 2006, Intel Corporation.
3 *
4 * This file is released under the GPLv2.
5 *
6 * Copyright (C) 2006 Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
7 */
8
9#include "iova.h"
10
11void
12init_iova_domain(struct iova_domain *iovad)
13{
14 spin_lock_init(&iovad->iova_alloc_lock);
15 spin_lock_init(&iovad->iova_rbtree_lock);
16 iovad->rbroot = RB_ROOT;
17 iovad->cached32_node = NULL;
18
19}
20
21static struct rb_node *
22__get_cached_rbnode(struct iova_domain *iovad, unsigned long *limit_pfn)
23{
24 if ((*limit_pfn != DMA_32BIT_PFN) ||
25 (iovad->cached32_node == NULL))
26 return rb_last(&iovad->rbroot);
27 else {
28 struct rb_node *prev_node = rb_prev(iovad->cached32_node);
29 struct iova *curr_iova =
30 container_of(iovad->cached32_node, struct iova, node);
31 *limit_pfn = curr_iova->pfn_lo - 1;
32 return prev_node;
33 }
34}
35
36static void
37__cached_rbnode_insert_update(struct iova_domain *iovad,
38 unsigned long limit_pfn, struct iova *new)
39{
40 if (limit_pfn != DMA_32BIT_PFN)
41 return;
42 iovad->cached32_node = &new->node;
43}
44
45static void
46__cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free)
47{
48 struct iova *cached_iova;
49 struct rb_node *curr;
50
51 if (!iovad->cached32_node)
52 return;
53 curr = iovad->cached32_node;
54 cached_iova = container_of(curr, struct iova, node);
55
56 if (free->pfn_lo >= cached_iova->pfn_lo)
57 iovad->cached32_node = rb_next(&free->node);
58}
59
60/* Computes the padding size required to make the
61 * start address naturally aligned on its size
62 */
63static int
64iova_get_pad_size(int size, unsigned int limit_pfn)
65{
66 unsigned int pad_size = 0;
67 unsigned int order = ilog2(size);
68
69 if (order)
70 pad_size = (limit_pfn + 1) % (1 << order);
71
72 return pad_size;
73}
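
/*
 * Illustrative worked example, not part of this patch: for a size-aligned
 * allocation of 8 pfns below limit_pfn = 0xffffe, the pad is
 * (0xffffe + 1) % 8 = 7, so __alloc_iova_range() below places the range at
 * pfn_lo = 0xffffe - (8 + 7) + 1 = 0xffff0, which is naturally aligned on
 * the 8-pfn size.
 */
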
74
75static int __alloc_iova_range(struct iova_domain *iovad, unsigned long size,
76 unsigned long limit_pfn, struct iova *new, bool size_aligned)
77{
78 struct rb_node *curr = NULL;
79 unsigned long flags;
80 unsigned long saved_pfn;
81 unsigned int pad_size = 0;
82
83 /* Walk the tree backwards */
84 spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
85 saved_pfn = limit_pfn;
86 curr = __get_cached_rbnode(iovad, &limit_pfn);
87 while (curr) {
88 struct iova *curr_iova = container_of(curr, struct iova, node);
89 if (limit_pfn < curr_iova->pfn_lo)
90 goto move_left;
91 else if (limit_pfn < curr_iova->pfn_hi)
92 goto adjust_limit_pfn;
93 else {
94 if (size_aligned)
95 pad_size = iova_get_pad_size(size, limit_pfn);
96 if ((curr_iova->pfn_hi + size + pad_size) <= limit_pfn)
97 break; /* found a free slot */
98 }
99adjust_limit_pfn:
100 limit_pfn = curr_iova->pfn_lo - 1;
101move_left:
102 curr = rb_prev(curr);
103 }
104
105 if (!curr) {
106 if (size_aligned)
107 pad_size = iova_get_pad_size(size, limit_pfn);
108 if ((IOVA_START_PFN + size + pad_size) > limit_pfn) {
109 spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
110 return -ENOMEM;
111 }
112 }
113
114 /* pfn_lo will point to size aligned address if size_aligned is set */
115 new->pfn_lo = limit_pfn - (size + pad_size) + 1;
116 new->pfn_hi = new->pfn_lo + size - 1;
117
118 spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
119 return 0;
120}
121
122static void
123iova_insert_rbtree(struct rb_root *root, struct iova *iova)
124{
125 struct rb_node **new = &(root->rb_node), *parent = NULL;
126 /* Figure out where to put new node */
127 while (*new) {
128 struct iova *this = container_of(*new, struct iova, node);
129 parent = *new;
130
131 if (iova->pfn_lo < this->pfn_lo)
132 new = &((*new)->rb_left);
133 else if (iova->pfn_lo > this->pfn_lo)
134 new = &((*new)->rb_right);
135 else
136 BUG(); /* this should not happen */
137 }
138 /* Add new node and rebalance tree. */
139 rb_link_node(&iova->node, parent, new);
140 rb_insert_color(&iova->node, root);
141}
142
143/**
144 * alloc_iova - allocates an iova
145 * @iovad - iova domain in question
146 * @size - number of page frames to allocate
147 * @limit_pfn - max limit address
148 * @size_aligned - set if a size-aligned address range is required
149 * This function allocates an iova in the range IOVA_START_PFN to limit_pfn,
150 * searching downwards from limit_pfn. If the size_aligned
151 * flag is set then the allocated address iova->pfn_lo will be naturally
152 * aligned on roundup_power_of_two(size).
153 */
154struct iova *
155alloc_iova(struct iova_domain *iovad, unsigned long size,
156 unsigned long limit_pfn,
157 bool size_aligned)
158{
159 unsigned long flags;
160 struct iova *new_iova;
161 int ret;
162
163 new_iova = alloc_iova_mem();
164 if (!new_iova)
165 return NULL;
166
167	/* If size_aligned is set then round the size up
168	 * to the next power of two.
169 */
170 if (size_aligned)
171 size = __roundup_pow_of_two(size);
172
173 spin_lock_irqsave(&iovad->iova_alloc_lock, flags);
174 ret = __alloc_iova_range(iovad, size, limit_pfn, new_iova,
175 size_aligned);
176
177 if (ret) {
178 spin_unlock_irqrestore(&iovad->iova_alloc_lock, flags);
179 free_iova_mem(new_iova);
180 return NULL;
181 }
182
183 /* Insert the new_iova into domain rbtree by holding writer lock */
184 spin_lock(&iovad->iova_rbtree_lock);
185 iova_insert_rbtree(&iovad->rbroot, new_iova);
186 __cached_rbnode_insert_update(iovad, limit_pfn, new_iova);
187 spin_unlock(&iovad->iova_rbtree_lock);
188
189 spin_unlock_irqrestore(&iovad->iova_alloc_lock, flags);
190
191 return new_iova;
192}
193
194/**
195 * find_iova - finds an iova for a given pfn
196 * @iovad - iova domain in question.
197 * @pfn - page frame number
198 * This function finds and returns an iova belonging to the
199 * given domain which matches the given pfn.
200 */
201struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn)
202{
203 unsigned long flags;
204 struct rb_node *node;
205
206 /* Take the lock so that no other thread is manipulating the rbtree */
207 spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
208 node = iovad->rbroot.rb_node;
209 while (node) {
210 struct iova *iova = container_of(node, struct iova, node);
211
212 /* If pfn falls within iova's range, return iova */
213 if ((pfn >= iova->pfn_lo) && (pfn <= iova->pfn_hi)) {
214 spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
215 /* We are not holding the lock while this iova
216 * is referenced by the caller as the same thread
217 * which called this function also calls __free_iova()
218			 * and it is by design that only one thread can possibly
219 * reference a particular iova and hence no conflict.
220 */
221 return iova;
222 }
223
224 if (pfn < iova->pfn_lo)
225 node = node->rb_left;
226 else if (pfn > iova->pfn_lo)
227 node = node->rb_right;
228 }
229
230 spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
231 return NULL;
232}
233
234/**
235 * __free_iova - frees the given iova
236 * @iovad: iova domain in question.
237 * @iova: iova in question.
238 * Frees the given iova belonging to the giving domain
239 */
240void
241__free_iova(struct iova_domain *iovad, struct iova *iova)
242{
243 unsigned long flags;
244
245 spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
246 __cached_rbnode_delete_update(iovad, iova);
247 rb_erase(&iova->node, &iovad->rbroot);
248 spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
249 free_iova_mem(iova);
250}
251
252/**
253 * free_iova - finds and frees the iova for a given pfn
254 * @iovad: - iova domain in question.
255 * @pfn: - pfn that was allocated previously
256 * This function finds an iova for a given pfn and then
257 * frees the iova from that domain.
258 */
259void
260free_iova(struct iova_domain *iovad, unsigned long pfn)
261{
262 struct iova *iova = find_iova(iovad, pfn);
263 if (iova)
264 __free_iova(iovad, iova);
265
266}
267
268/**
269 * put_iova_domain - destroys the iova domain
270 * @iovad: - iova domain in question.
271 * All the iovas in that domain are destroyed.
272 */
273void put_iova_domain(struct iova_domain *iovad)
274{
275 struct rb_node *node;
276 unsigned long flags;
277
278 spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
279 node = rb_first(&iovad->rbroot);
280 while (node) {
281 struct iova *iova = container_of(node, struct iova, node);
282 rb_erase(node, &iovad->rbroot);
283 free_iova_mem(iova);
284 node = rb_first(&iovad->rbroot);
285 }
286 spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
287}
288
289static int
290__is_range_overlap(struct rb_node *node,
291 unsigned long pfn_lo, unsigned long pfn_hi)
292{
293 struct iova *iova = container_of(node, struct iova, node);
294
295 if ((pfn_lo <= iova->pfn_hi) && (pfn_hi >= iova->pfn_lo))
296 return 1;
297 return 0;
298}
299
300static struct iova *
301__insert_new_range(struct iova_domain *iovad,
302 unsigned long pfn_lo, unsigned long pfn_hi)
303{
304 struct iova *iova;
305
306 iova = alloc_iova_mem();
307 if (!iova)
308 return iova;
309
310 iova->pfn_hi = pfn_hi;
311 iova->pfn_lo = pfn_lo;
312 iova_insert_rbtree(&iovad->rbroot, iova);
313 return iova;
314}
315
316static void
317__adjust_overlap_range(struct iova *iova,
318 unsigned long *pfn_lo, unsigned long *pfn_hi)
319{
320 if (*pfn_lo < iova->pfn_lo)
321 iova->pfn_lo = *pfn_lo;
322 if (*pfn_hi > iova->pfn_hi)
323 *pfn_lo = iova->pfn_hi + 1;
324}
325
326/**
327 * reserve_iova - reserves an iova in the given range
328 * @iovad: - iova domain pointer
329 * @pfn_lo: - lower page frame address
330 * @pfn_hi: - higher pfn address
331 * This function reserves the address range from pfn_lo to pfn_hi so
332 * that this address is not dished out as part of alloc_iova.
333 */
334struct iova *
335reserve_iova(struct iova_domain *iovad,
336 unsigned long pfn_lo, unsigned long pfn_hi)
337{
338 struct rb_node *node;
339 unsigned long flags;
340 struct iova *iova;
341 unsigned int overlap = 0;
342
343 spin_lock_irqsave(&iovad->iova_alloc_lock, flags);
344 spin_lock(&iovad->iova_rbtree_lock);
345 for (node = rb_first(&iovad->rbroot); node; node = rb_next(node)) {
346 if (__is_range_overlap(node, pfn_lo, pfn_hi)) {
347 iova = container_of(node, struct iova, node);
348 __adjust_overlap_range(iova, &pfn_lo, &pfn_hi);
349 if ((pfn_lo >= iova->pfn_lo) &&
350 (pfn_hi <= iova->pfn_hi))
351 goto finish;
352 overlap = 1;
353
354 } else if (overlap)
355 break;
356 }
357
358	/* We are here either because this is the first reserved node
359	 * or we need to insert the remaining non-overlapping address range
360 */
361 iova = __insert_new_range(iovad, pfn_lo, pfn_hi);
362finish:
363
364 spin_unlock(&iovad->iova_rbtree_lock);
365 spin_unlock_irqrestore(&iovad->iova_alloc_lock, flags);
366 return iova;
367}
368
369/**
370 * copy_reserved_iova - copies the reserved iovas between domains
371 * @from: - source domain from which to copy
372 * @to: - destination domain to which to copy
373 * This function copies reserved iovas from one domain to
374 * another.
375 */
376void
377copy_reserved_iova(struct iova_domain *from, struct iova_domain *to)
378{
379 unsigned long flags;
380 struct rb_node *node;
381
382 spin_lock_irqsave(&from->iova_alloc_lock, flags);
383 spin_lock(&from->iova_rbtree_lock);
384 for (node = rb_first(&from->rbroot); node; node = rb_next(node)) {
385 struct iova *iova = container_of(node, struct iova, node);
386 struct iova *new_iova;
387 new_iova = reserve_iova(to, iova->pfn_lo, iova->pfn_hi);
388 if (!new_iova)
389			printk(KERN_ERR "Reserve iova range %lx-%lx failed\n",
390				iova->pfn_lo, iova->pfn_hi);
391 }
392 spin_unlock(&from->iova_rbtree_lock);
393 spin_unlock_irqrestore(&from->iova_alloc_lock, flags);
394}
diff --git a/drivers/pci/iova.h b/drivers/pci/iova.h
new file mode 100644
index 000000000000..ae3028d5a941
--- /dev/null
+++ b/drivers/pci/iova.h
@@ -0,0 +1,63 @@
1/*
2 * Copyright (c) 2006, Intel Corporation.
3 *
4 * This file is released under the GPLv2.
5 *
6 * Copyright (C) 2006 Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
7 *
8 */
9
10#ifndef _IOVA_H_
11#define _IOVA_H_
12
13#include <linux/types.h>
14#include <linux/kernel.h>
15#include <linux/rbtree.h>
16#include <linux/dma-mapping.h>
17
18/*
19 * We need a fixed PAGE_SIZE of 4K irrespective of
20 * arch PAGE_SIZE for IOMMU page tables.
21 */
22#define PAGE_SHIFT_4K (12)
23#define PAGE_SIZE_4K (1UL << PAGE_SHIFT_4K)
24#define PAGE_MASK_4K (((u64)-1) << PAGE_SHIFT_4K)
25#define PAGE_ALIGN_4K(addr) (((addr) + PAGE_SIZE_4K - 1) & PAGE_MASK_4K)
26
27/* IO virtual address start page frame number */
28#define IOVA_START_PFN (1)
29
30#define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT_4K)
31#define DMA_32BIT_PFN IOVA_PFN(DMA_32BIT_MASK)
32#define DMA_64BIT_PFN IOVA_PFN(DMA_64BIT_MASK)
33
34/* iova structure */
35struct iova {
36 struct rb_node node;
37 unsigned long pfn_hi; /* IOMMU dish out addr hi */
38 unsigned long pfn_lo; /* IOMMU dish out addr lo */
39};
40
41/* holds all the iova translations for a domain */
42struct iova_domain {
43 spinlock_t iova_alloc_lock;/* Lock to protect iova allocation */
44 spinlock_t iova_rbtree_lock; /* Lock to protect update of rbtree */
45 struct rb_root rbroot; /* iova domain rbtree root */
46 struct rb_node *cached32_node; /* Save last alloced node */
47};
48
49struct iova *alloc_iova_mem(void);
50void free_iova_mem(struct iova *iova);
51void free_iova(struct iova_domain *iovad, unsigned long pfn);
52void __free_iova(struct iova_domain *iovad, struct iova *iova);
53struct iova *alloc_iova(struct iova_domain *iovad, unsigned long size,
54 unsigned long limit_pfn,
55 bool size_aligned);
56struct iova *reserve_iova(struct iova_domain *iovad, unsigned long pfn_lo,
57 unsigned long pfn_hi);
58void copy_reserved_iova(struct iova_domain *from, struct iova_domain *to);
59void init_iova_domain(struct iova_domain *iovad);
60struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn);
61void put_iova_domain(struct iova_domain *iovad);
62
63#endif
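
/*
 * Illustrative sketch, not part of this patch: intended use of the iova
 * allocator.  alloc_iova_mem()/free_iova_mem() must be supplied by the
 * user of this API (intel-iommu.c backs them with a kmem_cache);
 * "example_iovad" and the sizes below are made up for illustration.
 */
static struct iova_domain example_iovad;

static void example_iova_usage(void)
{
	struct iova *iova;

	init_iova_domain(&example_iovad);

	/* keep the first 16MB of IO virtual space out of circulation */
	reserve_iova(&example_iovad, IOVA_PFN(0), IOVA_PFN(16 * 1024 * 1024) - 1);

	/* grab 16 naturally aligned pfns below the 32-bit boundary */
	iova = alloc_iova(&example_iovad, 16, DMA_32BIT_PFN, 1);
	if (!iova)
		return;

	/* later: look the range up by any pfn inside it and release it */
	__free_iova(&example_iovad, find_iova(&example_iovad, iova->pfn_lo));

	put_iova_domain(&example_iovad);
}
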
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 6fda33de84e8..fc87e14b50de 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -90,3 +90,4 @@ pci_match_one_device(const struct pci_device_id *id, const struct pci_dev *dev)
90 return NULL; 90 return NULL;
91} 91}
92 92
93struct pci_dev *pci_find_upstream_pcie_bridge(struct pci_dev *pdev);
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 5db6b6690b59..463a5a9d583d 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -837,6 +837,19 @@ static void pci_release_dev(struct device *dev)
837 kfree(pci_dev); 837 kfree(pci_dev);
838} 838}
839 839
840static void set_pcie_port_type(struct pci_dev *pdev)
841{
842 int pos;
843 u16 reg16;
844
845 pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
846 if (!pos)
847 return;
848 pdev->is_pcie = 1;
849 pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16);
850 pdev->pcie_type = (reg16 & PCI_EXP_FLAGS_TYPE) >> 4;
851}
852
840/** 853/**
841 * pci_cfg_space_size - get the configuration space size of the PCI device. 854 * pci_cfg_space_size - get the configuration space size of the PCI device.
842 * @dev: PCI device 855 * @dev: PCI device
@@ -951,6 +964,7 @@ pci_scan_device(struct pci_bus *bus, int devfn)
951 dev->device = (l >> 16) & 0xffff; 964 dev->device = (l >> 16) & 0xffff;
952 dev->cfg_size = pci_cfg_space_size(dev); 965 dev->cfg_size = pci_cfg_space_size(dev);
953 dev->error_state = pci_channel_io_normal; 966 dev->error_state = pci_channel_io_normal;
967 set_pcie_port_type(dev);
954 968
955 /* Assume 32-bit PCI; let 64-bit PCI cards (which are far rarer) 969 /* Assume 32-bit PCI; let 64-bit PCI cards (which are far rarer)
956 set this higher, assuming the system even supports it. */ 970 set this higher, assuming the system even supports it. */
diff --git a/drivers/pci/search.c b/drivers/pci/search.c
index c6e79d01ce3d..b001b5922e33 100644
--- a/drivers/pci/search.c
+++ b/drivers/pci/search.c
@@ -14,6 +14,40 @@
14#include "pci.h" 14#include "pci.h"
15 15
16DECLARE_RWSEM(pci_bus_sem); 16DECLARE_RWSEM(pci_bus_sem);
17/*
18 * Find the upstream PCIe-to-PCI bridge of a PCI device.
19 * If the device is PCIe, return NULL.
20 * If the device isn't connected to a PCIe bridge (that is, its parent is a
21 * legacy PCI bridge and the bridge is directly connected to bus 0), return
22 * its parent.
23 */
24struct pci_dev *
25pci_find_upstream_pcie_bridge(struct pci_dev *pdev)
26{
27 struct pci_dev *tmp = NULL;
28
29 if (pdev->is_pcie)
30 return NULL;
31 while (1) {
32 if (!pdev->bus->self)
33 break;
34 pdev = pdev->bus->self;
35 /* a p2p bridge */
36 if (!pdev->is_pcie) {
37 tmp = pdev;
38 continue;
39 }
40 /* PCI device should connect to a PCIE bridge */
41 if (pdev->pcie_type != PCI_EXP_TYPE_PCI_BRIDGE) {
42 /* Busted hardware? */
43 WARN_ON_ONCE(1);
44 return NULL;
45 }
46 return pdev;
47 }
48
49 return tmp;
50}
17 51
18static struct pci_bus *pci_do_find_bus(struct pci_bus *bus, unsigned char busnr) 52static struct pci_bus *pci_do_find_bus(struct pci_bus *bus, unsigned char busnr)
19{ 53{
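
/*
 * Illustrative sketch, not part of this patch: a hypothetical caller of the
 * pci_find_upstream_pcie_bridge() helper added above.  The IOMMU code needs
 * to know which requester ID a device's DMA is tagged with upstream, which
 * is the PCIe-to-PCI bridge for conventional PCI devices behind one.
 * "example_report_upstream" is a made-up name.
 */
static void example_report_upstream(struct pci_dev *pdev)
{
	struct pci_dev *bridge = pci_find_upstream_pcie_bridge(pdev);

	if (bridge)
		printk(KERN_DEBUG "%s: DMA reaches the root via bridge %s\n",
			pci_name(pdev), pci_name(bridge));
	else
		printk(KERN_DEBUG "%s: no PCIe-to-PCI bridge upstream\n",
			pci_name(pdev));
}
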
diff --git a/drivers/power/apm_power.c b/drivers/power/apm_power.c
index 39a90a6f0f80..bbf3ee10da04 100644
--- a/drivers/power/apm_power.c
+++ b/drivers/power/apm_power.c
@@ -26,65 +26,124 @@ static struct power_supply *main_battery;
26static void find_main_battery(void) 26static void find_main_battery(void)
27{ 27{
28 struct device *dev; 28 struct device *dev;
29 struct power_supply *bat, *batm; 29 struct power_supply *bat = NULL;
30 struct power_supply *max_charge_bat = NULL;
31 struct power_supply *max_energy_bat = NULL;
30 union power_supply_propval full; 32 union power_supply_propval full;
31 int max_charge = 0; 33 int max_charge = 0;
34 int max_energy = 0;
32 35
33 main_battery = NULL; 36 main_battery = NULL;
34 batm = NULL; 37
35 list_for_each_entry(dev, &power_supply_class->devices, node) { 38 list_for_each_entry(dev, &power_supply_class->devices, node) {
36 bat = dev_get_drvdata(dev); 39 bat = dev_get_drvdata(dev);
37 /* If none of battery devices cantains 'use_for_apm' flag, 40
38 choice one with maximum design charge */ 41 if (bat->use_for_apm) {
39 if (!PSY_PROP(bat, CHARGE_FULL_DESIGN, &full)) { 42 /* nice, we explicitly asked to report this battery. */
43 main_battery = bat;
44 return;
45 }
46
47 if (!PSY_PROP(bat, CHARGE_FULL_DESIGN, &full) ||
48 !PSY_PROP(bat, CHARGE_FULL, &full)) {
40 if (full.intval > max_charge) { 49 if (full.intval > max_charge) {
41 batm = bat; 50 max_charge_bat = bat;
42 max_charge = full.intval; 51 max_charge = full.intval;
43 } 52 }
53 } else if (!PSY_PROP(bat, ENERGY_FULL_DESIGN, &full) ||
54 !PSY_PROP(bat, ENERGY_FULL, &full)) {
55 if (full.intval > max_energy) {
56 max_energy_bat = bat;
57 max_energy = full.intval;
58 }
44 } 59 }
60 }
45 61
46 if (bat->use_for_apm) 62 if ((max_energy_bat && max_charge_bat) &&
47 main_battery = bat; 63 (max_energy_bat != max_charge_bat)) {
64		/* try to guess the battery with more capacity */
65 if (!PSY_PROP(max_charge_bat, VOLTAGE_MAX_DESIGN, &full)) {
66 if (max_energy > max_charge * full.intval)
67 main_battery = max_energy_bat;
68 else
69 main_battery = max_charge_bat;
70 } else if (!PSY_PROP(max_energy_bat, VOLTAGE_MAX_DESIGN,
71 &full)) {
72 if (max_charge > max_energy / full.intval)
73 main_battery = max_charge_bat;
74 else
75 main_battery = max_energy_bat;
76 } else {
77		/* give up, choose any */
78 main_battery = max_energy_bat;
79 }
80 } else if (max_charge_bat) {
81 main_battery = max_charge_bat;
82 } else if (max_energy_bat) {
83 main_battery = max_energy_bat;
84 } else {
85 /* give up, try the last if any */
86 main_battery = bat;
48 } 87 }
49 if (!main_battery)
50 main_battery = batm;
51} 88}
52 89
53static int calculate_time(int status) 90static int calculate_time(int status, int using_charge)
54{ 91{
55 union power_supply_propval charge_full, charge_empty; 92 union power_supply_propval full;
56 union power_supply_propval charge, I; 93 union power_supply_propval empty;
94 union power_supply_propval cur;
95 union power_supply_propval I;
96 enum power_supply_property full_prop;
97 enum power_supply_property full_design_prop;
98 enum power_supply_property empty_prop;
99 enum power_supply_property empty_design_prop;
100 enum power_supply_property cur_avg_prop;
101 enum power_supply_property cur_now_prop;
57 102
58 if (MPSY_PROP(CHARGE_FULL, &charge_full)) { 103 if (MPSY_PROP(CURRENT_AVG, &I)) {
59 /* if battery can't report this property, use design value */ 104 /* if battery can't report average value, use momentary */
60 if (MPSY_PROP(CHARGE_FULL_DESIGN, &charge_full)) 105 if (MPSY_PROP(CURRENT_NOW, &I))
61 return -1; 106 return -1;
62 } 107 }
63 108
64 if (MPSY_PROP(CHARGE_EMPTY, &charge_empty)) { 109 if (using_charge) {
65 /* if battery can't report this property, use design value */ 110 full_prop = POWER_SUPPLY_PROP_CHARGE_FULL;
66 if (MPSY_PROP(CHARGE_EMPTY_DESIGN, &charge_empty)) 111 full_design_prop = POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN;
67 charge_empty.intval = 0; 112 empty_prop = POWER_SUPPLY_PROP_CHARGE_EMPTY;
113 empty_design_prop = POWER_SUPPLY_PROP_CHARGE_EMPTY;
114 cur_avg_prop = POWER_SUPPLY_PROP_CHARGE_AVG;
115 cur_now_prop = POWER_SUPPLY_PROP_CHARGE_NOW;
116 } else {
117 full_prop = POWER_SUPPLY_PROP_ENERGY_FULL;
118 full_design_prop = POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN;
119 empty_prop = POWER_SUPPLY_PROP_ENERGY_EMPTY;
120 empty_design_prop = POWER_SUPPLY_PROP_CHARGE_EMPTY;
121 cur_avg_prop = POWER_SUPPLY_PROP_ENERGY_AVG;
122 cur_now_prop = POWER_SUPPLY_PROP_ENERGY_NOW;
68 } 123 }
69 124
70 if (MPSY_PROP(CHARGE_AVG, &charge)) { 125 if (_MPSY_PROP(full_prop, &full)) {
71 /* if battery can't report average value, use momentary */ 126 /* if battery can't report this property, use design value */
72 if (MPSY_PROP(CHARGE_NOW, &charge)) 127 if (_MPSY_PROP(full_design_prop, &full))
73 return -1; 128 return -1;
74 } 129 }
75 130
76 if (MPSY_PROP(CURRENT_AVG, &I)) { 131 if (_MPSY_PROP(empty_prop, &empty)) {
132 /* if battery can't report this property, use design value */
133 if (_MPSY_PROP(empty_design_prop, &empty))
134 empty.intval = 0;
135 }
136
137 if (_MPSY_PROP(cur_avg_prop, &cur)) {
77 /* if battery can't report average value, use momentary */ 138 /* if battery can't report average value, use momentary */
78 if (MPSY_PROP(CURRENT_NOW, &I)) 139 if (_MPSY_PROP(cur_now_prop, &cur))
79 return -1; 140 return -1;
80 } 141 }
81 142
82 if (status == POWER_SUPPLY_STATUS_CHARGING) 143 if (status == POWER_SUPPLY_STATUS_CHARGING)
83 return ((charge.intval - charge_full.intval) * 60L) / 144 return ((cur.intval - full.intval) * 60L) / I.intval;
84 I.intval;
85 else 145 else
86 return -((charge.intval - charge_empty.intval) * 60L) / 146 return -((cur.intval - empty.intval) * 60L) / I.intval;
87 I.intval;
88} 147}
89 148
90static int calculate_capacity(int using_charge) 149static int calculate_capacity(int using_charge)
@@ -200,18 +259,22 @@ static void apm_battery_apm_get_power_status(struct apm_power_info *info)
200 info->units = APM_UNITS_MINS; 259 info->units = APM_UNITS_MINS;
201 260
202 if (status.intval == POWER_SUPPLY_STATUS_CHARGING) { 261 if (status.intval == POWER_SUPPLY_STATUS_CHARGING) {
203 if (MPSY_PROP(TIME_TO_FULL_AVG, &time_to_full)) { 262 if (!MPSY_PROP(TIME_TO_FULL_AVG, &time_to_full) ||
204 if (MPSY_PROP(TIME_TO_FULL_NOW, &time_to_full)) 263 !MPSY_PROP(TIME_TO_FULL_NOW, &time_to_full)) {
205 info->time = calculate_time(status.intval); 264 info->time = time_to_full.intval / 60;
206 else 265 } else {
207 info->time = time_to_full.intval / 60; 266 info->time = calculate_time(status.intval, 0);
267 if (info->time == -1)
268 info->time = calculate_time(status.intval, 1);
208 } 269 }
209 } else { 270 } else {
210 if (MPSY_PROP(TIME_TO_EMPTY_AVG, &time_to_empty)) { 271 if (!MPSY_PROP(TIME_TO_EMPTY_AVG, &time_to_empty) ||
211 if (MPSY_PROP(TIME_TO_EMPTY_NOW, &time_to_empty)) 272 !MPSY_PROP(TIME_TO_EMPTY_NOW, &time_to_empty)) {
212 info->time = calculate_time(status.intval); 273 info->time = time_to_empty.intval / 60;
213 else 274 } else {
214 info->time = time_to_empty.intval / 60; 275 info->time = calculate_time(status.intval, 0);
276 if (info->time == -1)
277 info->time = calculate_time(status.intval, 1);
215 } 278 }
216 } 279 }
217 280
diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c
index 2edd5fb6d3dc..8d1c64a24dec 100644
--- a/drivers/s390/char/raw3270.c
+++ b/drivers/s390/char/raw3270.c
@@ -48,8 +48,8 @@ struct raw3270 {
48 struct timer_list timer; /* Device timer. */ 48 struct timer_list timer; /* Device timer. */
49 49
50 unsigned char *ascebc; /* ascii -> ebcdic table */ 50 unsigned char *ascebc; /* ascii -> ebcdic table */
51 struct class_device *clttydev; /* 3270-class tty device ptr */ 51 struct device *clttydev; /* 3270-class tty device ptr */
52 struct class_device *cltubdev; /* 3270-class tub device ptr */ 52 struct device *cltubdev; /* 3270-class tub device ptr */
53 53
54 struct raw3270_request init_request; 54 struct raw3270_request init_request;
55 unsigned char init_data[256]; 55 unsigned char init_data[256];
@@ -1107,11 +1107,9 @@ raw3270_delete_device(struct raw3270 *rp)
1107 /* Remove from device chain. */ 1107 /* Remove from device chain. */
1108 mutex_lock(&raw3270_mutex); 1108 mutex_lock(&raw3270_mutex);
1109 if (rp->clttydev && !IS_ERR(rp->clttydev)) 1109 if (rp->clttydev && !IS_ERR(rp->clttydev))
1110 class_device_destroy(class3270, 1110 device_destroy(class3270, MKDEV(IBM_TTY3270_MAJOR, rp->minor));
1111 MKDEV(IBM_TTY3270_MAJOR, rp->minor));
1112 if (rp->cltubdev && !IS_ERR(rp->cltubdev)) 1111 if (rp->cltubdev && !IS_ERR(rp->cltubdev))
1113 class_device_destroy(class3270, 1112 device_destroy(class3270, MKDEV(IBM_FS3270_MAJOR, rp->minor));
1114 MKDEV(IBM_FS3270_MAJOR, rp->minor));
1115 list_del_init(&rp->list); 1113 list_del_init(&rp->list);
1116 mutex_unlock(&raw3270_mutex); 1114 mutex_unlock(&raw3270_mutex);
1117 1115
@@ -1181,24 +1179,22 @@ static int raw3270_create_attributes(struct raw3270 *rp)
1181 if (rc) 1179 if (rc)
1182 goto out; 1180 goto out;
1183 1181
1184 rp->clttydev = class_device_create(class3270, NULL, 1182 rp->clttydev = device_create(class3270, &rp->cdev->dev,
1185 MKDEV(IBM_TTY3270_MAJOR, rp->minor), 1183 MKDEV(IBM_TTY3270_MAJOR, rp->minor),
1186 &rp->cdev->dev, "tty%s", 1184 "tty%s", rp->cdev->dev.bus_id);
1187 rp->cdev->dev.bus_id);
1188 if (IS_ERR(rp->clttydev)) { 1185 if (IS_ERR(rp->clttydev)) {
1189 rc = PTR_ERR(rp->clttydev); 1186 rc = PTR_ERR(rp->clttydev);
1190 goto out_ttydev; 1187 goto out_ttydev;
1191 } 1188 }
1192 1189
1193 rp->cltubdev = class_device_create(class3270, NULL, 1190 rp->cltubdev = device_create(class3270, &rp->cdev->dev,
1194 MKDEV(IBM_FS3270_MAJOR, rp->minor), 1191 MKDEV(IBM_FS3270_MAJOR, rp->minor),
1195 &rp->cdev->dev, "tub%s", 1192 "tub%s", rp->cdev->dev.bus_id);
1196 rp->cdev->dev.bus_id);
1197 if (!IS_ERR(rp->cltubdev)) 1193 if (!IS_ERR(rp->cltubdev))
1198 goto out; 1194 goto out;
1199 1195
1200 rc = PTR_ERR(rp->cltubdev); 1196 rc = PTR_ERR(rp->cltubdev);
1201 class_device_destroy(class3270, MKDEV(IBM_TTY3270_MAJOR, rp->minor)); 1197 device_destroy(class3270, MKDEV(IBM_TTY3270_MAJOR, rp->minor));
1202 1198
1203out_ttydev: 1199out_ttydev:
1204 sysfs_remove_group(&rp->cdev->dev.kobj, &raw3270_attr_group); 1200 sysfs_remove_group(&rp->cdev->dev.kobj, &raw3270_attr_group);
diff --git a/drivers/s390/char/tape_class.c b/drivers/s390/char/tape_class.c
index 2e0d29730b67..aa7f166f4034 100644
--- a/drivers/s390/char/tape_class.c
+++ b/drivers/s390/char/tape_class.c
@@ -69,12 +69,9 @@ struct tape_class_device *register_tape_dev(
69 if (rc) 69 if (rc)
70 goto fail_with_cdev; 70 goto fail_with_cdev;
71 71
72 tcd->class_device = class_device_create( 72 tcd->class_device = device_create(tape_class, device,
73 tape_class, 73 tcd->char_device->dev,
74 NULL, 74 "%s", tcd->device_name
75 tcd->char_device->dev,
76 device,
77 "%s", tcd->device_name
78 ); 75 );
79 rc = IS_ERR(tcd->class_device) ? PTR_ERR(tcd->class_device) : 0; 76 rc = IS_ERR(tcd->class_device) ? PTR_ERR(tcd->class_device) : 0;
80 if (rc) 77 if (rc)
@@ -90,7 +87,7 @@ struct tape_class_device *register_tape_dev(
90 return tcd; 87 return tcd;
91 88
92fail_with_class_device: 89fail_with_class_device:
93 class_device_destroy(tape_class, tcd->char_device->dev); 90 device_destroy(tape_class, tcd->char_device->dev);
94 91
95fail_with_cdev: 92fail_with_cdev:
96 cdev_del(tcd->char_device); 93 cdev_del(tcd->char_device);
@@ -105,11 +102,9 @@ EXPORT_SYMBOL(register_tape_dev);
105void unregister_tape_dev(struct tape_class_device *tcd) 102void unregister_tape_dev(struct tape_class_device *tcd)
106{ 103{
107 if (tcd != NULL && !IS_ERR(tcd)) { 104 if (tcd != NULL && !IS_ERR(tcd)) {
108 sysfs_remove_link( 105 sysfs_remove_link(&tcd->class_device->kobj,
109 &tcd->class_device->dev->kobj, 106 tcd->mode_name);
110 tcd->mode_name 107 device_destroy(tape_class, tcd->char_device->dev);
111 );
112 class_device_destroy(tape_class, tcd->char_device->dev);
113 cdev_del(tcd->char_device); 108 cdev_del(tcd->char_device);
114 kfree(tcd); 109 kfree(tcd);
115 } 110 }
diff --git a/drivers/s390/char/tape_class.h b/drivers/s390/char/tape_class.h
index a8bd9b47fad6..e2b5ac918acf 100644
--- a/drivers/s390/char/tape_class.h
+++ b/drivers/s390/char/tape_class.h
@@ -24,8 +24,8 @@
24#define TAPECLASS_NAME_LEN 32 24#define TAPECLASS_NAME_LEN 32
25 25
26struct tape_class_device { 26struct tape_class_device {
27 struct cdev * char_device; 27 struct cdev *char_device;
28 struct class_device * class_device; 28 struct device *class_device;
29 char device_name[TAPECLASS_NAME_LEN]; 29 char device_name[TAPECLASS_NAME_LEN];
30 char mode_name[TAPECLASS_NAME_LEN]; 30 char mode_name[TAPECLASS_NAME_LEN];
31}; 31};
diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c
index 12f7a4ce82c1..e0c4c508e121 100644
--- a/drivers/s390/char/vmlogrdr.c
+++ b/drivers/s390/char/vmlogrdr.c
@@ -74,7 +74,7 @@ struct vmlogrdr_priv_t {
74 int dev_in_use; /* 1: already opened, 0: not opened*/ 74 int dev_in_use; /* 1: already opened, 0: not opened*/
75 spinlock_t priv_lock; 75 spinlock_t priv_lock;
76 struct device *device; 76 struct device *device;
77 struct class_device *class_device; 77 struct device *class_device;
78 int autorecording; 78 int autorecording;
79 int autopurge; 79 int autopurge;
80}; 80};
@@ -762,12 +762,10 @@ static int vmlogrdr_register_device(struct vmlogrdr_priv_t *priv)
762 device_unregister(dev); 762 device_unregister(dev);
763 return ret; 763 return ret;
764 } 764 }
765 priv->class_device = class_device_create( 765 priv->class_device = device_create(vmlogrdr_class, dev,
766 vmlogrdr_class, 766 MKDEV(vmlogrdr_major,
767 NULL, 767 priv->minor_num),
768 MKDEV(vmlogrdr_major, priv->minor_num), 768 "%s", dev->bus_id);
769 dev,
770 "%s", dev->bus_id );
771 if (IS_ERR(priv->class_device)) { 769 if (IS_ERR(priv->class_device)) {
772 ret = PTR_ERR(priv->class_device); 770 ret = PTR_ERR(priv->class_device);
773 priv->class_device=NULL; 771 priv->class_device=NULL;
@@ -783,8 +781,7 @@ static int vmlogrdr_register_device(struct vmlogrdr_priv_t *priv)
783 781
784static int vmlogrdr_unregister_device(struct vmlogrdr_priv_t *priv) 782static int vmlogrdr_unregister_device(struct vmlogrdr_priv_t *priv)
785{ 783{
786 class_device_destroy(vmlogrdr_class, 784 device_destroy(vmlogrdr_class, MKDEV(vmlogrdr_major, priv->minor_num));
787 MKDEV(vmlogrdr_major, priv->minor_num));
788 if (priv->device != NULL) { 785 if (priv->device != NULL) {
789 sysfs_remove_group(&priv->device->kobj, &vmlogrdr_attr_group); 786 sysfs_remove_group(&priv->device->kobj, &vmlogrdr_attr_group);
790 device_unregister(priv->device); 787 device_unregister(priv->device);
diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c
index 42c1f4659adb..297cdceb0ca4 100644
--- a/drivers/s390/cio/chp.c
+++ b/drivers/s390/cio/chp.c
@@ -246,7 +246,7 @@ int chp_add_cmg_attr(struct channel_path *chp)
 static ssize_t chp_status_show(struct device *dev,
 			       struct device_attribute *attr, char *buf)
 {
-	struct channel_path *chp = container_of(dev, struct channel_path, dev);
+	struct channel_path *chp = to_channelpath(dev);
 
 	if (!chp)
 		return 0;
@@ -258,7 +258,7 @@ static ssize_t chp_status_write(struct device *dev,
 				struct device_attribute *attr,
 				const char *buf, size_t count)
 {
-	struct channel_path *cp = container_of(dev, struct channel_path, dev);
+	struct channel_path *cp = to_channelpath(dev);
 	char cmd[10];
 	int num_args;
 	int error;
@@ -286,7 +286,7 @@ static ssize_t chp_configure_show(struct device *dev,
 	struct channel_path *cp;
 	int status;
 
-	cp = container_of(dev, struct channel_path, dev);
+	cp = to_channelpath(dev);
 	status = chp_info_get_status(cp->chpid);
 	if (status < 0)
 		return status;
@@ -308,7 +308,7 @@ static ssize_t chp_configure_write(struct device *dev,
 		return -EINVAL;
 	if (val != 0 && val != 1)
 		return -EINVAL;
-	cp = container_of(dev, struct channel_path, dev);
+	cp = to_channelpath(dev);
 	chp_cfg_schedule(cp->chpid, val);
 	cfg_wait_idle();
 
@@ -320,7 +320,7 @@ static DEVICE_ATTR(configure, 0644, chp_configure_show, chp_configure_write);
 static ssize_t chp_type_show(struct device *dev, struct device_attribute *attr,
 			     char *buf)
 {
-	struct channel_path *chp = container_of(dev, struct channel_path, dev);
+	struct channel_path *chp = to_channelpath(dev);
 
 	if (!chp)
 		return 0;
@@ -374,7 +374,7 @@ static void chp_release(struct device *dev)
 {
 	struct channel_path *cp;
 
-	cp = container_of(dev, struct channel_path, dev);
+	cp = to_channelpath(dev);
 	kfree(cp);
 }
 
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 5d83dd471461..838f7ac0dc32 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -182,6 +182,15 @@ static int css_register_subchannel(struct subchannel *sch)
 	sch->dev.bus = &css_bus_type;
 	sch->dev.release = &css_subchannel_release;
 	sch->dev.groups = subch_attr_groups;
+	/*
+	 * We don't want to generate uevents for I/O subchannels that don't
+	 * have a working ccw device behind them since they will be
+	 * unregistered before they can be used anyway, so we delay the add
+	 * uevent until after device recognition was successful.
+	 */
+	if (!cio_is_console(sch->schid))
+		/* Console is special, no need to suppress. */
+		sch->dev.uevent_suppress = 1;
 	css_update_ssd_info(sch);
 	/* make it known to the system */
 	ret = css_sch_device_register(sch);
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index 7507067351bd..fd5d0c1570df 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -559,6 +559,7 @@ zfcp_sg_list_alloc(struct zfcp_sg_list *sg_list, size_t size)
 		retval = -ENOMEM;
 		goto out;
 	}
+	sg_init_table(sg_list->sg, sg_list->count);
 
 	for (i = 0, sg = sg_list->sg; i < sg_list->count; i++, sg++) {
 		sg->length = min(size, PAGE_SIZE);
diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h
index 57cac7008e0b..326e7ee232cb 100644
--- a/drivers/s390/scsi/zfcp_def.h
+++ b/drivers/s390/scsi/zfcp_def.h
@@ -63,7 +63,7 @@
 static inline void *
 zfcp_sg_to_address(struct scatterlist *list)
 {
-	return (void *) (page_address(list->page) + list->offset);
+	return sg_virt(list);
 }
 
 /**
@@ -74,7 +74,7 @@ zfcp_sg_to_address(struct scatterlist *list)
 static inline void
 zfcp_address_to_sg(void *address, struct scatterlist *list)
 {
-	list->page = virt_to_page(address);
+	sg_set_page(list, virt_to_page(address));
 	list->offset = ((unsigned long) address) & (PAGE_SIZE - 1);
 }
 
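
[Note] Most hunks in this series swap direct field access (sg->page, or page_address(sg->page) + sg->offset) for the sg_page()/sg_virt() accessors, as the zfcp helpers above do. A hedged sketch of what the accessor amounts to for a plain, non-chained entry is given below; the function name is illustrative.

	#include <linux/scatterlist.h>

	/* Illustrative only: for a simple, non-chained scatterlist entry the
	 * accessor form on the right of these hunks is equivalent to the
	 * open-coded form on the left. */
	static void *example_sg_kernel_address(struct scatterlist *sg)
	{
		/* old style: page_address(sg->page) + sg->offset */
		return sg_virt(sg);	/* page_address(sg_page(sg)) + sg->offset */
	}
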
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index a6475a2bb8a7..9438d0b28799 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -308,13 +308,15 @@ zfcp_erp_adisc(struct zfcp_port *port)
 	if (send_els == NULL)
 		goto nomem;
 
-	send_els->req = kzalloc(sizeof(struct scatterlist), GFP_ATOMIC);
+	send_els->req = kmalloc(sizeof(struct scatterlist), GFP_ATOMIC);
 	if (send_els->req == NULL)
 		goto nomem;
+	sg_init_table(send_els->req, 1);
 
-	send_els->resp = kzalloc(sizeof(struct scatterlist), GFP_ATOMIC);
+	send_els->resp = kmalloc(sizeof(struct scatterlist), GFP_ATOMIC);
 	if (send_els->resp == NULL)
 		goto nomem;
+	sg_init_table(send_els->resp, 1);
 
 	address = (void *) get_zeroed_page(GFP_ATOMIC);
 	if (address == NULL)
@@ -363,7 +365,7 @@ zfcp_erp_adisc(struct zfcp_port *port)
 	retval = -ENOMEM;
 freemem:
 	if (address != NULL)
-		__free_pages(send_els->req->page, 0);
+		__free_pages(sg_page(send_els->req), 0);
 	if (send_els != NULL) {
 		kfree(send_els->req);
 		kfree(send_els->resp);
@@ -437,7 +439,7 @@ zfcp_erp_adisc_handler(unsigned long data)
 
  out:
 	zfcp_port_put(port);
-	__free_pages(send_els->req->page, 0);
+	__free_pages(sg_page(send_els->req), 0);
 	kfree(send_els->req);
 	kfree(send_els->resp);
 	kfree(send_els);
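
[Note] The zfcp_erp hunk above switches from kzalloc() to kmalloc() and adds sg_init_table(), since simply zeroing a scatterlist is no longer enough once entries carry end/chain markers. A minimal, hedged sketch of that single-entry pattern follows; the function name, buffer and length are placeholders.

	#include <linux/scatterlist.h>
	#include <linux/slab.h>

	/* Hedged sketch: allocate one scatterlist entry, initialise its
	 * markers with sg_init_table(), then point it at a buffer. */
	static struct scatterlist *example_single_sg(void *buf, unsigned int len)
	{
		struct scatterlist *sg;

		sg = kmalloc(sizeof(struct scatterlist), GFP_ATOMIC);
		if (!sg)
			return NULL;
		sg_init_table(sg, 1);		/* sets the end marker */
		sg_set_buf(sg, buf, len);	/* fills page, offset and length */
		return sg;
	}
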
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
index fb14014ee16e..afb262b4be15 100644
--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -1840,7 +1840,7 @@ static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id,
 	    (scsi_bufflen(srb) < TW_MIN_SGL_LENGTH)) {
 		if (srb->sc_data_direction == DMA_TO_DEVICE || srb->sc_data_direction == DMA_BIDIRECTIONAL) {
 			struct scatterlist *sg = scsi_sglist(srb);
-			char *buf = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
+			char *buf = kmap_atomic(sg_page(sg), KM_IRQ0) + sg->offset;
 			memcpy(tw_dev->generic_buffer_virt[request_id], buf, sg->length);
 			kunmap_atomic(buf - sg->offset, KM_IRQ0);
 		}
@@ -1919,7 +1919,7 @@ static void twa_scsiop_execute_scsi_complete(TW_Device_Extension *tw_dev, int re
 		char *buf;
 		unsigned long flags = 0;
 		local_irq_save(flags);
-		buf = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
+		buf = kmap_atomic(sg_page(sg), KM_IRQ0) + sg->offset;
 		memcpy(buf, tw_dev->generic_buffer_virt[request_id], sg->length);
 		kunmap_atomic(buf - sg->offset, KM_IRQ0);
 		local_irq_restore(flags);
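
[Note] A large share of the SCSI hunks in this series repeat one idiom: map the page behind a scatterlist entry with kmap_atomic(), add the offset, copy, and unmap. A hedged sketch of that idiom as it looks after the conversion (2.6.24-era two-argument kmap_atomic with an explicit KM_IRQ0 slot) is shown here; the function name and destination buffer are illustrative.

	#include <linux/highmem.h>
	#include <linux/scatterlist.h>
	#include <linux/string.h>

	/* Hedged sketch of the post-conversion copy-out idiom. */
	static void example_copy_from_sg(struct scatterlist *sg, void *dst)
	{
		unsigned long flags;
		char *buf;

		local_irq_save(flags);		/* protect the KM_IRQ0 slot */
		buf = kmap_atomic(sg_page(sg), KM_IRQ0) + sg->offset;
		memcpy(dst, buf, sg->length);
		kunmap_atomic(buf - sg->offset, KM_IRQ0);	/* unmap page base */
		local_irq_restore(flags);
	}
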
diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c
index a64153b96034..59716ebeb10c 100644
--- a/drivers/scsi/3w-xxxx.c
+++ b/drivers/scsi/3w-xxxx.c
@@ -1469,7 +1469,7 @@ static void tw_transfer_internal(TW_Device_Extension *tw_dev, int request_id,
 	struct scatterlist *sg = scsi_sglist(cmd);
 
 	local_irq_save(flags);
-	buf = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
+	buf = kmap_atomic(sg_page(sg), KM_IRQ0) + sg->offset;
 	transfer_len = min(sg->length, len);
 
 	memcpy(buf, data, transfer_len);
diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c
index 988f0bc5eda5..2597209183d0 100644
--- a/drivers/scsi/NCR5380.c
+++ b/drivers/scsi/NCR5380.c
@@ -298,8 +298,7 @@ static __inline__ void initialize_SCp(Scsi_Cmnd * cmd)
 	if (cmd->use_sg) {
 		cmd->SCp.buffer = (struct scatterlist *) cmd->request_buffer;
 		cmd->SCp.buffers_residual = cmd->use_sg - 1;
-		cmd->SCp.ptr = page_address(cmd->SCp.buffer->page)+
-			cmd->SCp.buffer->offset;
+		cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
 		cmd->SCp.this_residual = cmd->SCp.buffer->length;
 	} else {
 		cmd->SCp.buffer = NULL;
@@ -2143,8 +2142,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) {
 					++cmd->SCp.buffer;
 					--cmd->SCp.buffers_residual;
 					cmd->SCp.this_residual = cmd->SCp.buffer->length;
-					cmd->SCp.ptr = page_address(cmd->SCp.buffer->page)+
-						cmd->SCp.buffer->offset;
+					cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
 					dprintk(NDEBUG_INFORMATION, ("scsi%d : %d bytes and %d buffers left\n", instance->host_no, cmd->SCp.this_residual, cmd->SCp.buffers_residual));
 				}
 				/*
diff --git a/drivers/scsi/NCR53C9x.c b/drivers/scsi/NCR53C9x.c
index 96e8e29aa05d..5b0efc903918 100644
--- a/drivers/scsi/NCR53C9x.c
+++ b/drivers/scsi/NCR53C9x.c
@@ -927,7 +927,7 @@ static void esp_get_dmabufs(struct NCR_ESP *esp, Scsi_Cmnd *sp)
 			esp->dma_mmu_get_scsi_sgl(esp, sp);
 		else
 			sp->SCp.ptr =
-				(char *) virt_to_phys((page_address(sp->SCp.buffer->page) + sp->SCp.buffer->offset));
+				(char *) virt_to_phys(sg_virt(sp->SCp.buffer));
 	}
 }
 
@@ -1748,7 +1748,7 @@ static inline void advance_sg(struct NCR_ESP *esp, Scsi_Cmnd *sp)
 	if (esp->dma_advance_sg)
 		esp->dma_advance_sg (sp);
 	else
-		sp->SCp.ptr = (char *) virt_to_phys((page_address(sp->SCp.buffer->page) + sp->SCp.buffer->offset));
+		sp->SCp.ptr = (char *) virt_to_phys(sg_virt(sp->SCp.buffer));
 
 }
 
diff --git a/drivers/scsi/NCR53c406a.c b/drivers/scsi/NCR53c406a.c
index 3168a1794849..137d065db3da 100644
--- a/drivers/scsi/NCR53c406a.c
+++ b/drivers/scsi/NCR53c406a.c
@@ -875,8 +875,7 @@ static void NCR53c406a_intr(void *dev_id)
 			outb(TRANSFER_INFO | DMA_OP, CMD_REG);
 #if USE_PIO
 			scsi_for_each_sg(current_SC, sg, scsi_sg_count(current_SC), i) {
-				NCR53c406a_pio_write(page_address(sg->page) + sg->offset,
-						     sg->length);
+				NCR53c406a_pio_write(sg_virt(sg), sg->length);
 			}
 			REG0;
 #endif /* USE_PIO */
@@ -897,8 +896,7 @@ static void NCR53c406a_intr(void *dev_id)
 			outb(TRANSFER_INFO | DMA_OP, CMD_REG);
 #if USE_PIO
 			scsi_for_each_sg(current_SC, sg, scsi_sg_count(current_SC), i) {
-				NCR53c406a_pio_read(page_address(sg->page) + sg->offset,
-						    sg->length);
+				NCR53c406a_pio_read(sg_virt(sg), sg->length);
 			}
 			REG0;
 #endif /* USE_PIO */
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index 80e448d0f3db..a77ab8d693d4 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -356,7 +356,7 @@ static void aac_internal_transfer(struct scsi_cmnd *scsicmd, void *data, unsigne
 	int transfer_len;
 	struct scatterlist *sg = scsi_sglist(scsicmd);
 
-	buf = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
+	buf = kmap_atomic(sg_page(sg), KM_IRQ0) + sg->offset;
 	transfer_len = min(sg->length, len + offset);
 
 	transfer_len -= offset;
diff --git a/drivers/scsi/aha152x.c b/drivers/scsi/aha152x.c
index a58c265dc8af..ea8c69947644 100644
--- a/drivers/scsi/aha152x.c
+++ b/drivers/scsi/aha152x.c
@@ -613,7 +613,7 @@ struct aha152x_scdata {
 #define SCNEXT(SCpnt)		SCDATA(SCpnt)->next
 #define SCSEM(SCpnt)		SCDATA(SCpnt)->done
 
-#define SG_ADDRESS(buffer)	((char *) (page_address((buffer)->page)+(buffer)->offset))
+#define SG_ADDRESS(buffer)	((char *) sg_virt((buffer)))
 
 /* state handling */
 static void seldi_run(struct Scsi_Host *shpnt);
diff --git a/drivers/scsi/aha1542.c b/drivers/scsi/aha1542.c
index 961a1882cb7e..bbcc2c52d79f 100644
--- a/drivers/scsi/aha1542.c
+++ b/drivers/scsi/aha1542.c
@@ -49,7 +49,7 @@
 #include "aha1542.h"
 
 #define SCSI_BUF_PA(address)	isa_virt_to_bus(address)
-#define SCSI_SG_PA(sgent)	(isa_page_to_bus((sgent)->page) + (sgent)->offset)
+#define SCSI_SG_PA(sgent)	(isa_page_to_bus(sg_page((sgent))) + (sgent)->offset)
 
 static void BAD_DMA(void *address, unsigned int length)
 {
@@ -66,8 +66,7 @@ static void BAD_SG_DMA(Scsi_Cmnd * SCpnt,
 		       int badseg)
 {
 	printk(KERN_CRIT "sgpnt[%d:%d] page %p/0x%llx length %u\n",
-	       badseg, nseg,
-	       page_address(sgp->page) + sgp->offset,
+	       badseg, nseg, sg_virt(sgp),
 	       (unsigned long long)SCSI_SG_PA(sgp),
 	       sgp->length);
 
@@ -712,8 +711,7 @@ static int aha1542_queuecommand(Scsi_Cmnd * SCpnt, void (*done) (Scsi_Cmnd *))
 			printk(KERN_CRIT "Bad segment list supplied to aha1542.c (%d, %d)\n", SCpnt->use_sg, i);
 			scsi_for_each_sg(SCpnt, sg, SCpnt->use_sg, i) {
 				printk(KERN_CRIT "%d: %p %d\n", i,
-				       (page_address(sg->page) +
-				       sg->offset), sg->length);
+				       sg_virt(sg), sg->length);
 			};
 			printk(KERN_CRIT "cptr %x: ", (unsigned int) cptr);
 			ptr = (unsigned char *) &cptr[i];
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index f81777586b8f..f7a252885a5c 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -1343,7 +1343,7 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, \
 	/* 4 bytes: Areca io control code */
 
 	sg = scsi_sglist(cmd);
-	buffer = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
+	buffer = kmap_atomic(sg_page(sg), KM_IRQ0) + sg->offset;
 	if (scsi_sg_count(cmd) > 1) {
 		retvalue = ARCMSR_MESSAGE_FAIL;
 		goto message_out;
@@ -1593,7 +1593,7 @@ static void arcmsr_handle_virtual_command(struct AdapterControlBlock *acb,
 	strncpy(&inqdata[32], "R001", 4); /* Product Revision */
 
 	sg = scsi_sglist(cmd);
-	buffer = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
+	buffer = kmap_atomic(sg_page(sg), KM_IRQ0) + sg->offset;
 
 	memcpy(buffer, inqdata, sizeof(inqdata));
 	sg = scsi_sglist(cmd);
diff --git a/drivers/scsi/atari_NCR5380.c b/drivers/scsi/atari_NCR5380.c
index 52d0b87e9aa4..d1780980fb20 100644
--- a/drivers/scsi/atari_NCR5380.c
+++ b/drivers/scsi/atari_NCR5380.c
@@ -515,8 +515,7 @@ static inline void initialize_SCp(Scsi_Cmnd *cmd)
 	if (cmd->use_sg) {
 		cmd->SCp.buffer = (struct scatterlist *)cmd->request_buffer;
 		cmd->SCp.buffers_residual = cmd->use_sg - 1;
-		cmd->SCp.ptr = (char *)page_address(cmd->SCp.buffer->page) +
-			cmd->SCp.buffer->offset;
+		cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
 		cmd->SCp.this_residual = cmd->SCp.buffer->length;
 		/* ++roman: Try to merge some scatter-buffers if they are at
 		 * contiguous physical addresses.
@@ -2054,8 +2053,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
 					++cmd->SCp.buffer;
 					--cmd->SCp.buffers_residual;
 					cmd->SCp.this_residual = cmd->SCp.buffer->length;
-					cmd->SCp.ptr = page_address(cmd->SCp.buffer->page) +
-						cmd->SCp.buffer->offset;
+					cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
 					/* ++roman: Try to merge some scatter-buffers if
 					 * they are at contiguous physical addresses.
 					 */
diff --git a/drivers/scsi/eata_pio.c b/drivers/scsi/eata_pio.c
index 96180bb47e41..982c5092be11 100644
--- a/drivers/scsi/eata_pio.c
+++ b/drivers/scsi/eata_pio.c
@@ -172,7 +172,7 @@ static void IncStat(struct scsi_pointer *SCp, unsigned int Increment)
 			SCp->Status = 0;
 		else {
 			SCp->buffer++;
-			SCp->ptr = page_address(SCp->buffer->page) + SCp->buffer->offset;
+			SCp->ptr = sg_virt(SCp->buffer);
 			SCp->this_residual = SCp->buffer->length;
 		}
 	}
@@ -410,7 +410,7 @@ static int eata_pio_queue(struct scsi_cmnd *cmd,
 	} else {
 		cmd->SCp.buffer = cmd->request_buffer;
 		cmd->SCp.buffers_residual = cmd->use_sg;
-		cmd->SCp.ptr = page_address(cmd->SCp.buffer->page) + cmd->SCp.buffer->offset;
+		cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
 		cmd->SCp.this_residual = cmd->SCp.buffer->length;
 	}
 	cmd->SCp.Status = (cmd->SCp.this_residual != 0);	/* TRUE as long as bytes
diff --git a/drivers/scsi/fd_mcs.c b/drivers/scsi/fd_mcs.c
index 668569e8856b..8335b608e571 100644
--- a/drivers/scsi/fd_mcs.c
+++ b/drivers/scsi/fd_mcs.c
@@ -973,7 +973,7 @@ static irqreturn_t fd_mcs_intr(int irq, void *dev_id)
 				if (current_SC->SCp.buffers_residual) {
 					--current_SC->SCp.buffers_residual;
 					++current_SC->SCp.buffer;
-					current_SC->SCp.ptr = page_address(current_SC->SCp.buffer->page) + current_SC->SCp.buffer->offset;
+					current_SC->SCp.ptr = sg_virt(current_SC->SCp.buffer);
 					current_SC->SCp.this_residual = current_SC->SCp.buffer->length;
 				} else
 					break;
@@ -1006,7 +1006,7 @@ static irqreturn_t fd_mcs_intr(int irq, void *dev_id)
 			if (!current_SC->SCp.this_residual && current_SC->SCp.buffers_residual) {
 				--current_SC->SCp.buffers_residual;
 				++current_SC->SCp.buffer;
-				current_SC->SCp.ptr = page_address(current_SC->SCp.buffer->page) + current_SC->SCp.buffer->offset;
+				current_SC->SCp.ptr = sg_virt(current_SC->SCp.buffer);
 				current_SC->SCp.this_residual = current_SC->SCp.buffer->length;
 			}
 		}
@@ -1109,7 +1109,7 @@ static int fd_mcs_queue(Scsi_Cmnd * SCpnt, void (*done) (Scsi_Cmnd *))
 
 	if (current_SC->use_sg) {
 		current_SC->SCp.buffer = (struct scatterlist *) current_SC->request_buffer;
-		current_SC->SCp.ptr = page_address(current_SC->SCp.buffer->page) + current_SC->SCp.buffer->offset;
+		current_SC->SCp.ptr = sg_virt(current_SC->SCp.buffer);
 		current_SC->SCp.this_residual = current_SC->SCp.buffer->length;
 		current_SC->SCp.buffers_residual = current_SC->use_sg - 1;
 	} else {
diff --git a/drivers/scsi/fdomain.c b/drivers/scsi/fdomain.c
index 5d282e6a6ae1..2cd6b4959eb2 100644
--- a/drivers/scsi/fdomain.c
+++ b/drivers/scsi/fdomain.c
@@ -1321,7 +1321,7 @@ static irqreturn_t do_fdomain_16x0_intr(int irq, void *dev_id)
 			if (current_SC->SCp.buffers_residual) {
 				--current_SC->SCp.buffers_residual;
 				++current_SC->SCp.buffer;
-				current_SC->SCp.ptr = page_address(current_SC->SCp.buffer->page) + current_SC->SCp.buffer->offset;
+				current_SC->SCp.ptr = sg_virt(current_SC->SCp.buffer);
 				current_SC->SCp.this_residual = current_SC->SCp.buffer->length;
 			} else
 				break;
@@ -1354,7 +1354,7 @@ static irqreturn_t do_fdomain_16x0_intr(int irq, void *dev_id)
 		    && current_SC->SCp.buffers_residual) {
 			--current_SC->SCp.buffers_residual;
 			++current_SC->SCp.buffer;
			current_SC->SCp.ptr = sg_virt(current_SC->SCp.buffer);
 			current_SC->SCp.this_residual = current_SC->SCp.buffer->length;
 		}
 	}
@@ -1439,8 +1439,7 @@ static int fdomain_16x0_queue(struct scsi_cmnd *SCpnt,
 
 	if (scsi_sg_count(current_SC)) {
 		current_SC->SCp.buffer = scsi_sglist(current_SC);
-		current_SC->SCp.ptr = page_address(current_SC->SCp.buffer->page)
-			+ current_SC->SCp.buffer->offset;
+		current_SC->SCp.ptr = sg_virt(current_SC->SCp.buffer);
 		current_SC->SCp.this_residual = current_SC->SCp.buffer->length;
 		current_SC->SCp.buffers_residual = scsi_sg_count(current_SC) - 1;
 	} else {
diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c
index 3ac080ee6e2f..5ab3ce762485 100644
--- a/drivers/scsi/gdth.c
+++ b/drivers/scsi/gdth.c
@@ -2374,18 +2374,18 @@ static void gdth_copy_internal_data(gdth_ha_str *ha, Scsi_Cmnd *scp,
 			if (cpsum+cpnow > cpcount)
 				cpnow = cpcount - cpsum;
 			cpsum += cpnow;
-			if (!sl->page) {
+			if (!sg_page(sl)) {
 				printk("GDT-HA %d: invalid sc/gt element in gdth_copy_internal_data()\n",
 				       ha->hanum);
 				return;
 			}
 			local_irq_save(flags);
-			address = kmap_atomic(sl->page, KM_BIO_SRC_IRQ) + sl->offset;
+			address = kmap_atomic(sg_page(sl), KM_BIO_SRC_IRQ) + sl->offset;
 			if (to_buffer)
 				memcpy(buffer, address, cpnow);
 			else
 				memcpy(address, buffer, cpnow);
-			flush_dcache_page(sl->page);
+			flush_dcache_page(sg_page(sl));
 			kunmap_atomic(address, KM_BIO_SRC_IRQ);
 			local_irq_restore(flags);
 			if (cpsum == cpcount)
diff --git a/drivers/scsi/ibmmca.c b/drivers/scsi/ibmmca.c
index 714e6273a70d..db004a450732 100644
--- a/drivers/scsi/ibmmca.c
+++ b/drivers/scsi/ibmmca.c
@@ -1828,7 +1828,7 @@ static int ibmmca_queuecommand(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *))
 		BUG_ON(scsi_sg_count(cmd) > 16);
 
 		scsi_for_each_sg(cmd, sg, scsi_sg_count(cmd), i) {
-			ld(shpnt)[ldn].sge[i].address = (void *) (isa_page_to_bus(sg->page) + sg->offset);
+			ld(shpnt)[ldn].sge[i].address = (void *) (isa_page_to_bus(sg_page(sg)) + sg->offset);
 			ld(shpnt)[ldn].sge[i].byte_length = sg->length;
 		}
 		scb->enable |= IM_POINTER_TO_LIST;
diff --git a/drivers/scsi/ide-scsi.c b/drivers/scsi/ide-scsi.c
index 252d1806467f..8d0244c2e7d4 100644
--- a/drivers/scsi/ide-scsi.c
+++ b/drivers/scsi/ide-scsi.c
@@ -175,18 +175,18 @@ static void idescsi_input_buffers (ide_drive_t *drive, idescsi_pc_t *pc, unsigne
 
 	while (bcount) {
 		count = min(pc->sg->length - pc->b_count, bcount);
-		if (PageHighMem(pc->sg->page)) {
+		if (PageHighMem(sg_page(pc->sg))) {
 			unsigned long flags;
 
 			local_irq_save(flags);
-			buf = kmap_atomic(pc->sg->page, KM_IRQ0) +
+			buf = kmap_atomic(sg_page(pc->sg), KM_IRQ0) +
 				pc->sg->offset;
 			drive->hwif->atapi_input_bytes(drive,
 					buf + pc->b_count, count);
 			kunmap_atomic(buf - pc->sg->offset, KM_IRQ0);
 			local_irq_restore(flags);
 		} else {
-			buf = page_address(pc->sg->page) + pc->sg->offset;
+			buf = sg_virt(pc->sg);
 			drive->hwif->atapi_input_bytes(drive,
 					buf + pc->b_count, count);
 		}
@@ -212,18 +212,18 @@ static void idescsi_output_buffers (ide_drive_t *drive, idescsi_pc_t *pc, unsign
 
 	while (bcount) {
 		count = min(pc->sg->length - pc->b_count, bcount);
-		if (PageHighMem(pc->sg->page)) {
+		if (PageHighMem(sg_page(pc->sg))) {
 			unsigned long flags;
 
 			local_irq_save(flags);
-			buf = kmap_atomic(pc->sg->page, KM_IRQ0) +
+			buf = kmap_atomic(sg_page(pc->sg), KM_IRQ0) +
 				pc->sg->offset;
 			drive->hwif->atapi_output_bytes(drive,
 					buf + pc->b_count, count);
 			kunmap_atomic(buf - pc->sg->offset, KM_IRQ0);
 			local_irq_restore(flags);
 		} else {
-			buf = page_address(pc->sg->page) + pc->sg->offset;
+			buf = sg_virt(pc->sg);
 			drive->hwif->atapi_output_bytes(drive,
 					buf + pc->b_count, count);
 		}
diff --git a/drivers/scsi/imm.c b/drivers/scsi/imm.c
index 74cdc1f0a78f..a3d0c6b14958 100644
--- a/drivers/scsi/imm.c
+++ b/drivers/scsi/imm.c
@@ -705,9 +705,7 @@ static int imm_completion(struct scsi_cmnd *cmd)
 			cmd->SCp.buffer++;
 			cmd->SCp.this_residual =
 			    cmd->SCp.buffer->length;
-			cmd->SCp.ptr =
-			    page_address(cmd->SCp.buffer->page) +
-			    cmd->SCp.buffer->offset;
+			cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
 
 			/*
 			 * Make sure that we transfer even number of bytes
@@ -844,9 +842,7 @@ static int imm_engine(imm_struct *dev, struct scsi_cmnd *cmd)
 			cmd->SCp.buffer =
 			    (struct scatterlist *) cmd->request_buffer;
 			cmd->SCp.this_residual = cmd->SCp.buffer->length;
-			cmd->SCp.ptr =
-			    page_address(cmd->SCp.buffer->page) +
-			    cmd->SCp.buffer->offset;
+			cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
 		} else {
 			/* else fill the only available buffer */
 			cmd->SCp.buffer = NULL;
diff --git a/drivers/scsi/in2000.c b/drivers/scsi/in2000.c
index ab7cbf3449ce..c8b452f2878c 100644
--- a/drivers/scsi/in2000.c
+++ b/drivers/scsi/in2000.c
@@ -372,7 +372,7 @@ static int in2000_queuecommand(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *))
 	if (cmd->use_sg) {
 		cmd->SCp.buffer = (struct scatterlist *) cmd->request_buffer;
 		cmd->SCp.buffers_residual = cmd->use_sg - 1;
-		cmd->SCp.ptr = (char *) page_address(cmd->SCp.buffer->page) + cmd->SCp.buffer->offset;
+		cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
 		cmd->SCp.this_residual = cmd->SCp.buffer->length;
 	} else {
 		cmd->SCp.buffer = NULL;
@@ -764,7 +764,7 @@ static void transfer_bytes(Scsi_Cmnd * cmd, int data_in_dir)
 		++cmd->SCp.buffer;
 		--cmd->SCp.buffers_residual;
 		cmd->SCp.this_residual = cmd->SCp.buffer->length;
-		cmd->SCp.ptr = page_address(cmd->SCp.buffer->page) + cmd->SCp.buffer->offset;
+		cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
 	}
 
/* Set up hardware registers */
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index c316a0bcae6c..439b97a6a269 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -2872,6 +2872,7 @@ static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
 	}
 
 	scatterlist = sglist->scatterlist;
+	sg_init_table(scatterlist, num_elem);
 
 	sglist->order = order;
 	sglist->num_sg = num_elem;
@@ -2884,12 +2885,12 @@ static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
 
 			/* Free up what we already allocated */
 			for (j = i - 1; j >= 0; j--)
-				__free_pages(scatterlist[j].page, order);
+				__free_pages(sg_page(&scatterlist[j]), order);
 			kfree(sglist);
 			return NULL;
 		}
 
-		scatterlist[i].page = page;
+		sg_set_page(&scatterlist[i], page);
 	}
 
 	return sglist;
@@ -2910,7 +2911,7 @@ static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
 	int i;
 
 	for (i = 0; i < sglist->num_sg; i++)
-		__free_pages(sglist->scatterlist[i].page, sglist->order);
+		__free_pages(sg_page(&sglist->scatterlist[i]), sglist->order);
 
 	kfree(sglist);
 }
@@ -2940,9 +2941,11 @@ static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
 	scatterlist = sglist->scatterlist;
 
 	for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
-		kaddr = kmap(scatterlist[i].page);
+		struct page *page = sg_page(&scatterlist[i]);
+
+		kaddr = kmap(page);
 		memcpy(kaddr, buffer, bsize_elem);
-		kunmap(scatterlist[i].page);
+		kunmap(page);
 
 		scatterlist[i].length = bsize_elem;
 
@@ -2953,9 +2956,11 @@ static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
 	}
 
 	if (len % bsize_elem) {
-		kaddr = kmap(scatterlist[i].page);
+		struct page *page = sg_page(&scatterlist[i]);
+
+		kaddr = kmap(page);
 		memcpy(kaddr, buffer, len % bsize_elem);
-		kunmap(scatterlist[i].page);
+		kunmap(page);
 
 		scatterlist[i].length = len % bsize_elem;
 	}
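
[Note] The ipr hunk above shows the building side of the conversion: the table is first initialised with sg_init_table() and pages are then attached with sg_set_page() instead of writing the page pointer directly. A hedged sketch of that allocation loop follows; it deliberately uses the transitional two-argument sg_set_page() seen in these hunks (length is still assigned by hand), and the function name, table size and error handling are placeholders.

	#include <linux/gfp.h>
	#include <linux/scatterlist.h>

	/* Hedged sketch: build an nents-entry scatterlist of freshly
	 * allocated pages.  Unwinding of partially built tables is trimmed
	 * for brevity. */
	static int example_fill_sg(struct scatterlist *sgl, int nents, int order)
	{
		int i;

		sg_init_table(sgl, nents);	/* clear entries, set end marker */
		for (i = 0; i < nents; i++) {
			struct page *page = alloc_pages(GFP_KERNEL, order);

			if (!page)
				return -ENOMEM;
			sg_set_page(&sgl[i], page);	/* attach the page */
			sgl[i].length = PAGE_SIZE << order;
		}
		return 0;
	}
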
diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c
index edaac2714c5a..5c5a9b2628fc 100644
--- a/drivers/scsi/ips.c
+++ b/drivers/scsi/ips.c
@@ -1515,7 +1515,7 @@ static int ips_is_passthru(struct scsi_cmnd *SC)
 		/* kmap_atomic() ensures addressability of the user buffer.*/
 		/* local_irq_save() protects the KM_IRQ0 address slot. */
 		local_irq_save(flags);
-		buffer = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
+		buffer = kmap_atomic(sg_page(sg), KM_IRQ0) + sg->offset;
 		if (buffer && buffer[0] == 'C' && buffer[1] == 'O' &&
 		    buffer[2] == 'P' && buffer[3] == 'P') {
 			kunmap_atomic(buffer - sg->offset, KM_IRQ0);
@@ -3523,7 +3523,7 @@ ips_scmd_buf_write(struct scsi_cmnd *scmd, void *data, unsigned int count)
 		/* kmap_atomic() ensures addressability of the data buffer.*/
 		/* local_irq_save() protects the KM_IRQ0 address slot. */
 		local_irq_save(flags);
-		buffer = kmap_atomic(sg[i].page, KM_IRQ0) + sg[i].offset;
+		buffer = kmap_atomic(sg_page(&sg[i]), KM_IRQ0) + sg[i].offset;
 		memcpy(buffer, &cdata[xfer_cnt], min_cnt);
 		kunmap_atomic(buffer - sg[i].offset, KM_IRQ0);
 		local_irq_restore(flags);
@@ -3556,7 +3556,7 @@ ips_scmd_buf_read(struct scsi_cmnd *scmd, void *data, unsigned int count)
 		/* kmap_atomic() ensures addressability of the data buffer.*/
 		/* local_irq_save() protects the KM_IRQ0 address slot. */
 		local_irq_save(flags);
-		buffer = kmap_atomic(sg[i].page, KM_IRQ0) + sg[i].offset;
+		buffer = kmap_atomic(sg_page(&sg[i]), KM_IRQ0) + sg[i].offset;
 		memcpy(&cdata[xfer_cnt], buffer, min_cnt);
 		kunmap_atomic(buffer - sg[i].offset, KM_IRQ0);
 		local_irq_restore(flags);
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index a21455d0274c..6ce4109efdf3 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -70,9 +70,7 @@ module_param_named(max_lun, iscsi_max_lun, uint, S_IRUGO);
 static inline void
 iscsi_buf_init_iov(struct iscsi_buf *ibuf, char *vbuf, int size)
 {
-	ibuf->sg.page = virt_to_page(vbuf);
-	ibuf->sg.offset = offset_in_page(vbuf);
-	ibuf->sg.length = size;
+	sg_init_one(&ibuf->sg, vbuf, size);
 	ibuf->sent = 0;
 	ibuf->use_sendmsg = 1;
 }
@@ -80,13 +78,14 @@ iscsi_buf_init_iov(struct iscsi_buf *ibuf, char *vbuf, int size)
 static inline void
 iscsi_buf_init_sg(struct iscsi_buf *ibuf, struct scatterlist *sg)
 {
-	ibuf->sg.page = sg->page;
+	sg_init_table(&ibuf->sg, 1);
+	sg_set_page(&ibuf->sg, sg_page(sg));
 	ibuf->sg.offset = sg->offset;
 	ibuf->sg.length = sg->length;
 	/*
 	 * Fastpath: sg element fits into single page
 	 */
-	if (sg->length + sg->offset <= PAGE_SIZE && !PageSlab(sg->page))
+	if (sg->length + sg->offset <= PAGE_SIZE && !PageSlab(sg_page(sg)))
 		ibuf->use_sendmsg = 0;
 	else
 		ibuf->use_sendmsg = 1;
@@ -716,7 +715,7 @@ static int iscsi_scsi_data_in(struct iscsi_conn *conn)
 	for (i = tcp_ctask->sg_count; i < scsi_sg_count(sc); i++) {
 		char *dest;
 
-		dest = kmap_atomic(sg[i].page, KM_SOFTIRQ0);
+		dest = kmap_atomic(sg_page(&sg[i]), KM_SOFTIRQ0);
 		rc = iscsi_ctask_copy(tcp_conn, ctask, dest + sg[i].offset,
 				      sg[i].length, offset);
 		kunmap_atomic(dest, KM_SOFTIRQ0);
@@ -1103,9 +1102,9 @@ iscsi_send(struct iscsi_conn *conn, struct iscsi_buf *buf, int size, int flags)
 	 * slab case.
 	 */
 	if (buf->use_sendmsg)
-		res = sock_no_sendpage(sk, buf->sg.page, offset, size, flags);
+		res = sock_no_sendpage(sk, sg_page(&buf->sg), offset, size, flags);
 	else
-		res = tcp_conn->sendpage(sk, buf->sg.page, offset, size, flags);
+		res = tcp_conn->sendpage(sk, sg_page(&buf->sg), offset, size, flags);
 
 	if (res >= 0) {
 		conn->txdata_octets += res;
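
[Note] The iscsi_tcp hunk above is a compact example of the two initialisation helpers this series leans on: sg_init_one() for a single virtually-contiguous buffer, and sg_init_table() plus sg_set_page() when the backing page is already known. A hedged sketch of the first helper follows; the header buffer is a placeholder.

	#include <linux/scatterlist.h>

	/* Hedged sketch: sg_init_one() replaces the three hand-written
	 * assignments removed above (page, offset, length) and also sets
	 * the end marker via sg_init_table(). */
	struct scatterlist sg;
	char hdr[64];		/* illustrative buffer */

	sg_init_one(&sg, hdr, sizeof(hdr));
	/* pre-conversion, open-coded equivalent:
	 *	sg.page   = virt_to_page(hdr);
	 *	sg.offset = offset_in_page(hdr);
	 *	sg.length = sizeof(hdr);
	 */
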
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
index 10d1aff9938a..66c652035730 100644
--- a/drivers/scsi/megaraid.c
+++ b/drivers/scsi/megaraid.c
@@ -658,7 +658,7 @@ mega_build_cmd(adapter_t *adapter, Scsi_Cmnd *cmd, int *busy)
 			struct scatterlist *sg;
 
 			sg = scsi_sglist(cmd);
-			buf = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
+			buf = kmap_atomic(sg_page(sg), KM_IRQ0) + sg->offset;
 
 			memset(buf, 0, cmd->cmnd[4]);
 			kunmap_atomic(buf - sg->offset, KM_IRQ0);
@@ -1542,10 +1542,8 @@ mega_cmd_done(adapter_t *adapter, u8 completed[], int nstatus, int status)
 			if( cmd->cmnd[0] == INQUIRY && !islogical ) {
 
 				sgl = scsi_sglist(cmd);
-				if( sgl->page ) {
-					c = *(unsigned char *)
-						page_address((&sgl[0])->page) +
-						(&sgl[0])->offset;
+				if( sg_page(sgl) ) {
+					c = *(unsigned char *) sg_virt(&sgl[0]);
 				} else {
 					printk(KERN_WARNING
 						"megaraid: invalid sg.\n");
diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c
index 78779209ac89..c8923108183a 100644
--- a/drivers/scsi/megaraid/megaraid_mbox.c
+++ b/drivers/scsi/megaraid/megaraid_mbox.c
@@ -1584,10 +1584,8 @@ megaraid_mbox_build_cmd(adapter_t *adapter, struct scsi_cmnd *scp, int *busy)
 			caddr_t vaddr;
 
 			sgl = scsi_sglist(scp);
-			if (sgl->page) {
-				vaddr = (caddr_t)
-					(page_address((&sgl[0])->page)
-					+ (&sgl[0])->offset);
+			if (sg_page(sgl)) {
+				vaddr = (caddr_t) sg_virt(&sgl[0]);
 
 				memset(vaddr, 0, scp->cmnd[4]);
 			}
@@ -2328,10 +2326,8 @@ megaraid_mbox_dpc(unsigned long devp)
 			&& IS_RAID_CH(raid_dev, scb->dev_channel)) {
 
 			sgl = scsi_sglist(scp);
-			if (sgl->page) {
-				c = *(unsigned char *)
-					(page_address((&sgl[0])->page) +
-					(&sgl[0])->offset);
+			if (sg_page(sgl)) {
+				c = *(unsigned char *) sg_virt(&sgl[0]);
 			} else {
 				con_log(CL_ANN, (KERN_WARNING
 					"megaraid mailbox: invalid sg:%d\n",
diff --git a/drivers/scsi/oktagon_esp.c b/drivers/scsi/oktagon_esp.c
index 26a6d55faf3e..8e5eadbd5c51 100644
--- a/drivers/scsi/oktagon_esp.c
+++ b/drivers/scsi/oktagon_esp.c
@@ -550,8 +550,7 @@ void dma_mmu_get_scsi_one(struct NCR_ESP *esp, Scsi_Cmnd *sp)
 
 void dma_mmu_get_scsi_sgl(struct NCR_ESP *esp, Scsi_Cmnd *sp)
 {
-	sp->SCp.ptr = page_address(sp->SCp.buffer->page)+
-		sp->SCp.buffer->offset;
+	sp->SCp.ptr = sg_virt(sp->SCp.buffer);
 }
 
 void dma_mmu_release_scsi_one(struct NCR_ESP *esp, Scsi_Cmnd *sp)
@@ -564,8 +563,7 @@ void dma_mmu_release_scsi_sgl(struct NCR_ESP *esp, Scsi_Cmnd *sp)
 
 void dma_advance_sg(Scsi_Cmnd *sp)
 {
-	sp->SCp.ptr = page_address(sp->SCp.buffer->page)+
-		sp->SCp.buffer->offset;
+	sp->SCp.ptr = sg_virt(sp->SCp.buffer);
 }
 
 
diff --git a/drivers/scsi/osst.c b/drivers/scsi/osst.c
index 331b789937c4..1c5c4b68f20f 100644
--- a/drivers/scsi/osst.c
+++ b/drivers/scsi/osst.c
@@ -542,7 +542,7 @@ static int osst_verify_frame(struct osst_tape * STp, int frame_seq_number, int q
 	if (STp->raw) {
 		if (STp->buffer->syscall_result) {
 			for (i=0; i < STp->buffer->sg_segs; i++)
-				memset(page_address(STp->buffer->sg[i].page),
+				memset(page_address(sg_page(&STp->buffer->sg[i])),
 				       0, STp->buffer->sg[i].length);
 			strcpy(STp->buffer->b_data, "READ ERROR ON FRAME");
 		} else
@@ -4437,7 +4437,7 @@ static int os_scsi_tape_open(struct inode * inode, struct file * filp)
 	for (i = 0, b_size = 0;
 	     (i < STp->buffer->sg_segs) && ((b_size + STp->buffer->sg[i].length) <= OS_DATA_SIZE);
 	     b_size += STp->buffer->sg[i++].length);
-	STp->buffer->aux = (os_aux_t *) (page_address(STp->buffer->sg[i].page) + OS_DATA_SIZE - b_size);
+	STp->buffer->aux = (os_aux_t *) (page_address(sg_page(&STp->buffer->sg[i])) + OS_DATA_SIZE - b_size);
 #if DEBUG
 	printk(OSST_DEB_MSG "%s:D: b_data points to %p in segment 0 at %p\n", name,
 		STp->buffer->b_data, page_address(STp->buffer->sg[0].page));
@@ -5252,25 +5252,26 @@ static int enlarge_buffer(struct osst_buffer *STbuffer, int need_dma)
 	/* Try to allocate the first segment up to OS_DATA_SIZE and the others
 	   big enough to reach the goal (code assumes no segments in place) */
 	for (b_size = OS_DATA_SIZE, order = OSST_FIRST_ORDER; b_size >= PAGE_SIZE; order--, b_size /= 2) {
-		STbuffer->sg[0].page = alloc_pages(priority, order);
+		struct page *page = alloc_pages(priority, order);
+
 		STbuffer->sg[0].offset = 0;
-		if (STbuffer->sg[0].page != NULL) {
+		if (page != NULL) {
+			sg_set_page(&STbuffer->sg[0], page);
 			STbuffer->sg[0].length = b_size;
-			STbuffer->b_data = page_address(STbuffer->sg[0].page);
+			STbuffer->b_data = page_address(page);
 			break;
 		}
 	}
-	if (STbuffer->sg[0].page == NULL) {
+	if (sg_page(&STbuffer->sg[0]) == NULL) {
 		printk(KERN_NOTICE "osst :I: Can't allocate tape buffer main segment.\n");
 		return 0;
 	}
 	/* Got initial segment of 'bsize,order', continue with same size if possible, except for AUX */
 	for (segs=STbuffer->sg_segs=1, got=b_size;
 	     segs < max_segs && got < OS_FRAME_SIZE; ) {
-		STbuffer->sg[segs].page =
-			alloc_pages(priority, (OS_FRAME_SIZE - got <= PAGE_SIZE) ? 0 : order);
+		struct page *page = alloc_pages(priority, (OS_FRAME_SIZE - got <= PAGE_SIZE) ? 0 : order);
 		STbuffer->sg[segs].offset = 0;
-		if (STbuffer->sg[segs].page == NULL) {
+		if (page == NULL) {
 			if (OS_FRAME_SIZE - got <= (max_segs - segs) * b_size / 2 && order) {
 				b_size /= 2; /* Large enough for the rest of the buffers */
 				order--;
@@ -5284,6 +5285,7 @@ static int enlarge_buffer(struct osst_buffer *STbuffer, int need_dma)
 			normalize_buffer(STbuffer);
 			return 0;
 		}
+		sg_set_page(&STbuffer->sg[segs], page);
 		STbuffer->sg[segs].length = (OS_FRAME_SIZE - got <= PAGE_SIZE / 2) ? (OS_FRAME_SIZE - got) : b_size;
 		got += STbuffer->sg[segs].length;
 		STbuffer->buffer_size = got;
@@ -5316,7 +5318,7 @@ static void normalize_buffer(struct osst_buffer *STbuffer)
 		     b_size < STbuffer->sg[i].length;
 		     b_size *= 2, order++);
 
-		__free_pages(STbuffer->sg[i].page, order);
+		__free_pages(sg_page(&STbuffer->sg[i]), order);
 		STbuffer->buffer_size -= STbuffer->sg[i].length;
 	}
 #if DEBUG
@@ -5344,7 +5346,7 @@ static int append_to_buffer(const char __user *ubp, struct osst_buffer *st_bp, i
 	for ( ; i < st_bp->sg_segs && do_count > 0; i++) {
 		cnt = st_bp->sg[i].length - offset < do_count ?
 		      st_bp->sg[i].length - offset : do_count;
-		res = copy_from_user(page_address(st_bp->sg[i].page) + offset, ubp, cnt);
+		res = copy_from_user(page_address(sg_page(&st_bp->sg[i])) + offset, ubp, cnt);
 		if (res)
 			return (-EFAULT);
 		do_count -= cnt;
@@ -5377,7 +5379,7 @@ static int from_buffer(struct osst_buffer *st_bp, char __user *ubp, int do_count
 	for ( ; i < st_bp->sg_segs && do_count > 0; i++) {
 		cnt = st_bp->sg[i].length - offset < do_count ?
 		      st_bp->sg[i].length - offset : do_count;
-		res = copy_to_user(ubp, page_address(st_bp->sg[i].page) + offset, cnt);
+		res = copy_to_user(ubp, page_address(sg_page(&st_bp->sg[i])) + offset, cnt);
 		if (res)
 			return (-EFAULT);
 		do_count -= cnt;
@@ -5410,7 +5412,7 @@ static int osst_zero_buffer_tail(struct osst_buffer *st_bp)
 	     i < st_bp->sg_segs && do_count > 0; i++) {
 		cnt = st_bp->sg[i].length - offset < do_count ?
 		      st_bp->sg[i].length - offset : do_count ;
-		memset(page_address(st_bp->sg[i].page) + offset, 0, cnt);
+		memset(page_address(sg_page(&st_bp->sg[i])) + offset, 0, cnt);
 		do_count -= cnt;
 		offset = 0;
 	}
@@ -5430,7 +5432,7 @@ static int osst_copy_to_buffer(struct osst_buffer *st_bp, unsigned char *ptr)
 	for (i = 0; i < st_bp->sg_segs && do_count > 0; i++) {
 		cnt = st_bp->sg[i].length < do_count ?
 		      st_bp->sg[i].length : do_count ;
-		memcpy(page_address(st_bp->sg[i].page), ptr, cnt);
+		memcpy(page_address(sg_page(&st_bp->sg[i])), ptr, cnt);
 		do_count -= cnt;
 		ptr += cnt;
 	}
@@ -5451,7 +5453,7 @@ static int osst_copy_from_buffer(struct osst_buffer *st_bp, unsigned char *ptr)
 	for (i = 0; i < st_bp->sg_segs && do_count > 0; i++) {
 		cnt = st_bp->sg[i].length < do_count ?
 		      st_bp->sg[i].length : do_count ;
-		memcpy(ptr, page_address(st_bp->sg[i].page), cnt);
+		memcpy(ptr, page_address(sg_page(&st_bp->sg[i])), cnt);
 		do_count -= cnt;
 		ptr += cnt;
 	}
diff --git a/drivers/scsi/pcmcia/nsp_cs.h b/drivers/scsi/pcmcia/nsp_cs.h
index 98397559c53b..7db28cd49446 100644
--- a/drivers/scsi/pcmcia/nsp_cs.h
+++ b/drivers/scsi/pcmcia/nsp_cs.h
@@ -393,7 +393,7 @@ enum _burst_mode {
 #define MSG_EXT_SDTR 0x01
 
 /* scatter-gather table */
-# define BUFFER_ADDR ((char *)((unsigned int)(SCpnt->SCp.buffer->page) + SCpnt->SCp.buffer->offset))
+# define BUFFER_ADDR ((char *)((sg_virt(SCpnt->SCp.buffer))))
 
 #endif /*__nsp_cs__*/
 /* end */
diff --git a/drivers/scsi/pcmcia/sym53c500_cs.c b/drivers/scsi/pcmcia/sym53c500_cs.c
index 190e2a7d7067..969b9387a0c3 100644
--- a/drivers/scsi/pcmcia/sym53c500_cs.c
+++ b/drivers/scsi/pcmcia/sym53c500_cs.c
@@ -443,8 +443,7 @@ SYM53C500_intr(int irq, void *dev_id)
 
 			scsi_for_each_sg(curSC, sg, scsi_sg_count(curSC), i) {
 				SYM53C500_pio_write(fast_pio, port_base,
-				    page_address(sg->page) + sg->offset,
-				    sg->length);
+				    sg_virt(sg), sg->length);
 			}
 			REG0(port_base);
 		}
@@ -463,8 +462,7 @@ SYM53C500_intr(int irq, void *dev_id)
 
 			scsi_for_each_sg(curSC, sg, scsi_sg_count(curSC), i) {
 				SYM53C500_pio_read(fast_pio, port_base,
-				    page_address(sg->page) + sg->offset,
-				    sg->length);
+				    sg_virt(sg), sg->length);
 			}
 			REG0(port_base);
 		}
diff --git a/drivers/scsi/ppa.c b/drivers/scsi/ppa.c
index 67b6d76a6c8d..67ee51a3d7e1 100644
--- a/drivers/scsi/ppa.c
+++ b/drivers/scsi/ppa.c
@@ -608,9 +608,7 @@ static int ppa_completion(struct scsi_cmnd *cmd)
 			cmd->SCp.buffer++;
 			cmd->SCp.this_residual =
 			    cmd->SCp.buffer->length;
-			cmd->SCp.ptr =
-			    page_address(cmd->SCp.buffer->page) +
-			    cmd->SCp.buffer->offset;
+			cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
 		}
 	}
 	/* Now check to see if the drive is ready to comunicate */
@@ -756,8 +754,7 @@ static int ppa_engine(ppa_struct *dev, struct scsi_cmnd *cmd)
 			/* if many buffers are available, start filling the first */
 			cmd->SCp.buffer = (struct scatterlist *) cmd->request_buffer;
 			cmd->SCp.this_residual = cmd->SCp.buffer->length;
-			cmd->SCp.ptr = page_address(cmd->SCp.buffer->page) +
-			    cmd->SCp.buffer->offset;
+			cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
 		} else {
 			/* else fill the only available buffer */
 			cmd->SCp.buffer = NULL;
diff --git a/drivers/scsi/ps3rom.c b/drivers/scsi/ps3rom.c
index 0f43d1d046d9..17b4a7c4618c 100644
--- a/drivers/scsi/ps3rom.c
+++ b/drivers/scsi/ps3rom.c
@@ -111,14 +111,14 @@ static int fill_from_dev_buffer(struct scsi_cmnd *cmd, const void *buf)
 	req_len = act_len = 0;
 	scsi_for_each_sg(cmd, sgpnt, scsi_sg_count(cmd), k) {
 		if (active) {
-			kaddr = kmap_atomic(sgpnt->page, KM_IRQ0);
+			kaddr = kmap_atomic(sg_page(sgpnt), KM_IRQ0);
 			len = sgpnt->length;
 			if ((req_len + len) > buflen) {
 				active = 0;
 				len = buflen - req_len;
 			}
 			memcpy(kaddr + sgpnt->offset, buf + req_len, len);
-			flush_kernel_dcache_page(sgpnt->page);
+			flush_kernel_dcache_page(sg_page(sgpnt));
 			kunmap_atomic(kaddr, KM_IRQ0);
 			act_len += len;
 		}
@@ -147,7 +147,7 @@ static int fetch_to_dev_buffer(struct scsi_cmnd *cmd, void *buf)
 
 	req_len = fin = 0;
 	scsi_for_each_sg(cmd, sgpnt, scsi_sg_count(cmd), k) {
-		kaddr = kmap_atomic(sgpnt->page, KM_IRQ0);
+		kaddr = kmap_atomic(sg_page(sgpnt), KM_IRQ0);
 		len = sgpnt->length;
 		if ((req_len + len) > buflen) {
 			len = buflen - req_len;
diff --git a/drivers/scsi/qlogicfas408.c b/drivers/scsi/qlogicfas408.c
index 2bfbf26c00ed..de7b3bc2cbc9 100644
--- a/drivers/scsi/qlogicfas408.c
+++ b/drivers/scsi/qlogicfas408.c
@@ -317,7 +317,7 @@ static unsigned int ql_pcmd(struct scsi_cmnd *cmd)
 				return ((priv->qabort == 1 ?
 					 DID_ABORT : DID_RESET) << 16);
 			}
-			buf = page_address(sg->page) + sg->offset;
+			buf = sg_virt(sg);
 			if (ql_pdma(priv, phase, buf, sg->length))
 				break;
 		}
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 72ee4c9cfb1a..46cae5a212de 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -625,7 +625,7 @@ static int fill_from_dev_buffer(struct scsi_cmnd * scp, unsigned char * arr,
625 scsi_for_each_sg(scp, sg, scp->use_sg, k) { 625 scsi_for_each_sg(scp, sg, scp->use_sg, k) {
626 if (active) { 626 if (active) {
627 kaddr = (unsigned char *) 627 kaddr = (unsigned char *)
628 kmap_atomic(sg->page, KM_USER0); 628 kmap_atomic(sg_page(sg), KM_USER0);
629 if (NULL == kaddr) 629 if (NULL == kaddr)
630 return (DID_ERROR << 16); 630 return (DID_ERROR << 16);
631 kaddr_off = (unsigned char *)kaddr + sg->offset; 631 kaddr_off = (unsigned char *)kaddr + sg->offset;
@@ -672,7 +672,7 @@ static int fetch_to_dev_buffer(struct scsi_cmnd * scp, unsigned char * arr,
672 sg = scsi_sglist(scp); 672 sg = scsi_sglist(scp);
673 req_len = fin = 0; 673 req_len = fin = 0;
674 for (k = 0; k < scp->use_sg; ++k, sg = sg_next(sg)) { 674 for (k = 0; k < scp->use_sg; ++k, sg = sg_next(sg)) {
675 kaddr = (unsigned char *)kmap_atomic(sg->page, KM_USER0); 675 kaddr = (unsigned char *)kmap_atomic(sg_page(sg), KM_USER0);
676 if (NULL == kaddr) 676 if (NULL == kaddr)
677 return -1; 677 return -1;
678 kaddr_off = (unsigned char *)kaddr + sg->offset; 678 kaddr_off = (unsigned char *)kaddr + sg->offset;
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index aac8a02cbe80..61fdaf02f251 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -295,7 +295,7 @@ static int scsi_req_map_sg(struct request *rq, struct scatterlist *sgl,
295 int i, err, nr_vecs = 0; 295 int i, err, nr_vecs = 0;
296 296
297 for_each_sg(sgl, sg, nsegs, i) { 297 for_each_sg(sgl, sg, nsegs, i) {
298 page = sg->page; 298 page = sg_page(sg);
299 off = sg->offset; 299 off = sg->offset;
300 len = sg->length; 300 len = sg->length;
301 data_len += len; 301 data_len += len;
@@ -764,7 +764,7 @@ struct scatterlist *scsi_alloc_sgtable(struct scsi_cmnd *cmd, gfp_t gfp_mask)
764 if (unlikely(!sgl)) 764 if (unlikely(!sgl))
765 goto enomem; 765 goto enomem;
766 766
767 memset(sgl, 0, sizeof(*sgl) * sgp->size); 767 sg_init_table(sgl, sgp->size);
768 768
769 /* 769 /*
770 * first loop through, set initial index and return value 770 * first loop through, set initial index and return value
@@ -781,6 +781,13 @@ struct scatterlist *scsi_alloc_sgtable(struct scsi_cmnd *cmd, gfp_t gfp_mask)
781 sg_chain(prev, SCSI_MAX_SG_SEGMENTS, sgl); 781 sg_chain(prev, SCSI_MAX_SG_SEGMENTS, sgl);
782 782
783 /* 783 /*
784 * if we have nothing left, mark the last segment as
785 * end-of-list
786 */
787 if (!left)
788 sg_mark_end(sgl, this);
789
790 /*
784 * don't allow subsequent mempool allocs to sleep, it would 791 * don't allow subsequent mempool allocs to sleep, it would
785 * violate the mempool principle. 792 * violate the mempool principle.
786 */ 793 */
@@ -2353,7 +2360,7 @@ void *scsi_kmap_atomic_sg(struct scatterlist *sgl, int sg_count,
2353 *offset = *offset - len_complete + sg->offset; 2360 *offset = *offset - len_complete + sg->offset;
2354 2361
2355 /* Assumption: contiguous pages can be accessed as "page + i" */ 2362 /* Assumption: contiguous pages can be accessed as "page + i" */
2356 page = nth_page(sg->page, (*offset >> PAGE_SHIFT)); 2363 page = nth_page(sg_page(sg), (*offset >> PAGE_SHIFT));
2357 *offset &= ~PAGE_MASK; 2364 *offset &= ~PAGE_MASK;
2358 2365
2359 /* Bytes in this sg-entry from *offset to the end of the page */ 2366 /* Bytes in this sg-entry from *offset to the end of the page */
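
[Illustrative sketch, not part of the patch] The scsi_lib.c hunks above, like most conversions in this series, stop dereferencing sg->page directly and go through the sg_page()/sg_virt() accessors instead. A minimal sketch of the read-side pattern, with hypothetical function names, assuming the transitional scatterlist API used throughout this series:

	#include <linux/scatterlist.h>
	#include <linux/mm.h>

	/* was: page_address(sg->page) + sg->offset; only valid for lowmem
	 * pages, exactly like the open-coded form it replaces */
	static void *sketch_sg_kaddr(struct scatterlist *sg)
	{
		return sg_virt(sg);
	}

	/* was: nth_page(sg->page, byte >> PAGE_SHIFT), as in scsi_kmap_atomic_sg() */
	static struct page *sketch_sg_page_at(struct scatterlist *sg, unsigned int byte)
	{
		return nth_page(sg_page(sg), byte >> PAGE_SHIFT);
	}
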
diff --git a/drivers/scsi/seagate.c b/drivers/scsi/seagate.c
index ce80fa9ad815..b11324479b5b 100644
--- a/drivers/scsi/seagate.c
+++ b/drivers/scsi/seagate.c
@@ -999,14 +999,14 @@ connect_loop:
999 for (i = 0; i < nobuffs; ++i) 999 for (i = 0; i < nobuffs; ++i)
1000 printk("scsi%d : buffer %d address = %p length = %d\n", 1000 printk("scsi%d : buffer %d address = %p length = %d\n",
1001 hostno, i, 1001 hostno, i,
1002 page_address(buffer[i].page) + buffer[i].offset, 1002 sg_virt(&buffer[i]),
1003 buffer[i].length); 1003 buffer[i].length);
1004 } 1004 }
1005#endif 1005#endif
1006 1006
1007 buffer = (struct scatterlist *) SCint->request_buffer; 1007 buffer = (struct scatterlist *) SCint->request_buffer;
1008 len = buffer->length; 1008 len = buffer->length;
1009 data = page_address(buffer->page) + buffer->offset; 1009 data = sg_virt(buffer);
1010 } else { 1010 } else {
1011 DPRINTK (DEBUG_SG, "scsi%d : scatter gather not requested.\n", hostno); 1011 DPRINTK (DEBUG_SG, "scsi%d : scatter gather not requested.\n", hostno);
1012 buffer = NULL; 1012 buffer = NULL;
@@ -1239,7 +1239,7 @@ connect_loop:
1239 --nobuffs; 1239 --nobuffs;
1240 ++buffer; 1240 ++buffer;
1241 len = buffer->length; 1241 len = buffer->length;
1242 data = page_address(buffer->page) + buffer->offset; 1242 data = sg_virt(buffer);
1243 DPRINTK (DEBUG_SG, 1243 DPRINTK (DEBUG_SG,
1244 "scsi%d : next scatter-gather buffer len = %d address = %08x\n", 1244 "scsi%d : next scatter-gather buffer len = %d address = %08x\n",
1245 hostno, len, data); 1245 hostno, len, data);
@@ -1396,7 +1396,7 @@ connect_loop:
1396 --nobuffs; 1396 --nobuffs;
1397 ++buffer; 1397 ++buffer;
1398 len = buffer->length; 1398 len = buffer->length;
1399 data = page_address(buffer->page) + buffer->offset; 1399 data = sg_virt(buffer);
1400 DPRINTK (DEBUG_SG, "scsi%d : next scatter-gather buffer len = %d address = %08x\n", hostno, len, data); 1400 DPRINTK (DEBUG_SG, "scsi%d : next scatter-gather buffer len = %d address = %08x\n", hostno, len, data);
1401 } 1401 }
1402 break; 1402 break;
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 7238b2dfc497..cc1971002846 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -1169,7 +1169,7 @@ sg_vma_nopage(struct vm_area_struct *vma, unsigned long addr, int *type)
1169 len = vma->vm_end - sa; 1169 len = vma->vm_end - sa;
1170 len = (len < sg->length) ? len : sg->length; 1170 len = (len < sg->length) ? len : sg->length;
1171 if (offset < len) { 1171 if (offset < len) {
1172 page = virt_to_page(page_address(sg->page) + offset); 1172 page = virt_to_page(page_address(sg_page(sg)) + offset);
1173 get_page(page); /* increment page count */ 1173 get_page(page); /* increment page count */
1174 break; 1174 break;
1175 } 1175 }
@@ -1717,13 +1717,13 @@ st_map_user_pages(struct scatterlist *sgl, const unsigned int max_pages,
1717 goto out_unlock; */ 1717 goto out_unlock; */
1718 } 1718 }
1719 1719
1720 sgl[0].page = pages[0]; 1720 sg_set_page(sgl, pages[0]);
1721 sgl[0].offset = uaddr & ~PAGE_MASK; 1721 sgl[0].offset = uaddr & ~PAGE_MASK;
1722 if (nr_pages > 1) { 1722 if (nr_pages > 1) {
1723 sgl[0].length = PAGE_SIZE - sgl[0].offset; 1723 sgl[0].length = PAGE_SIZE - sgl[0].offset;
1724 count -= sgl[0].length; 1724 count -= sgl[0].length;
1725 for (i=1; i < nr_pages ; i++) { 1725 for (i=1; i < nr_pages ; i++) {
1726 sgl[i].page = pages[i]; 1726 sg_set_page(&sgl[i], pages[i]);
1727 sgl[i].length = count < PAGE_SIZE ? count : PAGE_SIZE; 1727 sgl[i].length = count < PAGE_SIZE ? count : PAGE_SIZE;
1728 count -= PAGE_SIZE; 1728 count -= PAGE_SIZE;
1729 } 1729 }
@@ -1754,7 +1754,7 @@ st_unmap_user_pages(struct scatterlist *sgl, const unsigned int nr_pages,
1754 int i; 1754 int i;
1755 1755
1756 for (i=0; i < nr_pages; i++) { 1756 for (i=0; i < nr_pages; i++) {
1757 struct page *page = sgl[i].page; 1757 struct page *page = sg_page(&sgl[i]);
1758 1758
1759 if (dirtied) 1759 if (dirtied)
1760 SetPageDirty(page); 1760 SetPageDirty(page);
@@ -1854,7 +1854,7 @@ sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size)
1854 scatter_elem_sz_prev = ret_sz; 1854 scatter_elem_sz_prev = ret_sz;
1855 } 1855 }
1856 } 1856 }
1857 sg->page = p; 1857 sg_set_page(sg, p);
1858 sg->length = (ret_sz > num) ? num : ret_sz; 1858 sg->length = (ret_sz > num) ? num : ret_sz;
1859 1859
1860 SCSI_LOG_TIMEOUT(5, printk("sg_build_indirect: k=%d, num=%d, " 1860 SCSI_LOG_TIMEOUT(5, printk("sg_build_indirect: k=%d, num=%d, "
@@ -1907,14 +1907,14 @@ sg_write_xfer(Sg_request * srp)
1907 onum = 1; 1907 onum = 1;
1908 1908
1909 ksglen = sg->length; 1909 ksglen = sg->length;
1910 p = page_address(sg->page); 1910 p = page_address(sg_page(sg));
1911 for (j = 0, k = 0; j < onum; ++j) { 1911 for (j = 0, k = 0; j < onum; ++j) {
1912 res = sg_u_iovec(hp, iovec_count, j, 1, &usglen, &up); 1912 res = sg_u_iovec(hp, iovec_count, j, 1, &usglen, &up);
1913 if (res) 1913 if (res)
1914 return res; 1914 return res;
1915 1915
1916 for (; p; sg = sg_next(sg), ksglen = sg->length, 1916 for (; p; sg = sg_next(sg), ksglen = sg->length,
1917 p = page_address(sg->page)) { 1917 p = page_address(sg_page(sg))) {
1918 if (usglen <= 0) 1918 if (usglen <= 0)
1919 break; 1919 break;
1920 if (ksglen > usglen) { 1920 if (ksglen > usglen) {
@@ -1991,12 +1991,12 @@ sg_remove_scat(Sg_scatter_hold * schp)
1991 } else { 1991 } else {
1992 int k; 1992 int k;
1993 1993
1994 for (k = 0; (k < schp->k_use_sg) && sg->page; 1994 for (k = 0; (k < schp->k_use_sg) && sg_page(sg);
1995 ++k, sg = sg_next(sg)) { 1995 ++k, sg = sg_next(sg)) {
1996 SCSI_LOG_TIMEOUT(5, printk( 1996 SCSI_LOG_TIMEOUT(5, printk(
1997 "sg_remove_scat: k=%d, pg=0x%p, len=%d\n", 1997 "sg_remove_scat: k=%d, pg=0x%p, len=%d\n",
1998 k, sg->page, sg->length)); 1998 k, sg_page(sg), sg->length));
1999 sg_page_free(sg->page, sg->length); 1999 sg_page_free(sg_page(sg), sg->length);
2000 } 2000 }
2001 } 2001 }
2002 kfree(schp->buffer); 2002 kfree(schp->buffer);
@@ -2038,7 +2038,7 @@ sg_read_xfer(Sg_request * srp)
2038 } else 2038 } else
2039 onum = 1; 2039 onum = 1;
2040 2040
2041 p = page_address(sg->page); 2041 p = page_address(sg_page(sg));
2042 ksglen = sg->length; 2042 ksglen = sg->length;
2043 for (j = 0, k = 0; j < onum; ++j) { 2043 for (j = 0, k = 0; j < onum; ++j) {
2044 res = sg_u_iovec(hp, iovec_count, j, 0, &usglen, &up); 2044 res = sg_u_iovec(hp, iovec_count, j, 0, &usglen, &up);
@@ -2046,7 +2046,7 @@ sg_read_xfer(Sg_request * srp)
2046 return res; 2046 return res;
2047 2047
2048 for (; p; sg = sg_next(sg), ksglen = sg->length, 2048 for (; p; sg = sg_next(sg), ksglen = sg->length,
2049 p = page_address(sg->page)) { 2049 p = page_address(sg_page(sg))) {
2050 if (usglen <= 0) 2050 if (usglen <= 0)
2051 break; 2051 break;
2052 if (ksglen > usglen) { 2052 if (ksglen > usglen) {
@@ -2092,15 +2092,15 @@ sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer)
2092 if ((!outp) || (num_read_xfer <= 0)) 2092 if ((!outp) || (num_read_xfer <= 0))
2093 return 0; 2093 return 0;
2094 2094
2095 for (k = 0; (k < schp->k_use_sg) && sg->page; ++k, sg = sg_next(sg)) { 2095 for (k = 0; (k < schp->k_use_sg) && sg_page(sg); ++k, sg = sg_next(sg)) {
2096 num = sg->length; 2096 num = sg->length;
2097 if (num > num_read_xfer) { 2097 if (num > num_read_xfer) {
2098 if (__copy_to_user(outp, page_address(sg->page), 2098 if (__copy_to_user(outp, page_address(sg_page(sg)),
2099 num_read_xfer)) 2099 num_read_xfer))
2100 return -EFAULT; 2100 return -EFAULT;
2101 break; 2101 break;
2102 } else { 2102 } else {
2103 if (__copy_to_user(outp, page_address(sg->page), 2103 if (__copy_to_user(outp, page_address(sg_page(sg)),
2104 num)) 2104 num))
2105 return -EFAULT; 2105 return -EFAULT;
2106 num_read_xfer -= num; 2106 num_read_xfer -= num;
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 73c44cbdea47..ce69b9efc102 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -3797,7 +3797,7 @@ static void buf_to_sg(struct st_buffer *STbp, unsigned int length)
3797 sg = &(STbp->sg[0]); 3797 sg = &(STbp->sg[0]);
3798 frp = STbp->frp; 3798 frp = STbp->frp;
3799 for (i=count=0; count < length; i++) { 3799 for (i=count=0; count < length; i++) {
3800 sg[i].page = frp[i].page; 3800 sg_set_page(&sg[i], frp[i].page);
3801 if (length - count > frp[i].length) 3801 if (length - count > frp[i].length)
3802 sg[i].length = frp[i].length; 3802 sg[i].length = frp[i].length;
3803 else 3803 else
@@ -4446,14 +4446,14 @@ static int sgl_map_user_pages(struct scatterlist *sgl, const unsigned int max_pa
4446 } 4446 }
4447 4447
4448 /* Populate the scatter/gather list */ 4448 /* Populate the scatter/gather list */
4449 sgl[0].page = pages[0]; 4449 sg_set_page(&sgl[0], pages[0]);
4450 sgl[0].offset = uaddr & ~PAGE_MASK; 4450 sgl[0].offset = uaddr & ~PAGE_MASK;
4451 if (nr_pages > 1) { 4451 if (nr_pages > 1) {
4452 sgl[0].length = PAGE_SIZE - sgl[0].offset; 4452 sgl[0].length = PAGE_SIZE - sgl[0].offset;
4453 count -= sgl[0].length; 4453 count -= sgl[0].length;
4454 for (i=1; i < nr_pages ; i++) { 4454 for (i=1; i < nr_pages ; i++) {
4455 sg_set_page(&sgl[i], pages[i]);
4455 sgl[i].offset = 0; 4456 sgl[i].offset = 0;
4456 sgl[i].page = pages[i];
4457 sgl[i].length = count < PAGE_SIZE ? count : PAGE_SIZE; 4457 sgl[i].length = count < PAGE_SIZE ? count : PAGE_SIZE;
4458 count -= PAGE_SIZE; 4458 count -= PAGE_SIZE;
4459 } 4459 }
@@ -4483,7 +4483,7 @@ static int sgl_unmap_user_pages(struct scatterlist *sgl, const unsigned int nr_p
4483 int i; 4483 int i;
4484 4484
4485 for (i=0; i < nr_pages; i++) { 4485 for (i=0; i < nr_pages; i++) {
4486 struct page *page = sgl[i].page; 4486 struct page *page = sg_page(&sgl[i]);
4487 4487
4488 if (dirtied) 4488 if (dirtied)
4489 SetPageDirty(page); 4489 SetPageDirty(page);
diff --git a/drivers/scsi/sun3_NCR5380.c b/drivers/scsi/sun3_NCR5380.c
index 4aafe89b557f..2dcde373b20e 100644
--- a/drivers/scsi/sun3_NCR5380.c
+++ b/drivers/scsi/sun3_NCR5380.c
@@ -272,8 +272,7 @@ static struct scsi_host_template *the_template = NULL;
272#define HOSTNO instance->host_no 272#define HOSTNO instance->host_no
273#define H_NO(cmd) (cmd)->device->host->host_no 273#define H_NO(cmd) (cmd)->device->host->host_no
274 274
275#define SGADDR(buffer) (void *)(((unsigned long)page_address((buffer)->page)) + \ 275#define SGADDR(buffer) (void *)(((unsigned long)sg_virt(((buffer)))))
276 (buffer)->offset)
277 276
278#ifdef SUPPORT_TAGS 277#ifdef SUPPORT_TAGS
279 278
diff --git a/drivers/scsi/sym53c416.c b/drivers/scsi/sym53c416.c
index 8befab7e9839..90cee94d9522 100644
--- a/drivers/scsi/sym53c416.c
+++ b/drivers/scsi/sym53c416.c
@@ -196,7 +196,7 @@ static unsigned int sym53c416_base_3[2] = {0,0};
196 196
197#define MAXHOSTS 4 197#define MAXHOSTS 4
198 198
199#define SG_ADDRESS(buffer) ((char *) (page_address((buffer)->page)+(buffer)->offset)) 199#define SG_ADDRESS(buffer) ((char *) sg_virt((buffer)))
200 200
201enum phases 201enum phases
202{ 202{
diff --git a/drivers/scsi/tmscsim.c b/drivers/scsi/tmscsim.c
index 5c72ca31a47a..44193049c4ae 100644
--- a/drivers/scsi/tmscsim.c
+++ b/drivers/scsi/tmscsim.c
@@ -430,10 +430,7 @@ static __inline__ void dc390_Going_remove (struct dc390_dcb* pDCB, struct dc390_
430 430
431static struct scatterlist* dc390_sg_build_single(struct scatterlist *sg, void *addr, unsigned int length) 431static struct scatterlist* dc390_sg_build_single(struct scatterlist *sg, void *addr, unsigned int length)
432{ 432{
433 memset(sg, 0, sizeof(struct scatterlist)); 433 sg_init_one(sg, addr, length);
434 sg->page = virt_to_page(addr);
435 sg->length = length;
436 sg->offset = (unsigned long)addr & ~PAGE_MASK;
437 return sg; 434 return sg;
438} 435}
439 436
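
[Illustrative sketch, not part of the patch] The tmscsim.c hunk above folds the open-coded single-entry setup into sg_init_one(). Roughly, and assuming the helper's extra bookkeeping (termination marking and debug cookies) can be ignored here, sg_init_one(sg, addr, length) amounts to:

	#include <linux/scatterlist.h>
	#include <linux/mm.h>
	#include <linux/string.h>

	/* Hypothetical name; a sketch of what the call above replaces. */
	static void sketch_sg_init_one(struct scatterlist *sg, void *addr,
				       unsigned int length)
	{
		memset(sg, 0, sizeof(*sg));
		sg_set_page(sg, virt_to_page(addr));	/* two-argument form used in this series */
		sg->offset = offset_in_page(addr);	/* same as (unsigned long)addr & ~PAGE_MASK */
		sg->length = length;
	}
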
diff --git a/drivers/scsi/ultrastor.c b/drivers/scsi/ultrastor.c
index ea72bbeb8f9d..6d1f0edd7985 100644
--- a/drivers/scsi/ultrastor.c
+++ b/drivers/scsi/ultrastor.c
@@ -681,7 +681,7 @@ static inline void build_sg_list(struct mscp *mscp, struct scsi_cmnd *SCpnt)
681 681
682 max = scsi_sg_count(SCpnt); 682 max = scsi_sg_count(SCpnt);
683 scsi_for_each_sg(SCpnt, sg, max, i) { 683 scsi_for_each_sg(SCpnt, sg, max, i) {
684 mscp->sglist[i].address = isa_page_to_bus(sg->page) + sg->offset; 684 mscp->sglist[i].address = isa_page_to_bus(sg_page(sg)) + sg->offset;
685 mscp->sglist[i].num_bytes = sg->length; 685 mscp->sglist[i].num_bytes = sg->length;
686 transfer_length += sg->length; 686 transfer_length += sg->length;
687 } 687 }
diff --git a/drivers/scsi/wd33c93.c b/drivers/scsi/wd33c93.c
index 0e8e642fd3b0..fdbb92d1f722 100644
--- a/drivers/scsi/wd33c93.c
+++ b/drivers/scsi/wd33c93.c
@@ -410,8 +410,7 @@ wd33c93_queuecommand(struct scsi_cmnd *cmd,
410 if (cmd->use_sg) { 410 if (cmd->use_sg) {
411 cmd->SCp.buffer = (struct scatterlist *) cmd->request_buffer; 411 cmd->SCp.buffer = (struct scatterlist *) cmd->request_buffer;
412 cmd->SCp.buffers_residual = cmd->use_sg - 1; 412 cmd->SCp.buffers_residual = cmd->use_sg - 1;
413 cmd->SCp.ptr = page_address(cmd->SCp.buffer->page) + 413 cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
414 cmd->SCp.buffer->offset;
415 cmd->SCp.this_residual = cmd->SCp.buffer->length; 414 cmd->SCp.this_residual = cmd->SCp.buffer->length;
416 } else { 415 } else {
417 cmd->SCp.buffer = NULL; 416 cmd->SCp.buffer = NULL;
@@ -745,8 +744,7 @@ transfer_bytes(const wd33c93_regs regs, struct scsi_cmnd *cmd,
745 ++cmd->SCp.buffer; 744 ++cmd->SCp.buffer;
746 --cmd->SCp.buffers_residual; 745 --cmd->SCp.buffers_residual;
747 cmd->SCp.this_residual = cmd->SCp.buffer->length; 746 cmd->SCp.this_residual = cmd->SCp.buffer->length;
748 cmd->SCp.ptr = page_address(cmd->SCp.buffer->page) + 747 cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
749 cmd->SCp.buffer->offset;
750 } 748 }
751 if (!cmd->SCp.this_residual) /* avoid bogus setups */ 749 if (!cmd->SCp.this_residual) /* avoid bogus setups */
752 return; 750 return;
diff --git a/drivers/scsi/wd7000.c b/drivers/scsi/wd7000.c
index 255c611e78b8..03cd44f231df 100644
--- a/drivers/scsi/wd7000.c
+++ b/drivers/scsi/wd7000.c
@@ -1123,7 +1123,7 @@ static int wd7000_queuecommand(struct scsi_cmnd *SCpnt,
1123 any2scsi(scb->maxlen, nseg * sizeof(Sgb)); 1123 any2scsi(scb->maxlen, nseg * sizeof(Sgb));
1124 1124
1125 scsi_for_each_sg(SCpnt, sg, nseg, i) { 1125 scsi_for_each_sg(SCpnt, sg, nseg, i) {
1126 any2scsi(sgb[i].ptr, isa_page_to_bus(sg->page) + sg->offset); 1126 any2scsi(sgb[i].ptr, isa_page_to_bus(sg_page(sg)) + sg->offset);
1127 any2scsi(sgb[i].len, sg->length); 1127 any2scsi(sgb[i].len, sg->length);
1128 } 1128 }
1129 } else { 1129 } else {
diff --git a/drivers/serial/mcf.c b/drivers/serial/mcf.c
new file mode 100644
index 000000000000..a7d4360ea7df
--- /dev/null
+++ b/drivers/serial/mcf.c
@@ -0,0 +1,653 @@
1/****************************************************************************/
2
3/*
4 * mcf.c -- Freescale ColdFire UART driver
5 *
6 * (C) Copyright 2003-2007, Greg Ungerer <gerg@snapgear.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 */
13
14/****************************************************************************/
15
16#include <linux/kernel.h>
17#include <linux/init.h>
18#include <linux/interrupt.h>
19#include <linux/module.h>
20#include <linux/console.h>
21#include <linux/tty.h>
22#include <linux/tty_flip.h>
23#include <linux/serial.h>
24#include <linux/serial_core.h>
25#include <linux/io.h>
26#include <asm/coldfire.h>
27#include <asm/mcfsim.h>
28#include <asm/mcfuart.h>
29#include <asm/nettel.h>
30
31/****************************************************************************/
32
33/*
34 * Some boards implement the DTR/DCD lines using GPIO lines, most
35 * don't. Dummy out the access macros for those that don't. Those
 36 * that do should define these macros somewhere in their board
 37 * specific include files.
38 */
39#if !defined(mcf_getppdcd)
40#define mcf_getppdcd(p) (1)
41#endif
42#if !defined(mcf_getppdtr)
43#define mcf_getppdtr(p) (1)
44#endif
45#if !defined(mcf_setppdtr)
46#define mcf_setppdtr(p, v) do { } while (0)
47#endif
48
49/****************************************************************************/
50
51/*
52 * Local per-uart structure.
53 */
54struct mcf_uart {
55 struct uart_port port;
56 unsigned int sigs; /* Local copy of line sigs */
57 unsigned char imr; /* Local IMR mirror */
58};
59
60/****************************************************************************/
61
62static unsigned int mcf_tx_empty(struct uart_port *port)
63{
64 return (readb(port->membase + MCFUART_USR) & MCFUART_USR_TXEMPTY) ?
65 TIOCSER_TEMT : 0;
66}
67
68/****************************************************************************/
69
70static unsigned int mcf_get_mctrl(struct uart_port *port)
71{
72 struct mcf_uart *pp = (struct mcf_uart *) port;
73 unsigned long flags;
74 unsigned int sigs;
75
76 spin_lock_irqsave(&port->lock, flags);
77 sigs = (readb(port->membase + MCFUART_UIPR) & MCFUART_UIPR_CTS) ?
78 0 : TIOCM_CTS;
79 sigs |= (pp->sigs & TIOCM_RTS);
80 sigs |= (mcf_getppdcd(port->line) ? TIOCM_CD : 0);
81 sigs |= (mcf_getppdtr(port->line) ? TIOCM_DTR : 0);
82 spin_unlock_irqrestore(&port->lock, flags);
83 return sigs;
84}
85
86/****************************************************************************/
87
88static void mcf_set_mctrl(struct uart_port *port, unsigned int sigs)
89{
90 struct mcf_uart *pp = (struct mcf_uart *) port;
91 unsigned long flags;
92
93 spin_lock_irqsave(&port->lock, flags);
94 pp->sigs = sigs;
95 mcf_setppdtr(port->line, (sigs & TIOCM_DTR));
96 if (sigs & TIOCM_RTS)
97 writeb(MCFUART_UOP_RTS, port->membase + MCFUART_UOP1);
98 else
99 writeb(MCFUART_UOP_RTS, port->membase + MCFUART_UOP0);
100 spin_unlock_irqrestore(&port->lock, flags);
101}
102
103/****************************************************************************/
104
105static void mcf_start_tx(struct uart_port *port)
106{
107 struct mcf_uart *pp = (struct mcf_uart *) port;
108 unsigned long flags;
109
110 spin_lock_irqsave(&port->lock, flags);
111 pp->imr |= MCFUART_UIR_TXREADY;
112 writeb(pp->imr, port->membase + MCFUART_UIMR);
113 spin_unlock_irqrestore(&port->lock, flags);
114}
115
116/****************************************************************************/
117
118static void mcf_stop_tx(struct uart_port *port)
119{
120 struct mcf_uart *pp = (struct mcf_uart *) port;
121 unsigned long flags;
122
123 spin_lock_irqsave(&port->lock, flags);
124 pp->imr &= ~MCFUART_UIR_TXREADY;
125 writeb(pp->imr, port->membase + MCFUART_UIMR);
126 spin_unlock_irqrestore(&port->lock, flags);
127}
128
129/****************************************************************************/
130
131static void mcf_stop_rx(struct uart_port *port)
132{
133 struct mcf_uart *pp = (struct mcf_uart *) port;
134 unsigned long flags;
135
136 spin_lock_irqsave(&port->lock, flags);
137 pp->imr &= ~MCFUART_UIR_RXREADY;
138 writeb(pp->imr, port->membase + MCFUART_UIMR);
139 spin_unlock_irqrestore(&port->lock, flags);
140}
141
142/****************************************************************************/
143
144static void mcf_break_ctl(struct uart_port *port, int break_state)
145{
146 unsigned long flags;
147
148 spin_lock_irqsave(&port->lock, flags);
149 if (break_state == -1)
150 writeb(MCFUART_UCR_CMDBREAKSTART, port->membase + MCFUART_UCR);
151 else
152 writeb(MCFUART_UCR_CMDBREAKSTOP, port->membase + MCFUART_UCR);
153 spin_unlock_irqrestore(&port->lock, flags);
154}
155
156/****************************************************************************/
157
158static void mcf_enable_ms(struct uart_port *port)
159{
160}
161
162/****************************************************************************/
163
164static int mcf_startup(struct uart_port *port)
165{
166 struct mcf_uart *pp = (struct mcf_uart *) port;
167 unsigned long flags;
168
169 spin_lock_irqsave(&port->lock, flags);
170
171 /* Reset UART, get it into known state... */
172 writeb(MCFUART_UCR_CMDRESETRX, port->membase + MCFUART_UCR);
173 writeb(MCFUART_UCR_CMDRESETTX, port->membase + MCFUART_UCR);
174
175 /* Enable the UART transmitter and receiver */
176 writeb(MCFUART_UCR_RXENABLE | MCFUART_UCR_TXENABLE,
177 port->membase + MCFUART_UCR);
178
179 /* Enable RX interrupts now */
180 pp->imr = MCFUART_UIR_RXREADY;
181 writeb(pp->imr, port->membase + MCFUART_UIMR);
182
183 spin_unlock_irqrestore(&port->lock, flags);
184
185 return 0;
186}
187
188/****************************************************************************/
189
190static void mcf_shutdown(struct uart_port *port)
191{
192 struct mcf_uart *pp = (struct mcf_uart *) port;
193 unsigned long flags;
194
195 spin_lock_irqsave(&port->lock, flags);
196
197 /* Disable all interrupts now */
198 pp->imr = 0;
199 writeb(pp->imr, port->membase + MCFUART_UIMR);
200
201 /* Disable UART transmitter and receiver */
202 writeb(MCFUART_UCR_CMDRESETRX, port->membase + MCFUART_UCR);
203 writeb(MCFUART_UCR_CMDRESETTX, port->membase + MCFUART_UCR);
204
205 spin_unlock_irqrestore(&port->lock, flags);
206}
207
208/****************************************************************************/
209
210static void mcf_set_termios(struct uart_port *port, struct ktermios *termios,
211 struct ktermios *old)
212{
213 unsigned long flags;
214 unsigned int baud, baudclk;
215 unsigned char mr1, mr2;
216
217 baud = uart_get_baud_rate(port, termios, old, 0, 230400);
218 baudclk = ((MCF_BUSCLK / baud) + 16) / 32;
219
220 mr1 = MCFUART_MR1_RXIRQRDY | MCFUART_MR1_RXERRCHAR;
221 mr2 = 0;
222
223 switch (termios->c_cflag & CSIZE) {
224 case CS5: mr1 |= MCFUART_MR1_CS5; break;
225 case CS6: mr1 |= MCFUART_MR1_CS6; break;
226 case CS7: mr1 |= MCFUART_MR1_CS7; break;
227 case CS8:
228 default: mr1 |= MCFUART_MR1_CS8; break;
229 }
230
231 if (termios->c_cflag & PARENB) {
232 if (termios->c_cflag & CMSPAR) {
233 if (termios->c_cflag & PARODD)
234 mr1 |= MCFUART_MR1_PARITYMARK;
235 else
236 mr1 |= MCFUART_MR1_PARITYSPACE;
237 } else {
238 if (termios->c_cflag & PARODD)
239 mr1 |= MCFUART_MR1_PARITYODD;
240 else
241 mr1 |= MCFUART_MR1_PARITYEVEN;
242 }
243 } else {
244 mr1 |= MCFUART_MR1_PARITYNONE;
245 }
246
247 if (termios->c_cflag & CSTOPB)
248 mr2 |= MCFUART_MR2_STOP2;
249 else
250 mr2 |= MCFUART_MR2_STOP1;
251
252 if (termios->c_cflag & CRTSCTS) {
253 mr1 |= MCFUART_MR1_RXRTS;
254 mr2 |= MCFUART_MR2_TXCTS;
255 }
256
257 spin_lock_irqsave(&port->lock, flags);
258 writeb(MCFUART_UCR_CMDRESETRX, port->membase + MCFUART_UCR);
259 writeb(MCFUART_UCR_CMDRESETTX, port->membase + MCFUART_UCR);
260 writeb(MCFUART_UCR_CMDRESETMRPTR, port->membase + MCFUART_UCR);
261 writeb(mr1, port->membase + MCFUART_UMR);
262 writeb(mr2, port->membase + MCFUART_UMR);
263 writeb((baudclk & 0xff00) >> 8, port->membase + MCFUART_UBG1);
264 writeb((baudclk & 0xff), port->membase + MCFUART_UBG2);
265 writeb(MCFUART_UCSR_RXCLKTIMER | MCFUART_UCSR_TXCLKTIMER,
266 port->membase + MCFUART_UCSR);
267 writeb(MCFUART_UCR_RXENABLE | MCFUART_UCR_TXENABLE,
268 port->membase + MCFUART_UCR);
269 spin_unlock_irqrestore(&port->lock, flags);
270}
271
272/****************************************************************************/
273
274static void mcf_rx_chars(struct mcf_uart *pp)
275{
276 struct uart_port *port = (struct uart_port *) pp;
277 unsigned char status, ch, flag;
278
279 while ((status = readb(port->membase + MCFUART_USR)) & MCFUART_USR_RXREADY) {
280 ch = readb(port->membase + MCFUART_URB);
281 flag = TTY_NORMAL;
282 port->icount.rx++;
283
284 if (status & MCFUART_USR_RXERR) {
285 writeb(MCFUART_UCR_CMDRESETERR,
286 port->membase + MCFUART_UCR);
287
288 if (status & MCFUART_USR_RXBREAK) {
289 port->icount.brk++;
290 if (uart_handle_break(port))
291 continue;
292 } else if (status & MCFUART_USR_RXPARITY) {
293 port->icount.parity++;
294 } else if (status & MCFUART_USR_RXOVERRUN) {
295 port->icount.overrun++;
296 } else if (status & MCFUART_USR_RXFRAMING) {
297 port->icount.frame++;
298 }
299
300 status &= port->read_status_mask;
301
302 if (status & MCFUART_USR_RXBREAK)
303 flag = TTY_BREAK;
304 else if (status & MCFUART_USR_RXPARITY)
305 flag = TTY_PARITY;
306 else if (status & MCFUART_USR_RXFRAMING)
307 flag = TTY_FRAME;
308 }
309
310 if (uart_handle_sysrq_char(port, ch))
311 continue;
312 uart_insert_char(port, status, MCFUART_USR_RXOVERRUN, ch, flag);
313 }
314
315 tty_flip_buffer_push(port->info->tty);
316}
317
318/****************************************************************************/
319
320static void mcf_tx_chars(struct mcf_uart *pp)
321{
322 struct uart_port *port = (struct uart_port *) pp;
323 struct circ_buf *xmit = &port->info->xmit;
324
325 if (port->x_char) {
326 /* Send special char - probably flow control */
327 writeb(port->x_char, port->membase + MCFUART_UTB);
328 port->x_char = 0;
329 port->icount.tx++;
330 return;
331 }
332
333 while (readb(port->membase + MCFUART_USR) & MCFUART_USR_TXREADY) {
334 if (xmit->head == xmit->tail)
335 break;
336 writeb(xmit->buf[xmit->tail], port->membase + MCFUART_UTB);
337 xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE -1);
338 port->icount.tx++;
339 }
340
341 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
342 uart_write_wakeup(port);
343
344 if (xmit->head == xmit->tail) {
345 pp->imr &= ~MCFUART_UIR_TXREADY;
346 writeb(pp->imr, port->membase + MCFUART_UIMR);
347 }
348}
349
350/****************************************************************************/
351
352static irqreturn_t mcf_interrupt(int irq, void *data)
353{
354 struct uart_port *port = data;
355 struct mcf_uart *pp = (struct mcf_uart *) port;
356 unsigned int isr;
357
358 isr = readb(port->membase + MCFUART_UISR) & pp->imr;
359 if (isr & MCFUART_UIR_RXREADY)
360 mcf_rx_chars(pp);
361 if (isr & MCFUART_UIR_TXREADY)
362 mcf_tx_chars(pp);
363 return IRQ_HANDLED;
364}
365
366/****************************************************************************/
367
368static void mcf_config_port(struct uart_port *port, int flags)
369{
370 port->type = PORT_MCF;
371
372 /* Clear mask, so no surprise interrupts. */
373 writeb(0, port->membase + MCFUART_UIMR);
374
375 if (request_irq(port->irq, mcf_interrupt, IRQF_DISABLED, "UART", port))
376 printk(KERN_ERR "MCF: unable to attach ColdFire UART %d "
377 "interrupt vector=%d\n", port->line, port->irq);
378}
379
380/****************************************************************************/
381
382static const char *mcf_type(struct uart_port *port)
383{
384 return (port->type == PORT_MCF) ? "ColdFire UART" : NULL;
385}
386
387/****************************************************************************/
388
389static int mcf_request_port(struct uart_port *port)
390{
391 /* UARTs always present */
392 return 0;
393}
394
395/****************************************************************************/
396
397static void mcf_release_port(struct uart_port *port)
398{
399 /* Nothing to release... */
400}
401
402/****************************************************************************/
403
404static int mcf_verify_port(struct uart_port *port, struct serial_struct *ser)
405{
406 if ((ser->type != PORT_UNKNOWN) && (ser->type != PORT_MCF))
407 return -EINVAL;
408 return 0;
409}
410
411/****************************************************************************/
412
413/*
414 * Define the basic serial functions we support.
415 */
416static struct uart_ops mcf_uart_ops = {
417 .tx_empty = mcf_tx_empty,
418 .get_mctrl = mcf_get_mctrl,
419 .set_mctrl = mcf_set_mctrl,
420 .start_tx = mcf_start_tx,
421 .stop_tx = mcf_stop_tx,
422 .stop_rx = mcf_stop_rx,
423 .enable_ms = mcf_enable_ms,
424 .break_ctl = mcf_break_ctl,
425 .startup = mcf_startup,
426 .shutdown = mcf_shutdown,
427 .set_termios = mcf_set_termios,
428 .type = mcf_type,
429 .request_port = mcf_request_port,
430 .release_port = mcf_release_port,
431 .config_port = mcf_config_port,
432 .verify_port = mcf_verify_port,
433};
434
435static struct mcf_uart mcf_ports[3];
436
437#define MCF_MAXPORTS (sizeof(mcf_ports) / sizeof(struct mcf_uart))
438
439/****************************************************************************/
440#if defined(CONFIG_SERIAL_MCF_CONSOLE)
441/****************************************************************************/
442
443int __init early_mcf_setup(struct mcf_platform_uart *platp)
444{
445 struct uart_port *port;
446 int i;
447
448 for (i = 0; ((i < MCF_MAXPORTS) && (platp[i].mapbase)); i++) {
449 port = &mcf_ports[i].port;
450
451 port->line = i;
452 port->type = PORT_MCF;
453 port->mapbase = platp[i].mapbase;
454 port->membase = (platp[i].membase) ? platp[i].membase :
455 (unsigned char __iomem *) port->mapbase;
456 port->iotype = SERIAL_IO_MEM;
457 port->irq = platp[i].irq;
458 port->uartclk = MCF_BUSCLK;
459 port->flags = ASYNC_BOOT_AUTOCONF;
460 port->ops = &mcf_uart_ops;
461 }
462
463 return 0;
464}
465
466/****************************************************************************/
467
468static void mcf_console_putc(struct console *co, const char c)
469{
470 struct uart_port *port = &(mcf_ports + co->index)->port;
471 int i;
472
473 for (i = 0; (i < 0x10000); i++) {
474 if (readb(port->membase + MCFUART_USR) & MCFUART_USR_TXREADY)
475 break;
476 }
477 writeb(c, port->membase + MCFUART_UTB);
478 for (i = 0; (i < 0x10000); i++) {
479 if (readb(port->membase + MCFUART_USR) & MCFUART_USR_TXREADY)
480 break;
481 }
482}
483
484/****************************************************************************/
485
486static void mcf_console_write(struct console *co, const char *s, unsigned int count)
487{
488 for (; (count); count--, s++) {
489 mcf_console_putc(co, *s);
490 if (*s == '\n')
491 mcf_console_putc(co, '\r');
492 }
493}
494
495/****************************************************************************/
496
497static int __init mcf_console_setup(struct console *co, char *options)
498{
499 struct uart_port *port;
500 int baud = CONFIG_SERIAL_MCF_BAUDRATE;
501 int bits = 8;
502 int parity = 'n';
503 int flow = 'n';
504
 505 if ((co->index < 0) || (co->index >= MCF_MAXPORTS))
506 co->index = 0;
507 port = &mcf_ports[co->index].port;
508 if (port->membase == 0)
509 return -ENODEV;
510
511 if (options)
512 uart_parse_options(options, &baud, &parity, &bits, &flow);
513
514 return uart_set_options(port, co, baud, parity, bits, flow);
515}
516
517/****************************************************************************/
518
519static struct uart_driver mcf_driver;
520
521static struct console mcf_console = {
522 .name = "ttyS",
523 .write = mcf_console_write,
524 .device = uart_console_device,
525 .setup = mcf_console_setup,
526 .flags = CON_PRINTBUFFER,
527 .index = -1,
528 .data = &mcf_driver,
529};
530
531static int __init mcf_console_init(void)
532{
533 register_console(&mcf_console);
534 return 0;
535}
536
537console_initcall(mcf_console_init);
538
539#define MCF_CONSOLE &mcf_console
540
541/****************************************************************************/
542#else
543/****************************************************************************/
544
545#define MCF_CONSOLE NULL
546
547/****************************************************************************/
 548#endif /* CONFIG_SERIAL_MCF_CONSOLE */
549/****************************************************************************/
550
551/*
552 * Define the mcf UART driver structure.
553 */
554static struct uart_driver mcf_driver = {
555 .owner = THIS_MODULE,
556 .driver_name = "mcf",
557 .dev_name = "ttyS",
558 .major = TTY_MAJOR,
559 .minor = 64,
560 .nr = MCF_MAXPORTS,
561 .cons = MCF_CONSOLE,
562};
563
564/****************************************************************************/
565
566static int __devinit mcf_probe(struct platform_device *pdev)
567{
568 struct mcf_platform_uart *platp = pdev->dev.platform_data;
569 struct uart_port *port;
570 int i;
571
572 for (i = 0; ((i < MCF_MAXPORTS) && (platp[i].mapbase)); i++) {
573 port = &mcf_ports[i].port;
574
575 port->line = i;
576 port->type = PORT_MCF;
577 port->mapbase = platp[i].mapbase;
578 port->membase = (platp[i].membase) ? platp[i].membase :
579 (unsigned char __iomem *) platp[i].mapbase;
580 port->iotype = SERIAL_IO_MEM;
581 port->irq = platp[i].irq;
582 port->uartclk = MCF_BUSCLK;
583 port->ops = &mcf_uart_ops;
584 port->flags = ASYNC_BOOT_AUTOCONF;
585
586 uart_add_one_port(&mcf_driver, port);
587 }
588
589 return 0;
590}
591
592/****************************************************************************/
593
594static int mcf_remove(struct platform_device *pdev)
595{
596 struct uart_port *port;
597 int i;
598
599 for (i = 0; (i < MCF_MAXPORTS); i++) {
600 port = &mcf_ports[i].port;
601 if (port)
602 uart_remove_one_port(&mcf_driver, port);
603 }
604
605 return 0;
606}
607
608/****************************************************************************/
609
610static struct platform_driver mcf_platform_driver = {
611 .probe = mcf_probe,
612 .remove = __devexit_p(mcf_remove),
613 .driver = {
614 .name = "mcfuart",
615 .owner = THIS_MODULE,
616 },
617};
618
619/****************************************************************************/
620
621static int __init mcf_init(void)
622{
623 int rc;
624
625 printk("ColdFire internal UART serial driver\n");
626
627 rc = uart_register_driver(&mcf_driver);
628 if (rc)
629 return rc;
630 rc = platform_driver_register(&mcf_platform_driver);
631 if (rc)
632 return rc;
633 return 0;
634}
635
636/****************************************************************************/
637
638static void __exit mcf_exit(void)
639{
640 platform_driver_unregister(&mcf_platform_driver);
641 uart_unregister_driver(&mcf_driver);
642}
643
644/****************************************************************************/
645
646module_init(mcf_init);
647module_exit(mcf_exit);
648
649MODULE_AUTHOR("Greg Ungerer <gerg@snapgear.com>");
650MODULE_DESCRIPTION("Freescale ColdFire UART driver");
651MODULE_LICENSE("GPL");
652
653/****************************************************************************/
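
[Illustrative sketch, not part of the patch] mcf_probe() above walks a zero-terminated array of struct mcf_platform_uart supplied through platform data, so a board port would register something along the following lines. The base addresses and IRQ numbers are invented for illustration; only the field names and the "mcfuart" driver name come from the code above.

	#include <linux/platform_device.h>
	#include <asm/mcfuart.h>

	static struct mcf_platform_uart my_board_uarts[] = {
		{ .mapbase = 0xfc060000, .irq = 26 },
		{ .mapbase = 0xfc064000, .irq = 27 },
		{ },				/* mapbase == 0 terminates the list */
	};

	static struct platform_device my_board_uart_device = {
		.name	= "mcfuart",
		.id	= 0,
		.dev	= {
			.platform_data	= my_board_uarts,
		},
	};

	/* registered from board init code, e.g.
	 * platform_device_register(&my_board_uart_device); */
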
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index 8dd5a6afd513..90d64a808464 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -437,13 +437,11 @@ int usb_sg_init (
437#if defined(CONFIG_HIGHMEM) || defined(CONFIG_IOMMU) 437#if defined(CONFIG_HIGHMEM) || defined(CONFIG_IOMMU)
438 io->urbs[i]->transfer_buffer = NULL; 438 io->urbs[i]->transfer_buffer = NULL;
439#else 439#else
440 io->urbs[i]->transfer_buffer = 440 io->urbs[i]->transfer_buffer = sg_virt(&sg[i]);
441 page_address(sg[i].page) + sg[i].offset;
442#endif 441#endif
443 } else { 442 } else {
444 /* hc may use _only_ transfer_buffer */ 443 /* hc may use _only_ transfer_buffer */
445 io->urbs [i]->transfer_buffer = 444 io->urbs [i]->transfer_buffer = sg_virt(&sg[i]);
446 page_address (sg [i].page) + sg [i].offset;
447 len = sg [i].length; 445 len = sg [i].length;
448 } 446 }
449 447
diff --git a/drivers/usb/image/microtek.c b/drivers/usb/image/microtek.c
index e7d982a71548..91e999c9f680 100644
--- a/drivers/usb/image/microtek.c
+++ b/drivers/usb/image/microtek.c
@@ -519,8 +519,7 @@ static void mts_do_sg (struct urb* transfer)
519 context->fragment++; 519 context->fragment++;
520 mts_int_submit_urb(transfer, 520 mts_int_submit_urb(transfer,
521 context->data_pipe, 521 context->data_pipe,
522 page_address(sg[context->fragment].page) + 522 sg_virt(&sg[context->fragment]),
523 sg[context->fragment].offset,
524 sg[context->fragment].length, 523 sg[context->fragment].length,
525 context->fragment + 1 == scsi_sg_count(context->srb) ? 524 context->fragment + 1 == scsi_sg_count(context->srb) ?
526 mts_data_done : mts_do_sg); 525 mts_data_done : mts_do_sg);
@@ -557,7 +556,7 @@ mts_build_transfer_context(struct scsi_cmnd *srb, struct mts_desc* desc)
557 return; 556 return;
558 } else { 557 } else {
559 sg = scsi_sglist(srb); 558 sg = scsi_sglist(srb);
560 desc->context.data = page_address(sg[0].page) + sg[0].offset; 559 desc->context.data = sg_virt(&sg[0]);
561 desc->context.data_length = sg[0].length; 560 desc->context.data_length = sg[0].length;
562 } 561 }
563 562
diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c
index e901d31e051b..ea3162146481 100644
--- a/drivers/usb/misc/usbtest.c
+++ b/drivers/usb/misc/usbtest.c
@@ -360,9 +360,9 @@ static void free_sglist (struct scatterlist *sg, int nents)
360 if (!sg) 360 if (!sg)
361 return; 361 return;
362 for (i = 0; i < nents; i++) { 362 for (i = 0; i < nents; i++) {
363 if (!sg [i].page) 363 if (!sg_page(&sg[i]))
364 continue; 364 continue;
365 kfree (page_address (sg [i].page) + sg [i].offset); 365 kfree (sg_virt(&sg[i]));
366 } 366 }
367 kfree (sg); 367 kfree (sg);
368} 368}
diff --git a/drivers/usb/storage/protocol.c b/drivers/usb/storage/protocol.c
index cc8f7c52c729..889622baac20 100644
--- a/drivers/usb/storage/protocol.c
+++ b/drivers/usb/storage/protocol.c
@@ -195,7 +195,7 @@ unsigned int usb_stor_access_xfer_buf(unsigned char *buffer,
195 * the *offset and *index values for the next loop. */ 195 * the *offset and *index values for the next loop. */
196 cnt = 0; 196 cnt = 0;
197 while (cnt < buflen) { 197 while (cnt < buflen) {
198 struct page *page = sg->page + 198 struct page *page = sg_page(sg) +
199 ((sg->offset + *offset) >> PAGE_SHIFT); 199 ((sg->offset + *offset) >> PAGE_SHIFT);
200 unsigned int poff = 200 unsigned int poff =
201 (sg->offset + *offset) & (PAGE_SIZE-1); 201 (sg->offset + *offset) & (PAGE_SIZE-1);
diff --git a/drivers/watchdog/mpc5200_wdt.c b/drivers/watchdog/mpc5200_wdt.c
index 9cfb97576623..11f6a111e75b 100644
--- a/drivers/watchdog/mpc5200_wdt.c
+++ b/drivers/watchdog/mpc5200_wdt.c
@@ -176,6 +176,8 @@ static int mpc5200_wdt_probe(struct of_device *op, const struct of_device_id *ma
176 176
177 has_wdt = of_get_property(op->node, "has-wdt", NULL); 177 has_wdt = of_get_property(op->node, "has-wdt", NULL);
178 if (!has_wdt) 178 if (!has_wdt)
179 has_wdt = of_get_property(op->node, "fsl,has-wdt", NULL);
180 if (!has_wdt)
179 return -ENODEV; 181 return -ENODEV;
180 182
181 wdt = kzalloc(sizeof(*wdt), GFP_KERNEL); 183 wdt = kzalloc(sizeof(*wdt), GFP_KERNEL);
@@ -254,6 +256,7 @@ static int mpc5200_wdt_shutdown(struct of_device *op)
254 256
255static struct of_device_id mpc5200_wdt_match[] = { 257static struct of_device_id mpc5200_wdt_match[] = {
256 { .compatible = "mpc5200-gpt", }, 258 { .compatible = "mpc5200-gpt", },
259 { .compatible = "fsl,mpc5200-gpt", },
257 {}, 260 {},
258}; 261};
259static struct of_platform_driver mpc5200_wdt_driver = { 262static struct of_platform_driver mpc5200_wdt_driver = {
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
index 0a3ee5a322b0..5574ba3ab1f9 100644
--- a/fs/cifs/cifsfs.h
+++ b/fs/cifs/cifsfs.h
@@ -103,7 +103,7 @@ extern int cifs_ioctl(struct inode *inode, struct file *filep,
103 unsigned int command, unsigned long arg); 103 unsigned int command, unsigned long arg);
104 104
105#ifdef CONFIG_CIFS_EXPERIMENTAL 105#ifdef CONFIG_CIFS_EXPERIMENTAL
106extern struct export_operations cifs_export_ops; 106extern const struct export_operations cifs_export_ops;
107#endif /* EXPERIMENTAL */ 107#endif /* EXPERIMENTAL */
108 108
109#define CIFS_VERSION "1.51" 109#define CIFS_VERSION "1.51"
diff --git a/fs/cifs/export.c b/fs/cifs/export.c
index d614b91caeca..75949d6a5f1b 100644
--- a/fs/cifs/export.c
+++ b/fs/cifs/export.c
@@ -53,7 +53,7 @@ static struct dentry *cifs_get_parent(struct dentry *dentry)
53 return ERR_PTR(-EACCES); 53 return ERR_PTR(-EACCES);
54} 54}
55 55
56struct export_operations cifs_export_ops = { 56const struct export_operations cifs_export_ops = {
57 .get_parent = cifs_get_parent, 57 .get_parent = cifs_get_parent,
58/* Following five export operations are unneeded so far and can default: 58/* Following five export operations are unneeded so far and can default:
59 .get_dentry = 59 .get_dentry =
diff --git a/fs/dcache.c b/fs/dcache.c
index 2bb3f7ac683b..d9ca1e5ceb92 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -1479,6 +1479,8 @@ static void switch_names(struct dentry *dentry, struct dentry *target)
1479 * dentry:internal, target:external. Steal target's 1479 * dentry:internal, target:external. Steal target's
1480 * storage and make target internal. 1480 * storage and make target internal.
1481 */ 1481 */
1482 memcpy(target->d_iname, dentry->d_name.name,
1483 dentry->d_name.len + 1);
1482 dentry->d_name.name = target->d_name.name; 1484 dentry->d_name.name = target->d_name.name;
1483 target->d_name.name = target->d_iname; 1485 target->d_name.name = target->d_iname;
1484 } 1486 }
diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c
index 1ae90ef2c74d..0a9882edf562 100644
--- a/fs/ecryptfs/crypto.c
+++ b/fs/ecryptfs/crypto.c
@@ -283,7 +283,7 @@ int virt_to_scatterlist(const void *addr, int size, struct scatterlist *sg,
283 pg = virt_to_page(addr); 283 pg = virt_to_page(addr);
284 offset = offset_in_page(addr); 284 offset = offset_in_page(addr);
285 if (sg) { 285 if (sg) {
286 sg[i].page = pg; 286 sg_set_page(&sg[i], pg);
287 sg[i].offset = offset; 287 sg[i].offset = offset;
288 } 288 }
289 remainder_of_page = PAGE_CACHE_SIZE - offset; 289 remainder_of_page = PAGE_CACHE_SIZE - offset;
@@ -713,10 +713,13 @@ ecryptfs_encrypt_page_offset(struct ecryptfs_crypt_stat *crypt_stat,
713{ 713{
714 struct scatterlist src_sg, dst_sg; 714 struct scatterlist src_sg, dst_sg;
715 715
716 src_sg.page = src_page; 716 sg_init_table(&src_sg, 1);
717 sg_init_table(&dst_sg, 1);
718
719 sg_set_page(&src_sg, src_page);
717 src_sg.offset = src_offset; 720 src_sg.offset = src_offset;
718 src_sg.length = size; 721 src_sg.length = size;
719 dst_sg.page = dst_page; 722 sg_set_page(&dst_sg, dst_page);
720 dst_sg.offset = dst_offset; 723 dst_sg.offset = dst_offset;
721 dst_sg.length = size; 724 dst_sg.length = size;
722 return encrypt_scatterlist(crypt_stat, &dst_sg, &src_sg, size, iv); 725 return encrypt_scatterlist(crypt_stat, &dst_sg, &src_sg, size, iv);
@@ -742,10 +745,13 @@ ecryptfs_decrypt_page_offset(struct ecryptfs_crypt_stat *crypt_stat,
742{ 745{
743 struct scatterlist src_sg, dst_sg; 746 struct scatterlist src_sg, dst_sg;
744 747
745 src_sg.page = src_page; 748 sg_init_table(&src_sg, 1);
749 sg_init_table(&dst_sg, 1);
750
751 sg_set_page(&src_sg, src_page);
746 src_sg.offset = src_offset; 752 src_sg.offset = src_offset;
747 src_sg.length = size; 753 src_sg.length = size;
748 dst_sg.page = dst_page; 754 sg_set_page(&dst_sg, dst_page);
749 dst_sg.offset = dst_offset; 755 dst_sg.offset = dst_offset;
750 dst_sg.length = size; 756 dst_sg.length = size;
751 return decrypt_scatterlist(crypt_stat, &dst_sg, &src_sg, size, iv); 757 return decrypt_scatterlist(crypt_stat, &dst_sg, &src_sg, size, iv);
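
[Illustrative sketch, not part of the patch] The ecryptfs hunks above add sg_init_table() before the on-stack scatterlist entries are filled in, instead of assigning sg->page on an uninitialised struct. A minimal sketch of that pattern, with a hypothetical helper name and the two-argument sg_set_page() form used in this series:

	#include <linux/scatterlist.h>

	static void sketch_fill_stack_sg(struct scatterlist *sg, struct page *page,
					 unsigned int offset, unsigned int size)
	{
		sg_init_table(sg, 1);		/* initialise the single entry before use */
		sg_set_page(sg, page);		/* was: sg->page = page; */
		sg->offset = offset;
		sg->length = size;
	}
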
diff --git a/fs/ecryptfs/keystore.c b/fs/ecryptfs/keystore.c
index 89d9710dd63d..263fed88c0ca 100644
--- a/fs/ecryptfs/keystore.c
+++ b/fs/ecryptfs/keystore.c
@@ -1040,6 +1040,9 @@ decrypt_passphrase_encrypted_session_key(struct ecryptfs_auth_tok *auth_tok,
1040 }; 1040 };
1041 int rc = 0; 1041 int rc = 0;
1042 1042
1043 sg_init_table(&dst_sg, 1);
1044 sg_init_table(&src_sg, 1);
1045
1043 if (unlikely(ecryptfs_verbosity > 0)) { 1046 if (unlikely(ecryptfs_verbosity > 0)) {
1044 ecryptfs_printk( 1047 ecryptfs_printk(
1045 KERN_DEBUG, "Session key encryption key (size [%d]):\n", 1048 KERN_DEBUG, "Session key encryption key (size [%d]):\n",
diff --git a/fs/efs/namei.c b/fs/efs/namei.c
index 5276b19423c1..f7f407075be1 100644
--- a/fs/efs/namei.c
+++ b/fs/efs/namei.c
@@ -10,6 +10,8 @@
10#include <linux/string.h> 10#include <linux/string.h>
11#include <linux/efs_fs.h> 11#include <linux/efs_fs.h>
12#include <linux/smp_lock.h> 12#include <linux/smp_lock.h>
13#include <linux/exportfs.h>
14
13 15
14static efs_ino_t efs_find_entry(struct inode *inode, const char *name, int len) { 16static efs_ino_t efs_find_entry(struct inode *inode, const char *name, int len) {
15 struct buffer_head *bh; 17 struct buffer_head *bh;
@@ -75,13 +77,10 @@ struct dentry *efs_lookup(struct inode *dir, struct dentry *dentry, struct namei
75 return NULL; 77 return NULL;
76} 78}
77 79
78struct dentry *efs_get_dentry(struct super_block *sb, void *vobjp) 80static struct inode *efs_nfs_get_inode(struct super_block *sb, u64 ino,
81 u32 generation)
79{ 82{
80 __u32 *objp = vobjp;
81 unsigned long ino = objp[0];
82 __u32 generation = objp[1];
83 struct inode *inode; 83 struct inode *inode;
84 struct dentry *result;
85 84
86 if (ino == 0) 85 if (ino == 0)
87 return ERR_PTR(-ESTALE); 86 return ERR_PTR(-ESTALE);
@@ -91,20 +90,25 @@ struct dentry *efs_get_dentry(struct super_block *sb, void *vobjp)
91 90
92 if (is_bad_inode(inode) || 91 if (is_bad_inode(inode) ||
93 (generation && inode->i_generation != generation)) { 92 (generation && inode->i_generation != generation)) {
94 result = ERR_PTR(-ESTALE); 93 iput(inode);
95 goto out_iput; 94 return ERR_PTR(-ESTALE);
96 } 95 }
97 96
98 result = d_alloc_anon(inode); 97 return inode;
99 if (!result) { 98}
100 result = ERR_PTR(-ENOMEM);
101 goto out_iput;
102 }
103 return result;
104 99
105 out_iput: 100struct dentry *efs_fh_to_dentry(struct super_block *sb, struct fid *fid,
106 iput(inode); 101 int fh_len, int fh_type)
107 return result; 102{
103 return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
104 efs_nfs_get_inode);
105}
106
107struct dentry *efs_fh_to_parent(struct super_block *sb, struct fid *fid,
108 int fh_len, int fh_type)
109{
110 return generic_fh_to_parent(sb, fid, fh_len, fh_type,
111 efs_nfs_get_inode);
108} 112}
109 113
110struct dentry *efs_get_parent(struct dentry *child) 114struct dentry *efs_get_parent(struct dentry *child)
diff --git a/fs/efs/super.c b/fs/efs/super.c
index 25d0326c5f1c..c79bc627f107 100644
--- a/fs/efs/super.c
+++ b/fs/efs/super.c
@@ -113,8 +113,9 @@ static const struct super_operations efs_superblock_operations = {
113 .remount_fs = efs_remount, 113 .remount_fs = efs_remount,
114}; 114};
115 115
116static struct export_operations efs_export_ops = { 116static const struct export_operations efs_export_ops = {
117 .get_dentry = efs_get_dentry, 117 .fh_to_dentry = efs_fh_to_dentry,
118 .fh_to_parent = efs_fh_to_parent,
118 .get_parent = efs_get_parent, 119 .get_parent = efs_get_parent,
119}; 120};
120 121
diff --git a/fs/exportfs/expfs.c b/fs/exportfs/expfs.c
index 8adb32a9387a..109ab5e44eca 100644
--- a/fs/exportfs/expfs.c
+++ b/fs/exportfs/expfs.c
@@ -1,4 +1,13 @@
1 1/*
2 * Copyright (C) Neil Brown 2002
3 * Copyright (C) Christoph Hellwig 2007
4 *
5 * This file contains the code mapping from inodes to NFS file handles,
6 * and for mapping back from file handles to dentries.
7 *
8 * For details on why we do all the strange and hairy things in here
9 * take a look at Documentation/filesystems/Exporting.
10 */
2#include <linux/exportfs.h> 11#include <linux/exportfs.h>
3#include <linux/fs.h> 12#include <linux/fs.h>
4#include <linux/file.h> 13#include <linux/file.h>
@@ -9,32 +18,19 @@
9#define dprintk(fmt, args...) do{}while(0) 18#define dprintk(fmt, args...) do{}while(0)
10 19
11 20
12static int get_name(struct dentry *dentry, char *name, 21static int get_name(struct vfsmount *mnt, struct dentry *dentry, char *name,
13 struct dentry *child); 22 struct dentry *child);
14 23
15 24
16static struct dentry *exportfs_get_dentry(struct super_block *sb, void *obj) 25static int exportfs_get_name(struct vfsmount *mnt, struct dentry *dir,
26 char *name, struct dentry *child)
17{ 27{
18 struct dentry *result = ERR_PTR(-ESTALE); 28 const struct export_operations *nop = dir->d_sb->s_export_op;
19
20 if (sb->s_export_op->get_dentry) {
21 result = sb->s_export_op->get_dentry(sb, obj);
22 if (!result)
23 result = ERR_PTR(-ESTALE);
24 }
25
26 return result;
27}
28
29static int exportfs_get_name(struct dentry *dir, char *name,
30 struct dentry *child)
31{
32 struct export_operations *nop = dir->d_sb->s_export_op;
33 29
34 if (nop->get_name) 30 if (nop->get_name)
35 return nop->get_name(dir, name, child); 31 return nop->get_name(dir, name, child);
36 else 32 else
37 return get_name(dir, name, child); 33 return get_name(mnt, dir, name, child);
38} 34}
39 35
40/* 36/*
@@ -98,7 +94,7 @@ find_disconnected_root(struct dentry *dentry)
98 * It may already be, as the flag isn't always updated when connection happens. 94 * It may already be, as the flag isn't always updated when connection happens.
99 */ 95 */
100static int 96static int
101reconnect_path(struct super_block *sb, struct dentry *target_dir) 97reconnect_path(struct vfsmount *mnt, struct dentry *target_dir)
102{ 98{
103 char nbuf[NAME_MAX+1]; 99 char nbuf[NAME_MAX+1];
104 int noprogress = 0; 100 int noprogress = 0;
@@ -121,7 +117,7 @@ reconnect_path(struct super_block *sb, struct dentry *target_dir)
121 pd->d_flags &= ~DCACHE_DISCONNECTED; 117 pd->d_flags &= ~DCACHE_DISCONNECTED;
122 spin_unlock(&pd->d_lock); 118 spin_unlock(&pd->d_lock);
123 noprogress = 0; 119 noprogress = 0;
124 } else if (pd == sb->s_root) { 120 } else if (pd == mnt->mnt_sb->s_root) {
125 printk(KERN_ERR "export: Eeek filesystem root is not connected, impossible\n"); 121 printk(KERN_ERR "export: Eeek filesystem root is not connected, impossible\n");
126 spin_lock(&pd->d_lock); 122 spin_lock(&pd->d_lock);
127 pd->d_flags &= ~DCACHE_DISCONNECTED; 123 pd->d_flags &= ~DCACHE_DISCONNECTED;
@@ -147,8 +143,8 @@ reconnect_path(struct super_block *sb, struct dentry *target_dir)
147 struct dentry *npd; 143 struct dentry *npd;
148 144
149 mutex_lock(&pd->d_inode->i_mutex); 145 mutex_lock(&pd->d_inode->i_mutex);
150 if (sb->s_export_op->get_parent) 146 if (mnt->mnt_sb->s_export_op->get_parent)
151 ppd = sb->s_export_op->get_parent(pd); 147 ppd = mnt->mnt_sb->s_export_op->get_parent(pd);
152 mutex_unlock(&pd->d_inode->i_mutex); 148 mutex_unlock(&pd->d_inode->i_mutex);
153 149
154 if (IS_ERR(ppd)) { 150 if (IS_ERR(ppd)) {
@@ -161,7 +157,7 @@ reconnect_path(struct super_block *sb, struct dentry *target_dir)
161 157
162 dprintk("%s: find name of %lu in %lu\n", __FUNCTION__, 158 dprintk("%s: find name of %lu in %lu\n", __FUNCTION__,
163 pd->d_inode->i_ino, ppd->d_inode->i_ino); 159 pd->d_inode->i_ino, ppd->d_inode->i_ino);
164 err = exportfs_get_name(ppd, nbuf, pd); 160 err = exportfs_get_name(mnt, ppd, nbuf, pd);
165 if (err) { 161 if (err) {
166 dput(ppd); 162 dput(ppd);
167 dput(pd); 163 dput(pd);
@@ -214,125 +210,6 @@ reconnect_path(struct super_block *sb, struct dentry *target_dir)
214 return 0; 210 return 0;
215} 211}
216 212
217/**
218 * find_exported_dentry - helper routine to implement export_operations->decode_fh
219 * @sb: The &super_block identifying the filesystem
220 * @obj: An opaque identifier of the object to be found - passed to
221 * get_inode
222 * @parent: An optional opqaue identifier of the parent of the object.
223 * @acceptable: A function used to test possible &dentries to see if they are
224 * acceptable
225 * @context: A parameter to @acceptable so that it knows on what basis to
226 * judge.
227 *
228 * find_exported_dentry is the central helper routine to enable file systems
229 * to provide the decode_fh() export_operation. It's main task is to take
230 * an &inode, find or create an appropriate &dentry structure, and possibly
231 * splice this into the dcache in the correct place.
232 *
233 * The decode_fh() operation provided by the filesystem should call
234 * find_exported_dentry() with the same parameters that it received except
235 * that instead of the file handle fragment, pointers to opaque identifiers
236 * for the object and optionally its parent are passed. The default decode_fh
237 * routine passes one pointer to the start of the filehandle fragment, and
238 * one 8 bytes into the fragment. It is expected that most filesystems will
239 * take this approach, though the offset to the parent identifier may well be
240 * different.
241 *
242 * find_exported_dentry() will call get_dentry to get an dentry pointer from
243 * the file system. If any &dentry in the d_alias list is acceptable, it will
244 * be returned. Otherwise find_exported_dentry() will attempt to splice a new
245 * &dentry into the dcache using get_name() and get_parent() to find the
246 * appropriate place.
247 */
248
249struct dentry *
250find_exported_dentry(struct super_block *sb, void *obj, void *parent,
251 int (*acceptable)(void *context, struct dentry *de),
252 void *context)
253{
254 struct dentry *result, *alias;
255 int err = -ESTALE;
256
257 /*
258 * Attempt to find the inode.
259 */
260 result = exportfs_get_dentry(sb, obj);
261 if (IS_ERR(result))
262 return result;
263
264 if (S_ISDIR(result->d_inode->i_mode)) {
265 if (!(result->d_flags & DCACHE_DISCONNECTED)) {
266 if (acceptable(context, result))
267 return result;
268 err = -EACCES;
269 goto err_result;
270 }
271
272 err = reconnect_path(sb, result);
273 if (err)
274 goto err_result;
275 } else {
276 struct dentry *target_dir, *nresult;
277 char nbuf[NAME_MAX+1];
278
279 alias = find_acceptable_alias(result, acceptable, context);
280 if (alias)
281 return alias;
282
283 if (parent == NULL)
284 goto err_result;
285
286 target_dir = exportfs_get_dentry(sb,parent);
287 if (IS_ERR(target_dir)) {
288 err = PTR_ERR(target_dir);
289 goto err_result;
290 }
291
292 err = reconnect_path(sb, target_dir);
293 if (err) {
294 dput(target_dir);
295 goto err_result;
296 }
297
298 /*
299 * As we weren't after a directory, have one more step to go.
300 */
301 err = exportfs_get_name(target_dir, nbuf, result);
302 if (!err) {
303 mutex_lock(&target_dir->d_inode->i_mutex);
304 nresult = lookup_one_len(nbuf, target_dir,
305 strlen(nbuf));
306 mutex_unlock(&target_dir->d_inode->i_mutex);
307 if (!IS_ERR(nresult)) {
308 if (nresult->d_inode) {
309 dput(result);
310 result = nresult;
311 } else
312 dput(nresult);
313 }
314 }
315 dput(target_dir);
316 }
317
318 alias = find_acceptable_alias(result, acceptable, context);
319 if (alias)
320 return alias;
321
322 /* drat - I just cannot find anything acceptable */
323 dput(result);
324 /* It might be justifiable to return ESTALE here,
325 * but the filehandle at least looks reasonably good
326 * and it may just be a permission problem, so returning
327 * -EACCES is safer
328 */
329 return ERR_PTR(-EACCES);
330
331 err_result:
332 dput(result);
333 return ERR_PTR(err);
334}
335
336struct getdents_callback { 213struct getdents_callback {
337 char *name; /* name that was found. It already points to a 214 char *name; /* name that was found. It already points to a
338 buffer NAME_MAX+1 is size */ 215 buffer NAME_MAX+1 is size */
@@ -370,8 +247,8 @@ static int filldir_one(void * __buf, const char * name, int len,
370 * calls readdir on the parent until it finds an entry with 247 * calls readdir on the parent until it finds an entry with
371 * the same inode number as the child, and returns that. 248 * the same inode number as the child, and returns that.
372 */ 249 */
373static int get_name(struct dentry *dentry, char *name, 250static int get_name(struct vfsmount *mnt, struct dentry *dentry,
374 struct dentry *child) 251 char *name, struct dentry *child)
375{ 252{
376 struct inode *dir = dentry->d_inode; 253 struct inode *dir = dentry->d_inode;
377 int error; 254 int error;
@@ -387,7 +264,7 @@ static int get_name(struct dentry *dentry, char *name,
387 /* 264 /*
388 * Open the directory ... 265 * Open the directory ...
389 */ 266 */
390 file = dentry_open(dget(dentry), NULL, O_RDONLY); 267 file = dentry_open(dget(dentry), mntget(mnt), O_RDONLY);
391 error = PTR_ERR(file); 268 error = PTR_ERR(file);
392 if (IS_ERR(file)) 269 if (IS_ERR(file))
393 goto out; 270 goto out;
@@ -434,100 +311,177 @@ out:
434 * can be used to check that it is still valid. It places them in the 311 * can be used to check that it is still valid. It places them in the
435 * filehandle fragment where export_decode_fh expects to find them. 312 * filehandle fragment where export_decode_fh expects to find them.
436 */ 313 */
437static int export_encode_fh(struct dentry *dentry, __u32 *fh, int *max_len, 314static int export_encode_fh(struct dentry *dentry, struct fid *fid,
438 int connectable) 315 int *max_len, int connectable)
439{ 316{
440 struct inode * inode = dentry->d_inode; 317 struct inode * inode = dentry->d_inode;
441 int len = *max_len; 318 int len = *max_len;
442 int type = 1; 319 int type = FILEID_INO32_GEN;
443 320
444 if (len < 2 || (connectable && len < 4)) 321 if (len < 2 || (connectable && len < 4))
445 return 255; 322 return 255;
446 323
447 len = 2; 324 len = 2;
448 fh[0] = inode->i_ino; 325 fid->i32.ino = inode->i_ino;
449 fh[1] = inode->i_generation; 326 fid->i32.gen = inode->i_generation;
450 if (connectable && !S_ISDIR(inode->i_mode)) { 327 if (connectable && !S_ISDIR(inode->i_mode)) {
451 struct inode *parent; 328 struct inode *parent;
452 329
453 spin_lock(&dentry->d_lock); 330 spin_lock(&dentry->d_lock);
454 parent = dentry->d_parent->d_inode; 331 parent = dentry->d_parent->d_inode;
455 fh[2] = parent->i_ino; 332 fid->i32.parent_ino = parent->i_ino;
456 fh[3] = parent->i_generation; 333 fid->i32.parent_gen = parent->i_generation;
457 spin_unlock(&dentry->d_lock); 334 spin_unlock(&dentry->d_lock);
458 len = 4; 335 len = 4;
459 type = 2; 336 type = FILEID_INO32_GEN_PARENT;
460 } 337 }
461 *max_len = len; 338 *max_len = len;
462 return type; 339 return type;
463} 340}
464 341
465 342int exportfs_encode_fh(struct dentry *dentry, struct fid *fid, int *max_len,
466/**
467 * export_decode_fh - default export_operations->decode_fh function
468 * @sb: The superblock
469 * @fh: pointer to the file handle fragment
470 * @fh_len: length of file handle fragment
471 * @acceptable: function for testing acceptability of dentrys
472 * @context: context for @acceptable
473 *
474 * This is the default decode_fh() function.
475 * a fileid_type of 1 indicates that the filehandle fragment
476 * just contains an object identifier understood by get_dentry.
477 * a fileid_type of 2 says that there is also a directory
478 * identifier 8 bytes into the filehandle fragment.
479 */
480static struct dentry *export_decode_fh(struct super_block *sb, __u32 *fh, int fh_len,
481 int fileid_type,
482 int (*acceptable)(void *context, struct dentry *de),
483 void *context)
484{
485 __u32 parent[2];
486 parent[0] = parent[1] = 0;
487 if (fh_len < 2 || fileid_type > 2)
488 return NULL;
489 if (fileid_type == 2) {
490 if (fh_len > 2) parent[0] = fh[2];
491 if (fh_len > 3) parent[1] = fh[3];
492 }
493 return find_exported_dentry(sb, fh, parent,
494 acceptable, context);
495}
496
497int exportfs_encode_fh(struct dentry *dentry, __u32 *fh, int *max_len,
498 int connectable) 343 int connectable)
499{ 344{
500 struct export_operations *nop = dentry->d_sb->s_export_op; 345 const struct export_operations *nop = dentry->d_sb->s_export_op;
501 int error; 346 int error;
502 347
503 if (nop->encode_fh) 348 if (nop->encode_fh)
504 error = nop->encode_fh(dentry, fh, max_len, connectable); 349 error = nop->encode_fh(dentry, fid->raw, max_len, connectable);
505 else 350 else
506 error = export_encode_fh(dentry, fh, max_len, connectable); 351 error = export_encode_fh(dentry, fid, max_len, connectable);
507 352
508 return error; 353 return error;
509} 354}
510EXPORT_SYMBOL_GPL(exportfs_encode_fh); 355EXPORT_SYMBOL_GPL(exportfs_encode_fh);
511 356
512struct dentry *exportfs_decode_fh(struct vfsmount *mnt, __u32 *fh, int fh_len, 357struct dentry *exportfs_decode_fh(struct vfsmount *mnt, struct fid *fid,
513 int fileid_type, int (*acceptable)(void *, struct dentry *), 358 int fh_len, int fileid_type,
514 void *context) 359 int (*acceptable)(void *, struct dentry *), void *context)
515{ 360{
516 struct export_operations *nop = mnt->mnt_sb->s_export_op; 361 const struct export_operations *nop = mnt->mnt_sb->s_export_op;
517 struct dentry *result; 362 struct dentry *result, *alias;
363 int err;
518 364
519 if (nop->decode_fh) { 365 /*
520 result = nop->decode_fh(mnt->mnt_sb, fh, fh_len, fileid_type, 366 * Try to get any dentry for the given file handle from the filesystem.
521 acceptable, context); 367 */
368 result = nop->fh_to_dentry(mnt->mnt_sb, fid, fh_len, fileid_type);
369 if (!result)
370 result = ERR_PTR(-ESTALE);
371 if (IS_ERR(result))
372 return result;
373
374 if (S_ISDIR(result->d_inode->i_mode)) {
375 /*
376 * This request is for a directory.
377 *
378 * On the positive side there is only one dentry for each
379 * directory inode. On the negative side this implies that we
380 * to ensure our dentry is connected all the way up to the
381 * filesystem root.
382 */
383 if (result->d_flags & DCACHE_DISCONNECTED) {
384 err = reconnect_path(mnt, result);
385 if (err)
386 goto err_result;
387 }
388
389 if (!acceptable(context, result)) {
390 err = -EACCES;
391 goto err_result;
392 }
393
394 return result;
522 } else { 395 } else {
523 result = export_decode_fh(mnt->mnt_sb, fh, fh_len, fileid_type, 396 /*
524 acceptable, context); 397 * It's not a directory. Life is a little more complicated.
398 */
399 struct dentry *target_dir, *nresult;
400 char nbuf[NAME_MAX+1];
401
402 /*
403 * See if either the dentry we just got from the filesystem
404 * or any alias for it is acceptable. This is always true
405 * if this filesystem is exported without the subtreecheck
406 * option. If the filesystem is exported with the subtree
407 * check option there's a fair chance we need to look at
408 * the parent directory in the file handle and make sure
409 * it's connected to the filesystem root.
410 */
411 alias = find_acceptable_alias(result, acceptable, context);
412 if (alias)
413 return alias;
414
415 /*
416 * Try to extract a dentry for the parent directory from the
417 * file handle. If this fails we'll have to give up.
418 */
419 err = -ESTALE;
420 if (!nop->fh_to_parent)
421 goto err_result;
422
423 target_dir = nop->fh_to_parent(mnt->mnt_sb, fid,
424 fh_len, fileid_type);
425 if (!target_dir)
426 goto err_result;
427 err = PTR_ERR(target_dir);
428 if (IS_ERR(target_dir))
429 goto err_result;
430
431 /*
432 * And as usual we need to make sure the parent directory is
433 * connected to the filesystem root. The VFS really doesn't
434 * like disconnected directories..
435 */
436 err = reconnect_path(mnt, target_dir);
437 if (err) {
438 dput(target_dir);
439 goto err_result;
440 }
441
442 /*
443 * Now that we've got both a well-connected parent and a
444 * dentry for the inode we're after, make sure that our
445 * inode is actually connected to the parent.
446 */
447 err = exportfs_get_name(mnt, target_dir, nbuf, result);
448 if (!err) {
449 mutex_lock(&target_dir->d_inode->i_mutex);
450 nresult = lookup_one_len(nbuf, target_dir,
451 strlen(nbuf));
452 mutex_unlock(&target_dir->d_inode->i_mutex);
453 if (!IS_ERR(nresult)) {
454 if (nresult->d_inode) {
455 dput(result);
456 result = nresult;
457 } else
458 dput(nresult);
459 }
460 }
461
462 /*
463 * At this point we are done with the parent, but it's pinned
464 * by the child dentry anyway.
465 */
466 dput(target_dir);
467
468 /*
469 * And finally make sure the dentry is actually acceptable
470 * to NFSD.
471 */
472 alias = find_acceptable_alias(result, acceptable, context);
473 if (!alias) {
474 err = -EACCES;
475 goto err_result;
476 }
477
478 return alias;
525 } 479 }
526 480
527 return result; 481 err_result:
482 dput(result);
483 return ERR_PTR(err);
528} 484}
529EXPORT_SYMBOL_GPL(exportfs_decode_fh); 485EXPORT_SYMBOL_GPL(exportfs_decode_fh);
530 486
531EXPORT_SYMBOL(find_exported_dentry);
532
533MODULE_LICENSE("GPL"); 487MODULE_LICENSE("GPL");
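
The expfs.c hunks above retire decode_fh()/get_dentry()/find_exported_dentry() in favour of the typed struct fid plus the fh_to_dentry() and fh_to_parent() methods; exportfs_decode_fh() now performs the lookup, the reconnect_path() walk and the acceptability check itself. Below is a minimal sketch of how an in-kernel consumer such as nfsd would round-trip a handle through the two surviving entry points; the buffer size, the accept-everything policy and the error code chosen are illustrative assumptions, not part of this patch.

#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/exportfs.h>

/* Assumption: any dentry the filesystem hands back is acceptable. */
static int any_dentry(void *context, struct dentry *dentry)
{
        return 1;
}

static struct dentry *roundtrip_handle(struct vfsmount *mnt,
                                       struct dentry *dentry)
{
        struct fid fid;
        int len = 6;    /* assumed size of fid.raw in 32-bit words */
        int type;

        /*
         * The default export_encode_fh() returns FILEID_INO32_GEN or
         * FILEID_INO32_GEN_PARENT, or 255 if the buffer is too small.
         */
        type = exportfs_encode_fh(dentry, &fid, &len, 0);
        if (type == 255)
                return ERR_PTR(-ENOSPC);

        /* Looks the object up again and rechecks it via any_dentry(). */
        return exportfs_decode_fh(mnt, &fid, len, type, any_dentry, NULL);
}
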
diff --git a/fs/ext2/dir.c b/fs/ext2/dir.c
index 05d9342bb64e..d868e26c15eb 100644
--- a/fs/ext2/dir.c
+++ b/fs/ext2/dir.c
@@ -28,6 +28,24 @@
28 28
29typedef struct ext2_dir_entry_2 ext2_dirent; 29typedef struct ext2_dir_entry_2 ext2_dirent;
30 30
31static inline unsigned ext2_rec_len_from_disk(__le16 dlen)
32{
33 unsigned len = le16_to_cpu(dlen);
34
35 if (len == EXT2_MAX_REC_LEN)
36 return 1 << 16;
37 return len;
38}
39
40static inline __le16 ext2_rec_len_to_disk(unsigned len)
41{
42 if (len == (1 << 16))
43 return cpu_to_le16(EXT2_MAX_REC_LEN);
44 else if (len > (1 << 16))
45 BUG();
46 return cpu_to_le16(len);
47}
48
31/* 49/*
32 * ext2 uses block-sized chunks. Arguably, sector-sized ones would be 50 * ext2 uses block-sized chunks. Arguably, sector-sized ones would be
33 * more robust, but we have what we have 51 * more robust, but we have what we have
@@ -106,7 +124,7 @@ static void ext2_check_page(struct page *page)
106 } 124 }
107 for (offs = 0; offs <= limit - EXT2_DIR_REC_LEN(1); offs += rec_len) { 125 for (offs = 0; offs <= limit - EXT2_DIR_REC_LEN(1); offs += rec_len) {
108 p = (ext2_dirent *)(kaddr + offs); 126 p = (ext2_dirent *)(kaddr + offs);
109 rec_len = le16_to_cpu(p->rec_len); 127 rec_len = ext2_rec_len_from_disk(p->rec_len);
110 128
111 if (rec_len < EXT2_DIR_REC_LEN(1)) 129 if (rec_len < EXT2_DIR_REC_LEN(1))
112 goto Eshort; 130 goto Eshort;
@@ -204,7 +222,8 @@ static inline int ext2_match (int len, const char * const name,
204 */ 222 */
205static inline ext2_dirent *ext2_next_entry(ext2_dirent *p) 223static inline ext2_dirent *ext2_next_entry(ext2_dirent *p)
206{ 224{
207 return (ext2_dirent *)((char*)p + le16_to_cpu(p->rec_len)); 225 return (ext2_dirent *)((char *)p +
226 ext2_rec_len_from_disk(p->rec_len));
208} 227}
209 228
210static inline unsigned 229static inline unsigned
@@ -316,7 +335,7 @@ ext2_readdir (struct file * filp, void * dirent, filldir_t filldir)
316 return 0; 335 return 0;
317 } 336 }
318 } 337 }
319 filp->f_pos += le16_to_cpu(de->rec_len); 338 filp->f_pos += ext2_rec_len_from_disk(de->rec_len);
320 } 339 }
321 ext2_put_page(page); 340 ext2_put_page(page);
322 } 341 }
@@ -425,7 +444,7 @@ void ext2_set_link(struct inode *dir, struct ext2_dir_entry_2 *de,
425{ 444{
426 loff_t pos = page_offset(page) + 445 loff_t pos = page_offset(page) +
427 (char *) de - (char *) page_address(page); 446 (char *) de - (char *) page_address(page);
428 unsigned len = le16_to_cpu(de->rec_len); 447 unsigned len = ext2_rec_len_from_disk(de->rec_len);
429 int err; 448 int err;
430 449
431 lock_page(page); 450 lock_page(page);
@@ -482,7 +501,7 @@ int ext2_add_link (struct dentry *dentry, struct inode *inode)
482 /* We hit i_size */ 501 /* We hit i_size */
483 name_len = 0; 502 name_len = 0;
484 rec_len = chunk_size; 503 rec_len = chunk_size;
485 de->rec_len = cpu_to_le16(chunk_size); 504 de->rec_len = ext2_rec_len_to_disk(chunk_size);
486 de->inode = 0; 505 de->inode = 0;
487 goto got_it; 506 goto got_it;
488 } 507 }
@@ -496,7 +515,7 @@ int ext2_add_link (struct dentry *dentry, struct inode *inode)
496 if (ext2_match (namelen, name, de)) 515 if (ext2_match (namelen, name, de))
497 goto out_unlock; 516 goto out_unlock;
498 name_len = EXT2_DIR_REC_LEN(de->name_len); 517 name_len = EXT2_DIR_REC_LEN(de->name_len);
499 rec_len = le16_to_cpu(de->rec_len); 518 rec_len = ext2_rec_len_from_disk(de->rec_len);
500 if (!de->inode && rec_len >= reclen) 519 if (!de->inode && rec_len >= reclen)
501 goto got_it; 520 goto got_it;
502 if (rec_len >= name_len + reclen) 521 if (rec_len >= name_len + reclen)
@@ -518,8 +537,8 @@ got_it:
518 goto out_unlock; 537 goto out_unlock;
519 if (de->inode) { 538 if (de->inode) {
520 ext2_dirent *de1 = (ext2_dirent *) ((char *) de + name_len); 539 ext2_dirent *de1 = (ext2_dirent *) ((char *) de + name_len);
521 de1->rec_len = cpu_to_le16(rec_len - name_len); 540 de1->rec_len = ext2_rec_len_to_disk(rec_len - name_len);
522 de->rec_len = cpu_to_le16(name_len); 541 de->rec_len = ext2_rec_len_to_disk(name_len);
523 de = de1; 542 de = de1;
524 } 543 }
525 de->name_len = namelen; 544 de->name_len = namelen;
@@ -550,7 +569,8 @@ int ext2_delete_entry (struct ext2_dir_entry_2 * dir, struct page * page )
550 struct inode *inode = mapping->host; 569 struct inode *inode = mapping->host;
551 char *kaddr = page_address(page); 570 char *kaddr = page_address(page);
552 unsigned from = ((char*)dir - kaddr) & ~(ext2_chunk_size(inode)-1); 571 unsigned from = ((char*)dir - kaddr) & ~(ext2_chunk_size(inode)-1);
553 unsigned to = ((char*)dir - kaddr) + le16_to_cpu(dir->rec_len); 572 unsigned to = ((char *)dir - kaddr) +
573 ext2_rec_len_from_disk(dir->rec_len);
554 loff_t pos; 574 loff_t pos;
555 ext2_dirent * pde = NULL; 575 ext2_dirent * pde = NULL;
556 ext2_dirent * de = (ext2_dirent *) (kaddr + from); 576 ext2_dirent * de = (ext2_dirent *) (kaddr + from);
@@ -574,7 +594,7 @@ int ext2_delete_entry (struct ext2_dir_entry_2 * dir, struct page * page )
574 &page, NULL); 594 &page, NULL);
575 BUG_ON(err); 595 BUG_ON(err);
576 if (pde) 596 if (pde)
577 pde->rec_len = cpu_to_le16(to - from); 597 pde->rec_len = ext2_rec_len_to_disk(to - from);
578 dir->inode = 0; 598 dir->inode = 0;
579 err = ext2_commit_chunk(page, pos, to - from); 599 err = ext2_commit_chunk(page, pos, to - from);
580 inode->i_ctime = inode->i_mtime = CURRENT_TIME_SEC; 600 inode->i_ctime = inode->i_mtime = CURRENT_TIME_SEC;
@@ -610,14 +630,14 @@ int ext2_make_empty(struct inode *inode, struct inode *parent)
610 memset(kaddr, 0, chunk_size); 630 memset(kaddr, 0, chunk_size);
611 de = (struct ext2_dir_entry_2 *)kaddr; 631 de = (struct ext2_dir_entry_2 *)kaddr;
612 de->name_len = 1; 632 de->name_len = 1;
613 de->rec_len = cpu_to_le16(EXT2_DIR_REC_LEN(1)); 633 de->rec_len = ext2_rec_len_to_disk(EXT2_DIR_REC_LEN(1));
614 memcpy (de->name, ".\0\0", 4); 634 memcpy (de->name, ".\0\0", 4);
615 de->inode = cpu_to_le32(inode->i_ino); 635 de->inode = cpu_to_le32(inode->i_ino);
616 ext2_set_de_type (de, inode); 636 ext2_set_de_type (de, inode);
617 637
618 de = (struct ext2_dir_entry_2 *)(kaddr + EXT2_DIR_REC_LEN(1)); 638 de = (struct ext2_dir_entry_2 *)(kaddr + EXT2_DIR_REC_LEN(1));
619 de->name_len = 2; 639 de->name_len = 2;
620 de->rec_len = cpu_to_le16(chunk_size - EXT2_DIR_REC_LEN(1)); 640 de->rec_len = ext2_rec_len_to_disk(chunk_size - EXT2_DIR_REC_LEN(1));
621 de->inode = cpu_to_le32(parent->i_ino); 641 de->inode = cpu_to_le32(parent->i_ino);
622 memcpy (de->name, "..\0", 4); 642 memcpy (de->name, "..\0", 4);
623 ext2_set_de_type (de, inode); 643 ext2_set_de_type (de, inode);
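
The new ext2_rec_len_from_disk()/ext2_rec_len_to_disk() helpers above exist so that a single directory entry can span a full 64KiB block: 65536 does not fit into the 16-bit rec_len field, so it is stored on disk as the EXT2_MAX_REC_LEN sentinel instead. The standalone sketch below mirrors only that mapping; it ignores the little-endian conversion the kernel helpers also perform, and the sentinel value used here is an assumption.

#include <assert.h>
#include <stdint.h>

#define EXT2_MAX_REC_LEN ((1 << 16) - 1)        /* assumed sentinel: 0xffff */

static unsigned rec_len_from_disk(uint16_t dlen)
{
        return dlen == EXT2_MAX_REC_LEN ? (1u << 16) : dlen;
}

static uint16_t rec_len_to_disk(unsigned len)
{
        assert(len <= (1u << 16));
        return len == (1u << 16) ? EXT2_MAX_REC_LEN : (uint16_t)len;
}

int main(void)
{
        /* A 64KiB chunk now round-trips through the 16-bit field. */
        assert(rec_len_from_disk(rec_len_to_disk(1u << 16)) == (1u << 16));
        assert(rec_len_from_disk(rec_len_to_disk(4096)) == 4096);
        return 0;
}
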
diff --git a/fs/ext2/super.c b/fs/ext2/super.c
index 77bd5f9262f9..154e25f13d77 100644
--- a/fs/ext2/super.c
+++ b/fs/ext2/super.c
@@ -311,13 +311,10 @@ static const struct super_operations ext2_sops = {
311#endif 311#endif
312}; 312};
313 313
314static struct dentry *ext2_get_dentry(struct super_block *sb, void *vobjp) 314static struct inode *ext2_nfs_get_inode(struct super_block *sb,
315 u64 ino, u32 generation)
315{ 316{
316 __u32 *objp = vobjp;
317 unsigned long ino = objp[0];
318 __u32 generation = objp[1];
319 struct inode *inode; 317 struct inode *inode;
320 struct dentry *result;
321 318
322 if (ino < EXT2_FIRST_INO(sb) && ino != EXT2_ROOT_INO) 319 if (ino < EXT2_FIRST_INO(sb) && ino != EXT2_ROOT_INO)
323 return ERR_PTR(-ESTALE); 320 return ERR_PTR(-ESTALE);
@@ -338,15 +335,21 @@ static struct dentry *ext2_get_dentry(struct super_block *sb, void *vobjp)
338 iput(inode); 335 iput(inode);
339 return ERR_PTR(-ESTALE); 336 return ERR_PTR(-ESTALE);
340 } 337 }
341 /* now to find a dentry. 338 return inode;
342 * If possible, get a well-connected one 339}
343 */ 340
344 result = d_alloc_anon(inode); 341static struct dentry *ext2_fh_to_dentry(struct super_block *sb, struct fid *fid,
345 if (!result) { 342 int fh_len, int fh_type)
346 iput(inode); 343{
347 return ERR_PTR(-ENOMEM); 344 return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
348 } 345 ext2_nfs_get_inode);
349 return result; 346}
347
348static struct dentry *ext2_fh_to_parent(struct super_block *sb, struct fid *fid,
349 int fh_len, int fh_type)
350{
351 return generic_fh_to_parent(sb, fid, fh_len, fh_type,
352 ext2_nfs_get_inode);
350} 353}
351 354
352/* Yes, most of these are left as NULL!! 355/* Yes, most of these are left as NULL!!
@@ -354,9 +357,10 @@ static struct dentry *ext2_get_dentry(struct super_block *sb, void *vobjp)
354 * systems, but can be improved upon. 357 * systems, but can be improved upon.
355 * Currently only get_parent is required. 358 * Currently only get_parent is required.
356 */ 359 */
357static struct export_operations ext2_export_ops = { 360static const struct export_operations ext2_export_ops = {
361 .fh_to_dentry = ext2_fh_to_dentry,
362 .fh_to_parent = ext2_fh_to_parent,
358 .get_parent = ext2_get_parent, 363 .get_parent = ext2_get_parent,
359 .get_dentry = ext2_get_dentry,
360}; 364};
361 365
362static unsigned long get_sb_block(void **data) 366static unsigned long get_sb_block(void **data)
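
ext2's get_dentry() is converted into the pair above by splitting out an ino/generation inode lookup and delegating the file-handle parsing to the generic_fh_to_dentry()/generic_fh_to_parent() helpers added to fs/libfs.c at the end of this patch; ext3, ext4 and jfs below follow the identical pattern. A hedged sketch of that shape for a hypothetical filesystem is given here, with foofs_iget() standing in for whatever inode lookup the filesystem already has.

#include <linux/fs.h>
#include <linux/exportfs.h>

/* Assumed filesystem-specific inode lookup, analogous to ext2's iget(). */
struct inode *foofs_iget(struct super_block *sb, unsigned long ino);

static struct inode *foofs_nfs_get_inode(struct super_block *sb,
                                         u64 ino, u32 generation)
{
        struct inode *inode = foofs_iget(sb, ino);

        if (IS_ERR(inode))
                return inode;
        if (generation && inode->i_generation != generation) {
                iput(inode);
                return ERR_PTR(-ESTALE);
        }
        return inode;
}

static struct dentry *foofs_fh_to_dentry(struct super_block *sb,
                                         struct fid *fid, int fh_len, int fh_type)
{
        return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
                                    foofs_nfs_get_inode);
}

static struct dentry *foofs_fh_to_parent(struct super_block *sb,
                                         struct fid *fid, int fh_len, int fh_type)
{
        return generic_fh_to_parent(sb, fid, fh_len, fh_type,
                                    foofs_nfs_get_inode);
}

static const struct export_operations foofs_export_ops = {
        .fh_to_dentry   = foofs_fh_to_dentry,
        .fh_to_parent   = foofs_fh_to_parent,
};
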
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
index 81868c0bc40e..de55da9e28ba 100644
--- a/fs/ext3/super.c
+++ b/fs/ext3/super.c
@@ -631,13 +631,10 @@ static int ext3_show_options(struct seq_file *seq, struct vfsmount *vfs)
631} 631}
632 632
633 633
634static struct dentry *ext3_get_dentry(struct super_block *sb, void *vobjp) 634static struct inode *ext3_nfs_get_inode(struct super_block *sb,
635 u64 ino, u32 generation)
635{ 636{
636 __u32 *objp = vobjp;
637 unsigned long ino = objp[0];
638 __u32 generation = objp[1];
639 struct inode *inode; 637 struct inode *inode;
640 struct dentry *result;
641 638
642 if (ino < EXT3_FIRST_INO(sb) && ino != EXT3_ROOT_INO) 639 if (ino < EXT3_FIRST_INO(sb) && ino != EXT3_ROOT_INO)
643 return ERR_PTR(-ESTALE); 640 return ERR_PTR(-ESTALE);
@@ -660,15 +657,22 @@ static struct dentry *ext3_get_dentry(struct super_block *sb, void *vobjp)
660 iput(inode); 657 iput(inode);
661 return ERR_PTR(-ESTALE); 658 return ERR_PTR(-ESTALE);
662 } 659 }
663 /* now to find a dentry. 660
664 * If possible, get a well-connected one 661 return inode;
665 */ 662}
666 result = d_alloc_anon(inode); 663
667 if (!result) { 664static struct dentry *ext3_fh_to_dentry(struct super_block *sb, struct fid *fid,
668 iput(inode); 665 int fh_len, int fh_type)
669 return ERR_PTR(-ENOMEM); 666{
670 } 667 return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
671 return result; 668 ext3_nfs_get_inode);
669}
670
671static struct dentry *ext3_fh_to_parent(struct super_block *sb, struct fid *fid,
672 int fh_len, int fh_type)
673{
674 return generic_fh_to_parent(sb, fid, fh_len, fh_type,
675 ext3_nfs_get_inode);
672} 676}
673 677
674#ifdef CONFIG_QUOTA 678#ifdef CONFIG_QUOTA
@@ -737,9 +741,10 @@ static const struct super_operations ext3_sops = {
737#endif 741#endif
738}; 742};
739 743
740static struct export_operations ext3_export_ops = { 744static const struct export_operations ext3_export_ops = {
745 .fh_to_dentry = ext3_fh_to_dentry,
746 .fh_to_parent = ext3_fh_to_parent,
741 .get_parent = ext3_get_parent, 747 .get_parent = ext3_get_parent,
742 .get_dentry = ext3_get_dentry,
743}; 748};
744 749
745enum { 750enum {
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index b11e9e2bcd01..8031dc0e24e5 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -686,13 +686,10 @@ static int ext4_show_options(struct seq_file *seq, struct vfsmount *vfs)
686} 686}
687 687
688 688
689static struct dentry *ext4_get_dentry(struct super_block *sb, void *vobjp) 689static struct inode *ext4_nfs_get_inode(struct super_block *sb,
690 u64 ino, u32 generation)
690{ 691{
691 __u32 *objp = vobjp;
692 unsigned long ino = objp[0];
693 __u32 generation = objp[1];
694 struct inode *inode; 692 struct inode *inode;
695 struct dentry *result;
696 693
697 if (ino < EXT4_FIRST_INO(sb) && ino != EXT4_ROOT_INO) 694 if (ino < EXT4_FIRST_INO(sb) && ino != EXT4_ROOT_INO)
698 return ERR_PTR(-ESTALE); 695 return ERR_PTR(-ESTALE);
@@ -715,15 +712,22 @@ static struct dentry *ext4_get_dentry(struct super_block *sb, void *vobjp)
715 iput(inode); 712 iput(inode);
716 return ERR_PTR(-ESTALE); 713 return ERR_PTR(-ESTALE);
717 } 714 }
718 /* now to find a dentry. 715
719 * If possible, get a well-connected one 716 return inode;
720 */ 717}
721 result = d_alloc_anon(inode); 718
722 if (!result) { 719static struct dentry *ext4_fh_to_dentry(struct super_block *sb, struct fid *fid,
723 iput(inode); 720 int fh_len, int fh_type)
724 return ERR_PTR(-ENOMEM); 721{
725 } 722 return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
726 return result; 723 ext4_nfs_get_inode);
724}
725
726static struct dentry *ext4_fh_to_parent(struct super_block *sb, struct fid *fid,
727 int fh_len, int fh_type)
728{
729 return generic_fh_to_parent(sb, fid, fh_len, fh_type,
730 ext4_nfs_get_inode);
727} 731}
728 732
729#ifdef CONFIG_QUOTA 733#ifdef CONFIG_QUOTA
@@ -792,9 +796,10 @@ static const struct super_operations ext4_sops = {
792#endif 796#endif
793}; 797};
794 798
795static struct export_operations ext4_export_ops = { 799static const struct export_operations ext4_export_ops = {
800 .fh_to_dentry = ext4_fh_to_dentry,
801 .fh_to_parent = ext4_fh_to_parent,
796 .get_parent = ext4_get_parent, 802 .get_parent = ext4_get_parent,
797 .get_dentry = ext4_get_dentry,
798}; 803};
799 804
800enum { 805enum {
diff --git a/fs/fat/inode.c b/fs/fat/inode.c
index c0c5e9c55b58..920a576e1c25 100644
--- a/fs/fat/inode.c
+++ b/fs/fat/inode.c
@@ -653,24 +653,15 @@ static const struct super_operations fat_sops = {
653 * of i_logstart is used to store the directory entry offset. 653 * of i_logstart is used to store the directory entry offset.
654 */ 654 */
655 655
656static struct dentry * 656static struct dentry *fat_fh_to_dentry(struct super_block *sb,
657fat_decode_fh(struct super_block *sb, __u32 *fh, int len, int fhtype, 657 struct fid *fid, int fh_len, int fh_type)
658 int (*acceptable)(void *context, struct dentry *de),
659 void *context)
660{
661 if (fhtype != 3)
662 return ERR_PTR(-ESTALE);
663 if (len < 5)
664 return ERR_PTR(-ESTALE);
665
666 return sb->s_export_op->find_exported_dentry(sb, fh, NULL, acceptable, context);
667}
668
669static struct dentry *fat_get_dentry(struct super_block *sb, void *inump)
670{ 658{
671 struct inode *inode = NULL; 659 struct inode *inode = NULL;
672 struct dentry *result; 660 struct dentry *result;
673 __u32 *fh = inump; 661 u32 *fh = fid->raw;
662
663 if (fh_len < 5 || fh_type != 3)
664 return NULL;
674 665
675 inode = iget(sb, fh[0]); 666 inode = iget(sb, fh[0]);
676 if (!inode || is_bad_inode(inode) || inode->i_generation != fh[1]) { 667 if (!inode || is_bad_inode(inode) || inode->i_generation != fh[1]) {
@@ -783,10 +774,9 @@ out:
783 return parent; 774 return parent;
784} 775}
785 776
786static struct export_operations fat_export_ops = { 777static const struct export_operations fat_export_ops = {
787 .decode_fh = fat_decode_fh,
788 .encode_fh = fat_encode_fh, 778 .encode_fh = fat_encode_fh,
789 .get_dentry = fat_get_dentry, 779 .fh_to_dentry = fat_fh_to_dentry,
790 .get_parent = fat_get_parent, 780 .get_parent = fat_get_parent,
791}; 781};
792 782
diff --git a/fs/gfs2/ops_export.c b/fs/gfs2/ops_export.c
index e2d1347796a9..b9da62348a87 100644
--- a/fs/gfs2/ops_export.c
+++ b/fs/gfs2/ops_export.c
@@ -31,40 +31,6 @@
31#define GFS2_LARGE_FH_SIZE 8 31#define GFS2_LARGE_FH_SIZE 8
32#define GFS2_OLD_FH_SIZE 10 32#define GFS2_OLD_FH_SIZE 10
33 33
34static struct dentry *gfs2_decode_fh(struct super_block *sb,
35 __u32 *p,
36 int fh_len,
37 int fh_type,
38 int (*acceptable)(void *context,
39 struct dentry *dentry),
40 void *context)
41{
42 __be32 *fh = (__force __be32 *)p;
43 struct gfs2_inum_host inum, parent;
44
45 memset(&parent, 0, sizeof(struct gfs2_inum));
46
47 switch (fh_len) {
48 case GFS2_LARGE_FH_SIZE:
49 case GFS2_OLD_FH_SIZE:
50 parent.no_formal_ino = ((u64)be32_to_cpu(fh[4])) << 32;
51 parent.no_formal_ino |= be32_to_cpu(fh[5]);
52 parent.no_addr = ((u64)be32_to_cpu(fh[6])) << 32;
53 parent.no_addr |= be32_to_cpu(fh[7]);
54 case GFS2_SMALL_FH_SIZE:
55 inum.no_formal_ino = ((u64)be32_to_cpu(fh[0])) << 32;
56 inum.no_formal_ino |= be32_to_cpu(fh[1]);
57 inum.no_addr = ((u64)be32_to_cpu(fh[2])) << 32;
58 inum.no_addr |= be32_to_cpu(fh[3]);
59 break;
60 default:
61 return NULL;
62 }
63
64 return gfs2_export_ops.find_exported_dentry(sb, &inum, &parent,
65 acceptable, context);
66}
67
68static int gfs2_encode_fh(struct dentry *dentry, __u32 *p, int *len, 34static int gfs2_encode_fh(struct dentry *dentry, __u32 *p, int *len,
69 int connectable) 35 int connectable)
70{ 36{
@@ -189,10 +155,10 @@ static struct dentry *gfs2_get_parent(struct dentry *child)
189 return dentry; 155 return dentry;
190} 156}
191 157
192static struct dentry *gfs2_get_dentry(struct super_block *sb, void *inum_obj) 158static struct dentry *gfs2_get_dentry(struct super_block *sb,
159 struct gfs2_inum_host *inum)
193{ 160{
194 struct gfs2_sbd *sdp = sb->s_fs_info; 161 struct gfs2_sbd *sdp = sb->s_fs_info;
195 struct gfs2_inum_host *inum = inum_obj;
196 struct gfs2_holder i_gh, ri_gh, rgd_gh; 162 struct gfs2_holder i_gh, ri_gh, rgd_gh;
197 struct gfs2_rgrpd *rgd; 163 struct gfs2_rgrpd *rgd;
198 struct inode *inode; 164 struct inode *inode;
@@ -289,11 +255,50 @@ fail:
289 return ERR_PTR(error); 255 return ERR_PTR(error);
290} 256}
291 257
292struct export_operations gfs2_export_ops = { 258static struct dentry *gfs2_fh_to_dentry(struct super_block *sb, struct fid *fid,
293 .decode_fh = gfs2_decode_fh, 259 int fh_len, int fh_type)
260{
261 struct gfs2_inum_host this;
262 __be32 *fh = (__force __be32 *)fid->raw;
263
264 switch (fh_type) {
265 case GFS2_SMALL_FH_SIZE:
266 case GFS2_LARGE_FH_SIZE:
267 case GFS2_OLD_FH_SIZE:
268 this.no_formal_ino = ((u64)be32_to_cpu(fh[0])) << 32;
269 this.no_formal_ino |= be32_to_cpu(fh[1]);
270 this.no_addr = ((u64)be32_to_cpu(fh[2])) << 32;
271 this.no_addr |= be32_to_cpu(fh[3]);
272 return gfs2_get_dentry(sb, &this);
273 default:
274 return NULL;
275 }
276}
277
278static struct dentry *gfs2_fh_to_parent(struct super_block *sb, struct fid *fid,
279 int fh_len, int fh_type)
280{
281 struct gfs2_inum_host parent;
282 __be32 *fh = (__force __be32 *)fid->raw;
283
284 switch (fh_type) {
285 case GFS2_LARGE_FH_SIZE:
286 case GFS2_OLD_FH_SIZE:
287 parent.no_formal_ino = ((u64)be32_to_cpu(fh[4])) << 32;
288 parent.no_formal_ino |= be32_to_cpu(fh[5]);
289 parent.no_addr = ((u64)be32_to_cpu(fh[6])) << 32;
290 parent.no_addr |= be32_to_cpu(fh[7]);
291 return gfs2_get_dentry(sb, &parent);
292 default:
293 return NULL;
294 }
295}
296
297const struct export_operations gfs2_export_ops = {
294 .encode_fh = gfs2_encode_fh, 298 .encode_fh = gfs2_encode_fh,
299 .fh_to_dentry = gfs2_fh_to_dentry,
300 .fh_to_parent = gfs2_fh_to_parent,
295 .get_name = gfs2_get_name, 301 .get_name = gfs2_get_name,
296 .get_parent = gfs2_get_parent, 302 .get_parent = gfs2_get_parent,
297 .get_dentry = gfs2_get_dentry,
298}; 303};
299 304
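
With decode_fh() gone, gfs2 moves the handle parsing into gfs2_fh_to_dentry() and gfs2_fh_to_parent() above: each 64-bit no_formal_ino/no_addr value travels in the handle as two big-endian 32-bit words. The userspace snippet below illustrates just that reassembly, with ntohl() standing in for be32_to_cpu().

#include <arpa/inet.h>  /* ntohl()/htonl() */
#include <assert.h>
#include <stdint.h>

/* Rebuild one 64-bit field from two consecutive big-endian words. */
static uint64_t unpack_be32_pair(const uint32_t *fh)
{
        return ((uint64_t)ntohl(fh[0]) << 32) | ntohl(fh[1]);
}

int main(void)
{
        uint32_t fh[2] = { htonl(0x12345678u), htonl(0x9abcdef0u) };

        assert(unpack_be32_pair(fh) == 0x123456789abcdef0ULL);
        return 0;
}
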
diff --git a/fs/gfs2/ops_fstype.h b/fs/gfs2/ops_fstype.h
index 407029b3b2b3..da8490511836 100644
--- a/fs/gfs2/ops_fstype.h
+++ b/fs/gfs2/ops_fstype.h
@@ -14,6 +14,6 @@
14 14
15extern struct file_system_type gfs2_fs_type; 15extern struct file_system_type gfs2_fs_type;
16extern struct file_system_type gfs2meta_fs_type; 16extern struct file_system_type gfs2meta_fs_type;
17extern struct export_operations gfs2_export_ops; 17extern const struct export_operations gfs2_export_ops;
18 18
19#endif /* __OPS_FSTYPE_DOT_H__ */ 19#endif /* __OPS_FSTYPE_DOT_H__ */
diff --git a/fs/isofs/export.c b/fs/isofs/export.c
index 4af856a7fda7..29f9753ae5e5 100644
--- a/fs/isofs/export.c
+++ b/fs/isofs/export.c
@@ -42,16 +42,6 @@ isofs_export_iget(struct super_block *sb,
42 return result; 42 return result;
43} 43}
44 44
45static struct dentry *
46isofs_export_get_dentry(struct super_block *sb, void *vobjp)
47{
48 __u32 *objp = vobjp;
49 unsigned long block = objp[0];
50 unsigned long offset = objp[1];
51 __u32 generation = objp[2];
52 return isofs_export_iget(sb, block, offset, generation);
53}
54
55/* This function is surprisingly simple. The trick is understanding 45/* This function is surprisingly simple. The trick is understanding
56 * that "child" is always a directory. So, to find its parent, you 46 * that "child" is always a directory. So, to find its parent, you
57 * simply need to find its ".." entry, normalize its block and offset, 47 * simply need to find its ".." entry, normalize its block and offset,
@@ -182,43 +172,44 @@ isofs_export_encode_fh(struct dentry *dentry,
182 return type; 172 return type;
183} 173}
184 174
175struct isofs_fid {
176 u32 block;
177 u16 offset;
178 u16 parent_offset;
179 u32 generation;
180 u32 parent_block;
181 u32 parent_generation;
182};
185 183
186static struct dentry * 184static struct dentry *isofs_fh_to_dentry(struct super_block *sb,
187isofs_export_decode_fh(struct super_block *sb, 185 struct fid *fid, int fh_len, int fh_type)
188 __u32 *fh32,
189 int fh_len,
190 int fileid_type,
191 int (*acceptable)(void *context, struct dentry *de),
192 void *context)
193{ 186{
194 __u16 *fh16 = (__u16*)fh32; 187 struct isofs_fid *ifid = (struct isofs_fid *)fid;
195 __u32 child[3]; /* The child is what triggered all this. */
196 __u32 parent[3]; /* The parent is just along for the ride. */
197 188
198 if (fh_len < 3 || fileid_type > 2) 189 if (fh_len < 3 || fh_type > 2)
199 return NULL; 190 return NULL;
200 191
201 child[0] = fh32[0]; 192 return isofs_export_iget(sb, ifid->block, ifid->offset,
202 child[1] = fh16[2]; /* fh16 [sic] */ 193 ifid->generation);
203 child[2] = fh32[2];
204
205 parent[0] = 0;
206 parent[1] = 0;
207 parent[2] = 0;
208 if (fileid_type == 2) {
209 if (fh_len > 2) parent[0] = fh32[3];
210 parent[1] = fh16[3]; /* fh16 [sic] */
211 if (fh_len > 4) parent[2] = fh32[4];
212 }
213
214 return sb->s_export_op->find_exported_dentry(sb, child, parent,
215 acceptable, context);
216} 194}
217 195
196static struct dentry *isofs_fh_to_parent(struct super_block *sb,
197 struct fid *fid, int fh_len, int fh_type)
198{
199 struct isofs_fid *ifid = (struct isofs_fid *)fid;
200
201 if (fh_type != 2)
202 return NULL;
203
204 return isofs_export_iget(sb,
205 fh_len > 2 ? ifid->parent_block : 0,
206 ifid->parent_offset,
207 fh_len > 4 ? ifid->parent_generation : 0);
208}
218 209
219struct export_operations isofs_export_ops = { 210const struct export_operations isofs_export_ops = {
220 .decode_fh = isofs_export_decode_fh,
221 .encode_fh = isofs_export_encode_fh, 211 .encode_fh = isofs_export_encode_fh,
222 .get_dentry = isofs_export_get_dentry, 212 .fh_to_dentry = isofs_fh_to_dentry,
213 .fh_to_parent = isofs_fh_to_parent,
223 .get_parent = isofs_export_get_parent, 214 .get_parent = isofs_export_get_parent,
224}; 215};
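
isofs replaces the old fh32[]/fh16[] index arithmetic with the overlay struct isofs_fid above. Because every member is naturally aligned, the compiler inserts no padding and each field lands exactly where the removed code used to read it; the standalone check below spells out that correspondence.

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

struct isofs_fid {
        uint32_t block;
        uint16_t offset;
        uint16_t parent_offset;
        uint32_t generation;
        uint32_t parent_block;
        uint32_t parent_generation;
};

int main(void)
{
        /* Byte offsets match the old fh32[]/fh16[] accesses. */
        assert(offsetof(struct isofs_fid, block)             == 0);   /* fh32[0] */
        assert(offsetof(struct isofs_fid, offset)            == 4);   /* fh16[2] */
        assert(offsetof(struct isofs_fid, parent_offset)     == 6);   /* fh16[3] */
        assert(offsetof(struct isofs_fid, generation)        == 8);   /* fh32[2] */
        assert(offsetof(struct isofs_fid, parent_block)      == 12);  /* fh32[3] */
        assert(offsetof(struct isofs_fid, parent_generation) == 16);  /* fh32[4] */
        return 0;
}
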
diff --git a/fs/isofs/isofs.h b/fs/isofs/isofs.h
index a07e67b1ea7f..f3213f9f89af 100644
--- a/fs/isofs/isofs.h
+++ b/fs/isofs/isofs.h
@@ -178,4 +178,4 @@ isofs_normalize_block_and_offset(struct iso_directory_record* de,
178extern const struct inode_operations isofs_dir_inode_operations; 178extern const struct inode_operations isofs_dir_inode_operations;
179extern const struct file_operations isofs_dir_operations; 179extern const struct file_operations isofs_dir_operations;
180extern const struct address_space_operations isofs_symlink_aops; 180extern const struct address_space_operations isofs_symlink_aops;
181extern struct export_operations isofs_export_ops; 181extern const struct export_operations isofs_export_ops;
diff --git a/fs/jffs2/acl.c b/fs/jffs2/acl.c
index 8ec9323e830a..9728614b8958 100644
--- a/fs/jffs2/acl.c
+++ b/fs/jffs2/acl.c
@@ -228,11 +228,28 @@ struct posix_acl *jffs2_get_acl(struct inode *inode, int type)
228 return acl; 228 return acl;
229} 229}
230 230
231static int __jffs2_set_acl(struct inode *inode, int xprefix, struct posix_acl *acl)
232{
233 char *value = NULL;
234 size_t size = 0;
235 int rc;
236
237 if (acl) {
238 value = jffs2_acl_to_medium(acl, &size);
239 if (IS_ERR(value))
240 return PTR_ERR(value);
241 }
242 rc = do_jffs2_setxattr(inode, xprefix, "", value, size, 0);
243 if (!value && rc == -ENODATA)
244 rc = 0;
245 kfree(value);
246
247 return rc;
248}
249
231static int jffs2_set_acl(struct inode *inode, int type, struct posix_acl *acl) 250static int jffs2_set_acl(struct inode *inode, int type, struct posix_acl *acl)
232{ 251{
233 struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode); 252 struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
234 size_t size = 0;
235 char *value = NULL;
236 int rc, xprefix; 253 int rc, xprefix;
237 254
238 if (S_ISLNK(inode->i_mode)) 255 if (S_ISLNK(inode->i_mode))
@@ -267,17 +284,7 @@ static int jffs2_set_acl(struct inode *inode, int type, struct posix_acl *acl)
267 default: 284 default:
268 return -EINVAL; 285 return -EINVAL;
269 } 286 }
270 if (acl) { 287 rc = __jffs2_set_acl(inode, xprefix, acl);
271 value = jffs2_acl_to_medium(acl, &size);
272 if (IS_ERR(value))
273 return PTR_ERR(value);
274 }
275
276 rc = do_jffs2_setxattr(inode, xprefix, "", value, size, 0);
277 if (!value && rc == -ENODATA)
278 rc = 0;
279 if (value)
280 kfree(value);
281 if (!rc) { 288 if (!rc) {
282 switch(type) { 289 switch(type) {
283 case ACL_TYPE_ACCESS: 290 case ACL_TYPE_ACCESS:
@@ -312,37 +319,59 @@ int jffs2_permission(struct inode *inode, int mask, struct nameidata *nd)
312 return generic_permission(inode, mask, jffs2_check_acl); 319 return generic_permission(inode, mask, jffs2_check_acl);
313} 320}
314 321
315int jffs2_init_acl(struct inode *inode, struct posix_acl *acl) 322int jffs2_init_acl_pre(struct inode *dir_i, struct inode *inode, int *i_mode)
316{ 323{
317 struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode); 324 struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
318 struct posix_acl *clone; 325 struct posix_acl *acl, *clone;
319 mode_t mode; 326 int rc;
320 int rc = 0;
321 327
322 f->i_acl_access = JFFS2_ACL_NOT_CACHED; 328 f->i_acl_default = NULL;
323 f->i_acl_default = JFFS2_ACL_NOT_CACHED; 329 f->i_acl_access = NULL;
330
331 if (S_ISLNK(*i_mode))
 332 return 0; /* Symlinks never have ACLs */
333
334 acl = jffs2_get_acl(dir_i, ACL_TYPE_DEFAULT);
335 if (IS_ERR(acl))
336 return PTR_ERR(acl);
337
338 if (!acl) {
339 *i_mode &= ~current->fs->umask;
340 } else {
341 if (S_ISDIR(*i_mode))
342 jffs2_iset_acl(inode, &f->i_acl_default, acl);
324 343
325 if (acl) {
326 if (S_ISDIR(inode->i_mode)) {
327 rc = jffs2_set_acl(inode, ACL_TYPE_DEFAULT, acl);
328 if (rc)
329 goto cleanup;
330 }
331 clone = posix_acl_clone(acl, GFP_KERNEL); 344 clone = posix_acl_clone(acl, GFP_KERNEL);
332 rc = -ENOMEM;
333 if (!clone) 345 if (!clone)
334 goto cleanup; 346 return -ENOMEM;
335 mode = inode->i_mode; 347 rc = posix_acl_create_masq(clone, (mode_t *)i_mode);
336 rc = posix_acl_create_masq(clone, &mode); 348 if (rc < 0)
337 if (rc >= 0) { 349 return rc;
338 inode->i_mode = mode; 350 if (rc > 0)
339 if (rc > 0) 351 jffs2_iset_acl(inode, &f->i_acl_access, clone);
340 rc = jffs2_set_acl(inode, ACL_TYPE_ACCESS, clone); 352
341 }
342 posix_acl_release(clone); 353 posix_acl_release(clone);
343 } 354 }
344 cleanup: 355 return 0;
345 posix_acl_release(acl); 356}
357
358int jffs2_init_acl_post(struct inode *inode)
359{
360 struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
361 int rc;
362
363 if (f->i_acl_default) {
364 rc = __jffs2_set_acl(inode, JFFS2_XPREFIX_ACL_DEFAULT, f->i_acl_default);
365 if (rc)
366 return rc;
367 }
368
369 if (f->i_acl_access) {
370 rc = __jffs2_set_acl(inode, JFFS2_XPREFIX_ACL_ACCESS, f->i_acl_access);
371 if (rc)
372 return rc;
373 }
374
346 return rc; 375 return rc;
347} 376}
348 377
diff --git a/fs/jffs2/acl.h b/fs/jffs2/acl.h
index 90a2dbf59051..76c6ebd1acd9 100644
--- a/fs/jffs2/acl.h
+++ b/fs/jffs2/acl.h
@@ -31,7 +31,8 @@ struct jffs2_acl_header {
31extern struct posix_acl *jffs2_get_acl(struct inode *inode, int type); 31extern struct posix_acl *jffs2_get_acl(struct inode *inode, int type);
32extern int jffs2_permission(struct inode *, int, struct nameidata *); 32extern int jffs2_permission(struct inode *, int, struct nameidata *);
33extern int jffs2_acl_chmod(struct inode *); 33extern int jffs2_acl_chmod(struct inode *);
34extern int jffs2_init_acl(struct inode *, struct posix_acl *); 34extern int jffs2_init_acl_pre(struct inode *, struct inode *, int *);
35extern int jffs2_init_acl_post(struct inode *);
35extern void jffs2_clear_acl(struct jffs2_inode_info *); 36extern void jffs2_clear_acl(struct jffs2_inode_info *);
36 37
37extern struct xattr_handler jffs2_acl_access_xattr_handler; 38extern struct xattr_handler jffs2_acl_access_xattr_handler;
@@ -39,10 +40,11 @@ extern struct xattr_handler jffs2_acl_default_xattr_handler;
39 40
40#else 41#else
41 42
42#define jffs2_get_acl(inode, type) (NULL) 43#define jffs2_get_acl(inode, type) (NULL)
43#define jffs2_permission NULL 44#define jffs2_permission (NULL)
44#define jffs2_acl_chmod(inode) (0) 45#define jffs2_acl_chmod(inode) (0)
45#define jffs2_init_acl(inode,dir) (0) 46#define jffs2_init_acl_pre(dir_i,inode,mode) (0)
47#define jffs2_init_acl_post(inode) (0)
46#define jffs2_clear_acl(f) 48#define jffs2_clear_acl(f)
47 49
48#endif /* CONFIG_JFFS2_FS_POSIX_ACL */ 50#endif /* CONFIG_JFFS2_FS_POSIX_ACL */
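
The jffs2 hunks split the old jffs2_init_acl() in two: jffs2_init_acl_pre() runs from jffs2_new_inode() before the raw inode is written, applying the umask or the parent's default ACL to the new mode and merely caching the resulting ACLs, while jffs2_init_acl_post() writes them out as xattrs once the inode exists on the medium (see the fs/jffs2/dir.c and fs/jffs2/write.c hunks below). A condensed, hypothetical sketch of the resulting ordering in a creation path follows; it assumes the jffs2-internal headers and drops the real error unwinding.

static int example_jffs2_create_flow(struct inode *dir_i, int mode,
                                     struct jffs2_raw_inode *ri)
{
        struct inode *inode;
        int ret;

        /*
         * Step 1: jffs2_new_inode() now calls jffs2_init_acl_pre(),
         * which fixes up 'mode' and caches the access/default ACLs in
         * the jffs2_inode_info without touching the medium.
         */
        inode = jffs2_new_inode(dir_i, mode, ri);
        if (IS_ERR(inode))
                return PTR_ERR(inode);

        /* Step 2: write the inode and dirent nodes as before ... */

        /*
         * Step 3: only now are the security label and the cached ACLs
         * pushed out as xattrs.  For regular files jffs2_do_create()
         * performs this step itself.
         */
        ret = jffs2_init_security(inode, dir_i);
        if (ret)
                return ret;
        return jffs2_init_acl_post(inode);
}
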
diff --git a/fs/jffs2/dir.c b/fs/jffs2/dir.c
index 8353eb9c1799..787e392ffd41 100644
--- a/fs/jffs2/dir.c
+++ b/fs/jffs2/dir.c
@@ -182,7 +182,6 @@ static int jffs2_create(struct inode *dir_i, struct dentry *dentry, int mode,
182 struct jffs2_inode_info *f, *dir_f; 182 struct jffs2_inode_info *f, *dir_f;
183 struct jffs2_sb_info *c; 183 struct jffs2_sb_info *c;
184 struct inode *inode; 184 struct inode *inode;
185 struct posix_acl *acl;
186 int ret; 185 int ret;
187 186
188 ri = jffs2_alloc_raw_inode(); 187 ri = jffs2_alloc_raw_inode();
@@ -193,7 +192,7 @@ static int jffs2_create(struct inode *dir_i, struct dentry *dentry, int mode,
193 192
194 D1(printk(KERN_DEBUG "jffs2_create()\n")); 193 D1(printk(KERN_DEBUG "jffs2_create()\n"));
195 194
196 inode = jffs2_new_inode(dir_i, mode, ri, &acl); 195 inode = jffs2_new_inode(dir_i, mode, ri);
197 196
198 if (IS_ERR(inode)) { 197 if (IS_ERR(inode)) {
199 D1(printk(KERN_DEBUG "jffs2_new_inode() failed\n")); 198 D1(printk(KERN_DEBUG "jffs2_new_inode() failed\n"));
@@ -211,14 +210,6 @@ static int jffs2_create(struct inode *dir_i, struct dentry *dentry, int mode,
211 210
212 ret = jffs2_do_create(c, dir_f, f, ri, 211 ret = jffs2_do_create(c, dir_f, f, ri,
213 dentry->d_name.name, dentry->d_name.len); 212 dentry->d_name.name, dentry->d_name.len);
214
215 if (ret)
216 goto fail_acl;
217
218 ret = jffs2_init_security(inode, dir_i);
219 if (ret)
220 goto fail_acl;
221 ret = jffs2_init_acl(inode, acl);
222 if (ret) 213 if (ret)
223 goto fail; 214 goto fail;
224 215
@@ -231,8 +222,6 @@ static int jffs2_create(struct inode *dir_i, struct dentry *dentry, int mode,
231 inode->i_ino, inode->i_mode, inode->i_nlink, f->inocache->nlink, inode->i_mapping->nrpages)); 222 inode->i_ino, inode->i_mode, inode->i_nlink, f->inocache->nlink, inode->i_mapping->nrpages));
232 return 0; 223 return 0;
233 224
234 fail_acl:
235 posix_acl_release(acl);
236 fail: 225 fail:
237 make_bad_inode(inode); 226 make_bad_inode(inode);
238 iput(inode); 227 iput(inode);
@@ -309,7 +298,6 @@ static int jffs2_symlink (struct inode *dir_i, struct dentry *dentry, const char
309 struct jffs2_full_dirent *fd; 298 struct jffs2_full_dirent *fd;
310 int namelen; 299 int namelen;
311 uint32_t alloclen; 300 uint32_t alloclen;
312 struct posix_acl *acl;
313 int ret, targetlen = strlen(target); 301 int ret, targetlen = strlen(target);
314 302
315 /* FIXME: If you care. We'd need to use frags for the target 303 /* FIXME: If you care. We'd need to use frags for the target
@@ -336,7 +324,7 @@ static int jffs2_symlink (struct inode *dir_i, struct dentry *dentry, const char
336 return ret; 324 return ret;
337 } 325 }
338 326
339 inode = jffs2_new_inode(dir_i, S_IFLNK | S_IRWXUGO, ri, &acl); 327 inode = jffs2_new_inode(dir_i, S_IFLNK | S_IRWXUGO, ri);
340 328
341 if (IS_ERR(inode)) { 329 if (IS_ERR(inode)) {
342 jffs2_free_raw_inode(ri); 330 jffs2_free_raw_inode(ri);
@@ -366,7 +354,6 @@ static int jffs2_symlink (struct inode *dir_i, struct dentry *dentry, const char
366 up(&f->sem); 354 up(&f->sem);
367 jffs2_complete_reservation(c); 355 jffs2_complete_reservation(c);
368 jffs2_clear_inode(inode); 356 jffs2_clear_inode(inode);
369 posix_acl_release(acl);
370 return PTR_ERR(fn); 357 return PTR_ERR(fn);
371 } 358 }
372 359
@@ -377,7 +364,6 @@ static int jffs2_symlink (struct inode *dir_i, struct dentry *dentry, const char
377 up(&f->sem); 364 up(&f->sem);
378 jffs2_complete_reservation(c); 365 jffs2_complete_reservation(c);
379 jffs2_clear_inode(inode); 366 jffs2_clear_inode(inode);
380 posix_acl_release(acl);
381 return -ENOMEM; 367 return -ENOMEM;
382 } 368 }
383 369
@@ -395,10 +381,9 @@ static int jffs2_symlink (struct inode *dir_i, struct dentry *dentry, const char
395 ret = jffs2_init_security(inode, dir_i); 381 ret = jffs2_init_security(inode, dir_i);
396 if (ret) { 382 if (ret) {
397 jffs2_clear_inode(inode); 383 jffs2_clear_inode(inode);
398 posix_acl_release(acl);
399 return ret; 384 return ret;
400 } 385 }
401 ret = jffs2_init_acl(inode, acl); 386 ret = jffs2_init_acl_post(inode);
402 if (ret) { 387 if (ret) {
403 jffs2_clear_inode(inode); 388 jffs2_clear_inode(inode);
404 return ret; 389 return ret;
@@ -476,7 +461,6 @@ static int jffs2_mkdir (struct inode *dir_i, struct dentry *dentry, int mode)
476 struct jffs2_full_dirent *fd; 461 struct jffs2_full_dirent *fd;
477 int namelen; 462 int namelen;
478 uint32_t alloclen; 463 uint32_t alloclen;
479 struct posix_acl *acl;
480 int ret; 464 int ret;
481 465
482 mode |= S_IFDIR; 466 mode |= S_IFDIR;
@@ -499,7 +483,7 @@ static int jffs2_mkdir (struct inode *dir_i, struct dentry *dentry, int mode)
499 return ret; 483 return ret;
500 } 484 }
501 485
502 inode = jffs2_new_inode(dir_i, mode, ri, &acl); 486 inode = jffs2_new_inode(dir_i, mode, ri);
503 487
504 if (IS_ERR(inode)) { 488 if (IS_ERR(inode)) {
505 jffs2_free_raw_inode(ri); 489 jffs2_free_raw_inode(ri);
@@ -526,7 +510,6 @@ static int jffs2_mkdir (struct inode *dir_i, struct dentry *dentry, int mode)
526 up(&f->sem); 510 up(&f->sem);
527 jffs2_complete_reservation(c); 511 jffs2_complete_reservation(c);
528 jffs2_clear_inode(inode); 512 jffs2_clear_inode(inode);
529 posix_acl_release(acl);
530 return PTR_ERR(fn); 513 return PTR_ERR(fn);
531 } 514 }
532 /* No data here. Only a metadata node, which will be 515 /* No data here. Only a metadata node, which will be
@@ -540,10 +523,9 @@ static int jffs2_mkdir (struct inode *dir_i, struct dentry *dentry, int mode)
540 ret = jffs2_init_security(inode, dir_i); 523 ret = jffs2_init_security(inode, dir_i);
541 if (ret) { 524 if (ret) {
542 jffs2_clear_inode(inode); 525 jffs2_clear_inode(inode);
543 posix_acl_release(acl);
544 return ret; 526 return ret;
545 } 527 }
546 ret = jffs2_init_acl(inode, acl); 528 ret = jffs2_init_acl_post(inode);
547 if (ret) { 529 if (ret) {
548 jffs2_clear_inode(inode); 530 jffs2_clear_inode(inode);
549 return ret; 531 return ret;
@@ -639,7 +621,6 @@ static int jffs2_mknod (struct inode *dir_i, struct dentry *dentry, int mode, de
639 union jffs2_device_node dev; 621 union jffs2_device_node dev;
640 int devlen = 0; 622 int devlen = 0;
641 uint32_t alloclen; 623 uint32_t alloclen;
642 struct posix_acl *acl;
643 int ret; 624 int ret;
644 625
645 if (!new_valid_dev(rdev)) 626 if (!new_valid_dev(rdev))
@@ -666,7 +647,7 @@ static int jffs2_mknod (struct inode *dir_i, struct dentry *dentry, int mode, de
666 return ret; 647 return ret;
667 } 648 }
668 649
669 inode = jffs2_new_inode(dir_i, mode, ri, &acl); 650 inode = jffs2_new_inode(dir_i, mode, ri);
670 651
671 if (IS_ERR(inode)) { 652 if (IS_ERR(inode)) {
672 jffs2_free_raw_inode(ri); 653 jffs2_free_raw_inode(ri);
@@ -695,7 +676,6 @@ static int jffs2_mknod (struct inode *dir_i, struct dentry *dentry, int mode, de
695 up(&f->sem); 676 up(&f->sem);
696 jffs2_complete_reservation(c); 677 jffs2_complete_reservation(c);
697 jffs2_clear_inode(inode); 678 jffs2_clear_inode(inode);
698 posix_acl_release(acl);
699 return PTR_ERR(fn); 679 return PTR_ERR(fn);
700 } 680 }
701 /* No data here. Only a metadata node, which will be 681 /* No data here. Only a metadata node, which will be
@@ -709,10 +689,9 @@ static int jffs2_mknod (struct inode *dir_i, struct dentry *dentry, int mode, de
709 ret = jffs2_init_security(inode, dir_i); 689 ret = jffs2_init_security(inode, dir_i);
710 if (ret) { 690 if (ret) {
711 jffs2_clear_inode(inode); 691 jffs2_clear_inode(inode);
712 posix_acl_release(acl);
713 return ret; 692 return ret;
714 } 693 }
715 ret = jffs2_init_acl(inode, acl); 694 ret = jffs2_init_acl_post(inode);
716 if (ret) { 695 if (ret) {
717 jffs2_clear_inode(inode); 696 jffs2_clear_inode(inode);
718 return ret; 697 return ret;
diff --git a/fs/jffs2/file.c b/fs/jffs2/file.c
index 023a17539dd4..f9c5dd6f4b64 100644
--- a/fs/jffs2/file.c
+++ b/fs/jffs2/file.c
@@ -255,7 +255,7 @@ static int jffs2_write_end(struct file *filp, struct address_space *mapping,
255 _whole_ page. This helps to reduce the number of 255 _whole_ page. This helps to reduce the number of
256 nodes in files which have many short writes, like 256 nodes in files which have many short writes, like
257 syslog files. */ 257 syslog files. */
258 start = aligned_start = 0; 258 aligned_start = 0;
259 } 259 }
260 260
261 ri = jffs2_alloc_raw_inode(); 261 ri = jffs2_alloc_raw_inode();
@@ -291,14 +291,11 @@ static int jffs2_write_end(struct file *filp, struct address_space *mapping,
291 } 291 }
292 292
293 /* Adjust writtenlen for the padding we did, so we don't confuse our caller */ 293 /* Adjust writtenlen for the padding we did, so we don't confuse our caller */
294 if (writtenlen < (start&3)) 294 writtenlen -= min(writtenlen, (start - aligned_start));
295 writtenlen = 0;
296 else
297 writtenlen -= (start&3);
298 295
299 if (writtenlen) { 296 if (writtenlen) {
300 if (inode->i_size < (pg->index << PAGE_CACHE_SHIFT) + start + writtenlen) { 297 if (inode->i_size < pos + writtenlen) {
301 inode->i_size = (pg->index << PAGE_CACHE_SHIFT) + start + writtenlen; 298 inode->i_size = pos + writtenlen;
302 inode->i_blocks = (inode->i_size + 511) >> 9; 299 inode->i_blocks = (inode->i_size + 511) >> 9;
303 300
304 inode->i_ctime = inode->i_mtime = ITIME(je32_to_cpu(ri->ctime)); 301 inode->i_ctime = inode->i_mtime = ITIME(je32_to_cpu(ri->ctime));
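
Two related fixes in jffs2_write_end(): start is no longer clobbered when the whole page is written out, and the writtenlen adjustment now subtracts the actual padded prefix (start - aligned_start), clamped so it cannot underflow, instead of assuming the prefix is always start & 3. The arithmetic is easy to check in isolation:

#include <assert.h>

/* Mirrors: writtenlen -= min(writtenlen, start - aligned_start); */
static unsigned adjust(unsigned writtenlen, unsigned start,
                       unsigned aligned_start)
{
        unsigned pad = start - aligned_start;

        return writtenlen - (pad < writtenlen ? pad : writtenlen);
}

int main(void)
{
        assert(adjust(10, 7, 4) == 7);  /* 3 padding bytes not reported */
        assert(adjust(2, 7, 4) == 0);   /* clamped, never underflows */
        assert(adjust(10, 4, 4) == 10); /* nothing was padded */
        return 0;
}
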
diff --git a/fs/jffs2/fs.c b/fs/jffs2/fs.c
index ed85f9afdbc8..d2e06f7ea96f 100644
--- a/fs/jffs2/fs.c
+++ b/fs/jffs2/fs.c
@@ -402,8 +402,7 @@ void jffs2_write_super (struct super_block *sb)
402 402
403/* jffs2_new_inode: allocate a new inode and inocache, add it to the hash, 403/* jffs2_new_inode: allocate a new inode and inocache, add it to the hash,
404 fill in the raw_inode while you're at it. */ 404 fill in the raw_inode while you're at it. */
405struct inode *jffs2_new_inode (struct inode *dir_i, int mode, struct jffs2_raw_inode *ri, 405struct inode *jffs2_new_inode (struct inode *dir_i, int mode, struct jffs2_raw_inode *ri)
406 struct posix_acl **acl)
407{ 406{
408 struct inode *inode; 407 struct inode *inode;
409 struct super_block *sb = dir_i->i_sb; 408 struct super_block *sb = dir_i->i_sb;
@@ -438,19 +437,11 @@ struct inode *jffs2_new_inode (struct inode *dir_i, int mode, struct jffs2_raw_i
438 437
439 /* POSIX ACLs have to be processed now, at least partly. 438 /* POSIX ACLs have to be processed now, at least partly.
440 The umask is only applied if there's no default ACL */ 439 The umask is only applied if there's no default ACL */
441 if (!S_ISLNK(mode)) { 440 ret = jffs2_init_acl_pre(dir_i, inode, &mode);
442 *acl = jffs2_get_acl(dir_i, ACL_TYPE_DEFAULT); 441 if (ret) {
443 if (IS_ERR(*acl)) { 442 make_bad_inode(inode);
444 make_bad_inode(inode); 443 iput(inode);
445 iput(inode); 444 return ERR_PTR(ret);
446 inode = (void *)*acl;
447 *acl = NULL;
448 return inode;
449 }
450 if (!(*acl))
451 mode &= ~current->fs->umask;
452 } else {
453 *acl = NULL;
454 } 445 }
455 ret = jffs2_do_new_inode (c, f, mode, ri); 446 ret = jffs2_do_new_inode (c, f, mode, ri);
456 if (ret) { 447 if (ret) {
diff --git a/fs/jffs2/os-linux.h b/fs/jffs2/os-linux.h
index f6743a915cf3..bf64686cf098 100644
--- a/fs/jffs2/os-linux.h
+++ b/fs/jffs2/os-linux.h
@@ -173,15 +173,13 @@ int jffs2_ioctl(struct inode *, struct file *, unsigned int, unsigned long);
173extern const struct inode_operations jffs2_symlink_inode_operations; 173extern const struct inode_operations jffs2_symlink_inode_operations;
174 174
175/* fs.c */ 175/* fs.c */
176struct posix_acl;
177
178int jffs2_setattr (struct dentry *, struct iattr *); 176int jffs2_setattr (struct dentry *, struct iattr *);
179int jffs2_do_setattr (struct inode *, struct iattr *); 177int jffs2_do_setattr (struct inode *, struct iattr *);
180void jffs2_read_inode (struct inode *); 178void jffs2_read_inode (struct inode *);
181void jffs2_clear_inode (struct inode *); 179void jffs2_clear_inode (struct inode *);
182void jffs2_dirty_inode(struct inode *inode); 180void jffs2_dirty_inode(struct inode *inode);
183struct inode *jffs2_new_inode (struct inode *dir_i, int mode, 181struct inode *jffs2_new_inode (struct inode *dir_i, int mode,
184 struct jffs2_raw_inode *ri, struct posix_acl **acl); 182 struct jffs2_raw_inode *ri);
185int jffs2_statfs (struct dentry *, struct kstatfs *); 183int jffs2_statfs (struct dentry *, struct kstatfs *);
186void jffs2_write_super (struct super_block *); 184void jffs2_write_super (struct super_block *);
187int jffs2_remount_fs (struct super_block *, int *, char *); 185int jffs2_remount_fs (struct super_block *, int *, char *);
diff --git a/fs/jffs2/write.c b/fs/jffs2/write.c
index 2f5695446d0f..147e2cbee9e4 100644
--- a/fs/jffs2/write.c
+++ b/fs/jffs2/write.c
@@ -465,6 +465,14 @@ int jffs2_do_create(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, str
465 465
466 up(&f->sem); 466 up(&f->sem);
467 jffs2_complete_reservation(c); 467 jffs2_complete_reservation(c);
468
469 ret = jffs2_init_security(&f->vfs_inode, &dir_f->vfs_inode);
470 if (ret)
471 return ret;
472 ret = jffs2_init_acl_post(&f->vfs_inode);
473 if (ret)
474 return ret;
475
468 ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &alloclen, 476 ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &alloclen,
469 ALLOC_NORMAL, JFFS2_SUMMARY_DIRENT_SIZE(namelen)); 477 ALLOC_NORMAL, JFFS2_SUMMARY_DIRENT_SIZE(namelen));
470 478
diff --git a/fs/jfs/jfs_inode.h b/fs/jfs/jfs_inode.h
index f0ec72b263f1..8e2cf2cde185 100644
--- a/fs/jfs/jfs_inode.h
+++ b/fs/jfs/jfs_inode.h
@@ -18,6 +18,8 @@
18#ifndef _H_JFS_INODE 18#ifndef _H_JFS_INODE
19#define _H_JFS_INODE 19#define _H_JFS_INODE
20 20
21struct fid;
22
21extern struct inode *ialloc(struct inode *, umode_t); 23extern struct inode *ialloc(struct inode *, umode_t);
22extern int jfs_fsync(struct file *, struct dentry *, int); 24extern int jfs_fsync(struct file *, struct dentry *, int);
23extern int jfs_ioctl(struct inode *, struct file *, 25extern int jfs_ioctl(struct inode *, struct file *,
@@ -32,7 +34,10 @@ extern void jfs_truncate_nolock(struct inode *, loff_t);
32extern void jfs_free_zero_link(struct inode *); 34extern void jfs_free_zero_link(struct inode *);
33extern struct dentry *jfs_get_parent(struct dentry *dentry); 35extern struct dentry *jfs_get_parent(struct dentry *dentry);
34extern void jfs_get_inode_flags(struct jfs_inode_info *); 36extern void jfs_get_inode_flags(struct jfs_inode_info *);
35extern struct dentry *jfs_get_dentry(struct super_block *sb, void *vobjp); 37extern struct dentry *jfs_fh_to_dentry(struct super_block *sb, struct fid *fid,
38 int fh_len, int fh_type);
39extern struct dentry *jfs_fh_to_parent(struct super_block *sb, struct fid *fid,
40 int fh_len, int fh_type);
36extern void jfs_set_inode_flags(struct inode *); 41extern void jfs_set_inode_flags(struct inode *);
37extern int jfs_get_block(struct inode *, sector_t, struct buffer_head *, int); 42extern int jfs_get_block(struct inode *, sector_t, struct buffer_head *, int);
38 43
diff --git a/fs/jfs/namei.c b/fs/jfs/namei.c
index 932797ba433b..4e0a8493cef6 100644
--- a/fs/jfs/namei.c
+++ b/fs/jfs/namei.c
@@ -20,6 +20,7 @@
20#include <linux/fs.h> 20#include <linux/fs.h>
21#include <linux/ctype.h> 21#include <linux/ctype.h>
22#include <linux/quotaops.h> 22#include <linux/quotaops.h>
23#include <linux/exportfs.h>
23#include "jfs_incore.h" 24#include "jfs_incore.h"
24#include "jfs_superblock.h" 25#include "jfs_superblock.h"
25#include "jfs_inode.h" 26#include "jfs_inode.h"
@@ -1477,13 +1478,10 @@ static struct dentry *jfs_lookup(struct inode *dip, struct dentry *dentry, struc
1477 return dentry; 1478 return dentry;
1478} 1479}
1479 1480
1480struct dentry *jfs_get_dentry(struct super_block *sb, void *vobjp) 1481static struct inode *jfs_nfs_get_inode(struct super_block *sb,
1482 u64 ino, u32 generation)
1481{ 1483{
1482 __u32 *objp = vobjp;
1483 unsigned long ino = objp[0];
1484 __u32 generation = objp[1];
1485 struct inode *inode; 1484 struct inode *inode;
1486 struct dentry *result;
1487 1485
1488 if (ino == 0) 1486 if (ino == 0)
1489 return ERR_PTR(-ESTALE); 1487 return ERR_PTR(-ESTALE);
@@ -1493,20 +1491,25 @@ struct dentry *jfs_get_dentry(struct super_block *sb, void *vobjp)
1493 1491
1494 if (is_bad_inode(inode) || 1492 if (is_bad_inode(inode) ||
1495 (generation && inode->i_generation != generation)) { 1493 (generation && inode->i_generation != generation)) {
1496 result = ERR_PTR(-ESTALE); 1494 iput(inode);
1497 goto out_iput; 1495 return ERR_PTR(-ESTALE);
1498 } 1496 }
1499 1497
1500 result = d_alloc_anon(inode); 1498 return inode;
1501 if (!result) { 1499}
1502 result = ERR_PTR(-ENOMEM);
1503 goto out_iput;
1504 }
1505 return result;
1506 1500
1507 out_iput: 1501struct dentry *jfs_fh_to_dentry(struct super_block *sb, struct fid *fid,
1508 iput(inode); 1502 int fh_len, int fh_type)
1509 return result; 1503{
1504 return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
1505 jfs_nfs_get_inode);
1506}
1507
1508struct dentry *jfs_fh_to_parent(struct super_block *sb, struct fid *fid,
1509 int fh_len, int fh_type)
1510{
1511 return generic_fh_to_parent(sb, fid, fh_len, fh_type,
1512 jfs_nfs_get_inode);
1510} 1513}
1511 1514
1512struct dentry *jfs_get_parent(struct dentry *dentry) 1515struct dentry *jfs_get_parent(struct dentry *dentry)
diff --git a/fs/jfs/super.c b/fs/jfs/super.c
index cff60c171943..314bb4ff1ba8 100644
--- a/fs/jfs/super.c
+++ b/fs/jfs/super.c
@@ -48,7 +48,7 @@ MODULE_LICENSE("GPL");
48static struct kmem_cache * jfs_inode_cachep; 48static struct kmem_cache * jfs_inode_cachep;
49 49
50static const struct super_operations jfs_super_operations; 50static const struct super_operations jfs_super_operations;
51static struct export_operations jfs_export_operations; 51static const struct export_operations jfs_export_operations;
52static struct file_system_type jfs_fs_type; 52static struct file_system_type jfs_fs_type;
53 53
54#define MAX_COMMIT_THREADS 64 54#define MAX_COMMIT_THREADS 64
@@ -737,8 +737,9 @@ static const struct super_operations jfs_super_operations = {
737#endif 737#endif
738}; 738};
739 739
740static struct export_operations jfs_export_operations = { 740static const struct export_operations jfs_export_operations = {
741 .get_dentry = jfs_get_dentry, 741 .fh_to_dentry = jfs_fh_to_dentry,
742 .fh_to_parent = jfs_fh_to_parent,
742 .get_parent = jfs_get_parent, 743 .get_parent = jfs_get_parent,
743}; 744};
744 745
diff --git a/fs/libfs.c b/fs/libfs.c
index ae51481e45e5..6e68b700958d 100644
--- a/fs/libfs.c
+++ b/fs/libfs.c
@@ -8,6 +8,7 @@
8#include <linux/mount.h> 8#include <linux/mount.h>
9#include <linux/vfs.h> 9#include <linux/vfs.h>
10#include <linux/mutex.h> 10#include <linux/mutex.h>
11#include <linux/exportfs.h>
11 12
12#include <asm/uaccess.h> 13#include <asm/uaccess.h>
13 14
@@ -678,6 +679,93 @@ out:
678 return ret; 679 return ret;
679} 680}
680 681
682/*
683 * This is what d_alloc_anon should have been. Once the exportfs
684 * argument transition has been finished I will update d_alloc_anon
685 * to this prototype and this wrapper will go away. --hch
686 */
687static struct dentry *exportfs_d_alloc(struct inode *inode)
688{
689 struct dentry *dentry;
690
691 if (!inode)
692 return NULL;
693 if (IS_ERR(inode))
694 return ERR_PTR(PTR_ERR(inode));
695
696 dentry = d_alloc_anon(inode);
697 if (!dentry) {
698 iput(inode);
699 dentry = ERR_PTR(-ENOMEM);
700 }
701 return dentry;
702}
703
704/**
705 * generic_fh_to_dentry - generic helper for the fh_to_dentry export operation
706 * @sb: filesystem to do the file handle conversion on
707 * @fid: file handle to convert
708 * @fh_len: length of the file handle in bytes
709 * @fh_type: type of file handle
710 * @get_inode: filesystem callback to retrieve inode
711 *
712 * This function decodes @fid as long as it has one of the well-known
713 * Linux filehandle types and calls @get_inode on it to retrieve the
714 * inode for the object specified in the file handle.
715 */
716struct dentry *generic_fh_to_dentry(struct super_block *sb, struct fid *fid,
717 int fh_len, int fh_type, struct inode *(*get_inode)
718 (struct super_block *sb, u64 ino, u32 gen))
719{
720 struct inode *inode = NULL;
721
722 if (fh_len < 2)
723 return NULL;
724
725 switch (fh_type) {
726 case FILEID_INO32_GEN:
727 case FILEID_INO32_GEN_PARENT:
728 inode = get_inode(sb, fid->i32.ino, fid->i32.gen);
729 break;
730 }
731
732 return exportfs_d_alloc(inode);
733}
734EXPORT_SYMBOL_GPL(generic_fh_to_dentry);
735
736/**
737 * generic_fh_to_parent - generic helper for the fh_to_parent export operation
738 * @sb: filesystem to do the file handle conversion on
739 * @fid: file handle to convert
740 * @fh_len: length of the file handle in bytes
741 * @fh_type: type of file handle
742 * @get_inode: filesystem callback to retrieve inode
743 *
744 * This function decodes @fid as long as it has one of the well-known
745 * Linux filehandle types and calls @get_inode on it to retrieve the
746 * inode for the _parent_ object specified in the file handle, if one
747 * is present, or NULL otherwise.
748 */
749struct dentry *generic_fh_to_parent(struct super_block *sb, struct fid *fid,
750 int fh_len, int fh_type, struct inode *(*get_inode)
751 (struct super_block *sb, u64 ino, u32 gen))
752{
753 struct inode *inode = NULL;
754
755 if (fh_len <= 2)
756 return NULL;
757
758 switch (fh_type) {
759 case FILEID_INO32_GEN_PARENT:
760 inode = get_inode(sb, fid->i32.parent_ino,
761 (fh_len > 3 ? fid->i32.parent_gen : 0));
762 break;
763 }
764
765 return exportfs_d_alloc(inode);
766}
767EXPORT_SYMBOL_GPL(generic_fh_to_parent);
768
681EXPORT_SYMBOL(dcache_dir_close); 769EXPORT_SYMBOL(dcache_dir_close);
682EXPORT_SYMBOL(dcache_dir_lseek); 770EXPORT_SYMBOL(dcache_dir_lseek);
683EXPORT_SYMBOL(dcache_dir_open); 771EXPORT_SYMBOL(dcache_dir_open);
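
For reference, the pattern these two helpers enable in a converted filesystem (as the jfs and ntfs hunks show) reduces to one get_inode callback plus two thin wrappers. A minimal sketch follows; the foo_* names and foo_iget() are hypothetical placeholders, not anything added by this patch:

static struct inode *foo_nfs_get_inode(struct super_block *sb,
		u64 ino, u32 generation)
{
	struct inode *inode;

	inode = foo_iget(sb, ino);	/* hypothetical per-fs inode lookup */
	if (IS_ERR(inode))
		return inode;
	if (generation && inode->i_generation != generation) {
		iput(inode);
		return ERR_PTR(-ESTALE);
	}
	return inode;
}

static struct dentry *foo_fh_to_dentry(struct super_block *sb, struct fid *fid,
		int fh_len, int fh_type)
{
	return generic_fh_to_dentry(sb, fid, fh_len, fh_type, foo_nfs_get_inode);
}

static struct dentry *foo_fh_to_parent(struct super_block *sb, struct fid *fid,
		int fh_len, int fh_type)
{
	return generic_fh_to_parent(sb, fid, fh_len, fh_type, foo_nfs_get_inode);
}

static const struct export_operations foo_export_ops = {
	.fh_to_dentry	= foo_fh_to_dentry,
	.fh_to_parent	= foo_fh_to_parent,
};
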
diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c
index 04b266729802..66d0aeb32a47 100644
--- a/fs/nfsd/export.c
+++ b/fs/nfsd/export.c
@@ -386,15 +386,13 @@ static int check_export(struct inode *inode, int flags, unsigned char *uuid)
386 dprintk("exp_export: export of non-dev fs without fsid\n"); 386 dprintk("exp_export: export of non-dev fs without fsid\n");
387 return -EINVAL; 387 return -EINVAL;
388 } 388 }
389 if (!inode->i_sb->s_export_op) { 389
390 if (!inode->i_sb->s_export_op ||
391 !inode->i_sb->s_export_op->fh_to_dentry) {
390 dprintk("exp_export: export of invalid fs type.\n"); 392 dprintk("exp_export: export of invalid fs type.\n");
391 return -EINVAL; 393 return -EINVAL;
392 } 394 }
393 395
394 /* Ok, we can export it */;
395 if (!inode->i_sb->s_export_op->find_exported_dentry)
396 inode->i_sb->s_export_op->find_exported_dentry =
397 find_exported_dentry;
398 return 0; 396 return 0;
399 397
400} 398}
diff --git a/fs/nfsd/nfs4recover.c b/fs/nfsd/nfs4recover.c
index ebd03cc07479..6f03918018a3 100644
--- a/fs/nfsd/nfs4recover.c
+++ b/fs/nfsd/nfs4recover.c
@@ -88,7 +88,7 @@ nfs4_make_rec_clidname(char *dname, struct xdr_netobj *clname)
88{ 88{
89 struct xdr_netobj cksum; 89 struct xdr_netobj cksum;
90 struct hash_desc desc; 90 struct hash_desc desc;
91 struct scatterlist sg[1]; 91 struct scatterlist sg;
92 __be32 status = nfserr_resource; 92 __be32 status = nfserr_resource;
93 93
94 dprintk("NFSD: nfs4_make_rec_clidname for %.*s\n", 94 dprintk("NFSD: nfs4_make_rec_clidname for %.*s\n",
@@ -102,11 +102,9 @@ nfs4_make_rec_clidname(char *dname, struct xdr_netobj *clname)
102 if (cksum.data == NULL) 102 if (cksum.data == NULL)
103 goto out; 103 goto out;
104 104
105 sg[0].page = virt_to_page(clname->data); 105 sg_init_one(&sg, clname->data, clname->len);
106 sg[0].offset = offset_in_page(clname->data);
107 sg[0].length = clname->len;
108 106
109 if (crypto_hash_digest(&desc, sg, sg->length, cksum.data)) 107 if (crypto_hash_digest(&desc, &sg, sg.length, cksum.data))
110 goto out; 108 goto out;
111 109
112 md5_to_hex(dname, cksum.data); 110 md5_to_hex(dname, cksum.data);
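
The sg_init_one() conversion above is the generic replacement for open-coded single-buffer scatterlists. In isolation the pattern looks roughly like the helper below, with the desc, buf, len and out parameters standing in for the caller's locals:

static int digest_one_buffer(struct hash_desc *desc, void *buf,
			     unsigned int len, u8 *out)
{
	struct scatterlist sg;

	/* describe one linearly-mapped kernel buffer: fills in the
	 * page link, offset and length from the virtual address */
	sg_init_one(&sg, buf, len);

	return crypto_hash_digest(desc, &sg, len, out);
}
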
diff --git a/fs/nfsd/nfsfh.c b/fs/nfsd/nfsfh.c
index 7011d62acfc8..4f712e970584 100644
--- a/fs/nfsd/nfsfh.c
+++ b/fs/nfsd/nfsfh.c
@@ -115,8 +115,7 @@ fh_verify(struct svc_rqst *rqstp, struct svc_fh *fhp, int type, int access)
115 dprintk("nfsd: fh_verify(%s)\n", SVCFH_fmt(fhp)); 115 dprintk("nfsd: fh_verify(%s)\n", SVCFH_fmt(fhp));
116 116
117 if (!fhp->fh_dentry) { 117 if (!fhp->fh_dentry) {
118 __u32 *datap=NULL; 118 struct fid *fid = NULL, sfid;
119 __u32 tfh[3]; /* filehandle fragment for oldstyle filehandles */
120 int fileid_type; 119 int fileid_type;
121 int data_left = fh->fh_size/4; 120 int data_left = fh->fh_size/4;
122 121
@@ -128,7 +127,6 @@ fh_verify(struct svc_rqst *rqstp, struct svc_fh *fhp, int type, int access)
128 127
129 if (fh->fh_version == 1) { 128 if (fh->fh_version == 1) {
130 int len; 129 int len;
131 datap = fh->fh_auth;
132 if (--data_left<0) goto out; 130 if (--data_left<0) goto out;
133 switch (fh->fh_auth_type) { 131 switch (fh->fh_auth_type) {
134 case 0: break; 132 case 0: break;
@@ -144,9 +142,11 @@ fh_verify(struct svc_rqst *rqstp, struct svc_fh *fhp, int type, int access)
144 fh->fh_fsid[1] = fh->fh_fsid[2]; 142 fh->fh_fsid[1] = fh->fh_fsid[2];
145 } 143 }
146 if ((data_left -= len)<0) goto out; 144 if ((data_left -= len)<0) goto out;
147 exp = rqst_exp_find(rqstp, fh->fh_fsid_type, datap); 145 exp = rqst_exp_find(rqstp, fh->fh_fsid_type,
148 datap += len; 146 fh->fh_auth);
147 fid = (struct fid *)(fh->fh_auth + len);
149 } else { 148 } else {
149 __u32 tfh[2];
150 dev_t xdev; 150 dev_t xdev;
151 ino_t xino; 151 ino_t xino;
152 if (fh->fh_size != NFS_FHSIZE) 152 if (fh->fh_size != NFS_FHSIZE)
@@ -190,22 +190,22 @@ fh_verify(struct svc_rqst *rqstp, struct svc_fh *fhp, int type, int access)
190 error = nfserr_badhandle; 190 error = nfserr_badhandle;
191 191
192 if (fh->fh_version != 1) { 192 if (fh->fh_version != 1) {
193 tfh[0] = fh->ofh_ino; 193 sfid.i32.ino = fh->ofh_ino;
194 tfh[1] = fh->ofh_generation; 194 sfid.i32.gen = fh->ofh_generation;
195 tfh[2] = fh->ofh_dirino; 195 sfid.i32.parent_ino = fh->ofh_dirino;
196 datap = tfh; 196 fid = &sfid;
197 data_left = 3; 197 data_left = 3;
198 if (fh->ofh_dirino == 0) 198 if (fh->ofh_dirino == 0)
199 fileid_type = 1; 199 fileid_type = FILEID_INO32_GEN;
200 else 200 else
201 fileid_type = 2; 201 fileid_type = FILEID_INO32_GEN_PARENT;
202 } else 202 } else
203 fileid_type = fh->fh_fileid_type; 203 fileid_type = fh->fh_fileid_type;
204 204
205 if (fileid_type == 0) 205 if (fileid_type == FILEID_ROOT)
206 dentry = dget(exp->ex_dentry); 206 dentry = dget(exp->ex_dentry);
207 else { 207 else {
208 dentry = exportfs_decode_fh(exp->ex_mnt, datap, 208 dentry = exportfs_decode_fh(exp->ex_mnt, fid,
209 data_left, fileid_type, 209 data_left, fileid_type,
210 nfsd_acceptable, exp); 210 nfsd_acceptable, exp);
211 } 211 }
@@ -286,16 +286,21 @@ out:
286 * an inode. In this case a call to fh_update should be made 286 * an inode. In this case a call to fh_update should be made
287 * before the fh goes out on the wire ... 287 * before the fh goes out on the wire ...
288 */ 288 */
289static inline int _fh_update(struct dentry *dentry, struct svc_export *exp, 289static void _fh_update(struct svc_fh *fhp, struct svc_export *exp,
290 __u32 *datap, int *maxsize) 290 struct dentry *dentry)
291{ 291{
292 if (dentry == exp->ex_dentry) { 292 if (dentry != exp->ex_dentry) {
293 *maxsize = 0; 293 struct fid *fid = (struct fid *)
294 return 0; 294 (fhp->fh_handle.fh_auth + fhp->fh_handle.fh_size/4 - 1);
295 } 295 int maxsize = (fhp->fh_maxsize - fhp->fh_handle.fh_size)/4;
296 int subtreecheck = !(exp->ex_flags & NFSEXP_NOSUBTREECHECK);
296 297
297 return exportfs_encode_fh(dentry, datap, maxsize, 298 fhp->fh_handle.fh_fileid_type =
298 !(exp->ex_flags & NFSEXP_NOSUBTREECHECK)); 299 exportfs_encode_fh(dentry, fid, &maxsize, subtreecheck);
300 fhp->fh_handle.fh_size += maxsize * 4;
301 } else {
302 fhp->fh_handle.fh_fileid_type = FILEID_ROOT;
303 }
299} 304}
300 305
301/* 306/*
@@ -457,12 +462,8 @@ fh_compose(struct svc_fh *fhp, struct svc_export *exp, struct dentry *dentry,
457 datap += len/4; 462 datap += len/4;
458 fhp->fh_handle.fh_size = 4 + len; 463 fhp->fh_handle.fh_size = 4 + len;
459 464
460 if (inode) { 465 if (inode)
461 int size = (fhp->fh_maxsize-len-4)/4; 466 _fh_update(fhp, exp, dentry);
462 fhp->fh_handle.fh_fileid_type =
463 _fh_update(dentry, exp, datap, &size);
464 fhp->fh_handle.fh_size += size*4;
465 }
466 if (fhp->fh_handle.fh_fileid_type == 255) 467 if (fhp->fh_handle.fh_fileid_type == 255)
467 return nfserr_opnotsupp; 468 return nfserr_opnotsupp;
468 } 469 }
@@ -479,7 +480,6 @@ __be32
479fh_update(struct svc_fh *fhp) 480fh_update(struct svc_fh *fhp)
480{ 481{
481 struct dentry *dentry; 482 struct dentry *dentry;
482 __u32 *datap;
483 483
484 if (!fhp->fh_dentry) 484 if (!fhp->fh_dentry)
485 goto out_bad; 485 goto out_bad;
@@ -490,15 +490,10 @@ fh_update(struct svc_fh *fhp)
490 if (fhp->fh_handle.fh_version != 1) { 490 if (fhp->fh_handle.fh_version != 1) {
491 _fh_update_old(dentry, fhp->fh_export, &fhp->fh_handle); 491 _fh_update_old(dentry, fhp->fh_export, &fhp->fh_handle);
492 } else { 492 } else {
493 int size; 493 if (fhp->fh_handle.fh_fileid_type != FILEID_ROOT)
494 if (fhp->fh_handle.fh_fileid_type != 0)
495 goto out; 494 goto out;
496 datap = fhp->fh_handle.fh_auth+ 495
497 fhp->fh_handle.fh_size/4 -1; 496 _fh_update(fhp, fhp->fh_export, dentry);
498 size = (fhp->fh_maxsize - fhp->fh_handle.fh_size)/4;
499 fhp->fh_handle.fh_fileid_type =
500 _fh_update(dentry, fhp->fh_export, datap, &size);
501 fhp->fh_handle.fh_size += size*4;
502 if (fhp->fh_handle.fh_fileid_type == 255) 497 if (fhp->fh_handle.fh_fileid_type == 255)
503 return nfserr_opnotsupp; 498 return nfserr_opnotsupp;
504 } 499 }
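
The sfid/fid accesses above rely on the struct fid this series introduces in include/linux/exportfs.h, which is not part of this hunk. Its layout is approximately the union below, reproduced here purely as an assumption for readability:

struct fid {
	union {
		struct {
			u32 ino;
			u32 gen;
			u32 parent_ino;
			u32 parent_gen;
		} i32;
		__u32 raw[6];	/* filesystems with private handle layouts index this */
	};
};
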
diff --git a/fs/ntfs/namei.c b/fs/ntfs/namei.c
index e93c6142b23c..e1781c8b1650 100644
--- a/fs/ntfs/namei.c
+++ b/fs/ntfs/namei.c
@@ -450,58 +450,40 @@ try_next:
450 return parent_dent; 450 return parent_dent;
451} 451}
452 452
453/** 453static struct inode *ntfs_nfs_get_inode(struct super_block *sb,
454 * ntfs_get_dentry - find a dentry for the inode from a file handle sub-fragment 454 u64 ino, u32 generation)
455 * @sb: super block identifying the mounted ntfs volume
456 * @fh: the file handle sub-fragment
457 *
458 * Find a dentry for the inode given a file handle sub-fragment. This function
459 * is called from fs/exportfs/expfs.c::find_exported_dentry() which in turn is
460 * called from the default ->decode_fh() which is export_decode_fh() in the
461 * same file. The code is closely based on the default ->get_dentry() helper
462 * fs/exportfs/expfs.c::get_object().
463 *
464 * The @fh contains two 32-bit unsigned values, the first one is the inode
465 * number and the second one is the inode generation.
466 *
467 * Return the dentry on success or the error code on error (IS_ERR() is true).
468 */
469static struct dentry *ntfs_get_dentry(struct super_block *sb, void *fh)
470{ 455{
471 struct inode *vi; 456 struct inode *inode;
472 struct dentry *dent;
473 unsigned long ino = ((u32 *)fh)[0];
474 u32 gen = ((u32 *)fh)[1];
475 457
476 ntfs_debug("Entering for inode 0x%lx, generation 0x%x.", ino, gen); 458 inode = ntfs_iget(sb, ino);
477 vi = ntfs_iget(sb, ino); 459 if (!IS_ERR(inode)) {
478 if (IS_ERR(vi)) { 460 if (is_bad_inode(inode) || inode->i_generation != generation) {
479 ntfs_error(sb, "Failed to get inode 0x%lx.", ino); 461 iput(inode);
480 return (struct dentry *)vi; 462 inode = ERR_PTR(-ESTALE);
481 } 463 }
482 if (unlikely(is_bad_inode(vi) || vi->i_generation != gen)) {
483 /* We didn't find the right inode. */
484 ntfs_error(sb, "Inode 0x%lx, bad count: %d %d or version 0x%x "
485 "0x%x.", vi->i_ino, vi->i_nlink,
486 atomic_read(&vi->i_count), vi->i_generation,
487 gen);
488 iput(vi);
489 return ERR_PTR(-ESTALE);
490 }
491 /* Now find a dentry. If possible, get a well-connected one. */
492 dent = d_alloc_anon(vi);
493 if (unlikely(!dent)) {
494 iput(vi);
495 return ERR_PTR(-ENOMEM);
496 } 464 }
497 ntfs_debug("Done for inode 0x%lx, generation 0x%x.", ino, gen); 465
498 return dent; 466 return inode;
467}
468
469static struct dentry *ntfs_fh_to_dentry(struct super_block *sb, struct fid *fid,
470 int fh_len, int fh_type)
471{
472 return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
473 ntfs_nfs_get_inode);
474}
475
476static struct dentry *ntfs_fh_to_parent(struct super_block *sb, struct fid *fid,
477 int fh_len, int fh_type)
478{
479 return generic_fh_to_parent(sb, fid, fh_len, fh_type,
480 ntfs_nfs_get_inode);
499} 481}
500 482
501/** 483/**
502 * Export operations allowing NFS exporting of mounted NTFS partitions. 484 * Export operations allowing NFS exporting of mounted NTFS partitions.
503 * 485 *
504 * We use the default ->decode_fh() and ->encode_fh() for now. Note that they 486 * We use the default ->encode_fh() for now. Note that they
505 * use 32 bits to store the inode number which is an unsigned long so on 64-bit 487 * use 32 bits to store the inode number which is an unsigned long so on 64-bit
506 * architectures is usually 64 bits so it would all fail horribly on huge 488 * architectures is usually 64 bits so it would all fail horribly on huge
507 * volumes. I guess we need to define our own encode and decode fh functions 489 * volumes. I guess we need to define our own encode and decode fh functions
@@ -517,10 +499,9 @@ static struct dentry *ntfs_get_dentry(struct super_block *sb, void *fh)
517 * allowing the inode number 0 which is used in NTFS for the system file $MFT 499 * allowing the inode number 0 which is used in NTFS for the system file $MFT
518 * and due to using iget() whereas NTFS needs ntfs_iget(). 500 * and due to using iget() whereas NTFS needs ntfs_iget().
519 */ 501 */
520struct export_operations ntfs_export_ops = { 502const struct export_operations ntfs_export_ops = {
521 .get_parent = ntfs_get_parent, /* Find the parent of a given 503 .get_parent = ntfs_get_parent, /* Find the parent of a given
522 directory. */ 504 directory. */
523 .get_dentry = ntfs_get_dentry, /* Find a dentry for the inode 505 .fh_to_dentry = ntfs_fh_to_dentry,
524 given a file handle 506 .fh_to_parent = ntfs_fh_to_parent,
525 sub-fragment. */
526}; 507};
diff --git a/fs/ntfs/ntfs.h b/fs/ntfs/ntfs.h
index d73f5a9ac341..d6a340bf80fc 100644
--- a/fs/ntfs/ntfs.h
+++ b/fs/ntfs/ntfs.h
@@ -69,7 +69,7 @@ extern const struct inode_operations ntfs_dir_inode_ops;
69extern const struct file_operations ntfs_empty_file_ops; 69extern const struct file_operations ntfs_empty_file_ops;
70extern const struct inode_operations ntfs_empty_inode_ops; 70extern const struct inode_operations ntfs_empty_inode_ops;
71 71
72extern struct export_operations ntfs_export_ops; 72extern const struct export_operations ntfs_export_ops;
73 73
74/** 74/**
75 * NTFS_SB - return the ntfs volume given a vfs super block 75 * NTFS_SB - return the ntfs volume given a vfs super block
diff --git a/fs/ocfs2/export.c b/fs/ocfs2/export.c
index c3bbc198f9ce..535bfa9568a4 100644
--- a/fs/ocfs2/export.c
+++ b/fs/ocfs2/export.c
@@ -45,9 +45,9 @@ struct ocfs2_inode_handle
45 u32 ih_generation; 45 u32 ih_generation;
46}; 46};
47 47
48static struct dentry *ocfs2_get_dentry(struct super_block *sb, void *vobjp) 48static struct dentry *ocfs2_get_dentry(struct super_block *sb,
49 struct ocfs2_inode_handle *handle)
49{ 50{
50 struct ocfs2_inode_handle *handle = vobjp;
51 struct inode *inode; 51 struct inode *inode;
52 struct dentry *result; 52 struct dentry *result;
53 53
@@ -194,54 +194,37 @@ bail:
194 return type; 194 return type;
195} 195}
196 196
197static struct dentry *ocfs2_decode_fh(struct super_block *sb, u32 *fh_in, 197static struct dentry *ocfs2_fh_to_dentry(struct super_block *sb,
198 int fh_len, int fileid_type, 198 struct fid *fid, int fh_len, int fh_type)
199 int (*acceptable)(void *context,
200 struct dentry *de),
201 void *context)
202{ 199{
203 struct ocfs2_inode_handle handle, parent; 200 struct ocfs2_inode_handle handle;
204 struct dentry *ret = NULL;
205 __le32 *fh = (__force __le32 *) fh_in;
206
207 mlog_entry("(0x%p, 0x%p, %d, %d, 0x%p, 0x%p)\n",
208 sb, fh, fh_len, fileid_type, acceptable, context);
209
210 if (fh_len < 3 || fileid_type > 2)
211 goto bail;
212
213 if (fileid_type == 2) {
214 if (fh_len < 6)
215 goto bail;
216
217 parent.ih_blkno = (u64)le32_to_cpu(fh[3]) << 32;
218 parent.ih_blkno |= (u64)le32_to_cpu(fh[4]);
219 parent.ih_generation = le32_to_cpu(fh[5]);
220 201
221 mlog(0, "Decoding parent: blkno: %llu, generation: %u\n", 202 if (fh_len < 3 || fh_type > 2)
222 (unsigned long long)parent.ih_blkno, 203 return NULL;
223 parent.ih_generation);
224 }
225 204
226 handle.ih_blkno = (u64)le32_to_cpu(fh[0]) << 32; 205 handle.ih_blkno = (u64)le32_to_cpu(fid->raw[0]) << 32;
227 handle.ih_blkno |= (u64)le32_to_cpu(fh[1]); 206 handle.ih_blkno |= (u64)le32_to_cpu(fid->raw[1]);
228 handle.ih_generation = le32_to_cpu(fh[2]); 207 handle.ih_generation = le32_to_cpu(fid->raw[2]);
208 return ocfs2_get_dentry(sb, &handle);
209}
229 210
230 mlog(0, "Encoding fh: blkno: %llu, generation: %u\n", 211static struct dentry *ocfs2_fh_to_parent(struct super_block *sb,
231 (unsigned long long)handle.ih_blkno, handle.ih_generation); 212 struct fid *fid, int fh_len, int fh_type)
213{
214 struct ocfs2_inode_handle parent;
232 215
233 ret = ocfs2_export_ops.find_exported_dentry(sb, &handle, &parent, 216 if (fh_type != 2 || fh_len < 6)
234 acceptable, context); 217 return NULL;
235 218
236bail: 219 parent.ih_blkno = (u64)le32_to_cpu(fid->raw[3]) << 32;
237 mlog_exit_ptr(ret); 220 parent.ih_blkno |= (u64)le32_to_cpu(fid->raw[4]);
238 return ret; 221 parent.ih_generation = le32_to_cpu(fid->raw[5]);
222 return ocfs2_get_dentry(sb, &parent);
239} 223}
240 224
241struct export_operations ocfs2_export_ops = { 225const struct export_operations ocfs2_export_ops = {
242 .decode_fh = ocfs2_decode_fh,
243 .encode_fh = ocfs2_encode_fh, 226 .encode_fh = ocfs2_encode_fh,
244 227 .fh_to_dentry = ocfs2_fh_to_dentry,
228 .fh_to_parent = ocfs2_fh_to_parent,
245 .get_parent = ocfs2_get_parent, 229 .get_parent = ocfs2_get_parent,
246 .get_dentry = ocfs2_get_dentry,
247}; 230};
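
Spelling out the fid->raw indices used by the two decoders above, the ocfs2 on-wire handle layout works out to the following (a summary added here for readability, not code from the patch):

/*
 * fh_type 1 carries only the first three words; fh_type 2 appends the parent:
 *
 *	raw[0]	object blkno, high 32 bits
 *	raw[1]	object blkno, low 32 bits
 *	raw[2]	object i_generation
 *	raw[3]	parent blkno, high 32 bits	(fh_type 2 only)
 *	raw[4]	parent blkno, low 32 bits	(fh_type 2 only)
 *	raw[5]	parent i_generation		(fh_type 2 only)
 */
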
diff --git a/fs/ocfs2/export.h b/fs/ocfs2/export.h
index e08bed9e45a0..41a738678c37 100644
--- a/fs/ocfs2/export.h
+++ b/fs/ocfs2/export.h
@@ -28,6 +28,6 @@
28 28
29#include <linux/exportfs.h> 29#include <linux/exportfs.h>
30 30
31extern struct export_operations ocfs2_export_ops; 31extern const struct export_operations ocfs2_export_ops;
32 32
33#endif /* OCFS2_EXPORT_H */ 33#endif /* OCFS2_EXPORT_H */
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 39a3d7c969c5..aeaf0d0f2f51 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -2255,27 +2255,6 @@ static const struct inode_operations proc_tgid_base_inode_operations = {
2255 .setattr = proc_setattr, 2255 .setattr = proc_setattr,
2256}; 2256};
2257 2257
2258/**
2259 * proc_flush_task - Remove dcache entries for @task from the /proc dcache.
2260 *
2261 * @task: task that should be flushed.
2262 *
2263 * Looks in the dcache for
2264 * /proc/@pid
2265 * /proc/@tgid/task/@pid
2266 * if either directory is present flushes it and all of it'ts children
2267 * from the dcache.
2268 *
2269 * It is safe and reasonable to cache /proc entries for a task until
2270 * that task exits. After that they just clog up the dcache with
2271 * useless entries, possibly causing useful dcache entries to be
2272 * flushed instead. This routine is proved to flush those useless
2273 * dcache entries at process exit time.
2274 *
2275 * NOTE: This routine is just an optimization so it does not guarantee
2276 * that no dcache entries will exist at process exit time it
2277 * just makes it very unlikely that any will persist.
2278 */
2279static void proc_flush_task_mnt(struct vfsmount *mnt, pid_t pid, pid_t tgid) 2258static void proc_flush_task_mnt(struct vfsmount *mnt, pid_t pid, pid_t tgid)
2280{ 2259{
2281 struct dentry *dentry, *leader, *dir; 2260 struct dentry *dentry, *leader, *dir;
@@ -2322,10 +2301,29 @@ out:
2322 return; 2301 return;
2323} 2302}
2324 2303
2325/* 2304/**
2326 * when flushing dentries from proc one need to flush them from global 2305 * proc_flush_task - Remove dcache entries for @task from the /proc dcache.
2306 * @task: task that should be flushed.
2307 *
2308 * When flushing dentries from proc, one needs to flush them from global
2327 * proc (proc_mnt) and from all the namespaces' procs this task was seen 2309 * proc (proc_mnt) and from all the namespaces' procs this task was seen
2328 * in. this call is supposed to make all this job. 2310 * in. This call is supposed to do all of this job.
2311 *
2312 * Looks in the dcache for
2313 * /proc/@pid
2314 * /proc/@tgid/task/@pid
2315 * if either directory is present, flushes it and all of its children
2316 * from the dcache.
2317 *
2318 * It is safe and reasonable to cache /proc entries for a task until
2319 * that task exits. After that they just clog up the dcache with
2320 * useless entries, possibly causing useful dcache entries to be
2321 * flushed instead. This routine is provided to flush those useless
2322 * dcache entries at process exit time.
2323 *
2324 * NOTE: This routine is just an optimization, so it does not guarantee
2325 * that no dcache entries will exist at process exit time; it
2326 * just makes it very unlikely that any will persist.
2329 */ 2327 */
2330 2328
2331void proc_flush_task(struct task_struct *task) 2329void proc_flush_task(struct task_struct *task)
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
index a991af96f3f0..231fd5ccadc5 100644
--- a/fs/reiserfs/inode.c
+++ b/fs/reiserfs/inode.c
@@ -1515,19 +1515,20 @@ struct inode *reiserfs_iget(struct super_block *s, const struct cpu_key *key)
1515 return inode; 1515 return inode;
1516} 1516}
1517 1517
1518struct dentry *reiserfs_get_dentry(struct super_block *sb, void *vobjp) 1518static struct dentry *reiserfs_get_dentry(struct super_block *sb,
1519 u32 objectid, u32 dir_id, u32 generation)
1520
1519{ 1521{
1520 __u32 *data = vobjp;
1521 struct cpu_key key; 1522 struct cpu_key key;
1522 struct dentry *result; 1523 struct dentry *result;
1523 struct inode *inode; 1524 struct inode *inode;
1524 1525
1525 key.on_disk_key.k_objectid = data[0]; 1526 key.on_disk_key.k_objectid = objectid;
1526 key.on_disk_key.k_dir_id = data[1]; 1527 key.on_disk_key.k_dir_id = dir_id;
1527 reiserfs_write_lock(sb); 1528 reiserfs_write_lock(sb);
1528 inode = reiserfs_iget(sb, &key); 1529 inode = reiserfs_iget(sb, &key);
1529 if (inode && !IS_ERR(inode) && data[2] != 0 && 1530 if (inode && !IS_ERR(inode) && generation != 0 &&
1530 data[2] != inode->i_generation) { 1531 generation != inode->i_generation) {
1531 iput(inode); 1532 iput(inode);
1532 inode = NULL; 1533 inode = NULL;
1533 } 1534 }
@@ -1544,14 +1545,9 @@ struct dentry *reiserfs_get_dentry(struct super_block *sb, void *vobjp)
1544 return result; 1545 return result;
1545} 1546}
1546 1547
1547struct dentry *reiserfs_decode_fh(struct super_block *sb, __u32 * data, 1548struct dentry *reiserfs_fh_to_dentry(struct super_block *sb, struct fid *fid,
1548 int len, int fhtype, 1549 int fh_len, int fh_type)
1549 int (*acceptable) (void *contect,
1550 struct dentry * de),
1551 void *context)
1552{ 1550{
1553 __u32 obj[3], parent[3];
1554
1555 /* fhtype happens to reflect the number of u32s encoded. 1551 /* fhtype happens to reflect the number of u32s encoded.
1556 * due to a bug in earlier code, fhtype might indicate there 1552 * due to a bug in earlier code, fhtype might indicate there
1557 * are more u32s then actually fitted. 1553 * are more u32s then actually fitted.
@@ -1564,32 +1560,28 @@ struct dentry *reiserfs_decode_fh(struct super_block *sb, __u32 * data,
1564 * 6 - as above plus generation of directory 1560 * 6 - as above plus generation of directory
1565 * 6 does not fit in NFSv2 handles 1561 * 6 does not fit in NFSv2 handles
1566 */ 1562 */
1567 if (fhtype > len) { 1563 if (fh_type > fh_len) {
1568 if (fhtype != 6 || len != 5) 1564 if (fh_type != 6 || fh_len != 5)
1569 reiserfs_warning(sb, 1565 reiserfs_warning(sb,
1570 "nfsd/reiserfs, fhtype=%d, len=%d - odd", 1566 "nfsd/reiserfs, fhtype=%d, len=%d - odd",
1571 fhtype, len); 1567 fh_type, fh_len);
1572 fhtype = 5; 1568 fh_type = 5;
1573 } 1569 }
1574 1570
1575 obj[0] = data[0]; 1571 return reiserfs_get_dentry(sb, fid->raw[0], fid->raw[1],
1576 obj[1] = data[1]; 1572 (fh_type == 3 || fh_type >= 5) ? fid->raw[2] : 0);
1577 if (fhtype == 3 || fhtype >= 5) 1573}
1578 obj[2] = data[2];
1579 else
1580 obj[2] = 0; /* generation number */
1581 1574
1582 if (fhtype >= 4) { 1575struct dentry *reiserfs_fh_to_parent(struct super_block *sb, struct fid *fid,
1583 parent[0] = data[fhtype >= 5 ? 3 : 2]; 1576 int fh_len, int fh_type)
1584 parent[1] = data[fhtype >= 5 ? 4 : 3]; 1577{
1585 if (fhtype == 6) 1578 if (fh_type < 4)
1586 parent[2] = data[5]; 1579 return NULL;
1587 else 1580
1588 parent[2] = 0; 1581 return reiserfs_get_dentry(sb,
1589 } 1582 (fh_type >= 5) ? fid->raw[3] : fid->raw[2],
1590 return sb->s_export_op->find_exported_dentry(sb, obj, 1583 (fh_type >= 5) ? fid->raw[4] : fid->raw[3],
1591 fhtype < 4 ? NULL : parent, 1584 (fh_type == 6) ? fid->raw[5] : 0);
1592 acceptable, context);
1593} 1585}
1594 1586
1595int reiserfs_encode_fh(struct dentry *dentry, __u32 * data, int *lenp, 1587int reiserfs_encode_fh(struct dentry *dentry, __u32 * data, int *lenp,
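
Restating the raw[] indexing used by the two new reiserfs entry points per handle type (the type value is the number of __u32 words, as the comment above explains); this mapping is inferred from the code just shown:

/*
 *	fh_type 2:  raw[0]=objectid  raw[1]=dir_id
 *	fh_type 3:  raw[0]=objectid  raw[1]=dir_id  raw[2]=generation
 *	fh_type 4:  raw[0..1] as above, raw[2]=parent objectid, raw[3]=parent dir_id
 *	fh_type 5:  raw[0..2] as type 3, raw[3]=parent objectid, raw[4]=parent dir_id
 *	fh_type 6:  raw[0..4] as type 5, raw[5]=parent generation
 */
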
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
index 98c3781bc069..5cd85fe5df5d 100644
--- a/fs/reiserfs/super.c
+++ b/fs/reiserfs/super.c
@@ -661,11 +661,11 @@ static struct quotactl_ops reiserfs_qctl_operations = {
661}; 661};
662#endif 662#endif
663 663
664static struct export_operations reiserfs_export_ops = { 664static const struct export_operations reiserfs_export_ops = {
665 .encode_fh = reiserfs_encode_fh, 665 .encode_fh = reiserfs_encode_fh,
666 .decode_fh = reiserfs_decode_fh, 666 .fh_to_dentry = reiserfs_fh_to_dentry,
667 .fh_to_parent = reiserfs_fh_to_parent,
667 .get_parent = reiserfs_get_parent, 668 .get_parent = reiserfs_get_parent,
668 .get_dentry = reiserfs_get_dentry,
669}; 669};
670 670
671/* this struct is used in reiserfs_getopt () for containing the value for those 671/* this struct is used in reiserfs_getopt () for containing the value for those
diff --git a/fs/xfs/linux-2.6/xfs_export.c b/fs/xfs/linux-2.6/xfs_export.c
index 3586c7a28d2c..15bd4948832c 100644
--- a/fs/xfs/linux-2.6/xfs_export.c
+++ b/fs/xfs/linux-2.6/xfs_export.c
@@ -33,62 +33,25 @@
33static struct dentry dotdot = { .d_name.name = "..", .d_name.len = 2, }; 33static struct dentry dotdot = { .d_name.name = "..", .d_name.len = 2, };
34 34
35/* 35/*
36 * XFS encodes and decodes the fileid portion of NFS filehandles 36 * Note that we only accept fileids which are long enough rather than allow
37 * itself instead of letting the generic NFS code do it. This 37 * the parent generation number to default to zero. XFS considers zero a
38 * allows filesystems with 64 bit inode numbers to be exported. 38 * valid generation number not an invalid/wildcard value.
39 *
40 * Note that a side effect is that xfs_vget() won't be passed a
41 * zero inode/generation pair under normal circumstances. As
42 * however a malicious client could send us such data, the check
43 * remains in that code.
44 */ 39 */
45 40static int xfs_fileid_length(int fileid_type)
46STATIC struct dentry *
47xfs_fs_decode_fh(
48 struct super_block *sb,
49 __u32 *fh,
50 int fh_len,
51 int fileid_type,
52 int (*acceptable)(
53 void *context,
54 struct dentry *de),
55 void *context)
56{ 41{
57 xfs_fid_t ifid; 42 switch (fileid_type) {
58 xfs_fid_t pfid; 43 case FILEID_INO32_GEN:
59 void *parent = NULL; 44 return 2;
60 int is64 = 0; 45 case FILEID_INO32_GEN_PARENT:
61 __u32 *p = fh; 46 return 4;
62 47 case FILEID_INO32_GEN | XFS_FILEID_TYPE_64FLAG:
63#if XFS_BIG_INUMS 48 return 3;
64 is64 = (fileid_type & XFS_FILEID_TYPE_64FLAG); 49 case FILEID_INO32_GEN_PARENT | XFS_FILEID_TYPE_64FLAG:
65 fileid_type &= ~XFS_FILEID_TYPE_64FLAG; 50 return 6;
66#endif
67
68 /*
69 * Note that we only accept fileids which are long enough
70 * rather than allow the parent generation number to default
71 * to zero. XFS considers zero a valid generation number not
72 * an invalid/wildcard value. There's little point printk'ing
73 * a warning here as we don't have the client information
74 * which would make such a warning useful.
75 */
76 if (fileid_type > 2 ||
77 fh_len < xfs_fileid_length((fileid_type == 2), is64))
78 return NULL;
79
80 p = xfs_fileid_decode_fid2(p, &ifid, is64);
81
82 if (fileid_type == 2) {
83 p = xfs_fileid_decode_fid2(p, &pfid, is64);
84 parent = &pfid;
85 } 51 }
86 52 return 255; /* invalid */
87 fh = (__u32 *)&ifid;
88 return sb->s_export_op->find_exported_dentry(sb, fh, parent, acceptable, context);
89} 53}
90 54
91
92STATIC int 55STATIC int
93xfs_fs_encode_fh( 56xfs_fs_encode_fh(
94 struct dentry *dentry, 57 struct dentry *dentry,
@@ -96,21 +59,21 @@ xfs_fs_encode_fh(
96 int *max_len, 59 int *max_len,
97 int connectable) 60 int connectable)
98{ 61{
62 struct fid *fid = (struct fid *)fh;
63 struct xfs_fid64 *fid64 = (struct xfs_fid64 *)fh;
99 struct inode *inode = dentry->d_inode; 64 struct inode *inode = dentry->d_inode;
100 int type = 1; 65 int fileid_type;
101 __u32 *p = fh;
102 int len; 66 int len;
103 int is64 = 0;
104#if XFS_BIG_INUMS
105 if (!(XFS_M(inode->i_sb)->m_flags & XFS_MOUNT_SMALL_INUMS)) {
106 /* filesystem may contain 64bit inode numbers */
107 is64 = XFS_FILEID_TYPE_64FLAG;
108 }
109#endif
110 67
111 /* Directories don't need their parent encoded, they have ".." */ 68 /* Directories don't need their parent encoded, they have ".." */
112 if (S_ISDIR(inode->i_mode)) 69 if (S_ISDIR(inode->i_mode))
113 connectable = 0; 70 fileid_type = FILEID_INO32_GEN;
71 else
72 fileid_type = FILEID_INO32_GEN_PARENT;
73
74 /* filesystem may contain 64bit inode numbers */
75 if (!(XFS_M(inode->i_sb)->m_flags & XFS_MOUNT_SMALL_INUMS))
76 fileid_type |= XFS_FILEID_TYPE_64FLAG;
114 77
115 /* 78 /*
116 * Only encode if there is enough space given. In practice 79 * Only encode if there is enough space given. In practice
@@ -118,39 +81,118 @@ xfs_fs_encode_fh(
118 * over NFSv2 with the subtree_check export option; the other 81 * over NFSv2 with the subtree_check export option; the other
119 * seven combinations work. The real answer is "don't use v2". 82 * seven combinations work. The real answer is "don't use v2".
120 */ 83 */
121 len = xfs_fileid_length(connectable, is64); 84 len = xfs_fileid_length(fileid_type);
122 if (*max_len < len) 85 if (*max_len < len)
123 return 255; 86 return 255;
124 *max_len = len; 87 *max_len = len;
125 88
126 p = xfs_fileid_encode_inode(p, inode, is64); 89 switch (fileid_type) {
127 if (connectable) { 90 case FILEID_INO32_GEN_PARENT:
128 spin_lock(&dentry->d_lock); 91 spin_lock(&dentry->d_lock);
129 p = xfs_fileid_encode_inode(p, dentry->d_parent->d_inode, is64); 92 fid->i32.parent_ino = dentry->d_parent->d_inode->i_ino;
93 fid->i32.parent_gen = dentry->d_parent->d_inode->i_generation;
130 spin_unlock(&dentry->d_lock); 94 spin_unlock(&dentry->d_lock);
131 type = 2; 95 /*FALLTHRU*/
96 case FILEID_INO32_GEN:
97 fid->i32.ino = inode->i_ino;
98 fid->i32.gen = inode->i_generation;
99 break;
100 case FILEID_INO32_GEN_PARENT | XFS_FILEID_TYPE_64FLAG:
101 spin_lock(&dentry->d_lock);
102 fid64->parent_ino = dentry->d_parent->d_inode->i_ino;
103 fid64->parent_gen = dentry->d_parent->d_inode->i_generation;
104 spin_unlock(&dentry->d_lock);
105 /*FALLTHRU*/
106 case FILEID_INO32_GEN | XFS_FILEID_TYPE_64FLAG:
107 fid64->ino = inode->i_ino;
108 fid64->gen = inode->i_generation;
109 break;
132 } 110 }
133 BUG_ON((p - fh) != len); 111
134 return type | is64; 112 return fileid_type;
135} 113}
136 114
137STATIC struct dentry * 115STATIC struct inode *
138xfs_fs_get_dentry( 116xfs_nfs_get_inode(
139 struct super_block *sb, 117 struct super_block *sb,
140 void *data) 118 u64 ino,
141{ 119 u32 generation)
120 {
121 xfs_fid_t xfid;
142 bhv_vnode_t *vp; 122 bhv_vnode_t *vp;
143 struct inode *inode;
144 struct dentry *result;
145 int error; 123 int error;
146 124
147 error = xfs_vget(XFS_M(sb), &vp, data); 125 xfid.fid_len = sizeof(xfs_fid_t) - sizeof(xfid.fid_len);
148 if (error || vp == NULL) 126 xfid.fid_pad = 0;
149 return ERR_PTR(-ESTALE) ; 127 xfid.fid_ino = ino;
128 xfid.fid_gen = generation;
150 129
151 inode = vn_to_inode(vp); 130 error = xfs_vget(XFS_M(sb), &vp, &xfid);
131 if (error)
132 return ERR_PTR(-error);
133
134 return vp ? vn_to_inode(vp) : NULL;
135}
136
137STATIC struct dentry *
138xfs_fs_fh_to_dentry(struct super_block *sb, struct fid *fid,
139 int fh_len, int fileid_type)
140{
141 struct xfs_fid64 *fid64 = (struct xfs_fid64 *)fid;
142 struct inode *inode = NULL;
143 struct dentry *result;
144
145 if (fh_len < xfs_fileid_length(fileid_type))
146 return NULL;
147
148 switch (fileid_type) {
149 case FILEID_INO32_GEN_PARENT:
150 case FILEID_INO32_GEN:
151 inode = xfs_nfs_get_inode(sb, fid->i32.ino, fid->i32.gen);
152 break;
153 case FILEID_INO32_GEN_PARENT | XFS_FILEID_TYPE_64FLAG:
154 case FILEID_INO32_GEN | XFS_FILEID_TYPE_64FLAG:
155 inode = xfs_nfs_get_inode(sb, fid64->ino, fid64->gen);
156 break;
157 }
158
159 if (!inode)
160 return NULL;
161 if (IS_ERR(inode))
162 return ERR_PTR(PTR_ERR(inode));
163 result = d_alloc_anon(inode);
164 if (!result) {
165 iput(inode);
166 return ERR_PTR(-ENOMEM);
167 }
168 return result;
169}
170
171STATIC struct dentry *
172xfs_fs_fh_to_parent(struct super_block *sb, struct fid *fid,
173 int fh_len, int fileid_type)
174{
175 struct xfs_fid64 *fid64 = (struct xfs_fid64 *)fid;
176 struct inode *inode = NULL;
177 struct dentry *result;
178
179 switch (fileid_type) {
180 case FILEID_INO32_GEN_PARENT:
181 inode = xfs_nfs_get_inode(sb, fid->i32.parent_ino,
182 fid->i32.parent_gen);
183 break;
184 case FILEID_INO32_GEN_PARENT | XFS_FILEID_TYPE_64FLAG:
185 inode = xfs_nfs_get_inode(sb, fid64->parent_ino,
186 fid64->parent_gen);
187 break;
188 }
189
190 if (!inode)
191 return NULL;
192 if (IS_ERR(inode))
193 return ERR_PTR(PTR_ERR(inode));
152 result = d_alloc_anon(inode); 194 result = d_alloc_anon(inode);
153 if (!result) { 195 if (!result) {
154 iput(inode); 196 iput(inode);
155 return ERR_PTR(-ENOMEM); 197 return ERR_PTR(-ENOMEM);
156 } 198 }
@@ -178,9 +220,9 @@ xfs_fs_get_parent(
178 return parent; 220 return parent;
179} 221}
180 222
181struct export_operations xfs_export_operations = { 223const struct export_operations xfs_export_operations = {
182 .decode_fh = xfs_fs_decode_fh,
183 .encode_fh = xfs_fs_encode_fh, 224 .encode_fh = xfs_fs_encode_fh,
225 .fh_to_dentry = xfs_fs_fh_to_dentry,
226 .fh_to_parent = xfs_fs_fh_to_parent,
184 .get_parent = xfs_fs_get_parent, 227 .get_parent = xfs_fs_get_parent,
185 .get_dentry = xfs_fs_get_dentry,
186}; 228};
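
The fileid types XFS now returns from ->encode_fh() correspond to the following handle contents, with lengths in __u32 words matching xfs_fileid_length(); struct xfs_fid64 is introduced in the xfs_export.h hunk below (summary inferred from the code above):

/*
 *	FILEID_INO32_GEN				  2 words: 32-bit ino, gen
 *	FILEID_INO32_GEN_PARENT				  4 words: + parent ino, parent gen
 *	FILEID_INO32_GEN | XFS_FILEID_TYPE_64FLAG	  3 words: struct xfs_fid64 without parent
 *	FILEID_INO32_GEN_PARENT | XFS_FILEID_TYPE_64FLAG  6 words: full struct xfs_fid64
 */
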
diff --git a/fs/xfs/linux-2.6/xfs_export.h b/fs/xfs/linux-2.6/xfs_export.h
index 2f36071a86f7..3272b6ae7a35 100644
--- a/fs/xfs/linux-2.6/xfs_export.h
+++ b/fs/xfs/linux-2.6/xfs_export.h
@@ -59,50 +59,14 @@
59 * a subdirectory) or use the "fsid" export option. 59 * a subdirectory) or use the "fsid" export option.
60 */ 60 */
61 61
62struct xfs_fid64 {
63 u64 ino;
64 u32 gen;
65 u64 parent_ino;
66 u32 parent_gen;
67} __attribute__((packed));
68
62/* This flag goes on the wire. Don't play with it. */ 69/* This flag goes on the wire. Don't play with it. */
63#define XFS_FILEID_TYPE_64FLAG 0x80 /* NFS fileid has 64bit inodes */ 70#define XFS_FILEID_TYPE_64FLAG 0x80 /* NFS fileid has 64bit inodes */
64 71
65/* Calculate the length in u32 units of the fileid data */
66static inline int
67xfs_fileid_length(int hasparent, int is64)
68{
69 return hasparent ? (is64 ? 6 : 4) : (is64 ? 3 : 2);
70}
71
72/*
73 * Decode encoded inode information (either for the inode itself
74 * or the parent) into an xfs_fid_t structure. Advances and
75 * returns the new data pointer
76 */
77static inline __u32 *
78xfs_fileid_decode_fid2(__u32 *p, xfs_fid_t *fid, int is64)
79{
80 fid->fid_len = sizeof(xfs_fid_t) - sizeof(fid->fid_len);
81 fid->fid_pad = 0;
82 fid->fid_ino = *p++;
83#if XFS_BIG_INUMS
84 if (is64)
85 fid->fid_ino |= (((__u64)(*p++)) << 32);
86#endif
87 fid->fid_gen = *p++;
88 return p;
89}
90
91/*
92 * Encode inode information (either for the inode itself or the
93 * parent) into a fileid buffer. Advances and returns the new
94 * data pointer.
95 */
96static inline __u32 *
97xfs_fileid_encode_inode(__u32 *p, struct inode *inode, int is64)
98{
99 *p++ = (__u32)inode->i_ino;
100#if XFS_BIG_INUMS
101 if (is64)
102 *p++ = (__u32)(inode->i_ino >> 32);
103#endif
104 *p++ = inode->i_generation;
105 return p;
106}
107
108#endif /* __XFS_EXPORT_H__ */ 72#endif /* __XFS_EXPORT_H__ */
diff --git a/fs/xfs/linux-2.6/xfs_super.h b/fs/xfs/linux-2.6/xfs_super.h
index c78c23310fe8..3efcf45b14ab 100644
--- a/fs/xfs/linux-2.6/xfs_super.h
+++ b/fs/xfs/linux-2.6/xfs_super.h
@@ -118,7 +118,7 @@ extern int xfs_blkdev_get(struct xfs_mount *, const char *,
118extern void xfs_blkdev_put(struct block_device *); 118extern void xfs_blkdev_put(struct block_device *);
119extern void xfs_blkdev_issue_flush(struct xfs_buftarg *); 119extern void xfs_blkdev_issue_flush(struct xfs_buftarg *);
120 120
121extern struct export_operations xfs_export_operations; 121extern const struct export_operations xfs_export_operations;
122 122
123#define XFS_M(sb) ((struct xfs_mount *)((sb)->s_fs_info)) 123#define XFS_M(sb) ((struct xfs_mount *)((sb)->s_fs_info))
124 124
diff --git a/include/acpi/actbl1.h b/include/acpi/actbl1.h
index 4e5d3ca53a8e..a1b1b2ee3e51 100644
--- a/include/acpi/actbl1.h
+++ b/include/acpi/actbl1.h
@@ -257,7 +257,8 @@ struct acpi_table_dbgp {
257struct acpi_table_dmar { 257struct acpi_table_dmar {
258 struct acpi_table_header header; /* Common ACPI table header */ 258 struct acpi_table_header header; /* Common ACPI table header */
259 u8 width; /* Host Address Width */ 259 u8 width; /* Host Address Width */
260 u8 reserved[11]; 260 u8 flags;
261 u8 reserved[10];
261}; 262};
262 263
263/* DMAR subtable header */ 264/* DMAR subtable header */
@@ -265,8 +266,6 @@ struct acpi_table_dmar {
265struct acpi_dmar_header { 266struct acpi_dmar_header {
266 u16 type; 267 u16 type;
267 u16 length; 268 u16 length;
268 u8 flags;
269 u8 reserved[3];
270}; 269};
271 270
272/* Values for subtable type in struct acpi_dmar_header */ 271/* Values for subtable type in struct acpi_dmar_header */
@@ -274,13 +273,15 @@ struct acpi_dmar_header {
274enum acpi_dmar_type { 273enum acpi_dmar_type {
275 ACPI_DMAR_TYPE_HARDWARE_UNIT = 0, 274 ACPI_DMAR_TYPE_HARDWARE_UNIT = 0,
276 ACPI_DMAR_TYPE_RESERVED_MEMORY = 1, 275 ACPI_DMAR_TYPE_RESERVED_MEMORY = 1,
277 ACPI_DMAR_TYPE_RESERVED = 2 /* 2 and greater are reserved */ 276 ACPI_DMAR_TYPE_ATSR = 2,
277 ACPI_DMAR_TYPE_RESERVED = 3 /* 3 and greater are reserved */
278}; 278};
279 279
280struct acpi_dmar_device_scope { 280struct acpi_dmar_device_scope {
281 u8 entry_type; 281 u8 entry_type;
282 u8 length; 282 u8 length;
283 u8 segment; 283 u16 reserved;
284 u8 enumeration_id;
284 u8 bus; 285 u8 bus;
285}; 286};
286 287
@@ -290,7 +291,14 @@ enum acpi_dmar_scope_type {
290 ACPI_DMAR_SCOPE_TYPE_NOT_USED = 0, 291 ACPI_DMAR_SCOPE_TYPE_NOT_USED = 0,
291 ACPI_DMAR_SCOPE_TYPE_ENDPOINT = 1, 292 ACPI_DMAR_SCOPE_TYPE_ENDPOINT = 1,
292 ACPI_DMAR_SCOPE_TYPE_BRIDGE = 2, 293 ACPI_DMAR_SCOPE_TYPE_BRIDGE = 2,
293 ACPI_DMAR_SCOPE_TYPE_RESERVED = 3 /* 3 and greater are reserved */ 294 ACPI_DMAR_SCOPE_TYPE_IOAPIC = 3,
295 ACPI_DMAR_SCOPE_TYPE_HPET = 4,
296 ACPI_DMAR_SCOPE_TYPE_RESERVED = 5 /* 5 and greater are reserved */
297};
298
299struct acpi_dmar_pci_path {
300 u8 dev;
301 u8 fn;
294}; 302};
295 303
296/* 304/*
@@ -301,6 +309,9 @@ enum acpi_dmar_scope_type {
301 309
302struct acpi_dmar_hardware_unit { 310struct acpi_dmar_hardware_unit {
303 struct acpi_dmar_header header; 311 struct acpi_dmar_header header;
312 u8 flags;
313 u8 reserved;
314 u16 segment;
304 u64 address; /* Register Base Address */ 315 u64 address; /* Register Base Address */
305}; 316};
306 317
@@ -312,7 +323,9 @@ struct acpi_dmar_hardware_unit {
312 323
313struct acpi_dmar_reserved_memory { 324struct acpi_dmar_reserved_memory {
314 struct acpi_dmar_header header; 325 struct acpi_dmar_header header;
315 u64 address; /* 4_k aligned base address */ 326 u16 reserved;
327 u16 segment;
328 u64 base_address; /* 4_k aligned base address */
316 u64 end_address; /* 4_k aligned limit address */ 329 u64 end_address; /* 4_k aligned limit address */
317}; 330};
318 331
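
With the flags byte moved into struct acpi_table_dmar and the segment/flags fields relocated into the subtables, walking the DMAR entries looks roughly like the sketch below. This is a hedged illustration rather than the actual in-tree parser; it assumes dmar points at the mapped table and omits length validation:

static void dmar_walk_subtables(struct acpi_table_dmar *dmar)
{
	struct acpi_dmar_header *entry;
	void *end = (void *)dmar + dmar->header.length;

	/* table-wide flags now live in dmar->flags */
	for (entry = (struct acpi_dmar_header *)(dmar + 1);
	     (void *)entry < end;
	     entry = (void *)entry + entry->length) {
		switch (entry->type) {
		case ACPI_DMAR_TYPE_HARDWARE_UNIT: {
			struct acpi_dmar_hardware_unit *drhd =
				(struct acpi_dmar_hardware_unit *)entry;
			/* per-unit flags, PCI segment and register base */
			pr_debug("DRHD: segment %u at 0x%llx\n",
				 drhd->segment,
				 (unsigned long long)drhd->address);
			break;
		}
		case ACPI_DMAR_TYPE_RESERVED_MEMORY: {
			struct acpi_dmar_reserved_memory *rmrr =
				(struct acpi_dmar_reserved_memory *)entry;
			/* reserved range, scoped to rmrr->segment */
			pr_debug("RMRR: 0x%llx-0x%llx\n",
				 (unsigned long long)rmrr->base_address,
				 (unsigned long long)rmrr->end_address);
			break;
		}
		default:
			break;
		}
	}
}
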
diff --git a/include/asm-alpha/scatterlist.h b/include/asm-alpha/scatterlist.h
index 917365405e83..440747ca6349 100644
--- a/include/asm-alpha/scatterlist.h
+++ b/include/asm-alpha/scatterlist.h
@@ -5,7 +5,10 @@
5#include <asm/types.h> 5#include <asm/types.h>
6 6
7struct scatterlist { 7struct scatterlist {
8 struct page *page; 8#ifdef CONFIG_DEBUG_SG
9 unsigned long sg_magic;
10#endif
11 unsigned long page_link;
9 unsigned int offset; 12 unsigned int offset;
10 13
11 unsigned int length; 14 unsigned int length;
diff --git a/include/asm-arm/dma-mapping.h b/include/asm-arm/dma-mapping.h
index 1eb8aac43228..e99406a7bece 100644
--- a/include/asm-arm/dma-mapping.h
+++ b/include/asm-arm/dma-mapping.h
@@ -5,7 +5,7 @@
5 5
6#include <linux/mm.h> /* need struct page */ 6#include <linux/mm.h> /* need struct page */
7 7
8#include <asm/scatterlist.h> 8#include <linux/scatterlist.h>
9 9
10/* 10/*
11 * DMA-consistent mapping functions. These allocate/free a region of 11 * DMA-consistent mapping functions. These allocate/free a region of
@@ -274,8 +274,8 @@ dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
274 for (i = 0; i < nents; i++, sg++) { 274 for (i = 0; i < nents; i++, sg++) {
275 char *virt; 275 char *virt;
276 276
277 sg->dma_address = page_to_dma(dev, sg->page) + sg->offset; 277 sg->dma_address = page_to_dma(dev, sg_page(sg)) + sg->offset;
278 virt = page_address(sg->page) + sg->offset; 278 virt = sg_virt(sg);
279 279
280 if (!arch_is_coherent()) 280 if (!arch_is_coherent())
281 dma_cache_maint(virt, sg->length, dir); 281 dma_cache_maint(virt, sg->length, dir);
@@ -371,7 +371,7 @@ dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nents,
371 int i; 371 int i;
372 372
373 for (i = 0; i < nents; i++, sg++) { 373 for (i = 0; i < nents; i++, sg++) {
374 char *virt = page_address(sg->page) + sg->offset; 374 char *virt = sg_virt(sg);
375 if (!arch_is_coherent()) 375 if (!arch_is_coherent())
376 dma_cache_maint(virt, sg->length, dir); 376 dma_cache_maint(virt, sg->length, dir);
377 } 377 }
@@ -384,7 +384,7 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nents,
384 int i; 384 int i;
385 385
386 for (i = 0; i < nents; i++, sg++) { 386 for (i = 0; i < nents; i++, sg++) {
387 char *virt = page_address(sg->page) + sg->offset; 387 char *virt = sg_virt(sg);
388 if (!arch_is_coherent()) 388 if (!arch_is_coherent())
389 dma_cache_maint(virt, sg->length, dir); 389 dma_cache_maint(virt, sg->length, dir);
390 } 390 }
diff --git a/include/asm-arm/scatterlist.h b/include/asm-arm/scatterlist.h
index de2f65eb42ed..ca0a37d03400 100644
--- a/include/asm-arm/scatterlist.h
+++ b/include/asm-arm/scatterlist.h
@@ -5,7 +5,10 @@
5#include <asm/types.h> 5#include <asm/types.h>
6 6
7struct scatterlist { 7struct scatterlist {
8 struct page *page; /* buffer page */ 8#ifdef CONFIG_DEBUG_SG
9 unsigned long sg_magic;
10#endif
11 unsigned long page_link;
9 unsigned int offset; /* buffer offset */ 12 unsigned int offset; /* buffer offset */
10 dma_addr_t dma_address; /* dma address */ 13 dma_addr_t dma_address; /* dma address */
11 unsigned int length; /* length */ 14 unsigned int length; /* length */
diff --git a/include/asm-avr32/arch-at32ap/board.h b/include/asm-avr32/arch-at32ap/board.h
index 7dbd603c38cc..d6993a6b6473 100644
--- a/include/asm-avr32/arch-at32ap/board.h
+++ b/include/asm-avr32/arch-at32ap/board.h
@@ -44,6 +44,13 @@ struct usba_platform_data {
44struct platform_device * 44struct platform_device *
45at32_add_device_usba(unsigned int id, struct usba_platform_data *data); 45at32_add_device_usba(unsigned int id, struct usba_platform_data *data);
46 46
47struct ide_platform_data {
48 u8 cs;
49};
50struct platform_device *
51at32_add_device_ide(unsigned int id, unsigned int extint,
52 struct ide_platform_data *data);
53
47/* depending on what's hooked up, not all SSC pins will be used */ 54/* depending on what's hooked up, not all SSC pins will be used */
48#define ATMEL_SSC_TK 0x01 55#define ATMEL_SSC_TK 0x01
49#define ATMEL_SSC_TF 0x02 56#define ATMEL_SSC_TF 0x02
@@ -58,4 +65,20 @@ at32_add_device_usba(unsigned int id, struct usba_platform_data *data);
58struct platform_device * 65struct platform_device *
59at32_add_device_ssc(unsigned int id, unsigned int flags); 66at32_add_device_ssc(unsigned int id, unsigned int flags);
60 67
68struct platform_device *at32_add_device_twi(unsigned int id);
69struct platform_device *at32_add_device_mci(unsigned int id);
70struct platform_device *at32_add_device_ac97c(unsigned int id);
71struct platform_device *at32_add_device_abdac(unsigned int id);
72
73struct cf_platform_data {
74 int detect_pin;
75 int reset_pin;
76 int vcc_pin;
77 int ready_pin;
78 u8 cs;
79};
80struct platform_device *
81at32_add_device_cf(unsigned int id, unsigned int extint,
82 struct cf_platform_data *data);
83
61#endif /* __ASM_ARCH_BOARD_H */ 84#endif /* __ASM_ARCH_BOARD_H */
diff --git a/include/asm-avr32/dma-mapping.h b/include/asm-avr32/dma-mapping.h
index 81e342636ac4..a7131630c057 100644
--- a/include/asm-avr32/dma-mapping.h
+++ b/include/asm-avr32/dma-mapping.h
@@ -217,8 +217,8 @@ dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
217 for (i = 0; i < nents; i++) { 217 for (i = 0; i < nents; i++) {
218 char *virt; 218 char *virt;
219 219
220 sg[i].dma_address = page_to_bus(sg[i].page) + sg[i].offset; 220 sg[i].dma_address = page_to_bus(sg_page(&sg[i])) + sg[i].offset;
221 virt = page_address(sg[i].page) + sg[i].offset; 221 virt = sg_virt(&sg[i]);
222 dma_cache_sync(dev, virt, sg[i].length, direction); 222 dma_cache_sync(dev, virt, sg[i].length, direction);
223 } 223 }
224 224
@@ -327,8 +327,7 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
327 int i; 327 int i;
328 328
329 for (i = 0; i < nents; i++) { 329 for (i = 0; i < nents; i++) {
330 dma_cache_sync(dev, page_address(sg[i].page) + sg[i].offset, 330 dma_cache_sync(dev, sg_virt(&sg[i]), sg[i].length, direction);
331 sg[i].length, direction);
332 } 331 }
333} 332}
334 333
diff --git a/include/asm-avr32/scatterlist.h b/include/asm-avr32/scatterlist.h
index c6d5ce3b3a25..377320e3bd17 100644
--- a/include/asm-avr32/scatterlist.h
+++ b/include/asm-avr32/scatterlist.h
@@ -4,7 +4,10 @@
4#include <asm/types.h> 4#include <asm/types.h>
5 5
6struct scatterlist { 6struct scatterlist {
7 struct page *page; 7#ifdef CONFIG_DEBUG_SG
8 unsigned long sg_magic;
9#endif
10 unsigned long page_link;
8 unsigned int offset; 11 unsigned int offset;
9 dma_addr_t dma_address; 12 dma_addr_t dma_address;
10 unsigned int length; 13 unsigned int length;
diff --git a/include/asm-blackfin/scatterlist.h b/include/asm-blackfin/scatterlist.h
index 60e07b92044c..04f448711cd0 100644
--- a/include/asm-blackfin/scatterlist.h
+++ b/include/asm-blackfin/scatterlist.h
@@ -4,7 +4,10 @@
4#include <linux/mm.h> 4#include <linux/mm.h>
5 5
6struct scatterlist { 6struct scatterlist {
7 struct page *page; 7#ifdef CONFIG_DEBUG_SG
8 unsigned long sg_magic;
9#endif
10 unsigned long page_link;
8 unsigned int offset; 11 unsigned int offset;
9 dma_addr_t dma_address; 12 dma_addr_t dma_address;
10 unsigned int length; 13 unsigned int length;
@@ -17,7 +20,6 @@ struct scatterlist {
17 * returns, or alternatively stop on the first sg_dma_len(sg) which 20 * returns, or alternatively stop on the first sg_dma_len(sg) which
18 * is 0. 21 * is 0.
19 */ 22 */
20#define sg_address(sg) (page_address((sg)->page) + (sg)->offset)
21#define sg_dma_address(sg) ((sg)->dma_address) 23#define sg_dma_address(sg) ((sg)->dma_address)
22#define sg_dma_len(sg) ((sg)->length) 24#define sg_dma_len(sg) ((sg)->length)
23 25
diff --git a/include/asm-cris/scatterlist.h b/include/asm-cris/scatterlist.h
index 4bdc44c4ac3d..faff53ad1f96 100644
--- a/include/asm-cris/scatterlist.h
+++ b/include/asm-cris/scatterlist.h
@@ -2,11 +2,14 @@
2#define __ASM_CRIS_SCATTERLIST_H 2#define __ASM_CRIS_SCATTERLIST_H
3 3
4struct scatterlist { 4struct scatterlist {
5#ifdef CONFIG_DEBUG_SG
6 unsigned long sg_magic;
7#endif
5 char * address; /* Location data is to be transferred to */ 8 char * address; /* Location data is to be transferred to */
6 unsigned int length; 9 unsigned int length;
7 10
8 /* The following is i386 highmem junk - not used by us */ 11 /* The following is i386 highmem junk - not used by us */
9 struct page * page; /* Location for highmem page, if any */ 12 unsigned long page_link;
10 unsigned int offset;/* for highmem, page offset */ 13 unsigned int offset;/* for highmem, page offset */
11 14
12}; 15};
diff --git a/include/asm-frv/scatterlist.h b/include/asm-frv/scatterlist.h
index 8e827fa853f1..99ba76edc42a 100644
--- a/include/asm-frv/scatterlist.h
+++ b/include/asm-frv/scatterlist.h
@@ -4,25 +4,28 @@
4#include <asm/types.h> 4#include <asm/types.h>
5 5
6/* 6/*
7 * Drivers must set either ->address or (preferred) ->page and ->offset 7 * Drivers must set either ->address or (preferred) page and ->offset
8 * to indicate where data must be transferred to/from. 8 * to indicate where data must be transferred to/from.
9 * 9 *
10 * Using ->page is recommended since it handles highmem data as well as 10 * Using page is recommended since it handles highmem data as well as
11 * low mem. ->address is restricted to data which has a virtual mapping, and 11 * low mem. ->address is restricted to data which has a virtual mapping, and
12 * it will go away in the future. Updating to ->page can be automated very 12 * it will go away in the future. Updating to page can be automated very
13 * easily -- something like 13 * easily -- something like
14 * 14 *
15 * sg->address = some_ptr; 15 * sg->address = some_ptr;
16 * 16 *
17 * can be rewritten as 17 * can be rewritten as
18 * 18 *
19 * sg->page = virt_to_page(some_ptr); 19 * sg_set_page(sg, virt_to_page(some_ptr));
20 * sg->offset = (unsigned long) some_ptr & ~PAGE_MASK; 20 * sg->offset = (unsigned long) some_ptr & ~PAGE_MASK;
21 * 21 *
22 * and that's it. There's no excuse for not highmem enabling YOUR driver. /jens 22 * and that's it. There's no excuse for not highmem enabling YOUR driver. /jens
23 */ 23 */
24struct scatterlist { 24struct scatterlist {
25 struct page *page; /* Location for highmem page, if any */ 25#ifdef CONFIG_DEBUG_SG
26 unsigned long sg_magic;
27#endif
28 unsigned long page_link;
26 unsigned int offset; /* for highmem, page offset */ 29 unsigned int offset; /* for highmem, page offset */
27 30
28 dma_addr_t dma_address; 31 dma_addr_t dma_address;
diff --git a/include/asm-h8300/scatterlist.h b/include/asm-h8300/scatterlist.h
index 985fdf54eaca..d3ecdd87ac90 100644
--- a/include/asm-h8300/scatterlist.h
+++ b/include/asm-h8300/scatterlist.h
@@ -4,7 +4,10 @@
4#include <asm/types.h> 4#include <asm/types.h>
5 5
6struct scatterlist { 6struct scatterlist {
7 struct page *page; 7#ifdef CONFIG_DEBUG_SG
8 unsigned long sg_magic;
9#endif
10 unsigned long page_link;
8 unsigned int offset; 11 unsigned int offset;
9 dma_addr_t dma_address; 12 dma_addr_t dma_address;
10 unsigned int length; 13 unsigned int length;
diff --git a/include/asm-ia64/scatterlist.h b/include/asm-ia64/scatterlist.h
index 7d5234d50312..d6f57874041d 100644
--- a/include/asm-ia64/scatterlist.h
+++ b/include/asm-ia64/scatterlist.h
@@ -9,7 +9,10 @@
9#include <asm/types.h> 9#include <asm/types.h>
10 10
11struct scatterlist { 11struct scatterlist {
12 struct page *page; 12#ifdef CONFIG_DEBUG_SG
13 unsigned long sg_magic;
14#endif
15 unsigned long page_link;
13 unsigned int offset; 16 unsigned int offset;
14 unsigned int length; /* buffer length */ 17 unsigned int length; /* buffer length */
15 18
diff --git a/include/asm-m32r/scatterlist.h b/include/asm-m32r/scatterlist.h
index 352415ff5eb9..1ed372c73d0b 100644
--- a/include/asm-m32r/scatterlist.h
+++ b/include/asm-m32r/scatterlist.h
@@ -4,9 +4,12 @@
4#include <asm/types.h> 4#include <asm/types.h>
5 5
6struct scatterlist { 6struct scatterlist {
7#ifdef CONFIG_DEBUG_SG
8 unsigned long sg_magic;
9#endif
7 char * address; /* Location data is to be transferred to, NULL for 10 char * address; /* Location data is to be transferred to, NULL for
8 * highmem page */ 11 * highmem page */
9 struct page * page; /* Location for highmem page, if any */ 12 unsigned long page_link;
10 unsigned int offset;/* for highmem, page offset */ 13 unsigned int offset;/* for highmem, page offset */
11 14
12 dma_addr_t dma_address; 15 dma_addr_t dma_address;
diff --git a/include/asm-m68k/scatterlist.h b/include/asm-m68k/scatterlist.h
index 24887a2d9c7b..d3a7a0edfeca 100644
--- a/include/asm-m68k/scatterlist.h
+++ b/include/asm-m68k/scatterlist.h
@@ -4,7 +4,10 @@
4#include <linux/types.h> 4#include <linux/types.h>
5 5
6struct scatterlist { 6struct scatterlist {
7 struct page *page; 7#ifdef CONFIG_DEBUG_SG
8 unsigned long sg_magic;
9#endif
10 unsigned long page_link;
8 unsigned int offset; 11 unsigned int offset;
9 unsigned int length; 12 unsigned int length;
10 13
diff --git a/include/asm-m68knommu/module.h b/include/asm-m68knommu/module.h
index 57e95cc01ad5..2e45ab50b232 100644
--- a/include/asm-m68knommu/module.h
+++ b/include/asm-m68knommu/module.h
@@ -1 +1,11 @@
1#include <asm-m68k/module.h> 1#ifndef ASM_M68KNOMMU_MODULE_H
2#define ASM_M68KNOMMU_MODULE_H
3
4struct mod_arch_specific {
5};
6
7#define Elf_Shdr Elf32_Shdr
8#define Elf_Sym Elf32_Sym
9#define Elf_Ehdr Elf32_Ehdr
10
11#endif /* ASM_M68KNOMMU_MODULE_H */
diff --git a/include/asm-m68knommu/scatterlist.h b/include/asm-m68knommu/scatterlist.h
index 4da79d3d3f34..afc4788b0d2c 100644
--- a/include/asm-m68knommu/scatterlist.h
+++ b/include/asm-m68knommu/scatterlist.h
@@ -5,13 +5,15 @@
5#include <asm/types.h> 5#include <asm/types.h>
6 6
7struct scatterlist { 7struct scatterlist {
8 struct page *page; 8#ifdef CONFIG_DEBUG_SG
9 unsigned long sg_magic;
10#endif
11 unsigned long page_link;
9 unsigned int offset; 12 unsigned int offset;
10 dma_addr_t dma_address; 13 dma_addr_t dma_address;
11 unsigned int length; 14 unsigned int length;
12}; 15};
13 16
14#define sg_address(sg) (page_address((sg)->page) + (sg)->offset)
15#define sg_dma_address(sg) ((sg)->dma_address) 17#define sg_dma_address(sg) ((sg)->dma_address)
16#define sg_dma_len(sg) ((sg)->length) 18#define sg_dma_len(sg) ((sg)->length)
17 19
diff --git a/include/asm-m68knommu/uaccess.h b/include/asm-m68knommu/uaccess.h
index 9ed9169a8849..68bbe9b312f1 100644
--- a/include/asm-m68knommu/uaccess.h
+++ b/include/asm-m68knommu/uaccess.h
@@ -170,10 +170,12 @@ static inline long strnlen_user(const char *src, long n)
170 */ 170 */
171 171
172static inline unsigned long 172static inline unsigned long
173clear_user(void *to, unsigned long n) 173__clear_user(void *to, unsigned long n)
174{ 174{
175 memset(to, 0, n); 175 memset(to, 0, n);
176 return 0; 176 return 0;
177} 177}
178 178
179#define clear_user(to,n) __clear_user(to,n)
180
179#endif /* _M68KNOMMU_UACCESS_H */ 181#endif /* _M68KNOMMU_UACCESS_H */
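The uaccess hunk renames the memset()-based helper to __clear_user() and keeps clear_user() as a one-line wrapper, matching the pair of symbols other architectures export. A hypothetical caller, shown only for the return convention (non-zero would mean that many bytes could not be cleared; on this nommu port it is always 0):

	#include <linux/errno.h>
	#include <asm/uaccess.h>

	static int example_zero_user_buffer(void *ubuf, unsigned long n)
	{
		if (clear_user(ubuf, n))
			return -EFAULT;
		return 0;
	}
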
diff --git a/include/asm-mips/gt64120.h b/include/asm-mips/gt64120.h
index 4bf8e28f8850..e64b41093c49 100644
--- a/include/asm-mips/gt64120.h
+++ b/include/asm-mips/gt64120.h
@@ -21,6 +21,8 @@
21#ifndef _ASM_GT64120_H 21#ifndef _ASM_GT64120_H
22#define _ASM_GT64120_H 22#define _ASM_GT64120_H
23 23
24#include <linux/clocksource.h>
25
24#include <asm/addrspace.h> 26#include <asm/addrspace.h>
25#include <asm/byteorder.h> 27#include <asm/byteorder.h>
26 28
@@ -572,4 +574,7 @@
572#define GT_READ(ofs) le32_to_cpu(__GT_READ(ofs)) 574#define GT_READ(ofs) le32_to_cpu(__GT_READ(ofs))
573#define GT_WRITE(ofs, data) __GT_WRITE(ofs, cpu_to_le32(data)) 575#define GT_WRITE(ofs, data) __GT_WRITE(ofs, cpu_to_le32(data))
574 576
577extern void gt641xx_set_base_clock(unsigned int clock);
578extern int gt641xx_timer0_state(void);
579
575#endif /* _ASM_GT64120_H */ 580#endif /* _ASM_GT64120_H */
diff --git a/include/asm-mips/i8253.h b/include/asm-mips/i8253.h
index 8f689d7df6b1..affb32ce4af9 100644
--- a/include/asm-mips/i8253.h
+++ b/include/asm-mips/i8253.h
@@ -2,8 +2,8 @@
2 * Machine specific IO port address definition for generic. 2 * Machine specific IO port address definition for generic.
3 * Written by Osamu Tomita <tomita@cinet.co.jp> 3 * Written by Osamu Tomita <tomita@cinet.co.jp>
4 */ 4 */
5#ifndef _MACH_IO_PORTS_H 5#ifndef __ASM_I8253_H
6#define _MACH_IO_PORTS_H 6#define __ASM_I8253_H
7 7
8/* i8253A PIT registers */ 8/* i8253A PIT registers */
9#define PIT_MODE 0x43 9#define PIT_MODE 0x43
@@ -27,4 +27,4 @@
27 27
28extern void setup_pit_timer(void); 28extern void setup_pit_timer(void);
29 29
30#endif /* !_MACH_IO_PORTS_H */ 30#endif /* __ASM_I8253_H */
diff --git a/include/asm-mips/scatterlist.h b/include/asm-mips/scatterlist.h
index 7af104c95b20..83d69fe17c9f 100644
--- a/include/asm-mips/scatterlist.h
+++ b/include/asm-mips/scatterlist.h
@@ -4,7 +4,10 @@
4#include <asm/types.h> 4#include <asm/types.h>
5 5
6struct scatterlist { 6struct scatterlist {
7 struct page * page; 7#ifdef CONFIG_DEBUG_SG
8 unsigned long sg_magic;
9#endif
10 unsigned long page_link;
8 unsigned int offset; 11 unsigned int offset;
9 dma_addr_t dma_address; 12 dma_addr_t dma_address;
10 unsigned int length; 13 unsigned int length;
diff --git a/include/asm-mips/sibyte/sb1250.h b/include/asm-mips/sibyte/sb1250.h
index 494aa65dcfbd..0dad844a3b5b 100644
--- a/include/asm-mips/sibyte/sb1250.h
+++ b/include/asm-mips/sibyte/sb1250.h
@@ -45,13 +45,11 @@ extern unsigned int soc_type;
45extern unsigned int periph_rev; 45extern unsigned int periph_rev;
46extern unsigned int zbbus_mhz; 46extern unsigned int zbbus_mhz;
47 47
48extern void sb1250_hpt_setup(void);
49extern void sb1250_time_init(void); 48extern void sb1250_time_init(void);
50extern void sb1250_mask_irq(int cpu, int irq); 49extern void sb1250_mask_irq(int cpu, int irq);
51extern void sb1250_unmask_irq(int cpu, int irq); 50extern void sb1250_unmask_irq(int cpu, int irq);
52extern void sb1250_smp_finish(void); 51extern void sb1250_smp_finish(void);
53 52
54extern void bcm1480_hpt_setup(void);
55extern void bcm1480_time_init(void); 53extern void bcm1480_time_init(void);
56extern void bcm1480_mask_irq(int cpu, int irq); 54extern void bcm1480_mask_irq(int cpu, int irq);
57extern void bcm1480_unmask_irq(int cpu, int irq); 55extern void bcm1480_unmask_irq(int cpu, int irq);
diff --git a/include/asm-parisc/scatterlist.h b/include/asm-parisc/scatterlist.h
index e7211c748446..62269b31ebf4 100644
--- a/include/asm-parisc/scatterlist.h
+++ b/include/asm-parisc/scatterlist.h
@@ -5,7 +5,10 @@
5#include <asm/types.h> 5#include <asm/types.h>
6 6
7struct scatterlist { 7struct scatterlist {
8 struct page *page; 8#ifdef CONFIG_DEBUG_SG
9 unsigned long sg_magic;
10#endif
11 unsigned long page_link;
9 unsigned int offset; 12 unsigned int offset;
10 13
11 unsigned int length; 14 unsigned int length;
@@ -15,7 +18,7 @@ struct scatterlist {
15 __u32 iova_length; /* bytes mapped */ 18 __u32 iova_length; /* bytes mapped */
16}; 19};
17 20
18#define sg_virt_addr(sg) ((unsigned long)(page_address(sg->page) + sg->offset)) 21#define sg_virt_addr(sg) ((unsigned long)sg_virt(sg))
19#define sg_dma_address(sg) ((sg)->iova) 22#define sg_dma_address(sg) ((sg)->iova)
20#define sg_dma_len(sg) ((sg)->iova_length) 23#define sg_dma_len(sg) ((sg)->iova_length)
21 24
diff --git a/include/asm-powerpc/dma-mapping.h b/include/asm-powerpc/dma-mapping.h
index 65be95dd03a5..ff52013c0e2d 100644
--- a/include/asm-powerpc/dma-mapping.h
+++ b/include/asm-powerpc/dma-mapping.h
@@ -285,9 +285,9 @@ dma_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
285 BUG_ON(direction == DMA_NONE); 285 BUG_ON(direction == DMA_NONE);
286 286
287 for_each_sg(sgl, sg, nents, i) { 287 for_each_sg(sgl, sg, nents, i) {
288 BUG_ON(!sg->page); 288 BUG_ON(!sg_page(sg));
289 __dma_sync_page(sg->page, sg->offset, sg->length, direction); 289 __dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
290 sg->dma_address = page_to_bus(sg->page) + sg->offset; 290 sg->dma_address = page_to_bus(sg_page(sg)) + sg->offset;
291 } 291 }
292 292
293 return nents; 293 return nents;
@@ -328,7 +328,7 @@ static inline void dma_sync_sg_for_cpu(struct device *dev,
328 BUG_ON(direction == DMA_NONE); 328 BUG_ON(direction == DMA_NONE);
329 329
330 for_each_sg(sgl, sg, nents, i) 330 for_each_sg(sgl, sg, nents, i)
331 __dma_sync_page(sg->page, sg->offset, sg->length, direction); 331 __dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
332} 332}
333 333
334static inline void dma_sync_sg_for_device(struct device *dev, 334static inline void dma_sync_sg_for_device(struct device *dev,
@@ -341,7 +341,7 @@ static inline void dma_sync_sg_for_device(struct device *dev,
341 BUG_ON(direction == DMA_NONE); 341 BUG_ON(direction == DMA_NONE);
342 342
343 for_each_sg(sgl, sg, nents, i) 343 for_each_sg(sgl, sg, nents, i)
344 __dma_sync_page(sg->page, sg->offset, sg->length, direction); 344 __dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
345} 345}
346 346
347static inline int dma_mapping_error(dma_addr_t dma_addr) 347static inline int dma_mapping_error(dma_addr_t dma_addr)
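The dma-mapping conversion above only swaps sg->page dereferences for sg_page(); the driver-facing contract is unchanged. For context, a hedged sketch of a caller driving this API (device and scatterlist names are made up, the rest is the generic DMA API):

	#include <linux/device.h>
	#include <linux/dma-mapping.h>
	#include <linux/errno.h>
	#include <linux/kernel.h>
	#include <linux/scatterlist.h>

	static int example_map_and_walk(struct device *dev,
					struct scatterlist *sgl, int nents)
	{
		struct scatterlist *sg;
		int i, mapped;

		mapped = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
		if (!mapped)
			return -EIO;

		/* Each mapped entry carries a bus address and length;
		 * the backing page is still reachable via sg_page(). */
		for_each_sg(sgl, sg, mapped, i)
			pr_debug("sg %d: %#llx + %u\n", i,
				 (unsigned long long)sg_dma_address(sg),
				 sg_dma_len(sg));

		dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
		return 0;
	}
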
diff --git a/include/asm-powerpc/mpc52xx.h b/include/asm-powerpc/mpc52xx.h
index 568135fe52ea..fcb2ebbfddbc 100644
--- a/include/asm-powerpc/mpc52xx.h
+++ b/include/asm-powerpc/mpc52xx.h
@@ -20,6 +20,11 @@
20 20
21#include <linux/suspend.h> 21#include <linux/suspend.h>
22 22
23/* Variants of the 5200(B) */
24#define MPC5200_SVR 0x80110010
25#define MPC5200_SVR_MASK 0xfffffff0
26#define MPC5200B_SVR 0x80110020
27#define MPC5200B_SVR_MASK 0xfffffff0
23 28
24/* ======================================================================== */ 29/* ======================================================================== */
25/* Structures mapping of some unit register set */ 30/* Structures mapping of some unit register set */
@@ -244,6 +249,7 @@ struct mpc52xx_cdm {
244#ifndef __ASSEMBLY__ 249#ifndef __ASSEMBLY__
245 250
246extern void __iomem * mpc52xx_find_and_map(const char *); 251extern void __iomem * mpc52xx_find_and_map(const char *);
252extern void __iomem * mpc52xx_find_and_map_path(const char *path);
247extern unsigned int mpc52xx_find_ipb_freq(struct device_node *node); 253extern unsigned int mpc52xx_find_ipb_freq(struct device_node *node);
248extern void mpc5200_setup_xlb_arbiter(void); 254extern void mpc5200_setup_xlb_arbiter(void);
249extern void mpc52xx_declare_of_platform_devices(void); 255extern void mpc52xx_declare_of_platform_devices(void);
@@ -253,6 +259,9 @@ extern unsigned int mpc52xx_get_irq(void);
253 259
254extern int __init mpc52xx_add_bridge(struct device_node *node); 260extern int __init mpc52xx_add_bridge(struct device_node *node);
255 261
262extern void __init mpc52xx_map_wdt(void);
263extern void mpc52xx_restart(char *cmd);
264
256#endif /* __ASSEMBLY__ */ 265#endif /* __ASSEMBLY__ */
257 266
258#ifdef CONFIG_PM 267#ifdef CONFIG_PM
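mpc52xx_map_wdt() and mpc52xx_restart() are exported so 5200 board ports can reboot through the watchdog/GPT block. A hedged sketch of how a board file would typically wire them up (the machine name and setup function below are illustrative, not taken from this diff):

	#include <linux/init.h>
	#include <asm/machdep.h>
	#include <asm/mpc52xx.h>

	static void __init example5200_setup_arch(void)
	{
		/* Map the watchdog registers early so mpc52xx_restart()
		 * can use them later without an ioremap of its own. */
		mpc52xx_map_wdt();
	}

	define_machine(example5200) {
		.name		= "example MPC5200 board",
		.setup_arch	= example5200_setup_arch,
		.restart	= mpc52xx_restart,
	};
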
diff --git a/include/asm-powerpc/scatterlist.h b/include/asm-powerpc/scatterlist.h
index b075f619c3b7..fcf7d55afe45 100644
--- a/include/asm-powerpc/scatterlist.h
+++ b/include/asm-powerpc/scatterlist.h
@@ -14,7 +14,10 @@
14#include <asm/dma.h> 14#include <asm/dma.h>
15 15
16struct scatterlist { 16struct scatterlist {
17 struct page *page; 17#ifdef CONFIG_DEBUG_SG
18 unsigned long sg_magic;
19#endif
20 unsigned long page_link;
18 unsigned int offset; 21 unsigned int offset;
19 unsigned int length; 22 unsigned int length;
20 23
diff --git a/include/asm-ppc/system.h b/include/asm-ppc/system.h
index cc45780421ca..51df94c73846 100644
--- a/include/asm-ppc/system.h
+++ b/include/asm-ppc/system.h
@@ -33,6 +33,7 @@
33 33
34#define set_mb(var, value) do { var = value; mb(); } while (0) 34#define set_mb(var, value) do { var = value; mb(); } while (0)
35 35
36#define AT_VECTOR_SIZE_ARCH 6 /* entries in ARCH_DLINFO */
36#ifdef CONFIG_SMP 37#ifdef CONFIG_SMP
37#define smp_mb() mb() 38#define smp_mb() mb()
38#define smp_rmb() rmb() 39#define smp_rmb() rmb()
diff --git a/include/asm-s390/cpu.h b/include/asm-s390/cpu.h
new file mode 100644
index 000000000000..352dde194f3c
--- /dev/null
+++ b/include/asm-s390/cpu.h
@@ -0,0 +1,25 @@
1/*
2 * include/asm-s390/cpu.h
3 *
4 * Copyright IBM Corp. 2007
5 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
6 */
7
8#ifndef _ASM_S390_CPU_H_
9#define _ASM_S390_CPU_H_
10
11#include <linux/types.h>
12#include <linux/percpu.h>
13#include <linux/spinlock.h>
14
15struct s390_idle_data {
16 spinlock_t lock;
17 unsigned int in_idle;
18 unsigned long long idle_count;
19 unsigned long long idle_enter;
20 unsigned long long idle_time;
21};
22
23DECLARE_PER_CPU(struct s390_idle_data, s390_idle);
24
25#endif /* _ASM_S390_CPU_H_ */
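The new header exposes the per-cpu idle accounting introduced with this series. A small reader sketch, assuming only what the header above declares (the function name is made up):

	#include <linux/percpu.h>
	#include <linux/spinlock.h>
	#include <asm/cpu.h>

	static unsigned long long example_idle_count(int cpu)
	{
		struct s390_idle_data *idle = &per_cpu(s390_idle, cpu);
		unsigned long long count;

		/* The spinlock keeps the 64-bit counters consistent
		 * against the idle enter/exit path updating them. */
		spin_lock_irq(&idle->lock);
		count = idle->idle_count;
		spin_unlock_irq(&idle->lock);
		return count;
	}
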
diff --git a/include/asm-s390/mmu_context.h b/include/asm-s390/mmu_context.h
index 501cb9b06314..05b842126b99 100644
--- a/include/asm-s390/mmu_context.h
+++ b/include/asm-s390/mmu_context.h
@@ -21,45 +21,43 @@
21 21
22#ifndef __s390x__ 22#ifndef __s390x__
23#define LCTL_OPCODE "lctl" 23#define LCTL_OPCODE "lctl"
24#define PGTABLE_BITS (_SEGMENT_TABLE|USER_STD_MASK)
25#else 24#else
26#define LCTL_OPCODE "lctlg" 25#define LCTL_OPCODE "lctlg"
27#define PGTABLE_BITS (_REGION_TABLE|USER_STD_MASK)
28#endif 26#endif
29 27
30static inline void enter_lazy_tlb(struct mm_struct *mm, 28static inline void update_mm(struct mm_struct *mm, struct task_struct *tsk)
31 struct task_struct *tsk)
32{ 29{
30 pgd_t *pgd = mm->pgd;
31 unsigned long asce_bits;
32
33 /* Calculate asce bits from the first pgd table entry. */
34 asce_bits = _ASCE_TABLE_LENGTH | _ASCE_USER_BITS;
35#ifdef CONFIG_64BIT
36 asce_bits |= _ASCE_TYPE_REGION3;
37#endif
38 S390_lowcore.user_asce = asce_bits | __pa(pgd);
39 if (switch_amode) {
40 /* Load primary space page table origin. */
41 pgd_t *shadow_pgd = get_shadow_table(pgd) ? : pgd;
42 S390_lowcore.user_exec_asce = asce_bits | __pa(shadow_pgd);
43 asm volatile(LCTL_OPCODE" 1,1,%0\n"
44 : : "m" (S390_lowcore.user_exec_asce) );
45 } else
46 /* Load home space page table origin. */
47 asm volatile(LCTL_OPCODE" 13,13,%0"
48 : : "m" (S390_lowcore.user_asce) );
33} 49}
34 50
35static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, 51static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
36 struct task_struct *tsk) 52 struct task_struct *tsk)
37{ 53{
38 pgd_t *shadow_pgd = get_shadow_pgd(next->pgd); 54 if (unlikely(prev == next))
39 55 return;
40 if (prev != next) {
41 S390_lowcore.user_asce = (__pa(next->pgd) & PAGE_MASK) |
42 PGTABLE_BITS;
43 if (shadow_pgd) {
44 /* Load primary/secondary space page table origin. */
45 S390_lowcore.user_exec_asce =
46 (__pa(shadow_pgd) & PAGE_MASK) | PGTABLE_BITS;
47 asm volatile(LCTL_OPCODE" 1,1,%0\n"
48 LCTL_OPCODE" 7,7,%1"
49 : : "m" (S390_lowcore.user_exec_asce),
50 "m" (S390_lowcore.user_asce) );
51 } else if (switch_amode) {
52 /* Load primary space page table origin. */
53 asm volatile(LCTL_OPCODE" 1,1,%0"
54 : : "m" (S390_lowcore.user_asce) );
55 } else
56 /* Load home space page table origin. */
57 asm volatile(LCTL_OPCODE" 13,13,%0"
58 : : "m" (S390_lowcore.user_asce) );
59 }
60 cpu_set(smp_processor_id(), next->cpu_vm_mask); 56 cpu_set(smp_processor_id(), next->cpu_vm_mask);
57 update_mm(next, tsk);
61} 58}
62 59
60#define enter_lazy_tlb(mm,tsk) do { } while (0)
63#define deactivate_mm(tsk,mm) do { } while (0) 61#define deactivate_mm(tsk,mm) do { } while (0)
64 62
65static inline void activate_mm(struct mm_struct *prev, 63static inline void activate_mm(struct mm_struct *prev,
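switch_mm() now defers all the control-register loads to update_mm(), which derives the address-space-control element from the pgd on every switch. Purely as an illustration of what update_mm() loads on a 64-bit kernel (the asce bit names come from the pgtable.h hunk later in this diff):

	#include <linux/mm.h>
	#include <asm/mmu_context.h>

	/* The word placed in S390_lowcore.user_asce by update_mm()
	 * above when CONFIG_64BIT is set. */
	static unsigned long example_user_asce(struct mm_struct *mm)
	{
		return _ASCE_TABLE_LENGTH | _ASCE_USER_BITS |
		       _ASCE_TYPE_REGION3 | __pa(mm->pgd);
	}
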
diff --git a/include/asm-s390/page.h b/include/asm-s390/page.h
index ceec3826a67c..584d0ee3c7f6 100644
--- a/include/asm-s390/page.h
+++ b/include/asm-s390/page.h
@@ -82,6 +82,7 @@ typedef struct { unsigned long pte; } pte_t;
82#ifndef __s390x__ 82#ifndef __s390x__
83 83
84typedef struct { unsigned long pmd; } pmd_t; 84typedef struct { unsigned long pmd; } pmd_t;
85typedef struct { unsigned long pud; } pud_t;
85typedef struct { 86typedef struct {
86 unsigned long pgd0; 87 unsigned long pgd0;
87 unsigned long pgd1; 88 unsigned long pgd1;
@@ -90,6 +91,7 @@ typedef struct {
90 } pgd_t; 91 } pgd_t;
91 92
92#define pmd_val(x) ((x).pmd) 93#define pmd_val(x) ((x).pmd)
94#define pud_val(x) ((x).pud)
93#define pgd_val(x) ((x).pgd0) 95#define pgd_val(x) ((x).pgd0)
94 96
95#else /* __s390x__ */ 97#else /* __s390x__ */
@@ -98,10 +100,12 @@ typedef struct {
98 unsigned long pmd0; 100 unsigned long pmd0;
99 unsigned long pmd1; 101 unsigned long pmd1;
100 } pmd_t; 102 } pmd_t;
103typedef struct { unsigned long pud; } pud_t;
101typedef struct { unsigned long pgd; } pgd_t; 104typedef struct { unsigned long pgd; } pgd_t;
102 105
103#define pmd_val(x) ((x).pmd0) 106#define pmd_val(x) ((x).pmd0)
104#define pmd_val1(x) ((x).pmd1) 107#define pmd_val1(x) ((x).pmd1)
108#define pud_val(x) ((x).pud)
105#define pgd_val(x) ((x).pgd) 109#define pgd_val(x) ((x).pgd)
106 110
107#endif /* __s390x__ */ 111#endif /* __s390x__ */
diff --git a/include/asm-s390/pgalloc.h b/include/asm-s390/pgalloc.h
index e45d3c9a4b7e..709dd1740956 100644
--- a/include/asm-s390/pgalloc.h
+++ b/include/asm-s390/pgalloc.h
@@ -19,140 +19,115 @@
19 19
20#define check_pgt_cache() do {} while (0) 20#define check_pgt_cache() do {} while (0)
21 21
22/* 22unsigned long *crst_table_alloc(struct mm_struct *, int);
23 * Page allocation orders. 23void crst_table_free(unsigned long *);
24 */
25#ifndef __s390x__
26# define PTE_ALLOC_ORDER 0
27# define PMD_ALLOC_ORDER 0
28# define PGD_ALLOC_ORDER 1
29#else /* __s390x__ */
30# define PTE_ALLOC_ORDER 0
31# define PMD_ALLOC_ORDER 2
32# define PGD_ALLOC_ORDER 2
33#endif /* __s390x__ */
34 24
35/* 25unsigned long *page_table_alloc(int);
36 * Allocate and free page tables. The xxx_kernel() versions are 26void page_table_free(unsigned long *);
37 * used to allocate a kernel page table - this turns on ASN bits
38 * if any.
39 */
40 27
41static inline pgd_t *pgd_alloc(struct mm_struct *mm) 28static inline void clear_table(unsigned long *s, unsigned long val, size_t n)
42{ 29{
43 pgd_t *pgd = (pgd_t *) __get_free_pages(GFP_KERNEL, PGD_ALLOC_ORDER); 30 *s = val;
44 int i; 31 n = (n / 256) - 1;
45 32 asm volatile(
46 if (!pgd) 33#ifdef CONFIG_64BIT
47 return NULL; 34 " mvc 8(248,%0),0(%0)\n"
48 if (s390_noexec) {
49 pgd_t *shadow_pgd = (pgd_t *)
50 __get_free_pages(GFP_KERNEL, PGD_ALLOC_ORDER);
51 struct page *page = virt_to_page(pgd);
52
53 if (!shadow_pgd) {
54 free_pages((unsigned long) pgd, PGD_ALLOC_ORDER);
55 return NULL;
56 }
57 page->lru.next = (void *) shadow_pgd;
58 }
59 for (i = 0; i < PTRS_PER_PGD; i++)
60#ifndef __s390x__
61 pmd_clear(pmd_offset(pgd + i, i*PGDIR_SIZE));
62#else 35#else
63 pgd_clear(pgd + i); 36 " mvc 4(252,%0),0(%0)\n"
64#endif 37#endif
65 return pgd; 38 "0: mvc 256(256,%0),0(%0)\n"
39 " la %0,256(%0)\n"
40 " brct %1,0b\n"
41 : "+a" (s), "+d" (n));
66} 42}
67 43
68static inline void pgd_free(pgd_t *pgd) 44static inline void crst_table_init(unsigned long *crst, unsigned long entry)
69{ 45{
70 pgd_t *shadow_pgd = get_shadow_pgd(pgd); 46 clear_table(crst, entry, sizeof(unsigned long)*2048);
71 47 crst = get_shadow_table(crst);
72 if (shadow_pgd) 48 if (crst)
73 free_pages((unsigned long) shadow_pgd, PGD_ALLOC_ORDER); 49 clear_table(crst, entry, sizeof(unsigned long)*2048);
74 free_pages((unsigned long) pgd, PGD_ALLOC_ORDER);
75} 50}
76 51
77#ifndef __s390x__ 52#ifndef __s390x__
78/* 53
79 * page middle directory allocation/free routines. 54static inline unsigned long pgd_entry_type(struct mm_struct *mm)
80 * We use pmd cache only on s390x, so these are dummy routines. This
81 * code never triggers because the pgd will always be present.
82 */
83#define pmd_alloc_one(mm,address) ({ BUG(); ((pmd_t *)2); })
84#define pmd_free(x) do { } while (0)
85#define __pmd_free_tlb(tlb,x) do { } while (0)
86#define pgd_populate(mm, pmd, pte) BUG()
87#define pgd_populate_kernel(mm, pmd, pte) BUG()
88#else /* __s390x__ */
89static inline pmd_t * pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
90{ 55{
91 pmd_t *pmd = (pmd_t *) __get_free_pages(GFP_KERNEL, PMD_ALLOC_ORDER); 56 return _SEGMENT_ENTRY_EMPTY;
92 int i;
93
94 if (!pmd)
95 return NULL;
96 if (s390_noexec) {
97 pmd_t *shadow_pmd = (pmd_t *)
98 __get_free_pages(GFP_KERNEL, PMD_ALLOC_ORDER);
99 struct page *page = virt_to_page(pmd);
100
101 if (!shadow_pmd) {
102 free_pages((unsigned long) pmd, PMD_ALLOC_ORDER);
103 return NULL;
104 }
105 page->lru.next = (void *) shadow_pmd;
106 }
107 for (i=0; i < PTRS_PER_PMD; i++)
108 pmd_clear(pmd + i);
109 return pmd;
110} 57}
111 58
112static inline void pmd_free (pmd_t *pmd) 59#define pud_alloc_one(mm,address) ({ BUG(); ((pud_t *)2); })
60#define pud_free(x) do { } while (0)
61
62#define pmd_alloc_one(mm,address) ({ BUG(); ((pmd_t *)2); })
63#define pmd_free(x) do { } while (0)
64
65#define pgd_populate(mm, pgd, pud) BUG()
66#define pgd_populate_kernel(mm, pgd, pud) BUG()
67
68#define pud_populate(mm, pud, pmd) BUG()
69#define pud_populate_kernel(mm, pud, pmd) BUG()
70
71#else /* __s390x__ */
72
73static inline unsigned long pgd_entry_type(struct mm_struct *mm)
113{ 74{
114 pmd_t *shadow_pmd = get_shadow_pmd(pmd); 75 return _REGION3_ENTRY_EMPTY;
76}
77
78#define pud_alloc_one(mm,address) ({ BUG(); ((pud_t *)2); })
79#define pud_free(x) do { } while (0)
115 80
116 if (shadow_pmd) 81static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
117 free_pages((unsigned long) shadow_pmd, PMD_ALLOC_ORDER); 82{
118 free_pages((unsigned long) pmd, PMD_ALLOC_ORDER); 83 unsigned long *crst = crst_table_alloc(mm, s390_noexec);
84 if (crst)
85 crst_table_init(crst, _SEGMENT_ENTRY_EMPTY);
86 return (pmd_t *) crst;
119} 87}
88#define pmd_free(pmd) crst_table_free((unsigned long *) pmd)
120 89
121#define __pmd_free_tlb(tlb,pmd) \ 90#define pgd_populate(mm, pgd, pud) BUG()
122 do { \ 91#define pgd_populate_kernel(mm, pgd, pud) BUG()
123 tlb_flush_mmu(tlb, 0, 0); \
124 pmd_free(pmd); \
125 } while (0)
126 92
127static inline void 93static inline void pud_populate_kernel(struct mm_struct *mm,
128pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd) 94 pud_t *pud, pmd_t *pmd)
129{ 95{
130 pgd_val(*pgd) = _PGD_ENTRY | __pa(pmd); 96 pud_val(*pud) = _REGION3_ENTRY | __pa(pmd);
131} 97}
132 98
133static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd) 99static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
134{ 100{
135 pgd_t *shadow_pgd = get_shadow_pgd(pgd); 101 pud_t *shadow_pud = get_shadow_table(pud);
136 pmd_t *shadow_pmd = get_shadow_pmd(pmd); 102 pmd_t *shadow_pmd = get_shadow_table(pmd);
137 103
138 if (shadow_pgd && shadow_pmd) 104 if (shadow_pud && shadow_pmd)
139 pgd_populate_kernel(mm, shadow_pgd, shadow_pmd); 105 pud_populate_kernel(mm, shadow_pud, shadow_pmd);
140 pgd_populate_kernel(mm, pgd, pmd); 106 pud_populate_kernel(mm, pud, pmd);
141} 107}
142 108
143#endif /* __s390x__ */ 109#endif /* __s390x__ */
144 110
111static inline pgd_t *pgd_alloc(struct mm_struct *mm)
112{
113 unsigned long *crst = crst_table_alloc(mm, s390_noexec);
114 if (crst)
115 crst_table_init(crst, pgd_entry_type(mm));
116 return (pgd_t *) crst;
117}
118#define pgd_free(pgd) crst_table_free((unsigned long *) pgd)
119
145static inline void 120static inline void
146pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte) 121pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
147{ 122{
148#ifndef __s390x__ 123#ifndef __s390x__
149 pmd_val(pmd[0]) = _PAGE_TABLE + __pa(pte); 124 pmd_val(pmd[0]) = _SEGMENT_ENTRY + __pa(pte);
150 pmd_val(pmd[1]) = _PAGE_TABLE + __pa(pte+256); 125 pmd_val(pmd[1]) = _SEGMENT_ENTRY + __pa(pte+256);
151 pmd_val(pmd[2]) = _PAGE_TABLE + __pa(pte+512); 126 pmd_val(pmd[2]) = _SEGMENT_ENTRY + __pa(pte+512);
152 pmd_val(pmd[3]) = _PAGE_TABLE + __pa(pte+768); 127 pmd_val(pmd[3]) = _SEGMENT_ENTRY + __pa(pte+768);
153#else /* __s390x__ */ 128#else /* __s390x__ */
154 pmd_val(*pmd) = _PMD_ENTRY + __pa(pte); 129 pmd_val(*pmd) = _SEGMENT_ENTRY + __pa(pte);
155 pmd_val1(*pmd) = _PMD_ENTRY + __pa(pte+256); 130 pmd_val1(*pmd) = _SEGMENT_ENTRY + __pa(pte+256);
156#endif /* __s390x__ */ 131#endif /* __s390x__ */
157} 132}
158 133
@@ -160,7 +135,7 @@ static inline void
160pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *page) 135pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *page)
161{ 136{
162 pte_t *pte = (pte_t *)page_to_phys(page); 137 pte_t *pte = (pte_t *)page_to_phys(page);
163 pmd_t *shadow_pmd = get_shadow_pmd(pmd); 138 pmd_t *shadow_pmd = get_shadow_table(pmd);
164 pte_t *shadow_pte = get_shadow_pte(pte); 139 pte_t *shadow_pte = get_shadow_pte(pte);
165 140
166 pmd_populate_kernel(mm, pmd, pte); 141 pmd_populate_kernel(mm, pmd, pte);
@@ -171,67 +146,14 @@ pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *page)
171/* 146/*
172 * page table entry allocation/free routines. 147 * page table entry allocation/free routines.
173 */ 148 */
174static inline pte_t * 149#define pte_alloc_one_kernel(mm, vmaddr) \
175pte_alloc_one_kernel(struct mm_struct *mm, unsigned long vmaddr) 150 ((pte_t *) page_table_alloc(s390_noexec))
176{ 151#define pte_alloc_one(mm, vmaddr) \
177 pte_t *pte = (pte_t *) __get_free_page(GFP_KERNEL|__GFP_REPEAT); 152 virt_to_page(page_table_alloc(s390_noexec))
178 int i; 153
179 154#define pte_free_kernel(pte) \
180 if (!pte) 155 page_table_free((unsigned long *) pte)
181 return NULL; 156#define pte_free(pte) \
182 if (s390_noexec) { 157 page_table_free((unsigned long *) page_to_phys((struct page *) pte))
183 pte_t *shadow_pte = (pte_t *)
184 __get_free_page(GFP_KERNEL|__GFP_REPEAT);
185 struct page *page = virt_to_page(pte);
186
187 if (!shadow_pte) {
188 free_page((unsigned long) pte);
189 return NULL;
190 }
191 page->lru.next = (void *) shadow_pte;
192 }
193 for (i=0; i < PTRS_PER_PTE; i++) {
194 pte_clear(mm, vmaddr, pte + i);
195 vmaddr += PAGE_SIZE;
196 }
197 return pte;
198}
199
200static inline struct page *
201pte_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
202{
203 pte_t *pte = pte_alloc_one_kernel(mm, vmaddr);
204 if (pte)
205 return virt_to_page(pte);
206 return NULL;
207}
208
209static inline void pte_free_kernel(pte_t *pte)
210{
211 pte_t *shadow_pte = get_shadow_pte(pte);
212
213 if (shadow_pte)
214 free_page((unsigned long) shadow_pte);
215 free_page((unsigned long) pte);
216}
217
218static inline void pte_free(struct page *pte)
219{
220 struct page *shadow_page = get_shadow_page(pte);
221
222 if (shadow_page)
223 __free_page(shadow_page);
224 __free_page(pte);
225}
226
227#define __pte_free_tlb(tlb, pte) \
228({ \
229 struct mmu_gather *__tlb = (tlb); \
230 struct page *__pte = (pte); \
231 struct page *shadow_page = get_shadow_page(__pte); \
232 if (shadow_page) \
233 tlb_remove_page(__tlb, shadow_page); \
234 tlb_remove_page(__tlb, __pte); \
235})
236 158
237#endif /* _S390_PGALLOC_H */ 159#endif /* _S390_PGALLOC_H */
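After this rewrite every page-table level on s390 comes from the two allocators declared at the top of the file: crst_table_alloc() for region/segment tables and page_table_alloc() for pte tables, with the noexec shadow handled inside them. A sketch of how the helpers above compose on a 64-bit kernel (mm, pmd and address are illustrative):

	#include <linux/mm.h>
	#include <asm/pgalloc.h>

	static pte_t *example_attach_pte_table(struct mm_struct *mm,
					       pmd_t *pmd, unsigned long addr)
	{
		/* page_table_alloc() (via pte_alloc_one_kernel) returns
		 * an initialized pte table, plus a shadow table when
		 * s390_noexec is set. */
		pte_t *pte = pte_alloc_one_kernel(mm, addr);

		if (!pte)
			return NULL;
		/* pmd_populate_kernel() stores the table origin with the
		 * _SEGMENT_ENTRY bits into the segment-table entry. */
		pmd_populate_kernel(mm, pmd, pte);
		return pte;
	}
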
diff --git a/include/asm-s390/pgtable.h b/include/asm-s390/pgtable.h
index 39bb5192dc31..f2cc25b74adf 100644
--- a/include/asm-s390/pgtable.h
+++ b/include/asm-s390/pgtable.h
@@ -13,8 +13,6 @@
13#ifndef _ASM_S390_PGTABLE_H 13#ifndef _ASM_S390_PGTABLE_H
14#define _ASM_S390_PGTABLE_H 14#define _ASM_S390_PGTABLE_H
15 15
16#include <asm-generic/4level-fixup.h>
17
18/* 16/*
19 * The Linux memory management assumes a three-level page table setup. For 17 * The Linux memory management assumes a three-level page table setup. For
20 * s390 31 bit we "fold" the mid level into the top-level page table, so 18 * s390 31 bit we "fold" the mid level into the top-level page table, so
@@ -35,9 +33,6 @@
35#include <asm/bug.h> 33#include <asm/bug.h>
36#include <asm/processor.h> 34#include <asm/processor.h>
37 35
38struct vm_area_struct; /* forward declaration (include/linux/mm.h) */
39struct mm_struct;
40
41extern pgd_t swapper_pg_dir[] __attribute__ ((aligned (4096))); 36extern pgd_t swapper_pg_dir[] __attribute__ ((aligned (4096)));
42extern void paging_init(void); 37extern void paging_init(void);
43extern void vmem_map_init(void); 38extern void vmem_map_init(void);
@@ -63,14 +58,18 @@ extern char empty_zero_page[PAGE_SIZE];
63 */ 58 */
64#ifndef __s390x__ 59#ifndef __s390x__
65# define PMD_SHIFT 22 60# define PMD_SHIFT 22
61# define PUD_SHIFT 22
66# define PGDIR_SHIFT 22 62# define PGDIR_SHIFT 22
67#else /* __s390x__ */ 63#else /* __s390x__ */
68# define PMD_SHIFT 21 64# define PMD_SHIFT 21
65# define PUD_SHIFT 31
69# define PGDIR_SHIFT 31 66# define PGDIR_SHIFT 31
70#endif /* __s390x__ */ 67#endif /* __s390x__ */
71 68
72#define PMD_SIZE (1UL << PMD_SHIFT) 69#define PMD_SIZE (1UL << PMD_SHIFT)
73#define PMD_MASK (~(PMD_SIZE-1)) 70#define PMD_MASK (~(PMD_SIZE-1))
71#define PUD_SIZE (1UL << PUD_SHIFT)
72#define PUD_MASK (~(PUD_SIZE-1))
74#define PGDIR_SIZE (1UL << PGDIR_SHIFT) 73#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
75#define PGDIR_MASK (~(PGDIR_SIZE-1)) 74#define PGDIR_MASK (~(PGDIR_SIZE-1))
76 75
@@ -83,10 +82,12 @@ extern char empty_zero_page[PAGE_SIZE];
83#ifndef __s390x__ 82#ifndef __s390x__
84# define PTRS_PER_PTE 1024 83# define PTRS_PER_PTE 1024
85# define PTRS_PER_PMD 1 84# define PTRS_PER_PMD 1
85# define PTRS_PER_PUD 1
86# define PTRS_PER_PGD 512 86# define PTRS_PER_PGD 512
87#else /* __s390x__ */ 87#else /* __s390x__ */
88# define PTRS_PER_PTE 512 88# define PTRS_PER_PTE 512
89# define PTRS_PER_PMD 1024 89# define PTRS_PER_PMD 1024
90# define PTRS_PER_PUD 1
90# define PTRS_PER_PGD 2048 91# define PTRS_PER_PGD 2048
91#endif /* __s390x__ */ 92#endif /* __s390x__ */
92 93
@@ -96,6 +97,8 @@ extern char empty_zero_page[PAGE_SIZE];
96 printk("%s:%d: bad pte %p.\n", __FILE__, __LINE__, (void *) pte_val(e)) 97 printk("%s:%d: bad pte %p.\n", __FILE__, __LINE__, (void *) pte_val(e))
97#define pmd_ERROR(e) \ 98#define pmd_ERROR(e) \
98 printk("%s:%d: bad pmd %p.\n", __FILE__, __LINE__, (void *) pmd_val(e)) 99 printk("%s:%d: bad pmd %p.\n", __FILE__, __LINE__, (void *) pmd_val(e))
100#define pud_ERROR(e) \
101 printk("%s:%d: bad pud %p.\n", __FILE__, __LINE__, (void *) pud_val(e))
99#define pgd_ERROR(e) \ 102#define pgd_ERROR(e) \
100 printk("%s:%d: bad pgd %p.\n", __FILE__, __LINE__, (void *) pgd_val(e)) 103 printk("%s:%d: bad pgd %p.\n", __FILE__, __LINE__, (void *) pgd_val(e))
101 104
@@ -195,7 +198,7 @@ extern unsigned long vmalloc_end;
195 * I Segment-Invalid Bit: Segment is not available for address-translation 198 * I Segment-Invalid Bit: Segment is not available for address-translation
196 * TT Type 01 199 * TT Type 01
197 * TF 200 * TF
198 * TL Table lenght 201 * TL Table length
199 * 202 *
200 * The 64 bit regiontable origin of S390 has following format: 203 * The 64 bit regiontable origin of S390 has following format:
201 * | region table origon | DTTL 204 * | region table origon | DTTL
@@ -221,6 +224,8 @@ extern unsigned long vmalloc_end;
221/* Hardware bits in the page table entry */ 224/* Hardware bits in the page table entry */
222#define _PAGE_RO 0x200 /* HW read-only bit */ 225#define _PAGE_RO 0x200 /* HW read-only bit */
223#define _PAGE_INVALID 0x400 /* HW invalid bit */ 226#define _PAGE_INVALID 0x400 /* HW invalid bit */
227
228/* Software bits in the page table entry */
224#define _PAGE_SWT 0x001 /* SW pte type bit t */ 229#define _PAGE_SWT 0x001 /* SW pte type bit t */
225#define _PAGE_SWX 0x002 /* SW pte type bit x */ 230#define _PAGE_SWX 0x002 /* SW pte type bit x */
226 231
@@ -264,60 +269,75 @@ extern unsigned long vmalloc_end;
264 269
265#ifndef __s390x__ 270#ifndef __s390x__
266 271
267/* Bits in the segment table entry */ 272/* Bits in the segment table address-space-control-element */
268#define _PAGE_TABLE_LEN 0xf /* only full page-tables */ 273#define _ASCE_SPACE_SWITCH 0x80000000UL /* space switch event */
269#define _PAGE_TABLE_COM 0x10 /* common page-table */ 274#define _ASCE_ORIGIN_MASK 0x7ffff000UL /* segment table origin */
270#define _PAGE_TABLE_INV 0x20 /* invalid page-table */ 275#define _ASCE_PRIVATE_SPACE 0x100 /* private space control */
271#define _SEG_PRESENT 0x001 /* Software (overlap with PTL) */ 276#define _ASCE_ALT_EVENT 0x80 /* storage alteration event control */
272 277#define _ASCE_TABLE_LENGTH 0x7f /* 128 x 64 entries = 8k */
273/* Bits int the storage key */
274#define _PAGE_CHANGED 0x02 /* HW changed bit */
275#define _PAGE_REFERENCED 0x04 /* HW referenced bit */
276
277#define _USER_SEG_TABLE_LEN 0x7f /* user-segment-table up to 2 GB */
278#define _KERNEL_SEG_TABLE_LEN 0x7f /* kernel-segment-table up to 2 GB */
279
280/*
281 * User and Kernel pagetables are identical
282 */
283#define _PAGE_TABLE _PAGE_TABLE_LEN
284#define _KERNPG_TABLE _PAGE_TABLE_LEN
285
286/*
287 * The Kernel segment-tables includes the User segment-table
288 */
289 278
290#define _SEGMENT_TABLE (_USER_SEG_TABLE_LEN|0x80000000|0x100) 279/* Bits in the segment table entry */
291#define _KERNSEG_TABLE _KERNEL_SEG_TABLE_LEN 280#define _SEGMENT_ENTRY_ORIGIN 0x7fffffc0UL /* page table origin */
281#define _SEGMENT_ENTRY_INV 0x20 /* invalid segment table entry */
282#define _SEGMENT_ENTRY_COMMON 0x10 /* common segment bit */
283#define _SEGMENT_ENTRY_PTL 0x0f /* page table length */
292 284
293#define USER_STD_MASK 0x00000080UL 285#define _SEGMENT_ENTRY (_SEGMENT_ENTRY_PTL)
286#define _SEGMENT_ENTRY_EMPTY (_SEGMENT_ENTRY_INV)
294 287
295#else /* __s390x__ */ 288#else /* __s390x__ */
296 289
290/* Bits in the segment/region table address-space-control-element */
291#define _ASCE_ORIGIN ~0xfffUL/* segment table origin */
292#define _ASCE_PRIVATE_SPACE 0x100 /* private space control */
293#define _ASCE_ALT_EVENT 0x80 /* storage alteration event control */
294#define _ASCE_SPACE_SWITCH 0x40 /* space switch event */
295#define _ASCE_REAL_SPACE 0x20 /* real space control */
296#define _ASCE_TYPE_MASK 0x0c /* asce table type mask */
297#define _ASCE_TYPE_REGION1 0x0c /* region first table type */
298#define _ASCE_TYPE_REGION2 0x08 /* region second table type */
299#define _ASCE_TYPE_REGION3 0x04 /* region third table type */
300#define _ASCE_TYPE_SEGMENT 0x00 /* segment table type */
301#define _ASCE_TABLE_LENGTH 0x03 /* region table length */
302
303/* Bits in the region table entry */
304#define _REGION_ENTRY_ORIGIN ~0xfffUL/* region/segment table origin */
305#define _REGION_ENTRY_INV 0x20 /* invalid region table entry */
306#define _REGION_ENTRY_TYPE_MASK 0x0c /* region/segment table type mask */
307#define _REGION_ENTRY_TYPE_R1 0x0c /* region first table type */
308#define _REGION_ENTRY_TYPE_R2 0x08 /* region second table type */
309#define _REGION_ENTRY_TYPE_R3 0x04 /* region third table type */
310#define _REGION_ENTRY_LENGTH 0x03 /* region third length */
311
312#define _REGION1_ENTRY (_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_LENGTH)
313#define _REGION1_ENTRY_EMPTY (_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INV)
314#define _REGION2_ENTRY (_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_LENGTH)
315#define _REGION2_ENTRY_EMPTY (_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INV)
316#define _REGION3_ENTRY (_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_LENGTH)
317#define _REGION3_ENTRY_EMPTY (_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INV)
318
297/* Bits in the segment table entry */ 319/* Bits in the segment table entry */
298#define _PMD_ENTRY_INV 0x20 /* invalid segment table entry */ 320#define _SEGMENT_ENTRY_ORIGIN ~0x7ffUL/* segment table origin */
299#define _PMD_ENTRY 0x00 321#define _SEGMENT_ENTRY_RO 0x200 /* page protection bit */
322#define _SEGMENT_ENTRY_INV 0x20 /* invalid segment table entry */
323
324#define _SEGMENT_ENTRY (0)
325#define _SEGMENT_ENTRY_EMPTY (_SEGMENT_ENTRY_INV)
300 326
301/* Bits in the region third table entry */ 327#endif /* __s390x__ */
302#define _PGD_ENTRY_INV 0x20 /* invalid region table entry */
303#define _PGD_ENTRY 0x07
304 328
305/* 329/*
306 * User and kernel page directory 330 * A user page table pointer has the space-switch-event bit, the
331 * private-space-control bit and the storage-alteration-event-control
332 * bit set. A kernel page table pointer doesn't need them.
307 */ 333 */
308#define _REGION_THIRD 0x4 334#define _ASCE_USER_BITS (_ASCE_SPACE_SWITCH | _ASCE_PRIVATE_SPACE | \
309#define _REGION_THIRD_LEN 0x3 335 _ASCE_ALT_EVENT)
310#define _REGION_TABLE (_REGION_THIRD|_REGION_THIRD_LEN|0x40|0x100)
311#define _KERN_REGION_TABLE (_REGION_THIRD|_REGION_THIRD_LEN)
312
313#define USER_STD_MASK 0x0000000000000080UL
314 336
315/* Bits in the storage key */ 337/* Bits int the storage key */
316#define _PAGE_CHANGED 0x02 /* HW changed bit */ 338#define _PAGE_CHANGED 0x02 /* HW changed bit */
317#define _PAGE_REFERENCED 0x04 /* HW referenced bit */ 339#define _PAGE_REFERENCED 0x04 /* HW referenced bit */
318 340
319#endif /* __s390x__ */
320
321/* 341/*
322 * Page protection definitions. 342 * Page protection definitions.
323 */ 343 */
@@ -358,65 +378,38 @@ extern unsigned long vmalloc_end;
358#define __S111 PAGE_EX_RW 378#define __S111 PAGE_EX_RW
359 379
360#ifndef __s390x__ 380#ifndef __s390x__
361# define PMD_SHADOW_SHIFT 1 381# define PxD_SHADOW_SHIFT 1
362# define PGD_SHADOW_SHIFT 1
363#else /* __s390x__ */ 382#else /* __s390x__ */
364# define PMD_SHADOW_SHIFT 2 383# define PxD_SHADOW_SHIFT 2
365# define PGD_SHADOW_SHIFT 2
366#endif /* __s390x__ */ 384#endif /* __s390x__ */
367 385
368static inline struct page *get_shadow_page(struct page *page) 386static inline struct page *get_shadow_page(struct page *page)
369{ 387{
370 if (s390_noexec && !list_empty(&page->lru)) 388 if (s390_noexec && page->index)
371 return virt_to_page(page->lru.next); 389 return virt_to_page((void *)(addr_t) page->index);
372 return NULL;
373}
374
375static inline pte_t *get_shadow_pte(pte_t *ptep)
376{
377 unsigned long pteptr = (unsigned long) (ptep);
378
379 if (s390_noexec) {
380 unsigned long offset = pteptr & (PAGE_SIZE - 1);
381 void *addr = (void *) (pteptr ^ offset);
382 struct page *page = virt_to_page(addr);
383 if (!list_empty(&page->lru))
384 return (pte_t *) ((unsigned long) page->lru.next |
385 offset);
386 }
387 return NULL; 390 return NULL;
388} 391}
389 392
390static inline pmd_t *get_shadow_pmd(pmd_t *pmdp) 393static inline void *get_shadow_pte(void *table)
391{ 394{
392 unsigned long pmdptr = (unsigned long) (pmdp); 395 unsigned long addr, offset;
396 struct page *page;
393 397
394 if (s390_noexec) { 398 addr = (unsigned long) table;
395 unsigned long offset = pmdptr & 399 offset = addr & (PAGE_SIZE - 1);
396 ((PAGE_SIZE << PMD_SHADOW_SHIFT) - 1); 400 page = virt_to_page((void *)(addr ^ offset));
397 void *addr = (void *) (pmdptr ^ offset); 401 return (void *)(addr_t)(page->index ? (page->index | offset) : 0UL);
398 struct page *page = virt_to_page(addr);
399 if (!list_empty(&page->lru))
400 return (pmd_t *) ((unsigned long) page->lru.next |
401 offset);
402 }
403 return NULL;
404} 402}
405 403
406static inline pgd_t *get_shadow_pgd(pgd_t *pgdp) 404static inline void *get_shadow_table(void *table)
407{ 405{
408 unsigned long pgdptr = (unsigned long) (pgdp); 406 unsigned long addr, offset;
407 struct page *page;
409 408
410 if (s390_noexec) { 409 addr = (unsigned long) table;
411 unsigned long offset = pgdptr & 410 offset = addr & ((PAGE_SIZE << PxD_SHADOW_SHIFT) - 1);
412 ((PAGE_SIZE << PGD_SHADOW_SHIFT) - 1); 411 page = virt_to_page((void *)(addr ^ offset));
413 void *addr = (void *) (pgdptr ^ offset); 412 return (void *)(addr_t)(page->index ? (page->index | offset) : 0UL);
414 struct page *page = virt_to_page(addr);
415 if (!list_empty(&page->lru))
416 return (pgd_t *) ((unsigned long) page->lru.next |
417 offset);
418 }
419 return NULL;
420} 413}
421 414
422/* 415/*
@@ -424,7 +417,8 @@ static inline pgd_t *get_shadow_pgd(pgd_t *pgdp)
424 * within a page table are directly modified. Thus, the following 417 * within a page table are directly modified. Thus, the following
425 * hook is made available. 418 * hook is made available.
426 */ 419 */
427static inline void set_pte(pte_t *pteptr, pte_t pteval) 420static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
421 pte_t *pteptr, pte_t pteval)
428{ 422{
429 pte_t *shadow_pte = get_shadow_pte(pteptr); 423 pte_t *shadow_pte = get_shadow_pte(pteptr);
430 424
@@ -437,7 +431,6 @@ static inline void set_pte(pte_t *pteptr, pte_t pteval)
437 pte_val(*shadow_pte) = _PAGE_TYPE_EMPTY; 431 pte_val(*shadow_pte) = _PAGE_TYPE_EMPTY;
438 } 432 }
439} 433}
440#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
441 434
442/* 435/*
443 * pgd/pmd/pte query functions 436 * pgd/pmd/pte query functions
@@ -448,47 +441,50 @@ static inline int pgd_present(pgd_t pgd) { return 1; }
448static inline int pgd_none(pgd_t pgd) { return 0; } 441static inline int pgd_none(pgd_t pgd) { return 0; }
449static inline int pgd_bad(pgd_t pgd) { return 0; } 442static inline int pgd_bad(pgd_t pgd) { return 0; }
450 443
451static inline int pmd_present(pmd_t pmd) { return pmd_val(pmd) & _SEG_PRESENT; } 444static inline int pud_present(pud_t pud) { return 1; }
452static inline int pmd_none(pmd_t pmd) { return pmd_val(pmd) & _PAGE_TABLE_INV; } 445static inline int pud_none(pud_t pud) { return 0; }
453static inline int pmd_bad(pmd_t pmd) 446static inline int pud_bad(pud_t pud) { return 0; }
454{
455 return (pmd_val(pmd) & (~PAGE_MASK & ~_PAGE_TABLE_INV)) != _PAGE_TABLE;
456}
457 447
458#else /* __s390x__ */ 448#else /* __s390x__ */
459 449
460static inline int pgd_present(pgd_t pgd) 450static inline int pgd_present(pgd_t pgd) { return 1; }
451static inline int pgd_none(pgd_t pgd) { return 0; }
452static inline int pgd_bad(pgd_t pgd) { return 0; }
453
454static inline int pud_present(pud_t pud)
461{ 455{
462 return (pgd_val(pgd) & ~PAGE_MASK) == _PGD_ENTRY; 456 return pud_val(pud) & _REGION_ENTRY_ORIGIN;
463} 457}
464 458
465static inline int pgd_none(pgd_t pgd) 459static inline int pud_none(pud_t pud)
466{ 460{
467 return pgd_val(pgd) & _PGD_ENTRY_INV; 461 return pud_val(pud) & _REGION_ENTRY_INV;
468} 462}
469 463
470static inline int pgd_bad(pgd_t pgd) 464static inline int pud_bad(pud_t pud)
471{ 465{
472 return (pgd_val(pgd) & (~PAGE_MASK & ~_PGD_ENTRY_INV)) != _PGD_ENTRY; 466 unsigned long mask = ~_REGION_ENTRY_ORIGIN & ~_REGION_ENTRY_INV;
467 return (pud_val(pud) & mask) != _REGION3_ENTRY;
473} 468}
474 469
470#endif /* __s390x__ */
471
475static inline int pmd_present(pmd_t pmd) 472static inline int pmd_present(pmd_t pmd)
476{ 473{
477 return (pmd_val(pmd) & ~PAGE_MASK) == _PMD_ENTRY; 474 return pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN;
478} 475}
479 476
480static inline int pmd_none(pmd_t pmd) 477static inline int pmd_none(pmd_t pmd)
481{ 478{
482 return pmd_val(pmd) & _PMD_ENTRY_INV; 479 return pmd_val(pmd) & _SEGMENT_ENTRY_INV;
483} 480}
484 481
485static inline int pmd_bad(pmd_t pmd) 482static inline int pmd_bad(pmd_t pmd)
486{ 483{
487 return (pmd_val(pmd) & (~PAGE_MASK & ~_PMD_ENTRY_INV)) != _PMD_ENTRY; 484 unsigned long mask = ~_SEGMENT_ENTRY_ORIGIN & ~_SEGMENT_ENTRY_INV;
485 return (pmd_val(pmd) & mask) != _SEGMENT_ENTRY;
488} 486}
489 487
490#endif /* __s390x__ */
491
492static inline int pte_none(pte_t pte) 488static inline int pte_none(pte_t pte)
493{ 489{
494 return (pte_val(pte) & _PAGE_INVALID) && !(pte_val(pte) & _PAGE_SWT); 490 return (pte_val(pte) & _PAGE_INVALID) && !(pte_val(pte) & _PAGE_SWT);
@@ -508,7 +504,8 @@ static inline int pte_file(pte_t pte)
508 return (pte_val(pte) & mask) == _PAGE_TYPE_FILE; 504 return (pte_val(pte) & mask) == _PAGE_TYPE_FILE;
509} 505}
510 506
511#define pte_same(a,b) (pte_val(a) == pte_val(b)) 507#define __HAVE_ARCH_PTE_SAME
508#define pte_same(a,b) (pte_val(a) == pte_val(b))
512 509
513/* 510/*
514 * query functions pte_write/pte_dirty/pte_young only work if 511 * query functions pte_write/pte_dirty/pte_young only work if
@@ -543,58 +540,52 @@ static inline int pte_young(pte_t pte)
543 540
544#ifndef __s390x__ 541#ifndef __s390x__
545 542
546static inline void pgd_clear(pgd_t * pgdp) { } 543#define pgd_clear(pgd) do { } while (0)
544#define pud_clear(pud) do { } while (0)
547 545
548static inline void pmd_clear_kernel(pmd_t * pmdp) 546static inline void pmd_clear_kernel(pmd_t * pmdp)
549{ 547{
550 pmd_val(pmdp[0]) = _PAGE_TABLE_INV; 548 pmd_val(pmdp[0]) = _SEGMENT_ENTRY_EMPTY;
551 pmd_val(pmdp[1]) = _PAGE_TABLE_INV; 549 pmd_val(pmdp[1]) = _SEGMENT_ENTRY_EMPTY;
552 pmd_val(pmdp[2]) = _PAGE_TABLE_INV; 550 pmd_val(pmdp[2]) = _SEGMENT_ENTRY_EMPTY;
553 pmd_val(pmdp[3]) = _PAGE_TABLE_INV; 551 pmd_val(pmdp[3]) = _SEGMENT_ENTRY_EMPTY;
554}
555
556static inline void pmd_clear(pmd_t * pmdp)
557{
558 pmd_t *shadow_pmd = get_shadow_pmd(pmdp);
559
560 pmd_clear_kernel(pmdp);
561 if (shadow_pmd)
562 pmd_clear_kernel(shadow_pmd);
563} 552}
564 553
565#else /* __s390x__ */ 554#else /* __s390x__ */
566 555
567static inline void pgd_clear_kernel(pgd_t * pgdp) 556#define pgd_clear(pgd) do { } while (0)
557
558static inline void pud_clear_kernel(pud_t *pud)
568{ 559{
569 pgd_val(*pgdp) = _PGD_ENTRY_INV | _PGD_ENTRY; 560 pud_val(*pud) = _REGION3_ENTRY_EMPTY;
570} 561}
571 562
572static inline void pgd_clear(pgd_t * pgdp) 563static inline void pud_clear(pud_t * pud)
573{ 564{
574 pgd_t *shadow_pgd = get_shadow_pgd(pgdp); 565 pud_t *shadow = get_shadow_table(pud);
575 566
576 pgd_clear_kernel(pgdp); 567 pud_clear_kernel(pud);
577 if (shadow_pgd) 568 if (shadow)
578 pgd_clear_kernel(shadow_pgd); 569 pud_clear_kernel(shadow);
579} 570}
580 571
581static inline void pmd_clear_kernel(pmd_t * pmdp) 572static inline void pmd_clear_kernel(pmd_t * pmdp)
582{ 573{
583 pmd_val(*pmdp) = _PMD_ENTRY_INV | _PMD_ENTRY; 574 pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY;
584 pmd_val1(*pmdp) = _PMD_ENTRY_INV | _PMD_ENTRY; 575 pmd_val1(*pmdp) = _SEGMENT_ENTRY_EMPTY;
585} 576}
586 577
578#endif /* __s390x__ */
579
587static inline void pmd_clear(pmd_t * pmdp) 580static inline void pmd_clear(pmd_t * pmdp)
588{ 581{
589 pmd_t *shadow_pmd = get_shadow_pmd(pmdp); 582 pmd_t *shadow_pmd = get_shadow_table(pmdp);
590 583
591 pmd_clear_kernel(pmdp); 584 pmd_clear_kernel(pmdp);
592 if (shadow_pmd) 585 if (shadow_pmd)
593 pmd_clear_kernel(shadow_pmd); 586 pmd_clear_kernel(shadow_pmd);
594} 587}
595 588
596#endif /* __s390x__ */
597
598static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) 589static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
599{ 590{
600 pte_t *shadow_pte = get_shadow_pte(ptep); 591 pte_t *shadow_pte = get_shadow_pte(ptep);
@@ -663,24 +654,19 @@ static inline pte_t pte_mkyoung(pte_t pte)
663 return pte; 654 return pte;
664} 655}
665 656
666static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) 657#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
658static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
659 unsigned long addr, pte_t *ptep)
667{ 660{
668 return 0; 661 return 0;
669} 662}
670 663
671static inline int 664#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
672ptep_clear_flush_young(struct vm_area_struct *vma, 665static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
673 unsigned long address, pte_t *ptep) 666 unsigned long address, pte_t *ptep)
674{ 667{
675 /* No need to flush TLB; bits are in storage key */ 668 /* No need to flush TLB; bits are in storage key */
676 return ptep_test_and_clear_young(vma, address, ptep); 669 return 0;
677}
678
679static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
680{
681 pte_t pte = *ptep;
682 pte_clear(mm, addr, ptep);
683 return pte;
684} 670}
685 671
686static inline void __ptep_ipte(unsigned long address, pte_t *ptep) 672static inline void __ptep_ipte(unsigned long address, pte_t *ptep)
@@ -709,6 +695,32 @@ static inline void ptep_invalidate(unsigned long address, pte_t *ptep)
709 __ptep_ipte(address, ptep); 695 __ptep_ipte(address, ptep);
710} 696}
711 697
698/*
699 * This is hard to understand. ptep_get_and_clear and ptep_clear_flush
700 * both clear the TLB for the unmapped pte. The reason is that
701 * ptep_get_and_clear is used in common code (e.g. change_pte_range)
702 * to modify an active pte. The sequence is
703 * 1) ptep_get_and_clear
704 * 2) set_pte_at
705 * 3) flush_tlb_range
706 * On s390 the tlb needs to get flushed with the modification of the pte
707 * if the pte is active. The only way how this can be implemented is to
708 * have ptep_get_and_clear do the tlb flush. In exchange flush_tlb_range
709 * is a nop.
710 */
711#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
712#define ptep_get_and_clear(__mm, __address, __ptep) \
713({ \
714 pte_t __pte = *(__ptep); \
715 if (atomic_read(&(__mm)->mm_users) > 1 || \
716 (__mm) != current->active_mm) \
717 ptep_invalidate(__address, __ptep); \
718 else \
719 pte_clear((__mm), (__address), (__ptep)); \
720 __pte; \
721})
722
723#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
712static inline pte_t ptep_clear_flush(struct vm_area_struct *vma, 724static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
713 unsigned long address, pte_t *ptep) 725 unsigned long address, pte_t *ptep)
714{ 726{
@@ -717,12 +729,40 @@ static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
717 return pte; 729 return pte;
718} 730}
719 731
720static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep) 732/*
733 * The batched pte unmap code uses ptep_get_and_clear_full to clear the
734 * ptes. Here an optimization is possible. tlb_gather_mmu flushes all
735 * tlbs of an mm if it can guarantee that the ptes of the mm_struct
736 * cannot be accessed while the batched unmap is running. In this case
737 * full==1 and a simple pte_clear is enough. See tlb.h.
738 */
739#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
740static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
741 unsigned long addr,
742 pte_t *ptep, int full)
721{ 743{
722 pte_t old_pte = *ptep; 744 pte_t pte = *ptep;
723 set_pte_at(mm, addr, ptep, pte_wrprotect(old_pte)); 745
746 if (full)
747 pte_clear(mm, addr, ptep);
748 else
749 ptep_invalidate(addr, ptep);
750 return pte;
724} 751}
725 752
753#define __HAVE_ARCH_PTEP_SET_WRPROTECT
754#define ptep_set_wrprotect(__mm, __addr, __ptep) \
755({ \
756 pte_t __pte = *(__ptep); \
757 if (pte_write(__pte)) { \
758 if (atomic_read(&(__mm)->mm_users) > 1 || \
759 (__mm) != current->active_mm) \
760 ptep_invalidate(__addr, __ptep); \
761 set_pte_at(__mm, __addr, __ptep, pte_wrprotect(__pte)); \
762 } \
763})
764
765#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
726#define ptep_set_access_flags(__vma, __addr, __ptep, __entry, __dirty) \ 766#define ptep_set_access_flags(__vma, __addr, __ptep, __entry, __dirty) \
727({ \ 767({ \
728 int __changed = !pte_same(*(__ptep), __entry); \ 768 int __changed = !pte_same(*(__ptep), __entry); \
@@ -740,11 +780,13 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
740 * should therefore only be called if it is not mapped in any 780 * should therefore only be called if it is not mapped in any
741 * address space. 781 * address space.
742 */ 782 */
783#define __HAVE_ARCH_PAGE_TEST_DIRTY
743static inline int page_test_dirty(struct page *page) 784static inline int page_test_dirty(struct page *page)
744{ 785{
745 return (page_get_storage_key(page_to_phys(page)) & _PAGE_CHANGED) != 0; 786 return (page_get_storage_key(page_to_phys(page)) & _PAGE_CHANGED) != 0;
746} 787}
747 788
789#define __HAVE_ARCH_PAGE_CLEAR_DIRTY
748static inline void page_clear_dirty(struct page *page) 790static inline void page_clear_dirty(struct page *page)
749{ 791{
750 page_set_storage_key(page_to_phys(page), PAGE_DEFAULT_KEY); 792 page_set_storage_key(page_to_phys(page), PAGE_DEFAULT_KEY);
@@ -753,6 +795,7 @@ static inline void page_clear_dirty(struct page *page)
753/* 795/*
754 * Test and clear referenced bit in storage key. 796 * Test and clear referenced bit in storage key.
755 */ 797 */
798#define __HAVE_ARCH_PAGE_TEST_AND_CLEAR_YOUNG
756static inline int page_test_and_clear_young(struct page *page) 799static inline int page_test_and_clear_young(struct page *page)
757{ 800{
758 unsigned long physpage = page_to_phys(page); 801 unsigned long physpage = page_to_phys(page);
@@ -784,63 +827,48 @@ static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
784 return mk_pte_phys(physpage, pgprot); 827 return mk_pte_phys(physpage, pgprot);
785} 828}
786 829
787static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot) 830#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
788{ 831#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
789 unsigned long physpage = __pa((pfn) << PAGE_SHIFT); 832#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
790 833#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE-1))
791 return mk_pte_phys(physpage, pgprot);
792}
793
794#ifdef __s390x__
795
796static inline pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot)
797{
798 unsigned long physpage = __pa((pfn) << PAGE_SHIFT);
799
800 return __pmd(physpage + pgprot_val(pgprot));
801}
802
803#endif /* __s390x__ */
804
805#define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT)
806#define pte_page(x) pfn_to_page(pte_pfn(x))
807 834
808#define pmd_page_vaddr(pmd) (pmd_val(pmd) & PAGE_MASK) 835#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
836#define pgd_offset_k(address) pgd_offset(&init_mm, address)
809 837
810#define pmd_page(pmd) pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT) 838#ifndef __s390x__
811 839
812#define pgd_page_vaddr(pgd) (pgd_val(pgd) & PAGE_MASK) 840#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
841#define pud_deref(pmd) ({ BUG(); 0UL; })
842#define pgd_deref(pmd) ({ BUG(); 0UL; })
813 843
814#define pgd_page(pgd) pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT) 844#define pud_offset(pgd, address) ((pud_t *) pgd)
845#define pmd_offset(pud, address) ((pmd_t *) pud + pmd_index(address))
815 846
816/* to find an entry in a page-table-directory */ 847#else /* __s390x__ */
817#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
818#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))
819 848
820/* to find an entry in a kernel page-table-directory */ 849#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
821#define pgd_offset_k(address) pgd_offset(&init_mm, address) 850#define pud_deref(pud) (pud_val(pud) & _REGION_ENTRY_ORIGIN)
851#define pgd_deref(pgd) ({ BUG(); 0UL; })
822 852
823#ifndef __s390x__ 853#define pud_offset(pgd, address) ((pud_t *) pgd)
824 854
825/* Find an entry in the second-level page table.. */ 855static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
826static inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address)
827{ 856{
828 return (pmd_t *) dir; 857 pmd_t *pmd = (pmd_t *) pud_deref(*pud);
858 return pmd + pmd_index(address);
829} 859}
830 860
831#else /* __s390x__ */ 861#endif /* __s390x__ */
832 862
833/* Find an entry in the second-level page table.. */ 863#define pfn_pte(pfn,pgprot) mk_pte_phys(__pa((pfn) << PAGE_SHIFT),(pgprot))
834#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1)) 864#define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT)
835#define pmd_offset(dir,addr) \ 865#define pte_page(x) pfn_to_page(pte_pfn(x))
836 ((pmd_t *) pgd_page_vaddr(*(dir)) + pmd_index(addr))
837 866
838#endif /* __s390x__ */ 867#define pmd_page(pmd) pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)
839 868
840/* Find an entry in the third-level page table.. */ 869/* Find an entry in the lowest level page table.. */
841#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE-1)) 870#define pte_offset(pmd, addr) ((pte_t *) pmd_deref(*(pmd)) + pte_index(addr))
842#define pte_offset_kernel(pmd, address) \ 871#define pte_offset_kernel(pmd, address) pte_offset(pmd,address)
843 ((pte_t *) pmd_page_vaddr(*(pmd)) + pte_index(address))
844#define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address) 872#define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)
845#define pte_offset_map_nested(pmd, address) pte_offset_kernel(pmd, address) 873#define pte_offset_map_nested(pmd, address) pte_offset_kernel(pmd, address)
846#define pte_unmap(pte) do { } while (0) 874#define pte_unmap(pte) do { } while (0)
@@ -930,17 +958,6 @@ extern int remove_shared_memory(unsigned long start, unsigned long size);
930#define __HAVE_ARCH_MEMMAP_INIT 958#define __HAVE_ARCH_MEMMAP_INIT
931extern void memmap_init(unsigned long, int, unsigned long, unsigned long); 959extern void memmap_init(unsigned long, int, unsigned long, unsigned long);
932 960
933#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
934#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
935#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
936#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
937#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
938#define __HAVE_ARCH_PTEP_SET_WRPROTECT
939#define __HAVE_ARCH_PTE_SAME
940#define __HAVE_ARCH_PAGE_TEST_DIRTY
941#define __HAVE_ARCH_PAGE_CLEAR_DIRTY
942#define __HAVE_ARCH_PAGE_TEST_AND_CLEAR_YOUNG
943#include <asm-generic/pgtable.h> 961#include <asm-generic/pgtable.h>
944 962
945#endif /* _S390_PAGE_H */ 963#endif /* _S390_PAGE_H */
946
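With pud_offset() added and the offset/deref macros regrouped, a software walk of a process page table now touches all four levels, even though the pud level is still a plain pass-through to the pgd in this patch. A sketch using only macros defined in this header (the address is arbitrary):

	#include <linux/mm.h>
	#include <asm/pgtable.h>

	static pte_t *example_walk(struct mm_struct *mm, unsigned long addr)
	{
		pgd_t *pgd = pgd_offset(mm, addr);
		pud_t *pud = pud_offset(pgd, addr);	/* pass-through */
		pmd_t *pmd;

		if (pud_none(*pud) || pud_bad(*pud))
			return NULL;
		pmd = pmd_offset(pud, addr);
		if (pmd_none(*pmd) || pmd_bad(*pmd))
			return NULL;
		/* pte_offset_kernel() dereferences the segment-table
		 * entry's origin to reach the page table. */
		return pte_offset_kernel(pmd, addr);
	}
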
diff --git a/include/asm-s390/processor.h b/include/asm-s390/processor.h
index 3b972d4c6b29..21d40a19355e 100644
--- a/include/asm-s390/processor.h
+++ b/include/asm-s390/processor.h
@@ -93,7 +93,6 @@ struct thread_struct {
93 s390_fp_regs fp_regs; 93 s390_fp_regs fp_regs;
94 unsigned int acrs[NUM_ACRS]; 94 unsigned int acrs[NUM_ACRS];
95 unsigned long ksp; /* kernel stack pointer */ 95 unsigned long ksp; /* kernel stack pointer */
96 unsigned long user_seg; /* HSTD */
97 mm_segment_t mm_segment; 96 mm_segment_t mm_segment;
98 unsigned long prot_addr; /* address of protection-excep. */ 97 unsigned long prot_addr; /* address of protection-excep. */
99 unsigned int error_code; /* error-code of last prog-excep. */ 98 unsigned int error_code; /* error-code of last prog-excep. */
@@ -128,22 +127,9 @@ struct stack_frame {
128 127
129#define ARCH_MIN_TASKALIGN 8 128#define ARCH_MIN_TASKALIGN 8
130 129
131#ifndef __s390x__ 130#define INIT_THREAD { \
132# define __SWAPPER_PG_DIR __pa(&swapper_pg_dir[0]) + _SEGMENT_TABLE 131 .ksp = sizeof(init_stack) + (unsigned long) &init_stack, \
133#else /* __s390x__ */ 132}
134# define __SWAPPER_PG_DIR __pa(&swapper_pg_dir[0]) + _REGION_TABLE
135#endif /* __s390x__ */
136
137#define INIT_THREAD {{0,{{0},{0},{0},{0},{0},{0},{0},{0},{0},{0}, \
138 {0},{0},{0},{0},{0},{0}}}, \
139 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, \
140 sizeof(init_stack) + (unsigned long) &init_stack, \
141 __SWAPPER_PG_DIR, \
142 {0}, \
143 0,0,0, \
144 (per_struct) {{{{0,}}},0,0,0,0,{{0,}}}, \
145 0, 0 \
146}
147 133
148/* 134/*
149 * Do necessary setup to start up a new thread. 135 * Do necessary setup to start up a new thread.
diff --git a/include/asm-s390/scatterlist.h b/include/asm-s390/scatterlist.h
index a43b3afc5e2d..29ec8e28c8df 100644
--- a/include/asm-s390/scatterlist.h
+++ b/include/asm-s390/scatterlist.h
@@ -2,7 +2,10 @@
2#define _ASMS390_SCATTERLIST_H 2#define _ASMS390_SCATTERLIST_H
3 3
4struct scatterlist { 4struct scatterlist {
5 struct page *page; 5#ifdef CONFIG_DEBUG_SG
6 unsigned long sg_magic;
7#endif
8 unsigned long page_link;
6 unsigned int offset; 9 unsigned int offset;
7 unsigned int length; 10 unsigned int length;
8}; 11};
diff --git a/include/asm-s390/tlb.h b/include/asm-s390/tlb.h
index 51bd957b85bd..618693cfc10f 100644
--- a/include/asm-s390/tlb.h
+++ b/include/asm-s390/tlb.h
@@ -2,19 +2,130 @@
2#define _S390_TLB_H 2#define _S390_TLB_H
3 3
4/* 4/*
5 * s390 doesn't need any special per-pte or 5 * TLB flushing on s390 is complicated. The following requirement
6 * per-vma handling.. 6 * from the principles of operation is the most arduous:
7 *
8 * "A valid table entry must not be changed while it is attached
9 * to any CPU and may be used for translation by that CPU except to
10 * (1) invalidate the entry by using INVALIDATE PAGE TABLE ENTRY,
11 * or INVALIDATE DAT TABLE ENTRY, (2) alter bits 56-63 of a page
12 * table entry, or (3) make a change by means of a COMPARE AND SWAP
13 * AND PURGE instruction that purges the TLB."
14 *
15 * The modification of a pte of an active mm struct therefore is
16 * a two step process: i) invalidate the pte, ii) store the new pte.
17 * This is true for the page protection bit as well.
18 * The only possible optimization is to flush at the beginning of
19 * a tlb_gather_mmu cycle if the mm_struct is currently not in use.
20 *
 21 * Pages used for the page tables are a different story. FIXME: more
7 */ 22 */
8#define tlb_start_vma(tlb, vma) do { } while (0) 23
9#define tlb_end_vma(tlb, vma) do { } while (0) 24#include <linux/mm.h>
10#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0) 25#include <linux/swap.h>
26#include <asm/processor.h>
27#include <asm/pgalloc.h>
28#include <asm/smp.h>
29#include <asm/tlbflush.h>
30
31#ifndef CONFIG_SMP
32#define TLB_NR_PTRS 1
33#else
34#define TLB_NR_PTRS 508
35#endif
36
37struct mmu_gather {
38 struct mm_struct *mm;
39 unsigned int fullmm;
40 unsigned int nr_ptes;
41 unsigned int nr_pmds;
42 void *array[TLB_NR_PTRS];
43};
44
45DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);
46
47static inline struct mmu_gather *tlb_gather_mmu(struct mm_struct *mm,
48 unsigned int full_mm_flush)
49{
50 struct mmu_gather *tlb = &get_cpu_var(mmu_gathers);
51
52 tlb->mm = mm;
53 tlb->fullmm = full_mm_flush || (num_online_cpus() == 1) ||
54 (atomic_read(&mm->mm_users) <= 1 && mm == current->active_mm);
55 tlb->nr_ptes = 0;
56 tlb->nr_pmds = TLB_NR_PTRS;
57 if (tlb->fullmm)
58 __tlb_flush_mm(mm);
59 return tlb;
60}
61
62static inline void tlb_flush_mmu(struct mmu_gather *tlb,
63 unsigned long start, unsigned long end)
64{
65 if (!tlb->fullmm && (tlb->nr_ptes > 0 || tlb->nr_pmds < TLB_NR_PTRS))
66 __tlb_flush_mm(tlb->mm);
67 while (tlb->nr_ptes > 0)
68 pte_free(tlb->array[--tlb->nr_ptes]);
69 while (tlb->nr_pmds < TLB_NR_PTRS)
70 pmd_free((pmd_t *) tlb->array[tlb->nr_pmds++]);
71}
72
73static inline void tlb_finish_mmu(struct mmu_gather *tlb,
74 unsigned long start, unsigned long end)
75{
76 tlb_flush_mmu(tlb, start, end);
77
78 /* keep the page table cache within bounds */
79 check_pgt_cache();
80
81 put_cpu_var(mmu_gathers);
82}
11 83
12/* 84/*
13 * .. because we flush the whole mm when it 85 * Release the page cache reference for a pte removed by
 14 * fills up. 86 * tlb_ptep_clear_flush. In both flush modes the tlb for a page cache page
87 * has already been freed, so just do free_page_and_swap_cache.
15 */ 88 */
16#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm) 89static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
90{
91 free_page_and_swap_cache(page);
92}
17 93
18#include <asm-generic/tlb.h> 94/*
95 * pte_free_tlb frees a pte table and clears the CRSTE for the
96 * page table from the tlb.
97 */
98static inline void pte_free_tlb(struct mmu_gather *tlb, struct page *page)
99{
100 if (!tlb->fullmm) {
101 tlb->array[tlb->nr_ptes++] = page;
102 if (tlb->nr_ptes >= tlb->nr_pmds)
103 tlb_flush_mmu(tlb, 0, 0);
104 } else
105 pte_free(page);
106}
19 107
108/*
109 * pmd_free_tlb frees a pmd table and clears the CRSTE for the
110 * segment table entry from the tlb.
111 */
112static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
113{
114#ifdef __s390x__
115 if (!tlb->fullmm) {
116 tlb->array[--tlb->nr_pmds] = (struct page *) pmd;
117 if (tlb->nr_ptes >= tlb->nr_pmds)
118 tlb_flush_mmu(tlb, 0, 0);
119 } else
120 pmd_free(pmd);
20#endif 121#endif
122}
123
124#define pud_free_tlb(tlb, pud) do { } while (0)
125
126#define tlb_start_vma(tlb, vma) do { } while (0)
127#define tlb_end_vma(tlb, vma) do { } while (0)
128#define tlb_remove_tlb_entry(tlb, ptep, addr) do { } while (0)
129#define tlb_migrate_finish(mm) do { } while (0)
130
131#endif /* _S390_TLB_H */
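
For orientation, a minimal sketch of how generic mm code is expected to drive the mmu_gather interface added above; the wrapper function and its arguments are illustrative assumptions, and only the tlb_* calls come from the header in this hunk.

#include <linux/mm.h>
#include <asm/tlb.h>

/* Illustrative caller, not part of the patch: unmap a range and let the
 * mmu_gather defer the TLB flush and the page table frees. */
static void example_unmap_range(struct mm_struct *mm,
                                unsigned long start, unsigned long end)
{
        struct mmu_gather *tlb;

        /* full_mm_flush=0: flush up front only if the mm is not in use */
        tlb = tlb_gather_mmu(mm, 0);

        /* ... the zap loop clears ptes and hands pages back through
         *     tlb_remove_page(tlb, page), while freed page tables go
         *     through pte_free_tlb()/pmd_free_tlb() ... */

        /* flush pending TLB entries, release the batched page tables */
        tlb_finish_mmu(tlb, start, end);
}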
diff --git a/include/asm-s390/tlbflush.h b/include/asm-s390/tlbflush.h
index 6de2632a3e4f..a69bd2490d52 100644
--- a/include/asm-s390/tlbflush.h
+++ b/include/asm-s390/tlbflush.h
@@ -6,68 +6,19 @@
6#include <asm/pgalloc.h> 6#include <asm/pgalloc.h>
7 7
8/* 8/*
9 * TLB flushing: 9 * Flush all tlb entries on the local cpu.
10 *
11 * - flush_tlb() flushes the current mm struct TLBs
12 * - flush_tlb_all() flushes all processes TLBs
13 * - flush_tlb_mm(mm) flushes the specified mm context TLB's
14 * - flush_tlb_page(vma, vmaddr) flushes one page
15 * - flush_tlb_range(vma, start, end) flushes a range of pages
16 * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
17 */
18
19/*
20 * S/390 has three ways of flushing TLBs
21 * 'ptlb' does a flush of the local processor
22 * 'csp' flushes the TLBs on all PUs of a SMP
23 * 'ipte' invalidates a pte in a page table and flushes that out of
24 * the TLBs of all PUs of a SMP
25 */
26
27#define local_flush_tlb() \
28do { asm volatile("ptlb": : :"memory"); } while (0)
29
30#ifndef CONFIG_SMP
31
32/*
33 * We always need to flush, since s390 does not flush tlb
34 * on each context switch
35 */ 10 */
36 11static inline void __tlb_flush_local(void)
37static inline void flush_tlb(void)
38{ 12{
39 local_flush_tlb(); 13 asm volatile("ptlb" : : : "memory");
40} 14}
41static inline void flush_tlb_all(void)
42{
43 local_flush_tlb();
44}
45static inline void flush_tlb_mm(struct mm_struct *mm)
46{
47 local_flush_tlb();
48}
49static inline void flush_tlb_page(struct vm_area_struct *vma,
50 unsigned long addr)
51{
52 local_flush_tlb();
53}
54static inline void flush_tlb_range(struct vm_area_struct *vma,
55 unsigned long start, unsigned long end)
56{
57 local_flush_tlb();
58}
59
60#define flush_tlb_kernel_range(start, end) \
61 local_flush_tlb();
62
63#else
64 15
65#include <asm/smp.h> 16/*
66 17 * Flush all tlb entries on all cpus.
67extern void smp_ptlb_all(void); 18 */
68 19static inline void __tlb_flush_global(void)
69static inline void global_flush_tlb(void)
70{ 20{
21 extern void smp_ptlb_all(void);
71 register unsigned long reg2 asm("2"); 22 register unsigned long reg2 asm("2");
72 register unsigned long reg3 asm("3"); 23 register unsigned long reg3 asm("3");
73 register unsigned long reg4 asm("4"); 24 register unsigned long reg4 asm("4");
@@ -89,66 +40,75 @@ static inline void global_flush_tlb(void)
89} 40}
90 41
91/* 42/*
92 * We only have to do global flush of tlb if process run since last 43 * Flush all tlb entries of a page table on all cpus.
93 * flush on any other pu than current.
94 * If we have threads (mm->count > 1) we always do a global flush,
95 * since the process runs on more than one processor at the same time.
96 */ 44 */
45static inline void __tlb_flush_idte(pgd_t *pgd)
46{
47 asm volatile(
48 " .insn rrf,0xb98e0000,0,%0,%1,0"
49 : : "a" (2048), "a" (__pa(pgd) & PAGE_MASK) : "cc" );
50}
97 51
98static inline void __flush_tlb_mm(struct mm_struct * mm) 52static inline void __tlb_flush_mm(struct mm_struct * mm)
99{ 53{
100 cpumask_t local_cpumask; 54 cpumask_t local_cpumask;
101 55
102 if (unlikely(cpus_empty(mm->cpu_vm_mask))) 56 if (unlikely(cpus_empty(mm->cpu_vm_mask)))
103 return; 57 return;
58 /*
59 * If the machine has IDTE we prefer to do a per mm flush
60 * on all cpus instead of doing a local flush if the mm
61 * only ran on the local cpu.
62 */
104 if (MACHINE_HAS_IDTE) { 63 if (MACHINE_HAS_IDTE) {
105 pgd_t *shadow_pgd = get_shadow_pgd(mm->pgd); 64 pgd_t *shadow_pgd = get_shadow_table(mm->pgd);
106 65
107 if (shadow_pgd) { 66 if (shadow_pgd)
108 asm volatile( 67 __tlb_flush_idte(shadow_pgd);
109 " .insn rrf,0xb98e0000,0,%0,%1,0" 68 __tlb_flush_idte(mm->pgd);
110 : : "a" (2048),
111 "a" (__pa(shadow_pgd) & PAGE_MASK) : "cc" );
112 }
113 asm volatile(
114 " .insn rrf,0xb98e0000,0,%0,%1,0"
115 : : "a" (2048), "a" (__pa(mm->pgd)&PAGE_MASK) : "cc");
116 return; 69 return;
117 } 70 }
118 preempt_disable(); 71 preempt_disable();
72 /*
73 * If the process only ran on the local cpu, do a local flush.
74 */
119 local_cpumask = cpumask_of_cpu(smp_processor_id()); 75 local_cpumask = cpumask_of_cpu(smp_processor_id());
120 if (cpus_equal(mm->cpu_vm_mask, local_cpumask)) 76 if (cpus_equal(mm->cpu_vm_mask, local_cpumask))
121 local_flush_tlb(); 77 __tlb_flush_local();
122 else 78 else
123 global_flush_tlb(); 79 __tlb_flush_global();
124 preempt_enable(); 80 preempt_enable();
125} 81}
126 82
127static inline void flush_tlb(void) 83static inline void __tlb_flush_mm_cond(struct mm_struct * mm)
128{
129 __flush_tlb_mm(current->mm);
130}
131static inline void flush_tlb_all(void)
132{
133 global_flush_tlb();
134}
135static inline void flush_tlb_mm(struct mm_struct *mm)
136{
137 __flush_tlb_mm(mm);
138}
139static inline void flush_tlb_page(struct vm_area_struct *vma,
140 unsigned long addr)
141{
142 __flush_tlb_mm(vma->vm_mm);
143}
144static inline void flush_tlb_range(struct vm_area_struct *vma,
145 unsigned long start, unsigned long end)
146{ 84{
147 __flush_tlb_mm(vma->vm_mm); 85 if (atomic_read(&mm->mm_users) <= 1 && mm == current->active_mm)
86 __tlb_flush_mm(mm);
148} 87}
149 88
150#define flush_tlb_kernel_range(start, end) global_flush_tlb() 89/*
90 * TLB flushing:
91 * flush_tlb() - flushes the current mm struct TLBs
92 * flush_tlb_all() - flushes all processes TLBs
93 * flush_tlb_mm(mm) - flushes the specified mm context TLB's
94 * flush_tlb_page(vma, vmaddr) - flushes one page
95 * flush_tlb_range(vma, start, end) - flushes a range of pages
96 * flush_tlb_kernel_range(start, end) - flushes a range of kernel pages
97 */
151 98
152#endif 99/*
100 * flush_tlb_mm goes together with ptep_set_wrprotect for the
101 * copy_page_range operation and flush_tlb_range is related to
102 * ptep_get_and_clear for change_protection. ptep_set_wrprotect and
103 * ptep_get_and_clear do not flush the TLBs directly if the mm has
104 * only one user. At the end of the update the flush_tlb_mm and
105 * flush_tlb_range functions need to do the flush.
106 */
107#define flush_tlb() do { } while (0)
108#define flush_tlb_all() do { } while (0)
109#define flush_tlb_mm(mm) __tlb_flush_mm_cond(mm)
110#define flush_tlb_page(vma, addr) do { } while (0)
 111#define flush_tlb_range(vma, start, end) __tlb_flush_mm_cond((vma)->vm_mm)
112#define flush_tlb_kernel_range(start, end) __tlb_flush_mm(&init_mm)
153 113
154#endif /* _S390_TLBFLUSH_H */ 114#endif /* _S390_TLBFLUSH_H */
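
A minimal sketch of the deferred-flush contract described in the comment above, assuming a copy_page_range()-style caller; the page table walk is elided and only the flush_tlb_mm()/__tlb_flush_mm_cond() behaviour is taken from this header.

#include <linux/mm.h>
#include <asm/tlbflush.h>

/* Illustrative only: the per-pte write-protect operations do not flush
 * on their own when the mm has a single user; one flush at the end of
 * the update does the work. */
static void example_wrprotect_mm(struct mm_struct *mm)
{
        /* ... walk the page tables and write-protect each pte ... */

        /* expands to __tlb_flush_mm_cond(mm): flushes only if the mm
         * has a single user and is the current address space */
        flush_tlb_mm(mm);
}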
diff --git a/include/asm-sh/dma-mapping.h b/include/asm-sh/dma-mapping.h
index 84fefdaa01a5..fcea067f7a9c 100644
--- a/include/asm-sh/dma-mapping.h
+++ b/include/asm-sh/dma-mapping.h
@@ -2,7 +2,7 @@
2#define __ASM_SH_DMA_MAPPING_H 2#define __ASM_SH_DMA_MAPPING_H
3 3
4#include <linux/mm.h> 4#include <linux/mm.h>
5#include <asm/scatterlist.h> 5#include <linux/scatterlist.h>
6#include <asm/cacheflush.h> 6#include <asm/cacheflush.h>
7#include <asm/io.h> 7#include <asm/io.h>
8 8
@@ -85,10 +85,9 @@ static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
85 85
86 for (i = 0; i < nents; i++) { 86 for (i = 0; i < nents; i++) {
87#if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT) 87#if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT)
88 dma_cache_sync(dev, page_address(sg[i].page) + sg[i].offset, 88 dma_cache_sync(dev, sg_virt(&sg[i]), sg[i].length, dir);
89 sg[i].length, dir);
90#endif 89#endif
91 sg[i].dma_address = page_to_phys(sg[i].page) + sg[i].offset; 90 sg[i].dma_address = sg_phys(&sg[i]);
92 } 91 }
93 92
94 return nents; 93 return nents;
@@ -138,10 +137,9 @@ static inline void dma_sync_sg(struct device *dev, struct scatterlist *sg,
138 137
139 for (i = 0; i < nelems; i++) { 138 for (i = 0; i < nelems; i++) {
140#if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT) 139#if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT)
141 dma_cache_sync(dev, page_address(sg[i].page) + sg[i].offset, 140 dma_cache_sync(dev, sg_virt(&sg[i]), sg[i].length, dir);
142 sg[i].length, dir);
143#endif 141#endif
144 sg[i].dma_address = page_to_phys(sg[i].page) + sg[i].offset; 142 sg[i].dma_address = sg_phys(&sg[i]);
145 } 143 }
146} 144}
147 145
diff --git a/include/asm-sh/scatterlist.h b/include/asm-sh/scatterlist.h
index b9ae53c38365..a7d0d1856a99 100644
--- a/include/asm-sh/scatterlist.h
+++ b/include/asm-sh/scatterlist.h
@@ -4,7 +4,10 @@
4#include <asm/types.h> 4#include <asm/types.h>
5 5
6struct scatterlist { 6struct scatterlist {
7 struct page * page; /* Location for highmem page, if any */ 7#ifdef CONFIG_DEBUG_SG
8 unsigned long sg_magic;
9#endif
10 unsigned long page_link;
8 unsigned int offset;/* for highmem, page offset */ 11 unsigned int offset;/* for highmem, page offset */
9 dma_addr_t dma_address; 12 dma_addr_t dma_address;
10 unsigned int length; 13 unsigned int length;
diff --git a/include/asm-sh64/dma-mapping.h b/include/asm-sh64/dma-mapping.h
index e661857f98dc..1438b763a5ea 100644
--- a/include/asm-sh64/dma-mapping.h
+++ b/include/asm-sh64/dma-mapping.h
@@ -2,7 +2,7 @@
2#define __ASM_SH_DMA_MAPPING_H 2#define __ASM_SH_DMA_MAPPING_H
3 3
4#include <linux/mm.h> 4#include <linux/mm.h>
5#include <asm/scatterlist.h> 5#include <linux/scatterlist.h>
6#include <asm/io.h> 6#include <asm/io.h>
7 7
8struct pci_dev; 8struct pci_dev;
@@ -71,10 +71,9 @@ static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
71 71
72 for (i = 0; i < nents; i++) { 72 for (i = 0; i < nents; i++) {
73#if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT) 73#if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT)
74 dma_cache_sync(dev, page_address(sg[i].page) + sg[i].offset, 74 dma_cache_sync(dev, sg_virt(&sg[i]), sg[i].length, dir);
75 sg[i].length, dir);
76#endif 75#endif
77 sg[i].dma_address = page_to_phys(sg[i].page) + sg[i].offset; 76 sg[i].dma_address = sg_phys(&sg[i]);
78 } 77 }
79 78
80 return nents; 79 return nents;
@@ -124,10 +123,9 @@ static inline void dma_sync_sg(struct device *dev, struct scatterlist *sg,
124 123
125 for (i = 0; i < nelems; i++) { 124 for (i = 0; i < nelems; i++) {
126#if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT) 125#if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT)
127 dma_cache_sync(dev, page_address(sg[i].page) + sg[i].offset, 126 dma_cache_sync(dev, sg_virt(&sg[i]), sg[i].length, dir);
128 sg[i].length, dir);
129#endif 127#endif
130 sg[i].dma_address = page_to_phys(sg[i].page) + sg[i].offset; 128 sg[i].dma_address = sg_phys(&sg[i]);
131 } 129 }
132} 130}
133 131
diff --git a/include/asm-sh64/scatterlist.h b/include/asm-sh64/scatterlist.h
index 1c723f2d7a95..5109251970e7 100644
--- a/include/asm-sh64/scatterlist.h
+++ b/include/asm-sh64/scatterlist.h
@@ -14,7 +14,10 @@
14#include <asm/types.h> 14#include <asm/types.h>
15 15
16struct scatterlist { 16struct scatterlist {
17 struct page * page; /* Location for highmem page, if any */ 17#ifdef CONFIG_DEBUG_SG
18 unsigned long sg_magic;
19#endif
20 unsigned long page_link;
18 unsigned int offset;/* for highmem, page offset */ 21 unsigned int offset;/* for highmem, page offset */
19 dma_addr_t dma_address; 22 dma_addr_t dma_address;
20 unsigned int length; 23 unsigned int length;
diff --git a/include/asm-sparc/scatterlist.h b/include/asm-sparc/scatterlist.h
index 4055af90ad7e..e08d3d775b08 100644
--- a/include/asm-sparc/scatterlist.h
+++ b/include/asm-sparc/scatterlist.h
@@ -5,7 +5,10 @@
5#include <linux/types.h> 5#include <linux/types.h>
6 6
7struct scatterlist { 7struct scatterlist {
8 struct page *page; 8#ifdef CONFIG_DEBUG_SG
9 unsigned long sg_magic;
10#endif
11 unsigned long page_link;
9 unsigned int offset; 12 unsigned int offset;
10 13
11 unsigned int length; 14 unsigned int length;
diff --git a/include/asm-sparc64/scatterlist.h b/include/asm-sparc64/scatterlist.h
index 703c5bbe6c8c..6df23f070b1a 100644
--- a/include/asm-sparc64/scatterlist.h
+++ b/include/asm-sparc64/scatterlist.h
@@ -6,7 +6,10 @@
6#include <asm/types.h> 6#include <asm/types.h>
7 7
8struct scatterlist { 8struct scatterlist {
9 struct page *page; 9#ifdef CONFIG_DEBUG_SG
10 unsigned long sg_magic;
11#endif
12 unsigned long page_link;
10 unsigned int offset; 13 unsigned int offset;
11 14
12 unsigned int length; 15 unsigned int length;
diff --git a/include/asm-v850/scatterlist.h b/include/asm-v850/scatterlist.h
index 56f402920db9..02d27b3fb061 100644
--- a/include/asm-v850/scatterlist.h
+++ b/include/asm-v850/scatterlist.h
@@ -17,7 +17,10 @@
17#include <asm/types.h> 17#include <asm/types.h>
18 18
19struct scatterlist { 19struct scatterlist {
20 struct page *page; 20#ifdef CONFIG_DEBUG_SG
21 unsigned long sg_magic;
22#endif
23 unsigned long page_link;
21 unsigned offset; 24 unsigned offset;
22 dma_addr_t dma_address; 25 dma_addr_t dma_address;
23 unsigned length; 26 unsigned length;
diff --git a/include/asm-x86/bootparam.h b/include/asm-x86/bootparam.h
index ef67b59dbdb9..dc031cf44633 100644
--- a/include/asm-x86/bootparam.h
+++ b/include/asm-x86/bootparam.h
@@ -28,8 +28,9 @@ struct setup_header {
28 u16 kernel_version; 28 u16 kernel_version;
29 u8 type_of_loader; 29 u8 type_of_loader;
30 u8 loadflags; 30 u8 loadflags;
31#define LOADED_HIGH 0x01 31#define LOADED_HIGH (1<<0)
32#define CAN_USE_HEAP 0x80 32#define KEEP_SEGMENTS (1<<6)
33#define CAN_USE_HEAP (1<<7)
33 u16 setup_move_size; 34 u16 setup_move_size;
34 u32 code32_start; 35 u32 code32_start;
35 u32 ramdisk_image; 36 u32 ramdisk_image;
@@ -41,6 +42,10 @@ struct setup_header {
41 u32 initrd_addr_max; 42 u32 initrd_addr_max;
42 u32 kernel_alignment; 43 u32 kernel_alignment;
43 u8 relocatable_kernel; 44 u8 relocatable_kernel;
45 u8 _pad2[3];
46 u32 cmdline_size;
47 u32 hardware_subarch;
48 u64 hardware_subarch_data;
44} __attribute__((packed)); 49} __attribute__((packed));
45 50
46struct sys_desc_table { 51struct sys_desc_table {
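
A small sketch of testing the loadflags bits defined above; the setup_header pointer is assumed to be already populated by the boot loader.

#include <asm/bootparam.h>

/* Illustrative check of the loadflags bits; 'hdr' is assumed to point
 * at a populated struct setup_header. */
static int example_heap_usable(const struct setup_header *hdr)
{
        if (!(hdr->loadflags & LOADED_HIGH))
                return 0;               /* low-loaded zImage-style kernel */
        return (hdr->loadflags & CAN_USE_HEAP) != 0;
}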
diff --git a/include/asm-x86/cacheflush.h b/include/asm-x86/cacheflush.h
index b3d43de44c59..9411a2d3f19c 100644
--- a/include/asm-x86/cacheflush.h
+++ b/include/asm-x86/cacheflush.h
@@ -27,6 +27,7 @@
27void global_flush_tlb(void); 27void global_flush_tlb(void);
28int change_page_attr(struct page *page, int numpages, pgprot_t prot); 28int change_page_attr(struct page *page, int numpages, pgprot_t prot);
29int change_page_attr_addr(unsigned long addr, int numpages, pgprot_t prot); 29int change_page_attr_addr(unsigned long addr, int numpages, pgprot_t prot);
30void clflush_cache_range(void *addr, int size);
30 31
31#ifdef CONFIG_DEBUG_PAGEALLOC 32#ifdef CONFIG_DEBUG_PAGEALLOC
32/* internal debugging function */ 33/* internal debugging function */
diff --git a/include/asm-x86/device.h b/include/asm-x86/device.h
index d9ee5e52e91b..87a715367a1b 100644
--- a/include/asm-x86/device.h
+++ b/include/asm-x86/device.h
@@ -5,6 +5,9 @@ struct dev_archdata {
5#ifdef CONFIG_ACPI 5#ifdef CONFIG_ACPI
6 void *acpi_handle; 6 void *acpi_handle;
7#endif 7#endif
8#ifdef CONFIG_DMAR
9 void *iommu; /* hook for IOMMU specific extension */
10#endif
8}; 11};
9 12
10#endif /* _ASM_X86_DEVICE_H */ 13#endif /* _ASM_X86_DEVICE_H */
diff --git a/include/asm-x86/dma-mapping_32.h b/include/asm-x86/dma-mapping_32.h
index 6a2d26cb5da6..55f01bd9e556 100644
--- a/include/asm-x86/dma-mapping_32.h
+++ b/include/asm-x86/dma-mapping_32.h
@@ -45,9 +45,9 @@ dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
45 WARN_ON(nents == 0 || sglist[0].length == 0); 45 WARN_ON(nents == 0 || sglist[0].length == 0);
46 46
47 for_each_sg(sglist, sg, nents, i) { 47 for_each_sg(sglist, sg, nents, i) {
48 BUG_ON(!sg->page); 48 BUG_ON(!sg_page(sg));
49 49
50 sg->dma_address = page_to_phys(sg->page) + sg->offset; 50 sg->dma_address = sg_phys(sg);
51 } 51 }
52 52
53 flush_write_buffers(); 53 flush_write_buffers();
diff --git a/include/asm-x86/scatterlist_32.h b/include/asm-x86/scatterlist_32.h
index bd5164aa8f63..0e7d997a34be 100644
--- a/include/asm-x86/scatterlist_32.h
+++ b/include/asm-x86/scatterlist_32.h
@@ -4,7 +4,10 @@
4#include <asm/types.h> 4#include <asm/types.h>
5 5
6struct scatterlist { 6struct scatterlist {
7 struct page *page; 7#ifdef CONFIG_DEBUG_SG
8 unsigned long sg_magic;
9#endif
10 unsigned long page_link;
8 unsigned int offset; 11 unsigned int offset;
9 dma_addr_t dma_address; 12 dma_addr_t dma_address;
10 unsigned int length; 13 unsigned int length;
diff --git a/include/asm-x86/scatterlist_64.h b/include/asm-x86/scatterlist_64.h
index ef3986ba4b79..1847c72befeb 100644
--- a/include/asm-x86/scatterlist_64.h
+++ b/include/asm-x86/scatterlist_64.h
@@ -4,7 +4,10 @@
4#include <asm/types.h> 4#include <asm/types.h>
5 5
6struct scatterlist { 6struct scatterlist {
7 struct page *page; 7#ifdef CONFIG_DEBUG_SG
8 unsigned long sg_magic;
9#endif
10 unsigned long page_link;
8 unsigned int offset; 11 unsigned int offset;
9 unsigned int length; 12 unsigned int length;
10 dma_addr_t dma_address; 13 dma_addr_t dma_address;
diff --git a/include/asm-xtensa/scatterlist.h b/include/asm-xtensa/scatterlist.h
index ca337a294290..810080bb0a2b 100644
--- a/include/asm-xtensa/scatterlist.h
+++ b/include/asm-xtensa/scatterlist.h
@@ -14,7 +14,10 @@
14#include <asm/types.h> 14#include <asm/types.h>
15 15
16struct scatterlist { 16struct scatterlist {
17 struct page *page; 17#ifdef CONFIG_DEBUG_SG
18 unsigned long sg_magic;
19#endif
20 unsigned long page_link;
18 unsigned int offset; 21 unsigned int offset;
19 dma_addr_t dma_address; 22 dma_addr_t dma_address;
20 unsigned int length; 23 unsigned int length;
diff --git a/include/linux/capability.h b/include/linux/capability.h
index 7a8d7ade28a0..bb017edffd56 100644
--- a/include/linux/capability.h
+++ b/include/linux/capability.h
@@ -56,10 +56,8 @@ typedef struct __user_cap_data_struct {
56 56
57struct vfs_cap_data { 57struct vfs_cap_data {
58 __u32 magic_etc; /* Little endian */ 58 __u32 magic_etc; /* Little endian */
59 struct { 59 __u32 permitted; /* Little endian */
60 __u32 permitted; /* Little endian */ 60 __u32 inheritable; /* Little endian */
61 __u32 inheritable; /* Little endian */
62 } data[1];
63}; 61};
64 62
65#ifdef __KERNEL__ 63#ifdef __KERNEL__
diff --git a/include/linux/dmar.h b/include/linux/dmar.h
new file mode 100644
index 000000000000..ffb6439cb5e6
--- /dev/null
+++ b/include/linux/dmar.h
@@ -0,0 +1,86 @@
1/*
2 * Copyright (c) 2006, Intel Corporation.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
15 * Place - Suite 330, Boston, MA 02111-1307 USA.
16 *
17 * Copyright (C) Ashok Raj <ashok.raj@intel.com>
18 * Copyright (C) Shaohua Li <shaohua.li@intel.com>
19 */
20
21#ifndef __DMAR_H__
22#define __DMAR_H__
23
24#include <linux/acpi.h>
25#include <linux/types.h>
26#include <linux/msi.h>
27
28#ifdef CONFIG_DMAR
29struct intel_iommu;
30
31extern char *dmar_get_fault_reason(u8 fault_reason);
32
33/* Can't use the common MSI interrupt functions
34 * since DMAR is not a pci device
35 */
36extern void dmar_msi_unmask(unsigned int irq);
37extern void dmar_msi_mask(unsigned int irq);
38extern void dmar_msi_read(int irq, struct msi_msg *msg);
39extern void dmar_msi_write(int irq, struct msi_msg *msg);
40extern int dmar_set_interrupt(struct intel_iommu *iommu);
41extern int arch_setup_dmar_msi(unsigned int irq);
42
43/* Intel IOMMU detection and initialization functions */
44extern void detect_intel_iommu(void);
45extern int intel_iommu_init(void);
46
47extern int dmar_table_init(void);
48extern int early_dmar_detect(void);
49
50extern struct list_head dmar_drhd_units;
51extern struct list_head dmar_rmrr_units;
52
53struct dmar_drhd_unit {
54 struct list_head list; /* list of drhd units */
55 u64 reg_base_addr; /* register base address*/
56 struct pci_dev **devices; /* target device array */
57 int devices_cnt; /* target device count */
58 u8 ignored:1; /* ignore drhd */
59 u8 include_all:1;
60 struct intel_iommu *iommu;
61};
62
63struct dmar_rmrr_unit {
64 struct list_head list; /* list of rmrr units */
65 u64 base_address; /* reserved base address*/
66 u64 end_address; /* reserved end address */
67 struct pci_dev **devices; /* target devices */
68 int devices_cnt; /* target device count */
69};
70
71#define for_each_drhd_unit(drhd) \
72 list_for_each_entry(drhd, &dmar_drhd_units, list)
73#define for_each_rmrr_units(rmrr) \
74 list_for_each_entry(rmrr, &dmar_rmrr_units, list)
75#else
76static inline void detect_intel_iommu(void)
77{
78 return;
79}
80static inline int intel_iommu_init(void)
81{
82 return -ENODEV;
83}
84
85#endif /* !CONFIG_DMAR */
86#endif /* __DMAR_H__ */
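
A hedged usage sketch for the new header, assuming CONFIG_DMAR and that dmar_table_init() has already parsed the ACPI DMAR table; the dump function itself is illustrative.

#include <linux/dmar.h>
#include <linux/kernel.h>
#include <linux/pci.h>

/* Illustrative only: walk the DMA remapping hardware units registered
 * from the DMAR table and print their register base addresses. */
static void example_dump_drhd(void)
{
        struct dmar_drhd_unit *drhd;

        for_each_drhd_unit(drhd) {
                if (drhd->ignored)
                        continue;
                printk(KERN_INFO "DRHD at %llx, %d devices%s\n",
                       (unsigned long long) drhd->reg_base_addr,
                       drhd->devices_cnt,
                       drhd->include_all ? " (include_all)" : "");
        }
}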
diff --git a/include/linux/efi.h b/include/linux/efi.h
index 0b9579a4cd42..14813b595802 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -298,7 +298,7 @@ extern int efi_mem_attribute_range (unsigned long phys_addr, unsigned long size,
298 u64 attr); 298 u64 attr);
299extern int __init efi_uart_console_only (void); 299extern int __init efi_uart_console_only (void);
300extern void efi_initialize_iomem_resources(struct resource *code_resource, 300extern void efi_initialize_iomem_resources(struct resource *code_resource,
301 struct resource *data_resource); 301 struct resource *data_resource, struct resource *bss_resource);
302extern unsigned long efi_get_time(void); 302extern unsigned long efi_get_time(void);
303extern int efi_set_rtc_mmss(unsigned long nowtime); 303extern int efi_set_rtc_mmss(unsigned long nowtime);
304extern int is_available_memory(efi_memory_desc_t * md); 304extern int is_available_memory(efi_memory_desc_t * md);
diff --git a/include/linux/efs_fs.h b/include/linux/efs_fs.h
index 16cb25cbf7c5..dd57fe523e97 100644
--- a/include/linux/efs_fs.h
+++ b/include/linux/efs_fs.h
@@ -35,6 +35,7 @@ static inline struct efs_sb_info *SUPER_INFO(struct super_block *sb)
35} 35}
36 36
37struct statfs; 37struct statfs;
38struct fid;
38 39
39extern const struct inode_operations efs_dir_inode_operations; 40extern const struct inode_operations efs_dir_inode_operations;
40extern const struct file_operations efs_dir_operations; 41extern const struct file_operations efs_dir_operations;
@@ -45,7 +46,10 @@ extern efs_block_t efs_map_block(struct inode *, efs_block_t);
45extern int efs_get_block(struct inode *, sector_t, struct buffer_head *, int); 46extern int efs_get_block(struct inode *, sector_t, struct buffer_head *, int);
46 47
47extern struct dentry *efs_lookup(struct inode *, struct dentry *, struct nameidata *); 48extern struct dentry *efs_lookup(struct inode *, struct dentry *, struct nameidata *);
48extern struct dentry *efs_get_dentry(struct super_block *sb, void *vobjp); 49extern struct dentry *efs_fh_to_dentry(struct super_block *sb, struct fid *fid,
50 int fh_len, int fh_type);
51extern struct dentry *efs_fh_to_parent(struct super_block *sb, struct fid *fid,
52 int fh_len, int fh_type);
49extern struct dentry *efs_get_parent(struct dentry *); 53extern struct dentry *efs_get_parent(struct dentry *);
50extern int efs_bmap(struct inode *, int); 54extern int efs_bmap(struct inode *, int);
51 55
diff --git a/include/linux/exportfs.h b/include/linux/exportfs.h
index 8872fe8392d6..51d214138814 100644
--- a/include/linux/exportfs.h
+++ b/include/linux/exportfs.h
@@ -4,9 +4,48 @@
4#include <linux/types.h> 4#include <linux/types.h>
5 5
6struct dentry; 6struct dentry;
7struct inode;
7struct super_block; 8struct super_block;
8struct vfsmount; 9struct vfsmount;
9 10
11/*
12 * The fileid_type identifies how the file within the filesystem is encoded.
13 * In theory this is freely set and parsed by the filesystem, but we try to
14 * stick to conventions so we can share some generic code and don't confuse
15 * sniffers like ethereal/wireshark.
16 *
17 * The filesystem must not use the value '0' or '0xff'.
18 */
19enum fid_type {
20 /*
21 * The root, or export point, of the filesystem.
 22 * (Never actually passed down to the filesystem.)
23 */
24 FILEID_ROOT = 0,
25
26 /*
27 * 32bit inode number, 32 bit generation number.
28 */
29 FILEID_INO32_GEN = 1,
30
31 /*
32 * 32bit inode number, 32 bit generation number,
33 * 32 bit parent directory inode number.
34 */
35 FILEID_INO32_GEN_PARENT = 2,
36};
37
38struct fid {
39 union {
40 struct {
41 u32 ino;
42 u32 gen;
43 u32 parent_ino;
44 u32 parent_gen;
45 } i32;
46 __u32 raw[6];
47 };
48};
10 49
11/** 50/**
12 * struct export_operations - for nfsd to communicate with file systems 51 * struct export_operations - for nfsd to communicate with file systems
@@ -15,43 +54,9 @@ struct vfsmount;
15 * @get_name: find the name for a given inode in a given directory 54 * @get_name: find the name for a given inode in a given directory
16 * @get_parent: find the parent of a given directory 55 * @get_parent: find the parent of a given directory
17 * @get_dentry: find a dentry for the inode given a file handle sub-fragment 56 * @get_dentry: find a dentry for the inode given a file handle sub-fragment
18 * @find_exported_dentry:
19 * set by the exporting module to a standard helper function.
20 *
21 * Description:
22 * The export_operations structure provides a means for nfsd to communicate
23 * with a particular exported file system - particularly enabling nfsd and
24 * the filesystem to co-operate when dealing with file handles.
25 *
26 * export_operations contains two basic operation for dealing with file
27 * handles, decode_fh() and encode_fh(), and allows for some other
28 * operations to be defined which standard helper routines use to get
29 * specific information from the filesystem.
30 *
31 * nfsd encodes information use to determine which filesystem a filehandle
32 * applies to in the initial part of the file handle. The remainder, termed
33 * a file handle fragment, is controlled completely by the filesystem. The
34 * standard helper routines assume that this fragment will contain one or
35 * two sub-fragments, one which identifies the file, and one which may be
36 * used to identify the (a) directory containing the file.
37 * 57 *
38 * In some situations, nfsd needs to get a dentry which is connected into a 58 * See Documentation/filesystems/Exporting for details on how to use
39 * specific part of the file tree. To allow for this, it passes the 59 * this interface correctly.
40 * function acceptable() together with a @context which can be used to see
41 * if the dentry is acceptable. As there can be multiple dentrys for a
42 * given file, the filesystem should check each one for acceptability before
43 * looking for the next. As soon as an acceptable one is found, it should
44 * be returned.
45 *
46 * decode_fh:
47 * @decode_fh is given a &struct super_block (@sb), a file handle fragment
48 * (@fh, @fh_len) and an acceptability testing function (@acceptable,
49 * @context). It should return a &struct dentry which refers to the same
50 * file that the file handle fragment refers to, and which passes the
51 * acceptability test. If it cannot, it should return a %NULL pointer if
52 * the file was found but no acceptable &dentries were available, or a
53 * %ERR_PTR error code indicating why it couldn't be found (e.g. %ENOENT or
54 * %ENOMEM).
55 * 60 *
56 * encode_fh: 61 * encode_fh:
57 * @encode_fh should store in the file handle fragment @fh (using at most 62 * @encode_fh should store in the file handle fragment @fh (using at most
@@ -63,6 +68,21 @@ struct vfsmount;
63 * the filehandle fragment. encode_fh() should return the number of bytes 68 * the filehandle fragment. encode_fh() should return the number of bytes
64 * stored or a negative error code such as %-ENOSPC 69 * stored or a negative error code such as %-ENOSPC
65 * 70 *
71 * fh_to_dentry:
72 * @fh_to_dentry is given a &struct super_block (@sb) and a file handle
73 * fragment (@fh, @fh_len). It should return a &struct dentry which refers
74 * to the same file that the file handle fragment refers to. If it cannot,
75 * it should return a %NULL pointer if the file was found but no acceptable
76 * &dentries were available, or an %ERR_PTR error code indicating why it
77 * couldn't be found (e.g. %ENOENT or %ENOMEM). Any suitable dentry can be
78 * returned including, if necessary, a new dentry created with d_alloc_root.
79 * The caller can then find any other extant dentries by following the
80 * d_alias links.
81 *
82 * fh_to_parent:
83 * Same as @fh_to_dentry, except that it returns a pointer to the parent
84 * dentry if it was encoded into the filehandle fragment by @encode_fh.
85 *
66 * get_name: 86 * get_name:
67 * @get_name should find a name for the given @child in the given @parent 87 * @get_name should find a name for the given @child in the given @parent
68 * directory. The name should be stored in the @name (with the 88 * directory. The name should be stored in the @name (with the
@@ -75,52 +95,37 @@ struct vfsmount;
75 * is also a directory. In the event that it cannot be found, or storage 95 * is also a directory. In the event that it cannot be found, or storage
76 * space cannot be allocated, a %ERR_PTR should be returned. 96 * space cannot be allocated, a %ERR_PTR should be returned.
77 * 97 *
78 * get_dentry:
79 * Given a &super_block (@sb) and a pointer to a file-system specific inode
80 * identifier, possibly an inode number, (@inump) get_dentry() should find
81 * the identified inode and return a dentry for that inode. Any suitable
82 * dentry can be returned including, if necessary, a new dentry created with
83 * d_alloc_root. The caller can then find any other extant dentrys by
84 * following the d_alias links. If a new dentry was created using
85 * d_alloc_root, DCACHE_NFSD_DISCONNECTED should be set, and the dentry
86 * should be d_rehash()ed.
87 *
88 * If the inode cannot be found, either a %NULL pointer or an %ERR_PTR code
89 * can be returned. The @inump will be whatever was passed to
90 * nfsd_find_fh_dentry() in either the @obj or @parent parameters.
91 *
92 * Locking rules: 98 * Locking rules:
93 * get_parent is called with child->d_inode->i_mutex down 99 * get_parent is called with child->d_inode->i_mutex down
94 * get_name is not (which is possibly inconsistent) 100 * get_name is not (which is possibly inconsistent)
95 */ 101 */
96 102
97struct export_operations { 103struct export_operations {
98 struct dentry *(*decode_fh)(struct super_block *sb, __u32 *fh,
99 int fh_len, int fh_type,
100 int (*acceptable)(void *context, struct dentry *de),
101 void *context);
102 int (*encode_fh)(struct dentry *de, __u32 *fh, int *max_len, 104 int (*encode_fh)(struct dentry *de, __u32 *fh, int *max_len,
103 int connectable); 105 int connectable);
106 struct dentry * (*fh_to_dentry)(struct super_block *sb, struct fid *fid,
107 int fh_len, int fh_type);
108 struct dentry * (*fh_to_parent)(struct super_block *sb, struct fid *fid,
109 int fh_len, int fh_type);
104 int (*get_name)(struct dentry *parent, char *name, 110 int (*get_name)(struct dentry *parent, char *name,
105 struct dentry *child); 111 struct dentry *child);
106 struct dentry * (*get_parent)(struct dentry *child); 112 struct dentry * (*get_parent)(struct dentry *child);
107 struct dentry * (*get_dentry)(struct super_block *sb, void *inump);
108
109 /* This is set by the exporting module to a standard helper */
110 struct dentry * (*find_exported_dentry)(
111 struct super_block *sb, void *obj, void *parent,
112 int (*acceptable)(void *context, struct dentry *de),
113 void *context);
114}; 113};
115 114
116extern struct dentry *find_exported_dentry(struct super_block *sb, void *obj, 115extern int exportfs_encode_fh(struct dentry *dentry, struct fid *fid,
117 void *parent, int (*acceptable)(void *context, struct dentry *de), 116 int *max_len, int connectable);
118 void *context); 117extern struct dentry *exportfs_decode_fh(struct vfsmount *mnt, struct fid *fid,
119
120extern int exportfs_encode_fh(struct dentry *dentry, __u32 *fh, int *max_len,
121 int connectable);
122extern struct dentry *exportfs_decode_fh(struct vfsmount *mnt, __u32 *fh,
123 int fh_len, int fileid_type, int (*acceptable)(void *, struct dentry *), 118 int fh_len, int fileid_type, int (*acceptable)(void *, struct dentry *),
124 void *context); 119 void *context);
125 120
121/*
122 * Generic helpers for filesystems.
123 */
124extern struct dentry *generic_fh_to_dentry(struct super_block *sb,
125 struct fid *fid, int fh_len, int fh_type,
126 struct inode *(*get_inode) (struct super_block *sb, u64 ino, u32 gen));
127extern struct dentry *generic_fh_to_parent(struct super_block *sb,
128 struct fid *fid, int fh_len, int fh_type,
129 struct inode *(*get_inode) (struct super_block *sb, u64 ino, u32 gen));
130
126#endif /* LINUX_EXPORTFS_H */ 131#endif /* LINUX_EXPORTFS_H */
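
To make the new export_operations shape concrete, a sketch of how a filesystem might wire up fh_to_dentry through the generic helper declared above; examplefs_get_inode and its lookup details are assumptions standing in for the filesystem's own iget.

#include <linux/exportfs.h>
#include <linux/fs.h>

/* Assumed filesystem-specific inode lookup (not part of the patch). */
static struct inode *examplefs_get_inode(struct super_block *sb,
                                         u64 ino, u32 gen)
{
        struct inode *inode = NULL;     /* e.g. examplefs_iget(sb, ino) */
        /* ... look up the inode and verify i_generation == gen ... */
        return inode;
}

static struct dentry *examplefs_fh_to_dentry(struct super_block *sb,
                                             struct fid *fid,
                                             int fh_len, int fh_type)
{
        return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
                                    examplefs_get_inode);
}

static const struct export_operations examplefs_export_ops = {
        .fh_to_dentry   = examplefs_fh_to_dentry,
        /* .fh_to_parent, .get_parent, .get_name as needed */
};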
diff --git a/include/linux/ext2_fs.h b/include/linux/ext2_fs.h
index c77c3bbfe4bb..0f6c86c634fd 100644
--- a/include/linux/ext2_fs.h
+++ b/include/linux/ext2_fs.h
@@ -561,6 +561,7 @@ enum {
561#define EXT2_DIR_ROUND (EXT2_DIR_PAD - 1) 561#define EXT2_DIR_ROUND (EXT2_DIR_PAD - 1)
562#define EXT2_DIR_REC_LEN(name_len) (((name_len) + 8 + EXT2_DIR_ROUND) & \ 562#define EXT2_DIR_REC_LEN(name_len) (((name_len) + 8 + EXT2_DIR_ROUND) & \
563 ~EXT2_DIR_ROUND) 563 ~EXT2_DIR_ROUND)
564#define EXT2_MAX_REC_LEN ((1<<16)-1)
564 565
565static inline ext2_fsblk_t 566static inline ext2_fsblk_t
566ext2_group_first_block_no(struct super_block *sb, unsigned long group_no) 567ext2_group_first_block_no(struct super_block *sb, unsigned long group_no)
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 50078bb30a1c..b3ec4a496d64 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -987,7 +987,7 @@ struct super_block {
987 const struct super_operations *s_op; 987 const struct super_operations *s_op;
988 struct dquot_operations *dq_op; 988 struct dquot_operations *dq_op;
989 struct quotactl_ops *s_qcop; 989 struct quotactl_ops *s_qcop;
990 struct export_operations *s_export_op; 990 const struct export_operations *s_export_op;
991 unsigned long s_flags; 991 unsigned long s_flags;
992 unsigned long s_magic; 992 unsigned long s_magic;
993 struct dentry *s_root; 993 struct dentry *s_root;
diff --git a/include/linux/i8042.h b/include/linux/i8042.h
new file mode 100644
index 000000000000..7907a72403ee
--- /dev/null
+++ b/include/linux/i8042.h
@@ -0,0 +1,35 @@
1#ifndef _LINUX_I8042_H
2#define _LINUX_I8042_H
3
4/*
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 */
9
10
11/*
12 * Standard commands.
13 */
14
15#define I8042_CMD_CTL_RCTR 0x0120
16#define I8042_CMD_CTL_WCTR 0x1060
17#define I8042_CMD_CTL_TEST 0x01aa
18
19#define I8042_CMD_KBD_DISABLE 0x00ad
20#define I8042_CMD_KBD_ENABLE 0x00ae
21#define I8042_CMD_KBD_TEST 0x01ab
22#define I8042_CMD_KBD_LOOP 0x11d2
23
24#define I8042_CMD_AUX_DISABLE 0x00a7
25#define I8042_CMD_AUX_ENABLE 0x00a8
26#define I8042_CMD_AUX_TEST 0x01a9
27#define I8042_CMD_AUX_SEND 0x10d4
28#define I8042_CMD_AUX_LOOP 0x11d3
29
30#define I8042_CMD_MUX_PFX 0x0090
31#define I8042_CMD_MUX_SEND 0x1090
32
33int i8042_command(unsigned char *param, int command);
34
35#endif
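
A usage sketch for the newly exported helper; the self-test wrapper is an assumption, and only i8042_command() and the command word come from this header.

#include <linux/i8042.h>
#include <linux/kernel.h>

/* Illustrative only: run the controller self-test through the exported
 * i8042_command() helper (the i8042 driver must be present). */
static int example_i8042_selftest(void)
{
        unsigned char param;
        int error;

        error = i8042_command(&param, I8042_CMD_CTL_TEST);
        if (error)
                return error;

        printk(KERN_INFO "i8042 self-test returned 0x%02x\n", param);
        return 0;
}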
diff --git a/include/linux/ide.h b/include/linux/ide.h
index 2e4b8dd03cfe..4ed4777bba67 100644
--- a/include/linux/ide.h
+++ b/include/linux/ide.h
@@ -667,7 +667,7 @@ typedef struct hwif_s {
667 u8 straight8; /* Alan's straight 8 check */ 667 u8 straight8; /* Alan's straight 8 check */
668 u8 bus_state; /* power state of the IDE bus */ 668 u8 bus_state; /* power state of the IDE bus */
669 669
670 u16 host_flags; 670 u32 host_flags;
671 671
672 u8 pio_mask; 672 u8 pio_mask;
673 673
diff --git a/include/linux/linkage.h b/include/linux/linkage.h
index 6c9873f88287..ff203dd02919 100644
--- a/include/linux/linkage.h
+++ b/include/linux/linkage.h
@@ -34,6 +34,12 @@
34 name: 34 name:
35#endif 35#endif
36 36
37#ifndef WEAK
38#define WEAK(name) \
39 .weak name; \
40 name:
41#endif
42
37#define KPROBE_ENTRY(name) \ 43#define KPROBE_ENTRY(name) \
38 .pushsection .kprobes.text, "ax"; \ 44 .pushsection .kprobes.text, "ax"; \
39 ENTRY(name) 45 ENTRY(name)
diff --git a/include/linux/memory.h b/include/linux/memory.h
index 654ef5544878..33f0ff0cf634 100644
--- a/include/linux/memory.h
+++ b/include/linux/memory.h
@@ -41,18 +41,15 @@ struct memory_block {
41#define MEM_ONLINE (1<<0) /* exposed to userspace */ 41#define MEM_ONLINE (1<<0) /* exposed to userspace */
42#define MEM_GOING_OFFLINE (1<<1) /* exposed to userspace */ 42#define MEM_GOING_OFFLINE (1<<1) /* exposed to userspace */
43#define MEM_OFFLINE (1<<2) /* exposed to userspace */ 43#define MEM_OFFLINE (1<<2) /* exposed to userspace */
44#define MEM_GOING_ONLINE (1<<3)
45#define MEM_CANCEL_ONLINE (1<<4)
46#define MEM_CANCEL_OFFLINE (1<<5)
44 47
45/* 48struct memory_notify {
46 * All of these states are currently kernel-internal for notifying 49 unsigned long start_pfn;
47 * kernel components and architectures. 50 unsigned long nr_pages;
48 * 51 int status_change_nid;
49 * For MEM_MAPPING_INVALID, all notifier chains with priority >0 52};
50 * are called before pfn_to_page() becomes invalid. The priority=0
51 * entry is reserved for the function that actually makes
52 * pfn_to_page() stop working. Any notifiers that want to be called
53 * after that should have priority <0.
54 */
55#define MEM_MAPPING_INVALID (1<<3)
56 53
57struct notifier_block; 54struct notifier_block;
58struct mem_section; 55struct mem_section;
@@ -69,21 +66,31 @@ static inline int register_memory_notifier(struct notifier_block *nb)
69static inline void unregister_memory_notifier(struct notifier_block *nb) 66static inline void unregister_memory_notifier(struct notifier_block *nb)
70{ 67{
71} 68}
69static inline int memory_notify(unsigned long val, void *v)
70{
71 return 0;
72}
72#else 73#else
74extern int register_memory_notifier(struct notifier_block *nb);
75extern void unregister_memory_notifier(struct notifier_block *nb);
73extern int register_new_memory(struct mem_section *); 76extern int register_new_memory(struct mem_section *);
74extern int unregister_memory_section(struct mem_section *); 77extern int unregister_memory_section(struct mem_section *);
75extern int memory_dev_init(void); 78extern int memory_dev_init(void);
76extern int remove_memory_block(unsigned long, struct mem_section *, int); 79extern int remove_memory_block(unsigned long, struct mem_section *, int);
77 80extern int memory_notify(unsigned long val, void *v);
78#define CONFIG_MEM_BLOCK_SIZE (PAGES_PER_SECTION<<PAGE_SHIFT) 81#define CONFIG_MEM_BLOCK_SIZE (PAGES_PER_SECTION<<PAGE_SHIFT)
79 82
80 83
81#endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */ 84#endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */
82 85
86#ifdef CONFIG_MEMORY_HOTPLUG
83#define hotplug_memory_notifier(fn, pri) { \ 87#define hotplug_memory_notifier(fn, pri) { \
84 static struct notifier_block fn##_mem_nb = \ 88 static struct notifier_block fn##_mem_nb = \
85 { .notifier_call = fn, .priority = pri }; \ 89 { .notifier_call = fn, .priority = pri }; \
86 register_memory_notifier(&fn##_mem_nb); \ 90 register_memory_notifier(&fn##_mem_nb); \
87} 91}
92#else
93#define hotplug_memory_notifier(fn, pri) do { } while (0)
94#endif
88 95
89#endif /* _LINUX_MEMORY_H_ */ 96#endif /* _LINUX_MEMORY_H_ */
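
A sketch of a consumer of the new notifier events and the struct memory_notify payload; the callback body and the init hook are illustrative.

#include <linux/memory.h>
#include <linux/notifier.h>
#include <linux/kernel.h>
#include <linux/init.h>

/* Illustrative callback: react to memory sections going online/offline.
 * The void *v argument carries the struct memory_notify shown above. */
static int example_mem_callback(struct notifier_block *nb,
                                unsigned long action, void *v)
{
        struct memory_notify *arg = v;

        switch (action) {
        case MEM_GOING_ONLINE:
                printk(KERN_INFO "onlining %lu pages from pfn %lu (nid %d)\n",
                       arg->nr_pages, arg->start_pfn,
                       arg->status_change_nid);
                break;
        case MEM_CANCEL_ONLINE:
        case MEM_OFFLINE:
                /* undo or release per-node state here */
                break;
        }
        return NOTIFY_OK;
}

static int __init example_init(void)
{
        /* expands to a static notifier_block + register_memory_notifier() */
        hotplug_memory_notifier(example_mem_callback, 0);
        return 0;
}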
diff --git a/include/linux/net.h b/include/linux/net.h
index c136abce7ef6..dd79cdb8c4cf 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -313,6 +313,10 @@ static const struct proto_ops name##_ops = { \
313#define MODULE_ALIAS_NET_PF_PROTO(pf, proto) \ 313#define MODULE_ALIAS_NET_PF_PROTO(pf, proto) \
314 MODULE_ALIAS("net-pf-" __stringify(pf) "-proto-" __stringify(proto)) 314 MODULE_ALIAS("net-pf-" __stringify(pf) "-proto-" __stringify(proto))
315 315
316#define MODULE_ALIAS_NET_PF_PROTO_TYPE(pf, proto, type) \
317 MODULE_ALIAS("net-pf-" __stringify(pf) "-proto-" __stringify(proto) \
318 "-type-" __stringify(type))
319
316#ifdef CONFIG_SYSCTL 320#ifdef CONFIG_SYSCTL
317#include <linux/sysctl.h> 321#include <linux/sysctl.h>
318extern ctl_table net_table[]; 322extern ctl_table net_table[];
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 6f85db3535e2..4a3f54e358e5 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -996,7 +996,7 @@ static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
996 * 996 *
997 * Check individual transmit queue of a device with multiple transmit queues. 997 * Check individual transmit queue of a device with multiple transmit queues.
998 */ 998 */
999static inline int netif_subqueue_stopped(const struct net_device *dev, 999static inline int __netif_subqueue_stopped(const struct net_device *dev,
1000 u16 queue_index) 1000 u16 queue_index)
1001{ 1001{
1002#ifdef CONFIG_NETDEVICES_MULTIQUEUE 1002#ifdef CONFIG_NETDEVICES_MULTIQUEUE
@@ -1007,6 +1007,11 @@ static inline int netif_subqueue_stopped(const struct net_device *dev,
1007#endif 1007#endif
1008} 1008}
1009 1009
1010static inline int netif_subqueue_stopped(const struct net_device *dev,
1011 struct sk_buff *skb)
1012{
1013 return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb));
1014}
1010 1015
1011/** 1016/**
1012 * netif_wake_subqueue - allow sending packets on subqueue 1017 * netif_wake_subqueue - allow sending packets on subqueue
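
With the rename above, drivers pass the skb rather than a raw queue index; a sketch of the call site in a multiqueue driver's transmit routine (the driver function itself is assumed).

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Illustrative hard_start_xmit fragment for a multiqueue driver. */
static int example_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        /* the skb carries its own queue mapping; no manual index juggling */
        if (netif_subqueue_stopped(dev, skb))
                return NETDEV_TX_BUSY;

        /* ... queue the skb to hardware ring skb_get_queue_mapping(skb) ... */
        return NETDEV_TX_OK;
}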
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 768b93359f90..5d2281f661f7 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -141,6 +141,7 @@ struct pci_dev {
141 unsigned int class; /* 3 bytes: (base,sub,prog-if) */ 141 unsigned int class; /* 3 bytes: (base,sub,prog-if) */
142 u8 revision; /* PCI revision, low byte of class word */ 142 u8 revision; /* PCI revision, low byte of class word */
143 u8 hdr_type; /* PCI header type (`multi' flag masked out) */ 143 u8 hdr_type; /* PCI header type (`multi' flag masked out) */
144 u8 pcie_type; /* PCI-E device/port type */
144 u8 rom_base_reg; /* which config register controls the ROM */ 145 u8 rom_base_reg; /* which config register controls the ROM */
145 u8 pin; /* which interrupt pin this device uses */ 146 u8 pin; /* which interrupt pin this device uses */
146 147
@@ -183,6 +184,7 @@ struct pci_dev {
183 unsigned int msi_enabled:1; 184 unsigned int msi_enabled:1;
184 unsigned int msix_enabled:1; 185 unsigned int msix_enabled:1;
185 unsigned int is_managed:1; 186 unsigned int is_managed:1;
187 unsigned int is_pcie:1;
186 atomic_t enable_cnt; /* pci_enable_device has been called */ 188 atomic_t enable_cnt; /* pci_enable_device has been called */
187 189
188 u32 saved_config_space[16]; /* config space saved at suspend time */ 190 u32 saved_config_space[16]; /* config space saved at suspend time */
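
A sketch of how a driver or core code might consult the new fields; PCI_EXP_TYPE_ROOT_PORT comes from linux/pci_regs.h and the helper itself is illustrative.

#include <linux/pci.h>
#include <linux/pci_regs.h>

/* Illustrative: true for PCI Express root ports, using the new
 * is_pcie/pcie_type fields cached at enumeration time. */
static int example_is_pcie_root_port(const struct pci_dev *pdev)
{
        return pdev->is_pcie && pdev->pcie_type == PCI_EXP_TYPE_ROOT_PORT;
}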
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index df948b44edad..4e10a074ca56 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -1943,6 +1943,7 @@
1943#define PCI_DEVICE_ID_TIGON3_5720 0x1658 1943#define PCI_DEVICE_ID_TIGON3_5720 0x1658
1944#define PCI_DEVICE_ID_TIGON3_5721 0x1659 1944#define PCI_DEVICE_ID_TIGON3_5721 0x1659
1945#define PCI_DEVICE_ID_TIGON3_5722 0x165a 1945#define PCI_DEVICE_ID_TIGON3_5722 0x165a
1946#define PCI_DEVICE_ID_TIGON3_5723 0x165b
1946#define PCI_DEVICE_ID_TIGON3_5705M 0x165d 1947#define PCI_DEVICE_ID_TIGON3_5705M 0x165d
1947#define PCI_DEVICE_ID_TIGON3_5705M_2 0x165e 1948#define PCI_DEVICE_ID_TIGON3_5705M_2 0x165e
1948#define PCI_DEVICE_ID_TIGON3_5714 0x1668 1949#define PCI_DEVICE_ID_TIGON3_5714 0x1668
diff --git a/include/linux/reiserfs_fs.h b/include/linux/reiserfs_fs.h
index 72bfccd3da22..422eab4958a6 100644
--- a/include/linux/reiserfs_fs.h
+++ b/include/linux/reiserfs_fs.h
@@ -28,6 +28,8 @@
28#include <linux/reiserfs_fs_sb.h> 28#include <linux/reiserfs_fs_sb.h>
29#endif 29#endif
30 30
31struct fid;
32
31/* 33/*
32 * include/linux/reiser_fs.h 34 * include/linux/reiser_fs.h
33 * 35 *
@@ -1877,12 +1879,10 @@ void reiserfs_delete_inode(struct inode *inode);
1877int reiserfs_write_inode(struct inode *inode, int); 1879int reiserfs_write_inode(struct inode *inode, int);
1878int reiserfs_get_block(struct inode *inode, sector_t block, 1880int reiserfs_get_block(struct inode *inode, sector_t block,
1879 struct buffer_head *bh_result, int create); 1881 struct buffer_head *bh_result, int create);
1880struct dentry *reiserfs_get_dentry(struct super_block *, void *); 1882struct dentry *reiserfs_fh_to_dentry(struct super_block *sb, struct fid *fid,
1881struct dentry *reiserfs_decode_fh(struct super_block *sb, __u32 * data, 1883 int fh_len, int fh_type);
1882 int len, int fhtype, 1884struct dentry *reiserfs_fh_to_parent(struct super_block *sb, struct fid *fid,
1883 int (*acceptable) (void *contect, 1885 int fh_len, int fh_type);
1884 struct dentry * de),
1885 void *context);
1886int reiserfs_encode_fh(struct dentry *dentry, __u32 * data, int *lenp, 1886int reiserfs_encode_fh(struct dentry *dentry, __u32 * data, int *lenp,
1887 int connectable); 1887 int connectable);
1888 1888
diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
index 2dc7464cce52..42daf5e15265 100644
--- a/include/linux/scatterlist.h
+++ b/include/linux/scatterlist.h
@@ -4,47 +4,95 @@
4#include <asm/scatterlist.h> 4#include <asm/scatterlist.h>
5#include <linux/mm.h> 5#include <linux/mm.h>
6#include <linux/string.h> 6#include <linux/string.h>
7#include <asm/io.h>
7 8
9/*
10 * Notes on SG table design.
11 *
12 * Architectures must provide an unsigned long page_link field in the
13 * scatterlist struct. We use that to place the page pointer AND encode
14 * information about the sg table as well. The two lower bits are reserved
15 * for this information.
16 *
17 * If bit 0 is set, then the page_link contains a pointer to the next sg
18 * table list. Otherwise the next entry is at sg + 1.
19 *
20 * If bit 1 is set, then this sg entry is the last element in a list.
21 *
22 * See sg_next().
23 *
24 */
25
26#define SG_MAGIC 0x87654321
27
28/**
29 * sg_set_page - Set sg entry to point at given page
30 * @sg: SG entry
31 * @page: The page
32 *
33 * Description:
34 * Use this function to set an sg entry pointing at a page, never assign
35 * the page directly. We encode sg table information in the lower bits
36 * of the page pointer. See sg_page() for looking up the page belonging
37 * to an sg entry.
38 *
39 **/
40static inline void sg_set_page(struct scatterlist *sg, struct page *page)
41{
42 unsigned long page_link = sg->page_link & 0x3;
43
44#ifdef CONFIG_DEBUG_SG
45 BUG_ON(sg->sg_magic != SG_MAGIC);
46#endif
47 sg->page_link = page_link | (unsigned long) page;
48}
49
50#define sg_page(sg) ((struct page *) ((sg)->page_link & ~0x3))
51
52/**
53 * sg_set_buf - Set sg entry to point at given data
54 * @sg: SG entry
55 * @buf: Data
56 * @buflen: Data length
57 *
58 **/
8static inline void sg_set_buf(struct scatterlist *sg, const void *buf, 59static inline void sg_set_buf(struct scatterlist *sg, const void *buf,
9 unsigned int buflen) 60 unsigned int buflen)
10{ 61{
11 sg->page = virt_to_page(buf); 62 sg_set_page(sg, virt_to_page(buf));
12 sg->offset = offset_in_page(buf); 63 sg->offset = offset_in_page(buf);
13 sg->length = buflen; 64 sg->length = buflen;
14} 65}
15 66
16static inline void sg_init_one(struct scatterlist *sg, const void *buf,
17 unsigned int buflen)
18{
19 memset(sg, 0, sizeof(*sg));
20 sg_set_buf(sg, buf, buflen);
21}
22
23/* 67/*
24 * We overload the LSB of the page pointer to indicate whether it's 68 * We overload the LSB of the page pointer to indicate whether it's
25 * a valid sg entry, or whether it points to the start of a new scatterlist. 69 * a valid sg entry, or whether it points to the start of a new scatterlist.
26 * Those low bits are there for everyone! (thanks mason :-) 70 * Those low bits are there for everyone! (thanks mason :-)
27 */ 71 */
28#define sg_is_chain(sg) ((unsigned long) (sg)->page & 0x01) 72#define sg_is_chain(sg) ((sg)->page_link & 0x01)
73#define sg_is_last(sg) ((sg)->page_link & 0x02)
29#define sg_chain_ptr(sg) \ 74#define sg_chain_ptr(sg) \
30 ((struct scatterlist *) ((unsigned long) (sg)->page & ~0x01)) 75 ((struct scatterlist *) ((sg)->page_link & ~0x03))
31 76
32/** 77/**
33 * sg_next - return the next scatterlist entry in a list 78 * sg_next - return the next scatterlist entry in a list
34 * @sg: The current sg entry 79 * @sg: The current sg entry
35 * 80 *
36 * Usually the next entry will be @sg@ + 1, but if this sg element is part 81 * Description:
37 * of a chained scatterlist, it could jump to the start of a new 82 * Usually the next entry will be @sg@ + 1, but if this sg element is part
38 * scatterlist array. 83 * of a chained scatterlist, it could jump to the start of a new
39 * 84 * scatterlist array.
40 * Note that the caller must ensure that there are further entries after
41 * the current entry, this function will NOT return NULL for an end-of-list.
42 * 85 *
43 */ 86 **/
44static inline struct scatterlist *sg_next(struct scatterlist *sg) 87static inline struct scatterlist *sg_next(struct scatterlist *sg)
45{ 88{
46 sg++; 89#ifdef CONFIG_DEBUG_SG
90 BUG_ON(sg->sg_magic != SG_MAGIC);
91#endif
92 if (sg_is_last(sg))
93 return NULL;
47 94
95 sg++;
48 if (unlikely(sg_is_chain(sg))) 96 if (unlikely(sg_is_chain(sg)))
49 sg = sg_chain_ptr(sg); 97 sg = sg_chain_ptr(sg);
50 98
@@ -62,14 +110,15 @@ static inline struct scatterlist *sg_next(struct scatterlist *sg)
62 * @sgl: First entry in the scatterlist 110 * @sgl: First entry in the scatterlist
63 * @nents: Number of entries in the scatterlist 111 * @nents: Number of entries in the scatterlist
64 * 112 *
65 * Should only be used casually, it (currently) scan the entire list 113 * Description:
 66 * to get the last entry. 114 * Should only be used casually, it (currently) scans the entire list
115 * to get the last entry.
67 * 116 *
68 * Note that the @sgl@ pointer passed in need not be the first one, 117 * Note that the @sgl@ pointer passed in need not be the first one,
69 * the important bit is that @nents@ denotes the number of entries that 118 * the important bit is that @nents@ denotes the number of entries that
70 * exist from @sgl@. 119 * exist from @sgl@.
71 * 120 *
72 */ 121 **/
73static inline struct scatterlist *sg_last(struct scatterlist *sgl, 122static inline struct scatterlist *sg_last(struct scatterlist *sgl,
74 unsigned int nents) 123 unsigned int nents)
75{ 124{
@@ -83,6 +132,10 @@ static inline struct scatterlist *sg_last(struct scatterlist *sgl,
83 ret = sg; 132 ret = sg;
84 133
85#endif 134#endif
135#ifdef CONFIG_DEBUG_SG
136 BUG_ON(sgl[0].sg_magic != SG_MAGIC);
137 BUG_ON(!sg_is_last(ret));
138#endif
86 return ret; 139 return ret;
87} 140}
88 141
@@ -92,16 +145,111 @@ static inline struct scatterlist *sg_last(struct scatterlist *sgl,
92 * @prv_nents: Number of entries in prv 145 * @prv_nents: Number of entries in prv
93 * @sgl: Second scatterlist 146 * @sgl: Second scatterlist
94 * 147 *
95 * Links @prv@ and @sgl@ together, to form a longer scatterlist. 148 * Description:
149 * Links @prv@ and @sgl@ together, to form a longer scatterlist.
96 * 150 *
97 */ 151 **/
98static inline void sg_chain(struct scatterlist *prv, unsigned int prv_nents, 152static inline void sg_chain(struct scatterlist *prv, unsigned int prv_nents,
99 struct scatterlist *sgl) 153 struct scatterlist *sgl)
100{ 154{
101#ifndef ARCH_HAS_SG_CHAIN 155#ifndef ARCH_HAS_SG_CHAIN
102 BUG(); 156 BUG();
103#endif 157#endif
104 prv[prv_nents - 1].page = (struct page *) ((unsigned long) sgl | 0x01); 158 prv[prv_nents - 1].page_link = (unsigned long) sgl | 0x01;
159}
160
161/**
162 * sg_mark_end - Mark the end of the scatterlist
163 * @sgl: Scatterlist
164 * @nents: Number of entries in sgl
165 *
166 * Description:
167 * Marks the last entry as the termination point for sg_next()
168 *
169 **/
170static inline void sg_mark_end(struct scatterlist *sgl, unsigned int nents)
171{
172 sgl[nents - 1].page_link = 0x02;
173}
174
175static inline void __sg_mark_end(struct scatterlist *sg)
176{
177 sg->page_link |= 0x02;
178}
179
180/**
181 * sg_init_one - Initialize a single entry sg list
182 * @sg: SG entry
183 * @buf: Virtual address for IO
184 * @buflen: IO length
185 *
186 * Notes:
187 * This should not be used on a single entry that is part of a larger
188 * table. Use sg_init_table() for that.
189 *
190 **/
191static inline void sg_init_one(struct scatterlist *sg, const void *buf,
192 unsigned int buflen)
193{
194 memset(sg, 0, sizeof(*sg));
195#ifdef CONFIG_DEBUG_SG
196 sg->sg_magic = SG_MAGIC;
197#endif
198 sg_mark_end(sg, 1);
199 sg_set_buf(sg, buf, buflen);
200}
201
202/**
203 * sg_init_table - Initialize SG table
204 * @sgl: The SG table
205 * @nents: Number of entries in table
206 *
207 * Notes:
208 * If this is part of a chained sg table, sg_mark_end() should be
209 * used only on the last table part.
210 *
211 **/
212static inline void sg_init_table(struct scatterlist *sgl, unsigned int nents)
213{
214 memset(sgl, 0, sizeof(*sgl) * nents);
215 sg_mark_end(sgl, nents);
216#ifdef CONFIG_DEBUG_SG
217 {
218 int i;
219 for (i = 0; i < nents; i++)
220 sgl[i].sg_magic = SG_MAGIC;
221 }
222#endif
223}
224
225/**
226 * sg_phys - Return physical address of an sg entry
227 * @sg: SG entry
228 *
229 * Description:
230 * This calls page_to_phys() on the page in this sg entry, and adds the
231 * sg offset. The caller must know that it is legal to call page_to_phys()
232 * on the sg page.
233 *
234 **/
235static inline unsigned long sg_phys(struct scatterlist *sg)
236{
237 return page_to_phys(sg_page(sg)) + sg->offset;
238}
239
240/**
241 * sg_virt - Return virtual address of an sg entry
242 * @sg: SG entry
243 *
244 * Description:
245 * This calls page_address() on the page in this sg entry, and adds the
246 * sg offset. The caller must know that the sg page has a valid virtual
247 * mapping.
248 *
249 **/
250static inline void *sg_virt(struct scatterlist *sg)
251{
252 return page_address(sg_page(sg)) + sg->offset;
105} 253}
106 254
107#endif /* _LINUX_SCATTERLIST_H */ 255#endif /* _LINUX_SCATTERLIST_H */
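
The chained scatterlist helpers above are easiest to see together in a short walking loop. The sketch below is illustrative only (the buffer array and the fill step are assumptions, not part of this patch); it relies on sg_init_table() zeroing the entries, setting SG_MAGIC under CONFIG_DEBUG_SG and marking the last entry, so that sg_next() returns NULL once the end mark is reached.

#include <linux/scatterlist.h>

/* Sketch: build a small table and walk it with sg_next(). */
static unsigned int ex_fill_and_measure(struct scatterlist *sgl,
                                        void **bufs, unsigned int *lens,
                                        unsigned int nents)
{
        struct scatterlist *sg;
        unsigned int i, total = 0;

        sg_init_table(sgl, nents);              /* zero, set magic, mark end */
        for (i = 0; i < nents; i++)
                sg_set_buf(&sgl[i], bufs[i], lens[i]);

        for (sg = sgl; sg; sg = sg_next(sg))    /* NULL after the last entry */
                total += sg->length;

        return total;
}
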
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index f93f22b3d2ff..fd4e12f24270 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -41,8 +41,7 @@
41#define SKB_DATA_ALIGN(X) (((X) + (SMP_CACHE_BYTES - 1)) & \ 41#define SKB_DATA_ALIGN(X) (((X) + (SMP_CACHE_BYTES - 1)) & \
42 ~(SMP_CACHE_BYTES - 1)) 42 ~(SMP_CACHE_BYTES - 1))
43#define SKB_WITH_OVERHEAD(X) \ 43#define SKB_WITH_OVERHEAD(X) \
44 (((X) - sizeof(struct skb_shared_info)) & \ 44 ((X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
45 ~(SMP_CACHE_BYTES - 1))
46#define SKB_MAX_ORDER(X, ORDER) \ 45#define SKB_MAX_ORDER(X, ORDER) \
47 SKB_WITH_OVERHEAD((PAGE_SIZE << (ORDER)) - (X)) 46 SKB_WITH_OVERHEAD((PAGE_SIZE << (ORDER)) - (X))
48#define SKB_MAX_HEAD(X) (SKB_MAX_ORDER((X), 0)) 47#define SKB_MAX_HEAD(X) (SKB_MAX_ORDER((X), 0))
@@ -301,8 +300,9 @@ struct sk_buff {
301#endif 300#endif
302 301
303 int iif; 302 int iif;
303#ifdef CONFIG_NETDEVICES_MULTIQUEUE
304 __u16 queue_mapping; 304 __u16 queue_mapping;
305 305#endif
306#ifdef CONFIG_NET_SCHED 306#ifdef CONFIG_NET_SCHED
307 __u16 tc_index; /* traffic control index */ 307 __u16 tc_index; /* traffic control index */
308#ifdef CONFIG_NET_CLS_ACT 308#ifdef CONFIG_NET_CLS_ACT
@@ -1770,6 +1770,15 @@ static inline void skb_set_queue_mapping(struct sk_buff *skb, u16 queue_mapping)
1770#endif 1770#endif
1771} 1771}
1772 1772
1773static inline u16 skb_get_queue_mapping(struct sk_buff *skb)
1774{
1775#ifdef CONFIG_NETDEVICES_MULTIQUEUE
1776 return skb->queue_mapping;
1777#else
1778 return 0;
1779#endif
1780}
1781
1773static inline void skb_copy_queue_mapping(struct sk_buff *to, const struct sk_buff *from) 1782static inline void skb_copy_queue_mapping(struct sk_buff *to, const struct sk_buff *from)
1774{ 1783{
1775#ifdef CONFIG_NETDEVICES_MULTIQUEUE 1784#ifdef CONFIG_NETDEVICES_MULTIQUEUE
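
The SKB_WITH_OVERHEAD() fix reserves the cache-aligned size of struct skb_shared_info instead of masking the remainder down to a cache-line multiple; the queue_mapping field likewise becomes conditional on CONFIG_NETDEVICES_MULTIQUEUE, with skb_get_queue_mapping() returning 0 when the option is off. A worked example of the macro with hypothetical numbers (64-byte cache lines and a 168-byte skb_shared_info; the real sizes depend on the configuration):

/* Hypothetical sizes, for illustration only. */
#define EX_CACHE_BYTES          64
#define EX_SHINFO_SIZE          168     /* stand-in for sizeof(struct skb_shared_info) */
#define EX_DATA_ALIGN(x)        (((x) + (EX_CACHE_BYTES - 1)) & ~(EX_CACHE_BYTES - 1))
#define EX_WITH_OVERHEAD(x)     ((x) - EX_DATA_ALIGN(EX_SHINFO_SIZE))

/*
 * EX_DATA_ALIGN(168) == 192, so EX_WITH_OVERHEAD(2000) == 1808.
 * The old definition computed (2000 - 168) & ~63 == 1792, i.e. it also
 * rounded the usable data area down to a cache-line multiple instead of
 * only reserving the aligned shared-info overhead.
 */
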
diff --git a/include/linux/socket.h b/include/linux/socket.h
index f852e1afd65a..c22ef1c1afb8 100644
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -291,6 +291,7 @@ struct ucred {
291#define SOL_TIPC 271 291#define SOL_TIPC 271
292#define SOL_RXRPC 272 292#define SOL_RXRPC 272
293#define SOL_PPPOL2TP 273 293#define SOL_PPPOL2TP 273
294#define SOL_BLUETOOTH 274
294 295
295/* IPX options */ 296/* IPX options */
296#define IPX_TYPE 1 297#define IPX_TYPE 1
diff --git a/include/linux/videodev.h b/include/linux/videodev.h
index 8dba97a291f6..52e3d5fd5be4 100644
--- a/include/linux/videodev.h
+++ b/include/linux/videodev.h
@@ -294,48 +294,6 @@ struct video_code
294#define VID_PLAY_RESET 13 294#define VID_PLAY_RESET 13
295#define VID_PLAY_END_MARK 14 295#define VID_PLAY_END_MARK 14
296 296
297
298
299#define VID_HARDWARE_BT848 1
300#define VID_HARDWARE_QCAM_BW 2
301#define VID_HARDWARE_PMS 3
302#define VID_HARDWARE_QCAM_C 4
303#define VID_HARDWARE_PSEUDO 5
304#define VID_HARDWARE_SAA5249 6
305#define VID_HARDWARE_AZTECH 7
306#define VID_HARDWARE_SF16MI 8
307#define VID_HARDWARE_RTRACK 9
308#define VID_HARDWARE_ZOLTRIX 10
309#define VID_HARDWARE_SAA7146 11
310#define VID_HARDWARE_VIDEUM 12 /* Reserved for Winnov videum */
311#define VID_HARDWARE_RTRACK2 13
312#define VID_HARDWARE_PERMEDIA2 14 /* Reserved for Permedia2 */
313#define VID_HARDWARE_RIVA128 15 /* Reserved for RIVA 128 */
314#define VID_HARDWARE_PLANB 16 /* PowerMac motherboard video-in */
315#define VID_HARDWARE_BROADWAY 17 /* Broadway project */
316#define VID_HARDWARE_GEMTEK 18
317#define VID_HARDWARE_TYPHOON 19
318#define VID_HARDWARE_VINO 20 /* SGI Indy Vino */
319#define VID_HARDWARE_CADET 21 /* Cadet radio */
320#define VID_HARDWARE_TRUST 22 /* Trust FM Radio */
321#define VID_HARDWARE_TERRATEC 23 /* TerraTec ActiveRadio */
322#define VID_HARDWARE_CPIA 24
323#define VID_HARDWARE_ZR36120 25 /* Zoran ZR36120/ZR36125 */
324#define VID_HARDWARE_ZR36067 26 /* Zoran ZR36067/36060 */
325#define VID_HARDWARE_OV511 27
326#define VID_HARDWARE_ZR356700 28 /* Zoran 36700 series */
327#define VID_HARDWARE_W9966 29
328#define VID_HARDWARE_SE401 30 /* SE401 USB webcams */
329#define VID_HARDWARE_PWC 31 /* Philips webcams */
330#define VID_HARDWARE_MEYE 32 /* Sony Vaio MotionEye cameras */
331#define VID_HARDWARE_CPIA2 33
332#define VID_HARDWARE_VICAM 34
333#define VID_HARDWARE_SF16FMR2 35
334#define VID_HARDWARE_W9968CF 36
335#define VID_HARDWARE_SAA7114H 37
336#define VID_HARDWARE_SN9C102 38
337#define VID_HARDWARE_ARV 39
338
339#endif /* CONFIG_VIDEO_V4L1_COMPAT */ 297#endif /* CONFIG_VIDEO_V4L1_COMPAT */
340 298
341#endif /* __LINUX_VIDEODEV_H */ 299#endif /* __LINUX_VIDEODEV_H */
diff --git a/include/linux/videodev2.h b/include/linux/videodev2.h
index 1f503e94eff1..439474f24e34 100644
--- a/include/linux/videodev2.h
+++ b/include/linux/videodev2.h
@@ -441,94 +441,6 @@ struct v4l2_timecode
441#define V4L2_TC_USERBITS_8BITCHARS 0x0008 441#define V4L2_TC_USERBITS_8BITCHARS 0x0008
442/* The above is based on SMPTE timecodes */ 442/* The above is based on SMPTE timecodes */
443 443
444#ifdef __KERNEL__
445/*
446 * M P E G C O M P R E S S I O N P A R A M E T E R S
447 *
448 * ### WARNING: This experimental MPEG compression API is obsolete.
449 * ### It is replaced by the MPEG controls API.
450 * ### This old API will disappear in the near future!
451 *
452 */
453enum v4l2_bitrate_mode {
454 V4L2_BITRATE_NONE = 0, /* not specified */
455 V4L2_BITRATE_CBR, /* constant bitrate */
456 V4L2_BITRATE_VBR, /* variable bitrate */
457};
458struct v4l2_bitrate {
459 /* rates are specified in kbit/sec */
460 enum v4l2_bitrate_mode mode;
461 __u32 min;
462 __u32 target; /* use this one for CBR */
463 __u32 max;
464};
465
466enum v4l2_mpeg_streamtype {
467 V4L2_MPEG_SS_1, /* MPEG-1 system stream */
468 V4L2_MPEG_PS_2, /* MPEG-2 program stream */
469 V4L2_MPEG_TS_2, /* MPEG-2 transport stream */
470 V4L2_MPEG_PS_DVD, /* MPEG-2 program stream with DVD header fixups */
471};
472enum v4l2_mpeg_audiotype {
473 V4L2_MPEG_AU_2_I, /* MPEG-2 layer 1 */
474 V4L2_MPEG_AU_2_II, /* MPEG-2 layer 2 */
475 V4L2_MPEG_AU_2_III, /* MPEG-2 layer 3 */
476 V4L2_MPEG_AC3, /* AC3 */
477 V4L2_MPEG_LPCM, /* LPCM */
478};
479enum v4l2_mpeg_videotype {
480 V4L2_MPEG_VI_1, /* MPEG-1 */
481 V4L2_MPEG_VI_2, /* MPEG-2 */
482};
483enum v4l2_mpeg_aspectratio {
484 V4L2_MPEG_ASPECT_SQUARE = 1, /* square pixel */
485 V4L2_MPEG_ASPECT_4_3 = 2, /* 4 : 3 */
486 V4L2_MPEG_ASPECT_16_9 = 3, /* 16 : 9 */
487 V4L2_MPEG_ASPECT_1_221 = 4, /* 1 : 2,21 */
488};
489
490struct v4l2_mpeg_compression {
491 /* general */
492 enum v4l2_mpeg_streamtype st_type;
493 struct v4l2_bitrate st_bitrate;
494
495 /* transport streams */
496 __u16 ts_pid_pmt;
497 __u16 ts_pid_audio;
498 __u16 ts_pid_video;
499 __u16 ts_pid_pcr;
500
501 /* program stream */
502 __u16 ps_size;
503 __u16 reserved_1; /* align */
504
505 /* audio */
506 enum v4l2_mpeg_audiotype au_type;
507 struct v4l2_bitrate au_bitrate;
508 __u32 au_sample_rate;
509 __u8 au_pesid;
510 __u8 reserved_2[3]; /* align */
511
512 /* video */
513 enum v4l2_mpeg_videotype vi_type;
514 enum v4l2_mpeg_aspectratio vi_aspect_ratio;
515 struct v4l2_bitrate vi_bitrate;
516 __u32 vi_frame_rate;
517 __u16 vi_frames_per_gop;
518 __u16 vi_bframes_count;
519 __u8 vi_pesid;
520 __u8 reserved_3[3]; /* align */
521
522 /* misc flags */
523 __u32 closed_gops:1;
524 __u32 pulldown:1;
525 __u32 reserved_4:30; /* align */
526
527 /* I don't expect the above being perfect yet ;) */
528 __u32 reserved_5[8];
529};
530#endif
531
532struct v4l2_jpegcompression 444struct v4l2_jpegcompression
533{ 445{
534 int quality; 446 int quality;
@@ -1420,10 +1332,6 @@ struct v4l2_chip_ident {
1420#define VIDIOC_ENUM_FMT _IOWR ('V', 2, struct v4l2_fmtdesc) 1332#define VIDIOC_ENUM_FMT _IOWR ('V', 2, struct v4l2_fmtdesc)
1421#define VIDIOC_G_FMT _IOWR ('V', 4, struct v4l2_format) 1333#define VIDIOC_G_FMT _IOWR ('V', 4, struct v4l2_format)
1422#define VIDIOC_S_FMT _IOWR ('V', 5, struct v4l2_format) 1334#define VIDIOC_S_FMT _IOWR ('V', 5, struct v4l2_format)
1423#ifdef __KERNEL__
1424#define VIDIOC_G_MPEGCOMP _IOR ('V', 6, struct v4l2_mpeg_compression)
1425#define VIDIOC_S_MPEGCOMP _IOW ('V', 7, struct v4l2_mpeg_compression)
1426#endif
1427#define VIDIOC_REQBUFS _IOWR ('V', 8, struct v4l2_requestbuffers) 1335#define VIDIOC_REQBUFS _IOWR ('V', 8, struct v4l2_requestbuffers)
1428#define VIDIOC_QUERYBUF _IOWR ('V', 9, struct v4l2_buffer) 1336#define VIDIOC_QUERYBUF _IOWR ('V', 9, struct v4l2_buffer)
1429#define VIDIOC_G_FBUF _IOR ('V', 10, struct v4l2_framebuffer) 1337#define VIDIOC_G_FBUF _IOR ('V', 10, struct v4l2_framebuffer)
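
The removed VIDIOC_G_MPEGCOMP/VIDIOC_S_MPEGCOMP ioctls and struct v4l2_mpeg_compression were already marked obsolete in favour of the MPEG controls API. A hedged userspace sketch of the replacement path, assuming the device exposes the extended-control interface and the V4L2_CID_MPEG_VIDEO_BITRATE control (support varies per driver):

#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

/* Sketch: set the MPEG video bitrate through the extended controls API. */
static int ex_set_mpeg_bitrate(int fd, int bitrate_bps)
{
        struct v4l2_ext_control ctrl;
        struct v4l2_ext_controls ctrls;

        memset(&ctrl, 0, sizeof(ctrl));
        memset(&ctrls, 0, sizeof(ctrls));

        ctrl.id = V4L2_CID_MPEG_VIDEO_BITRATE;
        ctrl.value = bitrate_bps;

        ctrls.ctrl_class = V4L2_CTRL_CLASS_MPEG;
        ctrls.count = 1;
        ctrls.controls = &ctrl;

        return ioctl(fd, VIDIOC_S_EXT_CTRLS, &ctrls);
}
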
diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
index e75d5e6c4cea..c544c6f90893 100644
--- a/include/media/v4l2-dev.h
+++ b/include/media/v4l2-dev.h
@@ -94,7 +94,6 @@ struct video_device
94 char name[32]; 94 char name[32];
95 int type; /* v4l1 */ 95 int type; /* v4l1 */
96 int type2; /* v4l2 */ 96 int type2; /* v4l2 */
97 int hardware;
98 int minor; 97 int minor;
99 98
100 int debug; /* Activates debug level*/ 99 int debug; /* Activates debug level*/
@@ -272,10 +271,6 @@ struct video_device
272 int (*vidioc_s_crop) (struct file *file, void *fh, 271 int (*vidioc_s_crop) (struct file *file, void *fh,
273 struct v4l2_crop *a); 272 struct v4l2_crop *a);
274 /* Compression ioctls */ 273 /* Compression ioctls */
275 int (*vidioc_g_mpegcomp) (struct file *file, void *fh,
276 struct v4l2_mpeg_compression *a);
277 int (*vidioc_s_mpegcomp) (struct file *file, void *fh,
278 struct v4l2_mpeg_compression *a);
279 int (*vidioc_g_jpegcomp) (struct file *file, void *fh, 274 int (*vidioc_g_jpegcomp) (struct file *file, void *fh,
280 struct v4l2_jpegcompression *a); 275 struct v4l2_jpegcompression *a);
281 int (*vidioc_s_jpegcomp) (struct file *file, void *fh, 276 int (*vidioc_s_jpegcomp) (struct file *file, void *fh,
diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
index ebfb96b41106..a8a9eb6af966 100644
--- a/include/net/bluetooth/hci.h
+++ b/include/net/bluetooth/hci.h
@@ -200,119 +200,18 @@ enum {
200#define HCI_LM_SECURE 0x0020 200#define HCI_LM_SECURE 0x0020
201 201
202/* ----- HCI Commands ---- */ 202/* ----- HCI Commands ---- */
203/* OGF & OCF values */ 203#define HCI_OP_INQUIRY 0x0401
204 204struct hci_cp_inquiry {
205/* Informational Parameters */ 205 __u8 lap[3];
206#define OGF_INFO_PARAM 0x04 206 __u8 length;
207 207 __u8 num_rsp;
208#define OCF_READ_LOCAL_VERSION 0x0001
209struct hci_rp_read_loc_version {
210 __u8 status;
211 __u8 hci_ver;
212 __le16 hci_rev;
213 __u8 lmp_ver;
214 __le16 manufacturer;
215 __le16 lmp_subver;
216} __attribute__ ((packed));
217
218#define OCF_READ_LOCAL_FEATURES 0x0003
219struct hci_rp_read_local_features {
220 __u8 status;
221 __u8 features[8];
222} __attribute__ ((packed));
223
224#define OCF_READ_BUFFER_SIZE 0x0005
225struct hci_rp_read_buffer_size {
226 __u8 status;
227 __le16 acl_mtu;
228 __u8 sco_mtu;
229 __le16 acl_max_pkt;
230 __le16 sco_max_pkt;
231} __attribute__ ((packed));
232
233#define OCF_READ_BD_ADDR 0x0009
234struct hci_rp_read_bd_addr {
235 __u8 status;
236 bdaddr_t bdaddr;
237} __attribute__ ((packed));
238
239/* Host Controller and Baseband */
240#define OGF_HOST_CTL 0x03
241#define OCF_RESET 0x0003
242#define OCF_READ_AUTH_ENABLE 0x001F
243#define OCF_WRITE_AUTH_ENABLE 0x0020
244 #define AUTH_DISABLED 0x00
245 #define AUTH_ENABLED 0x01
246
247#define OCF_READ_ENCRYPT_MODE 0x0021
248#define OCF_WRITE_ENCRYPT_MODE 0x0022
249 #define ENCRYPT_DISABLED 0x00
250 #define ENCRYPT_P2P 0x01
251 #define ENCRYPT_BOTH 0x02
252
253#define OCF_WRITE_CA_TIMEOUT 0x0016
254#define OCF_WRITE_PG_TIMEOUT 0x0018
255
256#define OCF_WRITE_SCAN_ENABLE 0x001A
257 #define SCAN_DISABLED 0x00
258 #define SCAN_INQUIRY 0x01
259 #define SCAN_PAGE 0x02
260
261#define OCF_SET_EVENT_FLT 0x0005
262struct hci_cp_set_event_flt {
263 __u8 flt_type;
264 __u8 cond_type;
265 __u8 condition[0];
266} __attribute__ ((packed));
267
268/* Filter types */
269#define HCI_FLT_CLEAR_ALL 0x00
270#define HCI_FLT_INQ_RESULT 0x01
271#define HCI_FLT_CONN_SETUP 0x02
272
273/* CONN_SETUP Condition types */
274#define HCI_CONN_SETUP_ALLOW_ALL 0x00
275#define HCI_CONN_SETUP_ALLOW_CLASS 0x01
276#define HCI_CONN_SETUP_ALLOW_BDADDR 0x02
277
278/* CONN_SETUP Conditions */
279#define HCI_CONN_SETUP_AUTO_OFF 0x01
280#define HCI_CONN_SETUP_AUTO_ON 0x02
281
282#define OCF_READ_CLASS_OF_DEV 0x0023
283struct hci_rp_read_dev_class {
284 __u8 status;
285 __u8 dev_class[3];
286} __attribute__ ((packed));
287
288#define OCF_WRITE_CLASS_OF_DEV 0x0024
289struct hci_cp_write_dev_class {
290 __u8 dev_class[3];
291} __attribute__ ((packed));
292
293#define OCF_READ_VOICE_SETTING 0x0025
294struct hci_rp_read_voice_setting {
295 __u8 status;
296 __le16 voice_setting;
297} __attribute__ ((packed)); 208} __attribute__ ((packed));
298 209
299#define OCF_WRITE_VOICE_SETTING 0x0026 210#define HCI_OP_INQUIRY_CANCEL 0x0402
300struct hci_cp_write_voice_setting {
301 __le16 voice_setting;
302} __attribute__ ((packed));
303 211
304#define OCF_HOST_BUFFER_SIZE 0x0033 212#define HCI_OP_EXIT_PERIODIC_INQ 0x0404
305struct hci_cp_host_buffer_size {
306 __le16 acl_mtu;
307 __u8 sco_mtu;
308 __le16 acl_max_pkt;
309 __le16 sco_max_pkt;
310} __attribute__ ((packed));
311
312/* Link Control */
313#define OGF_LINK_CTL 0x01
314 213
315#define OCF_CREATE_CONN 0x0005 214#define HCI_OP_CREATE_CONN 0x0405
316struct hci_cp_create_conn { 215struct hci_cp_create_conn {
317 bdaddr_t bdaddr; 216 bdaddr_t bdaddr;
318 __le16 pkt_type; 217 __le16 pkt_type;
@@ -322,105 +221,138 @@ struct hci_cp_create_conn {
322 __u8 role_switch; 221 __u8 role_switch;
323} __attribute__ ((packed)); 222} __attribute__ ((packed));
324 223
325#define OCF_CREATE_CONN_CANCEL 0x0008 224#define HCI_OP_DISCONNECT 0x0406
326struct hci_cp_create_conn_cancel {
327 bdaddr_t bdaddr;
328} __attribute__ ((packed));
329
330#define OCF_ACCEPT_CONN_REQ 0x0009
331struct hci_cp_accept_conn_req {
332 bdaddr_t bdaddr;
333 __u8 role;
334} __attribute__ ((packed));
335
336#define OCF_REJECT_CONN_REQ 0x000a
337struct hci_cp_reject_conn_req {
338 bdaddr_t bdaddr;
339 __u8 reason;
340} __attribute__ ((packed));
341
342#define OCF_DISCONNECT 0x0006
343struct hci_cp_disconnect { 225struct hci_cp_disconnect {
344 __le16 handle; 226 __le16 handle;
345 __u8 reason; 227 __u8 reason;
346} __attribute__ ((packed)); 228} __attribute__ ((packed));
347 229
348#define OCF_ADD_SCO 0x0007 230#define HCI_OP_ADD_SCO 0x0407
349struct hci_cp_add_sco { 231struct hci_cp_add_sco {
350 __le16 handle; 232 __le16 handle;
351 __le16 pkt_type; 233 __le16 pkt_type;
352} __attribute__ ((packed)); 234} __attribute__ ((packed));
353 235
354#define OCF_INQUIRY 0x0001 236#define HCI_OP_CREATE_CONN_CANCEL 0x0408
355struct hci_cp_inquiry { 237struct hci_cp_create_conn_cancel {
356 __u8 lap[3]; 238 bdaddr_t bdaddr;
357 __u8 length;
358 __u8 num_rsp;
359} __attribute__ ((packed)); 239} __attribute__ ((packed));
360 240
361#define OCF_INQUIRY_CANCEL 0x0002 241#define HCI_OP_ACCEPT_CONN_REQ 0x0409
242struct hci_cp_accept_conn_req {
243 bdaddr_t bdaddr;
244 __u8 role;
245} __attribute__ ((packed));
362 246
363#define OCF_EXIT_PERIODIC_INQ 0x0004 247#define HCI_OP_REJECT_CONN_REQ 0x040a
248struct hci_cp_reject_conn_req {
249 bdaddr_t bdaddr;
250 __u8 reason;
251} __attribute__ ((packed));
364 252
365#define OCF_LINK_KEY_REPLY 0x000B 253#define HCI_OP_LINK_KEY_REPLY 0x040b
366struct hci_cp_link_key_reply { 254struct hci_cp_link_key_reply {
367 bdaddr_t bdaddr; 255 bdaddr_t bdaddr;
368 __u8 link_key[16]; 256 __u8 link_key[16];
369} __attribute__ ((packed)); 257} __attribute__ ((packed));
370 258
371#define OCF_LINK_KEY_NEG_REPLY 0x000C 259#define HCI_OP_LINK_KEY_NEG_REPLY 0x040c
372struct hci_cp_link_key_neg_reply { 260struct hci_cp_link_key_neg_reply {
373 bdaddr_t bdaddr; 261 bdaddr_t bdaddr;
374} __attribute__ ((packed)); 262} __attribute__ ((packed));
375 263
376#define OCF_PIN_CODE_REPLY 0x000D 264#define HCI_OP_PIN_CODE_REPLY 0x040d
377struct hci_cp_pin_code_reply { 265struct hci_cp_pin_code_reply {
378 bdaddr_t bdaddr; 266 bdaddr_t bdaddr;
379 __u8 pin_len; 267 __u8 pin_len;
380 __u8 pin_code[16]; 268 __u8 pin_code[16];
381} __attribute__ ((packed)); 269} __attribute__ ((packed));
382 270
383#define OCF_PIN_CODE_NEG_REPLY 0x000E 271#define HCI_OP_PIN_CODE_NEG_REPLY 0x040e
384struct hci_cp_pin_code_neg_reply { 272struct hci_cp_pin_code_neg_reply {
385 bdaddr_t bdaddr; 273 bdaddr_t bdaddr;
386} __attribute__ ((packed)); 274} __attribute__ ((packed));
387 275
388#define OCF_CHANGE_CONN_PTYPE 0x000F 276#define HCI_OP_CHANGE_CONN_PTYPE 0x040f
389struct hci_cp_change_conn_ptype { 277struct hci_cp_change_conn_ptype {
390 __le16 handle; 278 __le16 handle;
391 __le16 pkt_type; 279 __le16 pkt_type;
392} __attribute__ ((packed)); 280} __attribute__ ((packed));
393 281
394#define OCF_AUTH_REQUESTED 0x0011 282#define HCI_OP_AUTH_REQUESTED 0x0411
395struct hci_cp_auth_requested { 283struct hci_cp_auth_requested {
396 __le16 handle; 284 __le16 handle;
397} __attribute__ ((packed)); 285} __attribute__ ((packed));
398 286
399#define OCF_SET_CONN_ENCRYPT 0x0013 287#define HCI_OP_SET_CONN_ENCRYPT 0x0413
400struct hci_cp_set_conn_encrypt { 288struct hci_cp_set_conn_encrypt {
401 __le16 handle; 289 __le16 handle;
402 __u8 encrypt; 290 __u8 encrypt;
403} __attribute__ ((packed)); 291} __attribute__ ((packed));
404 292
405#define OCF_CHANGE_CONN_LINK_KEY 0x0015 293#define HCI_OP_CHANGE_CONN_LINK_KEY 0x0415
406struct hci_cp_change_conn_link_key { 294struct hci_cp_change_conn_link_key {
407 __le16 handle; 295 __le16 handle;
408} __attribute__ ((packed)); 296} __attribute__ ((packed));
409 297
410#define OCF_READ_REMOTE_FEATURES 0x001B 298#define HCI_OP_REMOTE_NAME_REQ 0x0419
299struct hci_cp_remote_name_req {
300 bdaddr_t bdaddr;
301 __u8 pscan_rep_mode;
302 __u8 pscan_mode;
303 __le16 clock_offset;
304} __attribute__ ((packed));
305
306#define HCI_OP_REMOTE_NAME_REQ_CANCEL 0x041a
307struct hci_cp_remote_name_req_cancel {
308 bdaddr_t bdaddr;
309} __attribute__ ((packed));
310
311#define HCI_OP_READ_REMOTE_FEATURES 0x041b
411struct hci_cp_read_remote_features { 312struct hci_cp_read_remote_features {
412 __le16 handle; 313 __le16 handle;
413} __attribute__ ((packed)); 314} __attribute__ ((packed));
414 315
415#define OCF_READ_REMOTE_VERSION 0x001D 316#define HCI_OP_READ_REMOTE_EXT_FEATURES 0x041c
317struct hci_cp_read_remote_ext_features {
318 __le16 handle;
319 __u8 page;
320} __attribute__ ((packed));
321
322#define HCI_OP_READ_REMOTE_VERSION 0x041d
416struct hci_cp_read_remote_version { 323struct hci_cp_read_remote_version {
417 __le16 handle; 324 __le16 handle;
418} __attribute__ ((packed)); 325} __attribute__ ((packed));
419 326
420/* Link Policy */ 327#define HCI_OP_SETUP_SYNC_CONN 0x0428
421#define OGF_LINK_POLICY 0x02 328struct hci_cp_setup_sync_conn {
329 __le16 handle;
330 __le32 tx_bandwidth;
331 __le32 rx_bandwidth;
332 __le16 max_latency;
333 __le16 voice_setting;
334 __u8 retrans_effort;
335 __le16 pkt_type;
336} __attribute__ ((packed));
422 337
423#define OCF_SNIFF_MODE 0x0003 338#define HCI_OP_ACCEPT_SYNC_CONN_REQ 0x0429
339struct hci_cp_accept_sync_conn_req {
340 bdaddr_t bdaddr;
341 __le32 tx_bandwidth;
342 __le32 rx_bandwidth;
343 __le16 max_latency;
344 __le16 content_format;
345 __u8 retrans_effort;
346 __le16 pkt_type;
347} __attribute__ ((packed));
348
349#define HCI_OP_REJECT_SYNC_CONN_REQ 0x042a
350struct hci_cp_reject_sync_conn_req {
351 bdaddr_t bdaddr;
352 __u8 reason;
353} __attribute__ ((packed));
354
355#define HCI_OP_SNIFF_MODE 0x0803
424struct hci_cp_sniff_mode { 356struct hci_cp_sniff_mode {
425 __le16 handle; 357 __le16 handle;
426 __le16 max_interval; 358 __le16 max_interval;
@@ -429,12 +361,12 @@ struct hci_cp_sniff_mode {
429 __le16 timeout; 361 __le16 timeout;
430} __attribute__ ((packed)); 362} __attribute__ ((packed));
431 363
432#define OCF_EXIT_SNIFF_MODE 0x0004 364#define HCI_OP_EXIT_SNIFF_MODE 0x0804
433struct hci_cp_exit_sniff_mode { 365struct hci_cp_exit_sniff_mode {
434 __le16 handle; 366 __le16 handle;
435} __attribute__ ((packed)); 367} __attribute__ ((packed));
436 368
437#define OCF_ROLE_DISCOVERY 0x0009 369#define HCI_OP_ROLE_DISCOVERY 0x0809
438struct hci_cp_role_discovery { 370struct hci_cp_role_discovery {
439 __le16 handle; 371 __le16 handle;
440} __attribute__ ((packed)); 372} __attribute__ ((packed));
@@ -444,7 +376,13 @@ struct hci_rp_role_discovery {
444 __u8 role; 376 __u8 role;
445} __attribute__ ((packed)); 377} __attribute__ ((packed));
446 378
447#define OCF_READ_LINK_POLICY 0x000C 379#define HCI_OP_SWITCH_ROLE 0x080b
380struct hci_cp_switch_role {
381 bdaddr_t bdaddr;
382 __u8 role;
383} __attribute__ ((packed));
384
385#define HCI_OP_READ_LINK_POLICY 0x080c
448struct hci_cp_read_link_policy { 386struct hci_cp_read_link_policy {
449 __le16 handle; 387 __le16 handle;
450} __attribute__ ((packed)); 388} __attribute__ ((packed));
@@ -454,13 +392,7 @@ struct hci_rp_read_link_policy {
454 __le16 policy; 392 __le16 policy;
455} __attribute__ ((packed)); 393} __attribute__ ((packed));
456 394
457#define OCF_SWITCH_ROLE 0x000B 395#define HCI_OP_WRITE_LINK_POLICY 0x080d
458struct hci_cp_switch_role {
459 bdaddr_t bdaddr;
460 __u8 role;
461} __attribute__ ((packed));
462
463#define OCF_WRITE_LINK_POLICY 0x000D
464struct hci_cp_write_link_policy { 396struct hci_cp_write_link_policy {
465 __le16 handle; 397 __le16 handle;
466 __le16 policy; 398 __le16 policy;
@@ -470,7 +402,7 @@ struct hci_rp_write_link_policy {
470 __le16 handle; 402 __le16 handle;
471} __attribute__ ((packed)); 403} __attribute__ ((packed));
472 404
473#define OCF_SNIFF_SUBRATE 0x0011 405#define HCI_OP_SNIFF_SUBRATE 0x0811
474struct hci_cp_sniff_subrate { 406struct hci_cp_sniff_subrate {
475 __le16 handle; 407 __le16 handle;
476 __le16 max_latency; 408 __le16 max_latency;
@@ -478,59 +410,156 @@ struct hci_cp_sniff_subrate {
478 __le16 min_local_timeout; 410 __le16 min_local_timeout;
479} __attribute__ ((packed)); 411} __attribute__ ((packed));
480 412
481/* Status params */ 413#define HCI_OP_SET_EVENT_MASK 0x0c01
482#define OGF_STATUS_PARAM 0x05 414struct hci_cp_set_event_mask {
415 __u8 mask[8];
416} __attribute__ ((packed));
483 417
484/* Testing commands */ 418#define HCI_OP_RESET 0x0c03
485#define OGF_TESTING_CMD 0x3E
486 419
487/* Vendor specific commands */ 420#define HCI_OP_SET_EVENT_FLT 0x0c05
488#define OGF_VENDOR_CMD 0x3F 421struct hci_cp_set_event_flt {
422 __u8 flt_type;
423 __u8 cond_type;
424 __u8 condition[0];
425} __attribute__ ((packed));
489 426
490/* ---- HCI Events ---- */ 427/* Filter types */
491#define HCI_EV_INQUIRY_COMPLETE 0x01 428#define HCI_FLT_CLEAR_ALL 0x00
429#define HCI_FLT_INQ_RESULT 0x01
430#define HCI_FLT_CONN_SETUP 0x02
492 431
493#define HCI_EV_INQUIRY_RESULT 0x02 432/* CONN_SETUP Condition types */
494struct inquiry_info { 433#define HCI_CONN_SETUP_ALLOW_ALL 0x00
495 bdaddr_t bdaddr; 434#define HCI_CONN_SETUP_ALLOW_CLASS 0x01
496 __u8 pscan_rep_mode; 435#define HCI_CONN_SETUP_ALLOW_BDADDR 0x02
497 __u8 pscan_period_mode; 436
498 __u8 pscan_mode; 437/* CONN_SETUP Conditions */
438#define HCI_CONN_SETUP_AUTO_OFF 0x01
439#define HCI_CONN_SETUP_AUTO_ON 0x02
440
441#define HCI_OP_WRITE_LOCAL_NAME 0x0c13
442struct hci_cp_write_local_name {
443 __u8 name[248];
444} __attribute__ ((packed));
445
446#define HCI_OP_READ_LOCAL_NAME 0x0c14
447struct hci_rp_read_local_name {
448 __u8 status;
449 __u8 name[248];
450} __attribute__ ((packed));
451
452#define HCI_OP_WRITE_CA_TIMEOUT 0x0c16
453
454#define HCI_OP_WRITE_PG_TIMEOUT 0x0c18
455
456#define HCI_OP_WRITE_SCAN_ENABLE 0x0c1a
457 #define SCAN_DISABLED 0x00
458 #define SCAN_INQUIRY 0x01
459 #define SCAN_PAGE 0x02
460
461#define HCI_OP_READ_AUTH_ENABLE 0x0c1f
462
463#define HCI_OP_WRITE_AUTH_ENABLE 0x0c20
464 #define AUTH_DISABLED 0x00
465 #define AUTH_ENABLED 0x01
466
467#define HCI_OP_READ_ENCRYPT_MODE 0x0c21
468
469#define HCI_OP_WRITE_ENCRYPT_MODE 0x0c22
470 #define ENCRYPT_DISABLED 0x00
471 #define ENCRYPT_P2P 0x01
472 #define ENCRYPT_BOTH 0x02
473
474#define HCI_OP_READ_CLASS_OF_DEV 0x0c23
475struct hci_rp_read_class_of_dev {
476 __u8 status;
499 __u8 dev_class[3]; 477 __u8 dev_class[3];
500 __le16 clock_offset;
501} __attribute__ ((packed)); 478} __attribute__ ((packed));
502 479
503#define HCI_EV_INQUIRY_RESULT_WITH_RSSI 0x22 480#define HCI_OP_WRITE_CLASS_OF_DEV 0x0c24
504struct inquiry_info_with_rssi { 481struct hci_cp_write_class_of_dev {
505 bdaddr_t bdaddr;
506 __u8 pscan_rep_mode;
507 __u8 pscan_period_mode;
508 __u8 dev_class[3]; 482 __u8 dev_class[3];
509 __le16 clock_offset;
510 __s8 rssi;
511} __attribute__ ((packed)); 483} __attribute__ ((packed));
512struct inquiry_info_with_rssi_and_pscan_mode { 484
485#define HCI_OP_READ_VOICE_SETTING 0x0c25
486struct hci_rp_read_voice_setting {
487 __u8 status;
488 __le16 voice_setting;
489} __attribute__ ((packed));
490
491#define HCI_OP_WRITE_VOICE_SETTING 0x0c26
492struct hci_cp_write_voice_setting {
493 __le16 voice_setting;
494} __attribute__ ((packed));
495
496#define HCI_OP_HOST_BUFFER_SIZE 0x0c33
497struct hci_cp_host_buffer_size {
498 __le16 acl_mtu;
499 __u8 sco_mtu;
500 __le16 acl_max_pkt;
501 __le16 sco_max_pkt;
502} __attribute__ ((packed));
503
504#define HCI_OP_READ_LOCAL_VERSION 0x1001
505struct hci_rp_read_local_version {
506 __u8 status;
507 __u8 hci_ver;
508 __le16 hci_rev;
509 __u8 lmp_ver;
510 __le16 manufacturer;
511 __le16 lmp_subver;
512} __attribute__ ((packed));
513
514#define HCI_OP_READ_LOCAL_COMMANDS 0x1002
515struct hci_rp_read_local_commands {
516 __u8 status;
517 __u8 commands[64];
518} __attribute__ ((packed));
519
520#define HCI_OP_READ_LOCAL_FEATURES 0x1003
521struct hci_rp_read_local_features {
522 __u8 status;
523 __u8 features[8];
524} __attribute__ ((packed));
525
526#define HCI_OP_READ_LOCAL_EXT_FEATURES 0x1004
527struct hci_rp_read_local_ext_features {
528 __u8 status;
529 __u8 page;
530 __u8 max_page;
531 __u8 features[8];
532} __attribute__ ((packed));
533
534#define HCI_OP_READ_BUFFER_SIZE 0x1005
535struct hci_rp_read_buffer_size {
536 __u8 status;
537 __le16 acl_mtu;
538 __u8 sco_mtu;
539 __le16 acl_max_pkt;
540 __le16 sco_max_pkt;
541} __attribute__ ((packed));
542
543#define HCI_OP_READ_BD_ADDR 0x1009
544struct hci_rp_read_bd_addr {
545 __u8 status;
513 bdaddr_t bdaddr; 546 bdaddr_t bdaddr;
514 __u8 pscan_rep_mode;
515 __u8 pscan_period_mode;
516 __u8 pscan_mode;
517 __u8 dev_class[3];
518 __le16 clock_offset;
519 __s8 rssi;
520} __attribute__ ((packed)); 547} __attribute__ ((packed));
521 548
522#define HCI_EV_EXTENDED_INQUIRY_RESULT 0x2F 549/* ---- HCI Events ---- */
523struct extended_inquiry_info { 550#define HCI_EV_INQUIRY_COMPLETE 0x01
551
552#define HCI_EV_INQUIRY_RESULT 0x02
553struct inquiry_info {
524 bdaddr_t bdaddr; 554 bdaddr_t bdaddr;
525 __u8 pscan_rep_mode; 555 __u8 pscan_rep_mode;
526 __u8 pscan_period_mode; 556 __u8 pscan_period_mode;
557 __u8 pscan_mode;
527 __u8 dev_class[3]; 558 __u8 dev_class[3];
528 __le16 clock_offset; 559 __le16 clock_offset;
529 __s8 rssi;
530 __u8 data[240];
531} __attribute__ ((packed)); 560} __attribute__ ((packed));
532 561
533#define HCI_EV_CONN_COMPLETE 0x03 562#define HCI_EV_CONN_COMPLETE 0x03
534struct hci_ev_conn_complete { 563struct hci_ev_conn_complete {
535 __u8 status; 564 __u8 status;
536 __le16 handle; 565 __le16 handle;
@@ -539,40 +568,63 @@ struct hci_ev_conn_complete {
539 __u8 encr_mode; 568 __u8 encr_mode;
540} __attribute__ ((packed)); 569} __attribute__ ((packed));
541 570
542#define HCI_EV_CONN_REQUEST 0x04 571#define HCI_EV_CONN_REQUEST 0x04
543struct hci_ev_conn_request { 572struct hci_ev_conn_request {
544 bdaddr_t bdaddr; 573 bdaddr_t bdaddr;
545 __u8 dev_class[3]; 574 __u8 dev_class[3];
546 __u8 link_type; 575 __u8 link_type;
547} __attribute__ ((packed)); 576} __attribute__ ((packed));
548 577
549#define HCI_EV_DISCONN_COMPLETE 0x05 578#define HCI_EV_DISCONN_COMPLETE 0x05
550struct hci_ev_disconn_complete { 579struct hci_ev_disconn_complete {
551 __u8 status; 580 __u8 status;
552 __le16 handle; 581 __le16 handle;
553 __u8 reason; 582 __u8 reason;
554} __attribute__ ((packed)); 583} __attribute__ ((packed));
555 584
556#define HCI_EV_AUTH_COMPLETE 0x06 585#define HCI_EV_AUTH_COMPLETE 0x06
557struct hci_ev_auth_complete { 586struct hci_ev_auth_complete {
558 __u8 status; 587 __u8 status;
559 __le16 handle; 588 __le16 handle;
560} __attribute__ ((packed)); 589} __attribute__ ((packed));
561 590
562#define HCI_EV_ENCRYPT_CHANGE 0x08 591#define HCI_EV_REMOTE_NAME 0x07
592struct hci_ev_remote_name {
593 __u8 status;
594 bdaddr_t bdaddr;
595 __u8 name[248];
596} __attribute__ ((packed));
597
598#define HCI_EV_ENCRYPT_CHANGE 0x08
563struct hci_ev_encrypt_change { 599struct hci_ev_encrypt_change {
564 __u8 status; 600 __u8 status;
565 __le16 handle; 601 __le16 handle;
566 __u8 encrypt; 602 __u8 encrypt;
567} __attribute__ ((packed)); 603} __attribute__ ((packed));
568 604
569#define HCI_EV_CHANGE_CONN_LINK_KEY_COMPLETE 0x09 605#define HCI_EV_CHANGE_LINK_KEY_COMPLETE 0x09
570struct hci_ev_change_conn_link_key_complete { 606struct hci_ev_change_link_key_complete {
607 __u8 status;
608 __le16 handle;
609} __attribute__ ((packed));
610
611#define HCI_EV_REMOTE_FEATURES 0x0b
612struct hci_ev_remote_features {
613 __u8 status;
614 __le16 handle;
615 __u8 features[8];
616} __attribute__ ((packed));
617
618#define HCI_EV_REMOTE_VERSION 0x0c
619struct hci_ev_remote_version {
571 __u8 status; 620 __u8 status;
572 __le16 handle; 621 __le16 handle;
622 __u8 lmp_ver;
623 __le16 manufacturer;
624 __le16 lmp_subver;
573} __attribute__ ((packed)); 625} __attribute__ ((packed));
574 626
575#define HCI_EV_QOS_SETUP_COMPLETE 0x0D 627#define HCI_EV_QOS_SETUP_COMPLETE 0x0d
576struct hci_qos { 628struct hci_qos {
577 __u8 service_type; 629 __u8 service_type;
578 __u32 token_rate; 630 __u32 token_rate;
@@ -586,33 +638,33 @@ struct hci_ev_qos_setup_complete {
586 struct hci_qos qos; 638 struct hci_qos qos;
587} __attribute__ ((packed)); 639} __attribute__ ((packed));
588 640
589#define HCI_EV_CMD_COMPLETE 0x0E 641#define HCI_EV_CMD_COMPLETE 0x0e
590struct hci_ev_cmd_complete { 642struct hci_ev_cmd_complete {
591 __u8 ncmd; 643 __u8 ncmd;
592 __le16 opcode; 644 __le16 opcode;
593} __attribute__ ((packed)); 645} __attribute__ ((packed));
594 646
595#define HCI_EV_CMD_STATUS 0x0F 647#define HCI_EV_CMD_STATUS 0x0f
596struct hci_ev_cmd_status { 648struct hci_ev_cmd_status {
597 __u8 status; 649 __u8 status;
598 __u8 ncmd; 650 __u8 ncmd;
599 __le16 opcode; 651 __le16 opcode;
600} __attribute__ ((packed)); 652} __attribute__ ((packed));
601 653
602#define HCI_EV_NUM_COMP_PKTS 0x13 654#define HCI_EV_ROLE_CHANGE 0x12
603struct hci_ev_num_comp_pkts {
604 __u8 num_hndl;
605 /* variable length part */
606} __attribute__ ((packed));
607
608#define HCI_EV_ROLE_CHANGE 0x12
609struct hci_ev_role_change { 655struct hci_ev_role_change {
610 __u8 status; 656 __u8 status;
611 bdaddr_t bdaddr; 657 bdaddr_t bdaddr;
612 __u8 role; 658 __u8 role;
613} __attribute__ ((packed)); 659} __attribute__ ((packed));
614 660
615#define HCI_EV_MODE_CHANGE 0x14 661#define HCI_EV_NUM_COMP_PKTS 0x13
662struct hci_ev_num_comp_pkts {
663 __u8 num_hndl;
664 /* variable length part */
665} __attribute__ ((packed));
666
667#define HCI_EV_MODE_CHANGE 0x14
616struct hci_ev_mode_change { 668struct hci_ev_mode_change {
617 __u8 status; 669 __u8 status;
618 __le16 handle; 670 __le16 handle;
@@ -620,53 +672,88 @@ struct hci_ev_mode_change {
620 __le16 interval; 672 __le16 interval;
621} __attribute__ ((packed)); 673} __attribute__ ((packed));
622 674
623#define HCI_EV_PIN_CODE_REQ 0x16 675#define HCI_EV_PIN_CODE_REQ 0x16
624struct hci_ev_pin_code_req { 676struct hci_ev_pin_code_req {
625 bdaddr_t bdaddr; 677 bdaddr_t bdaddr;
626} __attribute__ ((packed)); 678} __attribute__ ((packed));
627 679
628#define HCI_EV_LINK_KEY_REQ 0x17 680#define HCI_EV_LINK_KEY_REQ 0x17
629struct hci_ev_link_key_req { 681struct hci_ev_link_key_req {
630 bdaddr_t bdaddr; 682 bdaddr_t bdaddr;
631} __attribute__ ((packed)); 683} __attribute__ ((packed));
632 684
633#define HCI_EV_LINK_KEY_NOTIFY 0x18 685#define HCI_EV_LINK_KEY_NOTIFY 0x18
634struct hci_ev_link_key_notify { 686struct hci_ev_link_key_notify {
635 bdaddr_t bdaddr; 687 bdaddr_t bdaddr;
636 __u8 link_key[16]; 688 __u8 link_key[16];
637 __u8 key_type; 689 __u8 key_type;
638} __attribute__ ((packed)); 690} __attribute__ ((packed));
639 691
640#define HCI_EV_REMOTE_FEATURES 0x0B 692#define HCI_EV_CLOCK_OFFSET 0x1c
641struct hci_ev_remote_features { 693struct hci_ev_clock_offset {
642 __u8 status; 694 __u8 status;
643 __le16 handle; 695 __le16 handle;
644 __u8 features[8]; 696 __le16 clock_offset;
645} __attribute__ ((packed)); 697} __attribute__ ((packed));
646 698
647#define HCI_EV_REMOTE_VERSION 0x0C 699#define HCI_EV_PSCAN_REP_MODE 0x20
648struct hci_ev_remote_version { 700struct hci_ev_pscan_rep_mode {
701 bdaddr_t bdaddr;
702 __u8 pscan_rep_mode;
703} __attribute__ ((packed));
704
705#define HCI_EV_INQUIRY_RESULT_WITH_RSSI 0x22
706struct inquiry_info_with_rssi {
707 bdaddr_t bdaddr;
708 __u8 pscan_rep_mode;
709 __u8 pscan_period_mode;
710 __u8 dev_class[3];
711 __le16 clock_offset;
712 __s8 rssi;
713} __attribute__ ((packed));
714struct inquiry_info_with_rssi_and_pscan_mode {
715 bdaddr_t bdaddr;
716 __u8 pscan_rep_mode;
717 __u8 pscan_period_mode;
718 __u8 pscan_mode;
719 __u8 dev_class[3];
720 __le16 clock_offset;
721 __s8 rssi;
722} __attribute__ ((packed));
723
724#define HCI_EV_REMOTE_EXT_FEATURES 0x23
725struct hci_ev_remote_ext_features {
649 __u8 status; 726 __u8 status;
650 __le16 handle; 727 __le16 handle;
651 __u8 lmp_ver; 728 __u8 page;
652 __le16 manufacturer; 729 __u8 max_page;
653 __le16 lmp_subver; 730 __u8 features[8];
654} __attribute__ ((packed)); 731} __attribute__ ((packed));
655 732
656#define HCI_EV_CLOCK_OFFSET 0x01C 733#define HCI_EV_SYNC_CONN_COMPLETE 0x2c
657struct hci_ev_clock_offset { 734struct hci_ev_sync_conn_complete {
658 __u8 status; 735 __u8 status;
659 __le16 handle; 736 __le16 handle;
660 __le16 clock_offset; 737 bdaddr_t bdaddr;
738 __u8 link_type;
739 __u8 tx_interval;
740 __u8 retrans_window;
741 __le16 rx_pkt_len;
742 __le16 tx_pkt_len;
743 __u8 air_mode;
661} __attribute__ ((packed)); 744} __attribute__ ((packed));
662 745
663#define HCI_EV_PSCAN_REP_MODE 0x20 746#define HCI_EV_SYNC_CONN_CHANGED 0x2d
664struct hci_ev_pscan_rep_mode { 747struct hci_ev_sync_conn_changed {
665 bdaddr_t bdaddr; 748 __u8 status;
666 __u8 pscan_rep_mode; 749 __le16 handle;
750 __u8 tx_interval;
751 __u8 retrans_window;
752 __le16 rx_pkt_len;
753 __le16 tx_pkt_len;
667} __attribute__ ((packed)); 754} __attribute__ ((packed));
668 755
669#define HCI_EV_SNIFF_SUBRATE 0x2E 756#define HCI_EV_SNIFF_SUBRATE 0x2e
670struct hci_ev_sniff_subrate { 757struct hci_ev_sniff_subrate {
671 __u8 status; 758 __u8 status;
672 __le16 handle; 759 __le16 handle;
@@ -676,14 +763,25 @@ struct hci_ev_sniff_subrate {
676 __le16 max_local_timeout; 763 __le16 max_local_timeout;
677} __attribute__ ((packed)); 764} __attribute__ ((packed));
678 765
766#define HCI_EV_EXTENDED_INQUIRY_RESULT 0x2f
767struct extended_inquiry_info {
768 bdaddr_t bdaddr;
769 __u8 pscan_rep_mode;
770 __u8 pscan_period_mode;
771 __u8 dev_class[3];
772 __le16 clock_offset;
773 __s8 rssi;
774 __u8 data[240];
775} __attribute__ ((packed));
776
679/* Internal events generated by Bluetooth stack */ 777/* Internal events generated by Bluetooth stack */
680#define HCI_EV_STACK_INTERNAL 0xFD 778#define HCI_EV_STACK_INTERNAL 0xfd
681struct hci_ev_stack_internal { 779struct hci_ev_stack_internal {
682 __u16 type; 780 __u16 type;
683 __u8 data[0]; 781 __u8 data[0];
684} __attribute__ ((packed)); 782} __attribute__ ((packed));
685 783
686#define HCI_EV_SI_DEVICE 0x01 784#define HCI_EV_SI_DEVICE 0x01
687struct hci_ev_si_device { 785struct hci_ev_si_device {
688 __u16 event; 786 __u16 event;
689 __u16 dev_id; 787 __u16 dev_id;
@@ -704,40 +802,40 @@ struct hci_ev_si_security {
704#define HCI_SCO_HDR_SIZE 3 802#define HCI_SCO_HDR_SIZE 3
705 803
706struct hci_command_hdr { 804struct hci_command_hdr {
707 __le16 opcode; /* OCF & OGF */ 805 __le16 opcode; /* OCF & OGF */
708 __u8 plen; 806 __u8 plen;
709} __attribute__ ((packed)); 807} __attribute__ ((packed));
710 808
711struct hci_event_hdr { 809struct hci_event_hdr {
712 __u8 evt; 810 __u8 evt;
713 __u8 plen; 811 __u8 plen;
714} __attribute__ ((packed)); 812} __attribute__ ((packed));
715 813
716struct hci_acl_hdr { 814struct hci_acl_hdr {
717 __le16 handle; /* Handle & Flags(PB, BC) */ 815 __le16 handle; /* Handle & Flags(PB, BC) */
718 __le16 dlen; 816 __le16 dlen;
719} __attribute__ ((packed)); 817} __attribute__ ((packed));
720 818
721struct hci_sco_hdr { 819struct hci_sco_hdr {
722 __le16 handle; 820 __le16 handle;
723 __u8 dlen; 821 __u8 dlen;
724} __attribute__ ((packed)); 822} __attribute__ ((packed));
725 823
726#ifdef __KERNEL__ 824#ifdef __KERNEL__
727#include <linux/skbuff.h> 825#include <linux/skbuff.h>
728static inline struct hci_event_hdr *hci_event_hdr(const struct sk_buff *skb) 826static inline struct hci_event_hdr *hci_event_hdr(const struct sk_buff *skb)
729{ 827{
730 return (struct hci_event_hdr *)skb->data; 828 return (struct hci_event_hdr *) skb->data;
731} 829}
732 830
733static inline struct hci_acl_hdr *hci_acl_hdr(const struct sk_buff *skb) 831static inline struct hci_acl_hdr *hci_acl_hdr(const struct sk_buff *skb)
734{ 832{
735 return (struct hci_acl_hdr *)skb->data; 833 return (struct hci_acl_hdr *) skb->data;
736} 834}
737 835
738static inline struct hci_sco_hdr *hci_sco_hdr(const struct sk_buff *skb) 836static inline struct hci_sco_hdr *hci_sco_hdr(const struct sk_buff *skb)
739{ 837{
740 return (struct hci_sco_hdr *)skb->data; 838 return (struct hci_sco_hdr *) skb->data;
741} 839}
742#endif 840#endif
743 841
@@ -771,13 +869,13 @@ struct sockaddr_hci {
771struct hci_filter { 869struct hci_filter {
772 unsigned long type_mask; 870 unsigned long type_mask;
773 unsigned long event_mask[2]; 871 unsigned long event_mask[2];
774 __le16 opcode; 872 __le16 opcode;
775}; 873};
776 874
777struct hci_ufilter { 875struct hci_ufilter {
778 __u32 type_mask; 876 __u32 type_mask;
779 __u32 event_mask[2]; 877 __u32 event_mask[2];
780 __le16 opcode; 878 __le16 opcode;
781}; 879};
782 880
783#define HCI_FLT_TYPE_BITS 31 881#define HCI_FLT_TYPE_BITS 31
@@ -825,15 +923,15 @@ struct hci_dev_info {
825struct hci_conn_info { 923struct hci_conn_info {
826 __u16 handle; 924 __u16 handle;
827 bdaddr_t bdaddr; 925 bdaddr_t bdaddr;
828 __u8 type; 926 __u8 type;
829 __u8 out; 927 __u8 out;
830 __u16 state; 928 __u16 state;
831 __u32 link_mode; 929 __u32 link_mode;
832}; 930};
833 931
834struct hci_dev_req { 932struct hci_dev_req {
835 __u16 dev_id; 933 __u16 dev_id;
836 __u32 dev_opt; 934 __u32 dev_opt;
837}; 935};
838 936
839struct hci_dev_list_req { 937struct hci_dev_list_req {
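
The rewritten command definitions fold the old OGF/OCF pairs into single 16-bit HCI_OP_* opcodes: the OGF sits in the upper 6 bits and the OCF in the lower 10. The arithmetic below only restates that layout (the results can be checked against the constants above); the hci_opcode_pack()/ogf()/ocf() helpers this header has traditionally provided are assumed to encode the same thing.

/* Sketch of the opcode layout behind the HCI_OP_* constants. */
#define EX_OP_PACK(ogf, ocf)    ((__u16) (((ogf) << 10) | ((ocf) & 0x03ff)))
#define EX_OP_OGF(op)           ((op) >> 10)
#define EX_OP_OCF(op)           ((op) & 0x03ff)

/*
 * EX_OP_PACK(0x01, 0x0001) == 0x0401   (HCI_OP_INQUIRY,      OGF_LINK_CTL)
 * EX_OP_PACK(0x02, 0x0003) == 0x0803   (HCI_OP_SNIFF_MODE,   OGF_LINK_POLICY)
 * EX_OP_PACK(0x03, 0x0003) == 0x0c03   (HCI_OP_RESET,        OGF_HOST_CTL)
 * EX_OP_PACK(0x04, 0x0009) == 0x1009   (HCI_OP_READ_BD_ADDR, OGF_INFO_PARAM)
 */
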
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index 8f67c8a7169b..ea13baa3851b 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -71,7 +71,10 @@ struct hci_dev {
71 __u16 id; 71 __u16 id;
72 __u8 type; 72 __u8 type;
73 bdaddr_t bdaddr; 73 bdaddr_t bdaddr;
74 __u8 dev_name[248];
75 __u8 dev_class[3];
74 __u8 features[8]; 76 __u8 features[8];
77 __u8 commands[64];
75 __u8 hci_ver; 78 __u8 hci_ver;
76 __u16 hci_rev; 79 __u16 hci_rev;
77 __u16 manufacturer; 80 __u16 manufacturer;
@@ -310,10 +313,12 @@ static inline struct hci_conn *hci_conn_hash_lookup_state(struct hci_dev *hdev,
310void hci_acl_connect(struct hci_conn *conn); 313void hci_acl_connect(struct hci_conn *conn);
311void hci_acl_disconn(struct hci_conn *conn, __u8 reason); 314void hci_acl_disconn(struct hci_conn *conn, __u8 reason);
312void hci_add_sco(struct hci_conn *conn, __u16 handle); 315void hci_add_sco(struct hci_conn *conn, __u16 handle);
316void hci_setup_sync(struct hci_conn *conn, __u16 handle);
313 317
314struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst); 318struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst);
315int hci_conn_del(struct hci_conn *conn); 319int hci_conn_del(struct hci_conn *conn);
316void hci_conn_hash_flush(struct hci_dev *hdev); 320void hci_conn_hash_flush(struct hci_dev *hdev);
321void hci_conn_check_pending(struct hci_dev *hdev);
317 322
318struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *src); 323struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *src);
319int hci_conn_auth(struct hci_conn *conn); 324int hci_conn_auth(struct hci_conn *conn);
@@ -617,11 +622,11 @@ int hci_unregister_cb(struct hci_cb *hcb);
617int hci_register_notifier(struct notifier_block *nb); 622int hci_register_notifier(struct notifier_block *nb);
618int hci_unregister_notifier(struct notifier_block *nb); 623int hci_unregister_notifier(struct notifier_block *nb);
619 624
620int hci_send_cmd(struct hci_dev *hdev, __u16 ogf, __u16 ocf, __u32 plen, void *param); 625int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param);
621int hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags); 626int hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags);
622int hci_send_sco(struct hci_conn *conn, struct sk_buff *skb); 627int hci_send_sco(struct hci_conn *conn, struct sk_buff *skb);
623 628
624void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 ogf, __u16 ocf); 629void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode);
625 630
626void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data); 631void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data);
627 632
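
With the consolidated opcodes, hci_send_cmd() and hci_sent_cmd_data() take one __u16 opcode instead of an (ogf, ocf) pair. A minimal caller sketch (the surrounding driver context is assumed and the request parameters are illustrative):

#include <linux/string.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci.h>
#include <net/bluetooth/hci_core.h>

/* Sketch: issue an HCI Inquiry with the single-opcode interface. */
static void ex_start_inquiry(struct hci_dev *hdev, __u8 lap[3])
{
        struct hci_cp_inquiry cp;

        memset(&cp, 0, sizeof(cp));
        memcpy(cp.lap, lap, 3);
        cp.length  = 8;         /* illustrative inquiry length */
        cp.num_rsp = 0;         /* unlimited responses */

        /* Previously: hci_send_cmd(hdev, OGF_LINK_CTL, OCF_INQUIRY, sizeof(cp), &cp); */
        hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}
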
diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h
index 70e70f5d3dd6..73e115bc12dd 100644
--- a/include/net/bluetooth/l2cap.h
+++ b/include/net/bluetooth/l2cap.h
@@ -29,7 +29,8 @@
29#define L2CAP_DEFAULT_MTU 672 29#define L2CAP_DEFAULT_MTU 672
30#define L2CAP_DEFAULT_FLUSH_TO 0xFFFF 30#define L2CAP_DEFAULT_FLUSH_TO 0xFFFF
31 31
32#define L2CAP_CONN_TIMEOUT (HZ * 40) 32#define L2CAP_CONN_TIMEOUT (40000) /* 40 seconds */
33#define L2CAP_INFO_TIMEOUT (4000) /* 4 seconds */
33 34
34/* L2CAP socket address */ 35/* L2CAP socket address */
35struct sockaddr_l2 { 36struct sockaddr_l2 {
@@ -148,6 +149,19 @@ struct l2cap_conf_opt {
148 149
149#define L2CAP_CONF_MAX_SIZE 22 150#define L2CAP_CONF_MAX_SIZE 22
150 151
152struct l2cap_conf_rfc {
153 __u8 mode;
154 __u8 txwin_size;
155 __u8 max_transmit;
156 __le16 retrans_timeout;
157 __le16 monitor_timeout;
158 __le16 max_pdu_size;
159} __attribute__ ((packed));
160
161#define L2CAP_MODE_BASIC 0x00
162#define L2CAP_MODE_RETRANS 0x01
163#define L2CAP_MODE_FLOWCTL 0x02
164
151struct l2cap_disconn_req { 165struct l2cap_disconn_req {
152 __le16 dcid; 166 __le16 dcid;
153 __le16 scid; 167 __le16 scid;
@@ -160,7 +174,6 @@ struct l2cap_disconn_rsp {
160 174
161struct l2cap_info_req { 175struct l2cap_info_req {
162 __le16 type; 176 __le16 type;
163 __u8 data[0];
164} __attribute__ ((packed)); 177} __attribute__ ((packed));
165 178
166struct l2cap_info_rsp { 179struct l2cap_info_rsp {
@@ -192,6 +205,13 @@ struct l2cap_conn {
192 205
193 unsigned int mtu; 206 unsigned int mtu;
194 207
208 __u32 feat_mask;
209
210 __u8 info_state;
211 __u8 info_ident;
212
213 struct timer_list info_timer;
214
195 spinlock_t lock; 215 spinlock_t lock;
196 216
197 struct sk_buff *rx_skb; 217 struct sk_buff *rx_skb;
@@ -202,6 +222,9 @@ struct l2cap_conn {
202 struct l2cap_chan_list chan_list; 222 struct l2cap_chan_list chan_list;
203}; 223};
204 224
225#define L2CAP_INFO_CL_MTU_REQ_SENT 0x01
226#define L2CAP_INFO_FEAT_MASK_REQ_SENT 0x02
227
205/* ----- L2CAP channel and socket info ----- */ 228/* ----- L2CAP channel and socket info ----- */
206#define l2cap_pi(sk) ((struct l2cap_pinfo *) sk) 229#define l2cap_pi(sk) ((struct l2cap_pinfo *) sk)
207 230
@@ -221,7 +244,6 @@ struct l2cap_pinfo {
221 __u8 conf_len; 244 __u8 conf_len;
222 __u8 conf_state; 245 __u8 conf_state;
223 __u8 conf_retry; 246 __u8 conf_retry;
224 __u16 conf_mtu;
225 247
226 __u8 ident; 248 __u8 ident;
227 249
@@ -232,10 +254,11 @@ struct l2cap_pinfo {
232 struct sock *prev_c; 254 struct sock *prev_c;
233}; 255};
234 256
235#define L2CAP_CONF_REQ_SENT 0x01 257#define L2CAP_CONF_REQ_SENT 0x01
236#define L2CAP_CONF_INPUT_DONE 0x02 258#define L2CAP_CONF_INPUT_DONE 0x02
237#define L2CAP_CONF_OUTPUT_DONE 0x04 259#define L2CAP_CONF_OUTPUT_DONE 0x04
238#define L2CAP_CONF_MAX_RETRIES 2 260
261#define L2CAP_CONF_MAX_RETRIES 2
239 262
240void l2cap_load(void); 263void l2cap_load(void);
241 264
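
The new struct l2cap_conf_rfc describes the retransmission and flow control option exchanged during configuration. A minimal sketch of filling it for basic mode; how the structure is appended to a configuration request (the option type value and the helper used) is not shown in this patch and is left out here.

#include <linux/string.h>
#include <net/bluetooth/l2cap.h>

/* Sketch: describe basic (non-retransmitting) mode with the new option. */
static void ex_fill_rfc_basic(struct l2cap_conf_rfc *rfc)
{
        memset(rfc, 0, sizeof(*rfc));
        rfc->mode = L2CAP_MODE_BASIC;
        /*
         * txwin_size, max_transmit, the retransmission/monitor timeouts
         * and max_pdu_size only apply to the retransmission and flow
         * control modes, so they stay zero here.
         */
}
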
diff --git a/include/sound/version.h b/include/sound/version.h
index 8d4a8dd89237..a2be8ad8894b 100644
--- a/include/sound/version.h
+++ b/include/sound/version.h
@@ -1,3 +1,3 @@
1/* include/version.h. Generated by alsa/ksync script. */ 1/* include/version.h. Generated by alsa/ksync script. */
2#define CONFIG_SND_VERSION "1.0.15" 2#define CONFIG_SND_VERSION "1.0.15"
3#define CONFIG_SND_DATE " (Tue Oct 16 14:57:44 2007 UTC)" 3#define CONFIG_SND_DATE " (Tue Oct 23 06:09:18 2007 UTC)"
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index 80ecab0942ef..bce9ecdb7712 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -1615,7 +1615,7 @@ static void audit_copy_inode(struct audit_names *name, const struct inode *inode
1615/** 1615/**
1616 * audit_inode - store the inode and device from a lookup 1616 * audit_inode - store the inode and device from a lookup
1617 * @name: name being audited 1617 * @name: name being audited
1618 * @inode: inode being audited 1618 * @dentry: dentry being audited
1619 * 1619 *
1620 * Called from fs/namei.c:path_lookup(). 1620 * Called from fs/namei.c:path_lookup().
1621 */ 1621 */
@@ -1650,7 +1650,7 @@ void __audit_inode(const char *name, const struct dentry *dentry)
1650/** 1650/**
1651 * audit_inode_child - collect inode info for created/removed objects 1651 * audit_inode_child - collect inode info for created/removed objects
1652 * @dname: inode's dentry name 1652 * @dname: inode's dentry name
1653 * @inode: inode being audited 1653 * @dentry: dentry being audited
1654 * @parent: inode of dentry parent 1654 * @parent: inode of dentry parent
1655 * 1655 *
1656 * For syscalls that create or remove filesystem objects, audit_inode 1656 * For syscalls that create or remove filesystem objects, audit_inode
diff --git a/kernel/sched.c b/kernel/sched.c
index 7581e331b139..2810e562a991 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3375,7 +3375,6 @@ void account_system_time(struct task_struct *p, int hardirq_offset,
3375 3375
3376 if (p->flags & PF_VCPU) { 3376 if (p->flags & PF_VCPU) {
3377 account_guest_time(p, cputime); 3377 account_guest_time(p, cputime);
3378 p->flags &= ~PF_VCPU;
3379 return; 3378 return;
3380 } 3379 }
3381 3380
diff --git a/kernel/sysctl_check.c b/kernel/sysctl_check.c
index 3c9ef5a7d575..ed6fe51df77a 100644
--- a/kernel/sysctl_check.c
+++ b/kernel/sysctl_check.c
@@ -731,7 +731,7 @@ static struct trans_ctl_table trans_net_table[] = {
731 { NET_UNIX, "unix", trans_net_unix_table }, 731 { NET_UNIX, "unix", trans_net_unix_table },
732 { NET_IPV4, "ipv4", trans_net_ipv4_table }, 732 { NET_IPV4, "ipv4", trans_net_ipv4_table },
733 { NET_IPX, "ipx", trans_net_ipx_table }, 733 { NET_IPX, "ipx", trans_net_ipx_table },
734 { NET_ATALK, "atalk", trans_net_atalk_table }, 734 { NET_ATALK, "appletalk", trans_net_atalk_table },
735 { NET_NETROM, "netrom", trans_net_netrom_table }, 735 { NET_NETROM, "netrom", trans_net_netrom_table },
736 { NET_AX25, "ax25", trans_net_ax25_table }, 736 { NET_AX25, "ax25", trans_net_ax25_table },
737 { NET_BRIDGE, "bridge", trans_net_bridge_table }, 737 { NET_BRIDGE, "bridge", trans_net_bridge_table },
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index c567f219191d..1faa5087dc86 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -389,6 +389,16 @@ config DEBUG_LIST
389 389
390 If unsure, say N. 390 If unsure, say N.
391 391
392config DEBUG_SG
393 bool "Debug SG table operations"
394 depends on DEBUG_KERNEL
395 help
396 Enable this to turn on checks on scatter-gather tables. This can
397 help find problems with drivers that do not properly initialize
398 their sg tables.
399
400 If unsure, say N.
401
392config FRAME_POINTER 402config FRAME_POINTER
393 bool "Compile the kernel with frame pointers" 403 bool "Compile the kernel with frame pointers"
394 depends on DEBUG_KERNEL && (X86 || CRIS || M68K || M68KNOMMU || FRV || UML || S390 || AVR32 || SUPERH || BFIN) 404 depends on DEBUG_KERNEL && (X86 || CRIS || M68K || M68KNOMMU || FRV || UML || S390 || AVR32 || SUPERH || BFIN)
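
The new DEBUG_SG option backs the SG_MAGIC checks added to include/linux/scatterlist.h earlier in this patch: entries that never went through sg_init_one()/sg_init_table() trip a BUG as soon as they are walked. A small illustrative sketch of the class of bug it catches (driver-style code, not taken from the patch; buf and len are assumptions):

#include <linux/scatterlist.h>

/* Illustrative only: the kind of misuse CONFIG_DEBUG_SG catches. */
static void ex_debug_sg_demo(void *buf, unsigned int len)
{
        struct scatterlist sg;

        memset(&sg, 0, sizeof(sg));     /* not enough: sg_magic is still 0   */
        sg_next(&sg);                   /* BUGs when CONFIG_DEBUG_SG=y       */

        sg_init_one(&sg, buf, len);     /* correct: sets magic and end mark  */
        sg_next(&sg);                   /* returns NULL for this 1-entry list */
}
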
diff --git a/lib/reed_solomon/decode_rs.c b/lib/reed_solomon/decode_rs.c
index a58df56f09b6..0ec3f257ffdf 100644
--- a/lib/reed_solomon/decode_rs.c
+++ b/lib/reed_solomon/decode_rs.c
@@ -39,8 +39,7 @@
39 39
40 /* Check length parameter for validity */ 40 /* Check length parameter for validity */
41 pad = nn - nroots - len; 41 pad = nn - nroots - len;
42 if (pad < 0 || pad >= nn) 42 BUG_ON(pad < 0 || pad >= nn);
43 return -ERANGE;
44 43
45 /* Does the caller provide the syndrome ? */ 44 /* Does the caller provide the syndrome ? */
46 if (s != NULL) 45 if (s != NULL)
@@ -203,7 +202,7 @@
203 * deg(lambda) unequal to number of roots => uncorrectable 202 * deg(lambda) unequal to number of roots => uncorrectable
204 * error detected 203 * error detected
205 */ 204 */
206 count = -1; 205 count = -EBADMSG;
207 goto finish; 206 goto finish;
208 } 207 }
209 /* 208 /*
diff --git a/lib/reed_solomon/reed_solomon.c b/lib/reed_solomon/reed_solomon.c
index 5b0d8522b7ca..3ea2db94d5b0 100644
--- a/lib/reed_solomon/reed_solomon.c
+++ b/lib/reed_solomon/reed_solomon.c
@@ -320,6 +320,7 @@ EXPORT_SYMBOL_GPL(encode_rs8);
320 * The syndrome and parity uses a uint16_t data type to enable 320 * The syndrome and parity uses a uint16_t data type to enable
321 * symbol size > 8. The calling code must take care of decoding of the 321 * symbol size > 8. The calling code must take care of decoding of the
322 * syndrome result and the received parity before calling this code. 322 * syndrome result and the received parity before calling this code.
323 * Returns the number of corrected bits or -EBADMSG for uncorrectable errors.
323 */ 324 */
324int decode_rs8(struct rs_control *rs, uint8_t *data, uint16_t *par, int len, 325int decode_rs8(struct rs_control *rs, uint8_t *data, uint16_t *par, int len,
325 uint16_t *s, int no_eras, int *eras_pos, uint16_t invmsk, 326 uint16_t *s, int no_eras, int *eras_pos, uint16_t invmsk,
@@ -363,6 +364,7 @@ EXPORT_SYMBOL_GPL(encode_rs16);
363 * @corr: buffer to store correction bitmask on eras_pos 364 * @corr: buffer to store correction bitmask on eras_pos
364 * 365 *
365 * Each field in the data array contains up to symbol size bits of valid data. 366 * Each field in the data array contains up to symbol size bits of valid data.
367 * Returns the number of corrected bits or -EBADMSG for uncorrectable errors.
366 */ 368 */
367int decode_rs16(struct rs_control *rs, uint16_t *data, uint16_t *par, int len, 369int decode_rs16(struct rs_control *rs, uint16_t *data, uint16_t *par, int len,
368 uint16_t *s, int no_eras, int *eras_pos, uint16_t invmsk, 370 uint16_t *s, int no_eras, int *eras_pos, uint16_t invmsk,
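
With these changes an uncorrectable block is reported as -EBADMSG (and a bad length parameter is treated as a caller bug) rather than a bare -1, so the result can be handled like any other kernel error code. A hedged caller sketch (the rs_control setup and the buffer layout are assumed):

#include <linux/errno.h>
#include <linux/rslib.h>

/* Sketch: decode one codeword and report the outcome. */
static int ex_decode_block(struct rs_control *rs, uint8_t *data, int len,
                           uint16_t *par)
{
        int corrected;

        /* No caller-supplied syndrome, no erasures, no correction buffer. */
        corrected = decode_rs8(rs, data, par, len, NULL, 0, NULL, 0, NULL);
        if (corrected < 0)
                return corrected;       /* -EBADMSG: uncorrectable block */

        return corrected;               /* number of corrected errors */
}
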
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 752fd95323f3..1a8050ade861 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -35,7 +35,7 @@
35#define OFFSET(val,align) ((unsigned long) \ 35#define OFFSET(val,align) ((unsigned long) \
36 ( (val) & ( (align) - 1))) 36 ( (val) & ( (align) - 1)))
37 37
38#define SG_ENT_VIRT_ADDRESS(sg) (page_address((sg)->page) + (sg)->offset) 38#define SG_ENT_VIRT_ADDRESS(sg) (sg_virt((sg)))
39#define SG_ENT_PHYS_ADDRESS(sg) virt_to_bus(SG_ENT_VIRT_ADDRESS(sg)) 39#define SG_ENT_PHYS_ADDRESS(sg) virt_to_bus(SG_ENT_VIRT_ADDRESS(sg))
40 40
41/* 41/*
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 1833879f8438..3a47871a29d9 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -187,7 +187,24 @@ int online_pages(unsigned long pfn, unsigned long nr_pages)
187 unsigned long onlined_pages = 0; 187 unsigned long onlined_pages = 0;
188 struct zone *zone; 188 struct zone *zone;
189 int need_zonelists_rebuild = 0; 189 int need_zonelists_rebuild = 0;
190 int nid;
191 int ret;
192 struct memory_notify arg;
193
194 arg.start_pfn = pfn;
195 arg.nr_pages = nr_pages;
196 arg.status_change_nid = -1;
197
198 nid = page_to_nid(pfn_to_page(pfn));
199 if (node_present_pages(nid) == 0)
200 arg.status_change_nid = nid;
190 201
202 ret = memory_notify(MEM_GOING_ONLINE, &arg);
203 ret = notifier_to_errno(ret);
204 if (ret) {
205 memory_notify(MEM_CANCEL_ONLINE, &arg);
206 return ret;
207 }
191 /* 208 /*
192 * This doesn't need a lock to do pfn_to_page(). 209 * This doesn't need a lock to do pfn_to_page().
193 * The section can't be removed here because of the 210 * The section can't be removed here because of the
@@ -222,6 +239,10 @@ int online_pages(unsigned long pfn, unsigned long nr_pages)
222 build_all_zonelists(); 239 build_all_zonelists();
223 vm_total_pages = nr_free_pagecache_pages(); 240 vm_total_pages = nr_free_pagecache_pages();
224 writeback_set_ratelimit(); 241 writeback_set_ratelimit();
242
243 if (onlined_pages)
244 memory_notify(MEM_ONLINE, &arg);
245
225 return 0; 246 return 0;
226} 247}
227#endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */ 248#endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */
@@ -467,8 +488,9 @@ int offline_pages(unsigned long start_pfn,
467{ 488{
468 unsigned long pfn, nr_pages, expire; 489 unsigned long pfn, nr_pages, expire;
469 long offlined_pages; 490 long offlined_pages;
470 int ret, drain, retry_max; 491 int ret, drain, retry_max, node;
471 struct zone *zone; 492 struct zone *zone;
493 struct memory_notify arg;
472 494
473 BUG_ON(start_pfn >= end_pfn); 495 BUG_ON(start_pfn >= end_pfn);
474 /* at least, alignment against pageblock is necessary */ 496 /* at least, alignment against pageblock is necessary */
@@ -480,11 +502,27 @@ int offline_pages(unsigned long start_pfn,
480 we assume this for now. .*/ 502 we assume this for now. .*/
481 if (!test_pages_in_a_zone(start_pfn, end_pfn)) 503 if (!test_pages_in_a_zone(start_pfn, end_pfn))
482 return -EINVAL; 504 return -EINVAL;
505
506 zone = page_zone(pfn_to_page(start_pfn));
507 node = zone_to_nid(zone);
508 nr_pages = end_pfn - start_pfn;
509
483 /* set above range as isolated */ 510 /* set above range as isolated */
484 ret = start_isolate_page_range(start_pfn, end_pfn); 511 ret = start_isolate_page_range(start_pfn, end_pfn);
485 if (ret) 512 if (ret)
486 return ret; 513 return ret;
487 nr_pages = end_pfn - start_pfn; 514
515 arg.start_pfn = start_pfn;
516 arg.nr_pages = nr_pages;
517 arg.status_change_nid = -1;
518 if (nr_pages >= node_present_pages(node))
519 arg.status_change_nid = node;
520
521 ret = memory_notify(MEM_GOING_OFFLINE, &arg);
522 ret = notifier_to_errno(ret);
523 if (ret)
524 goto failed_removal;
525
488 pfn = start_pfn; 526 pfn = start_pfn;
489 expire = jiffies + timeout; 527 expire = jiffies + timeout;
490 drain = 0; 528 drain = 0;
@@ -539,20 +577,24 @@ repeat:
 	/* reset pagetype flags */
 	start_isolate_page_range(start_pfn, end_pfn);
 	/* removal success */
-	zone = page_zone(pfn_to_page(start_pfn));
 	zone->present_pages -= offlined_pages;
 	zone->zone_pgdat->node_present_pages -= offlined_pages;
 	totalram_pages -= offlined_pages;
 	num_physpages -= offlined_pages;
+
 	vm_total_pages = nr_free_pagecache_pages();
 	writeback_set_ratelimit();
+
+	memory_notify(MEM_OFFLINE, &arg);
 	return 0;
 
 failed_removal:
 	printk(KERN_INFO "memory offlining %lx to %lx failed\n",
 		start_pfn, end_pfn);
+	memory_notify(MEM_CANCEL_OFFLINE, &arg);
 	/* pushback to free area */
 	undo_isolate_page_range(start_pfn, end_pfn);
+
 	return ret;
 }
 #else
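
The hunks above define the notifier contract used by the rest of this series: MEM_GOING_ONLINE and MEM_GOING_OFFLINE may veto the transition by returning a wrapped errno, MEM_CANCEL_ONLINE/MEM_CANCEL_OFFLINE announce a rollback, and status_change_nid is -1 unless the affected node is gaining its first or losing its last present memory. A minimal consumer of that chain could look like the following sketch; the callback name and messages are illustrative only (mm/slub.c further down is the real in-tree user):

	#include <linux/init.h>
	#include <linux/kernel.h>
	#include <linux/memory.h>
	#include <linux/notifier.h>

	static int example_memory_callback(struct notifier_block *self,
					   unsigned long action, void *arg)
	{
		struct memory_notify *marg = arg;

		switch (action) {
		case MEM_GOING_ONLINE:
			/* A veto would be: return notifier_from_errno(-EBUSY); */
			if (marg->status_change_nid >= 0)
				printk(KERN_INFO "node %d gets its first memory\n",
							marg->status_change_nid);
			break;
		case MEM_GOING_OFFLINE:
			if (marg->status_change_nid >= 0)
				printk(KERN_INFO "node %d loses its last memory\n",
							marg->status_change_nid);
			break;
		case MEM_ONLINE:
		case MEM_OFFLINE:
		case MEM_CANCEL_ONLINE:
		case MEM_CANCEL_OFFLINE:
			break;
		}
		return NOTIFY_OK;
	}

	static int __init example_init(void)
	{
		/* Priority 1 matches what mm/slub.c uses below. */
		hotplug_memory_notifier(example_memory_callback, 1);
		return 0;
	}
	/* registered from an initcall, e.g. module_init(example_init) */
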
diff --git a/mm/mmap.c b/mm/mmap.c
index 7a30c4988231..facc1a75bd4f 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1171,8 +1171,7 @@ munmap_back:
 	vm_flags = vma->vm_flags;
 
 	if (vma_wants_writenotify(vma))
-		vma->vm_page_prot =
-			protection_map[vm_flags & (VM_READ|VM_WRITE|VM_EXEC)];
+		vma->vm_page_prot = vm_get_page_prot(vm_flags & ~VM_SHARED);
 
 	if (!file || !vma_merge(mm, prev, addr, vma->vm_end,
 			vma->vm_flags, NULL, file, pgoff, vma_policy(vma))) {
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 55227845abbe..4de546899dc1 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -194,7 +194,7 @@ success:
 	vma->vm_flags = newflags;
 	vma->vm_page_prot = vm_get_page_prot(newflags);
 	if (vma_wants_writenotify(vma)) {
-		vma->vm_page_prot = vm_get_page_prot(newflags);
+		vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
 		dirty_accountable = 1;
 	}
 
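
For context (not part of the patch): vm_get_page_prot() indexes the architecture's protection_map[] with the low VM_READ|VM_WRITE|VM_EXEC|VM_SHARED bits, so masking out VM_SHARED selects the private-mapping entry, which is not writable. A shared writable mapping that wants write-notify is therefore installed read-only, and the first store faults into the dirty-accounting path. A condensed, illustrative view of what the mmap and mprotect hunks above both compute:

	#include <linux/mm.h>

	/* Illustrative only -- mirrors the mm/mmap.c and mm/mprotect.c hunks. */
	static pgprot_t writenotify_page_prot(struct vm_area_struct *vma)
	{
		if (vma_wants_writenotify(vma))
			/*
			 * Dropping VM_SHARED selects the private (copy-on-write)
			 * entry of protection_map[], which is write-protected, so
			 * the first store faults and dirty accounting can run.
			 */
			return vm_get_page_prot(vma->vm_flags & ~VM_SHARED);

		return vm_get_page_prot(vma->vm_flags);
	}
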
diff --git a/mm/shmem.c b/mm/shmem.c
index 289dbb0a6fd6..404e53bb2127 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -2020,33 +2020,25 @@ static int shmem_match(struct inode *ino, void *vfh)
 	return ino->i_ino == inum && fh[0] == ino->i_generation;
 }
 
-static struct dentry *shmem_get_dentry(struct super_block *sb, void *vfh)
+static struct dentry *shmem_fh_to_dentry(struct super_block *sb,
+		struct fid *fid, int fh_len, int fh_type)
 {
-	struct dentry *de = NULL;
 	struct inode *inode;
-	__u32 *fh = vfh;
-	__u64 inum = fh[2];
-	inum = (inum << 32) | fh[1];
+	struct dentry *dentry = NULL;
+	u64 inum = fid->raw[2];
+	inum = (inum << 32) | fid->raw[1];
+
+	if (fh_len < 3)
+		return NULL;
 
-	inode = ilookup5(sb, (unsigned long)(inum+fh[0]), shmem_match, vfh);
+	inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]),
+			shmem_match, fid->raw);
 	if (inode) {
-		de = d_find_alias(inode);
+		dentry = d_find_alias(inode);
 		iput(inode);
 	}
 
-	return de? de: ERR_PTR(-ESTALE);
-}
-
-static struct dentry *shmem_decode_fh(struct super_block *sb, __u32 *fh,
-			int len, int type,
-			int (*acceptable)(void *context, struct dentry *de),
-			void *context)
-{
-	if (len < 3)
-		return ERR_PTR(-ESTALE);
-
-	return sb->s_export_op->find_exported_dentry(sb, fh, NULL, acceptable,
-			context);
-}
+	return dentry;
 }
 
 static int shmem_encode_fh(struct dentry *dentry, __u32 *fh, int *len,
@@ -2079,11 +2071,10 @@ static int shmem_encode_fh(struct dentry *dentry, __u32 *fh, int *len,
 	return 1;
 }
 
-static struct export_operations shmem_export_ops = {
+static const struct export_operations shmem_export_ops = {
 	.get_parent	= shmem_get_parent,
-	.get_dentry	= shmem_get_dentry,
 	.encode_fh	= shmem_encode_fh,
-	.decode_fh	= shmem_decode_fh,
+	.fh_to_dentry	= shmem_fh_to_dentry,
 };
 
 static int shmem_parse_options(char *options, int *mode, uid_t *uid,
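
The shmem change follows the new exportfs interface described in Documentation/filesystems/Exporting: instead of get_dentry/decode_fh plus find_exported_dentry(), a filesystem now implements fh_to_dentry() (and optionally fh_to_parent()), receiving an already-split struct fid and returning the dentry itself. As a rough sketch only, for a hypothetical filesystem that stores the inode number and generation in its file handle (examplefs_iget() is an assumed helper, not part of this patch):

	#include <linux/dcache.h>
	#include <linux/err.h>
	#include <linux/exportfs.h>
	#include <linux/fs.h>

	/* Hypothetical helper: look the inode up by number, verify generation. */
	struct inode *examplefs_iget(struct super_block *sb, u32 ino, u32 gen);

	static struct dentry *examplefs_fh_to_dentry(struct super_block *sb,
			struct fid *fid, int fh_len, int fh_type)
	{
		struct inode *inode;
		struct dentry *dentry;

		if (fh_type != FILEID_INO32_GEN || fh_len < 2)
			return NULL;

		inode = examplefs_iget(sb, fid->i32.ino, fid->i32.gen);
		if (IS_ERR(inode))
			return NULL;

		/* d_alloc_anon() finds or creates a dentry for the inode. */
		dentry = d_alloc_anon(inode);
		if (!dentry) {
			iput(inode);
			return ERR_PTR(-ENOMEM);
		}
		return dentry;
	}

	static const struct export_operations examplefs_export_ops = {
		.fh_to_dentry	= examplefs_fh_to_dentry,
	};
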
diff --git a/mm/slub.c b/mm/slub.c
index e29a42988c78..aac1dd3c657d 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -20,6 +20,7 @@
 #include <linux/mempolicy.h>
 #include <linux/ctype.h>
 #include <linux/kallsyms.h>
+#include <linux/memory.h>
 
 /*
  * Lock order:
@@ -2694,6 +2695,121 @@ int kmem_cache_shrink(struct kmem_cache *s)
 }
 EXPORT_SYMBOL(kmem_cache_shrink);
 
+#if defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG)
+static int slab_mem_going_offline_callback(void *arg)
+{
+	struct kmem_cache *s;
+
+	down_read(&slub_lock);
+	list_for_each_entry(s, &slab_caches, list)
+		kmem_cache_shrink(s);
+	up_read(&slub_lock);
+
+	return 0;
+}
+
+static void slab_mem_offline_callback(void *arg)
+{
+	struct kmem_cache_node *n;
+	struct kmem_cache *s;
+	struct memory_notify *marg = arg;
+	int offline_node;
+
+	offline_node = marg->status_change_nid;
+
+	/*
+	 * If the node still has available memory, we still need the
+	 * kmem_cache_node for it and there is nothing to free here.
+	 */
+	if (offline_node < 0)
+		return;
+
+	down_read(&slub_lock);
+	list_for_each_entry(s, &slab_caches, list) {
+		n = get_node(s, offline_node);
+		if (n) {
+			/*
+			 * if n->nr_slabs > 0, slabs still exist on the node
+			 * that is going down. We were unable to free them,
+			 * and offline_pages() function shouldn't call this
+			 * callback. So, we must fail.
+			 */
+			BUG_ON(atomic_read(&n->nr_slabs));
+
+			s->node[offline_node] = NULL;
+			kmem_cache_free(kmalloc_caches, n);
+		}
+	}
+	up_read(&slub_lock);
+}
+
+static int slab_mem_going_online_callback(void *arg)
+{
+	struct kmem_cache_node *n;
+	struct kmem_cache *s;
+	struct memory_notify *marg = arg;
+	int nid = marg->status_change_nid;
+	int ret = 0;
+
+	/*
+	 * If the node's memory is already available, then kmem_cache_node is
+	 * already created. Nothing to do.
+	 */
+	if (nid < 0)
+		return 0;
+
+	/*
+	 * We are bringing a node online. No memory is available yet. We must
+	 * allocate a kmem_cache_node structure in order to bring the node
+	 * online.
+	 */
+	down_read(&slub_lock);
+	list_for_each_entry(s, &slab_caches, list) {
+		/*
+		 * XXX: kmem_cache_alloc_node will fallback to other nodes
+		 *      since memory is not yet available from the node that
+		 *      is brought up.
+		 */
+		n = kmem_cache_alloc(kmalloc_caches, GFP_KERNEL);
+		if (!n) {
+			ret = -ENOMEM;
+			goto out;
+		}
+		init_kmem_cache_node(n);
+		s->node[nid] = n;
+	}
+out:
+	up_read(&slub_lock);
+	return ret;
+}
+
+static int slab_memory_callback(struct notifier_block *self,
+				unsigned long action, void *arg)
+{
+	int ret = 0;
+
+	switch (action) {
+	case MEM_GOING_ONLINE:
+		ret = slab_mem_going_online_callback(arg);
+		break;
+	case MEM_GOING_OFFLINE:
+		ret = slab_mem_going_offline_callback(arg);
+		break;
+	case MEM_OFFLINE:
+	case MEM_CANCEL_ONLINE:
+		slab_mem_offline_callback(arg);
+		break;
+	case MEM_ONLINE:
+	case MEM_CANCEL_OFFLINE:
+		break;
+	}
+
+	ret = notifier_from_errno(ret);
+	return ret;
+}
+
+#endif /* CONFIG_MEMORY_HOTPLUG */
+
 /********************************************************************
  * Basic setup of slabs
  *******************************************************************/
@@ -2715,6 +2831,8 @@ void __init kmem_cache_init(void)
 		sizeof(struct kmem_cache_node), GFP_KERNEL);
 	kmalloc_caches[0].refcount = -1;
 	caches++;
+
+	hotplug_memory_notifier(slab_memory_callback, 1);
 #endif
 
 	/* Able to allocate the per node structures */
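
The slub callback and the memory_hotplug caller meet through the notifier return-value encoding: a failing MEM_GOING_ONLINE callback returns notifier_from_errno(-ENOMEM), and online_pages() turns that back into -ENOMEM with notifier_to_errno() before issuing MEM_CANCEL_ONLINE and failing the online request. The following standalone sketch re-implements the two helpers approximately as in include/linux/notifier.h, purely to show the round trip outside the kernel:

	#include <stdio.h>
	#include <errno.h>

	#define NOTIFY_OK		0x0001
	#define NOTIFY_STOP_MASK	0x8000

	/* Approximate copies of the kernel helpers, for illustration only. */
	static int notifier_from_errno(int err)
	{
		return err ? (NOTIFY_STOP_MASK | (NOTIFY_OK - err)) : NOTIFY_OK;
	}

	static int notifier_to_errno(int ret)
	{
		ret &= ~NOTIFY_STOP_MASK;
		return ret > NOTIFY_OK ? NOTIFY_OK - ret : 0;
	}

	int main(void)
	{
		/* What slab_memory_callback() would return on allocation failure. */
		int cb = notifier_from_errno(-ENOMEM);

		/* What online_pages() recovers before sending MEM_CANCEL_ONLINE. */
		printf("callback returns 0x%x, caller sees errno %d\n",
		       cb, notifier_to_errno(cb));
		return 0;
	}
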
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index 5fdfc9a67d39..9483320f6dad 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -78,11 +78,11 @@ void hci_acl_connect(struct hci_conn *conn)
78 78
79 cp.pkt_type = cpu_to_le16(hdev->pkt_type & ACL_PTYPE_MASK); 79 cp.pkt_type = cpu_to_le16(hdev->pkt_type & ACL_PTYPE_MASK);
80 if (lmp_rswitch_capable(hdev) && !(hdev->link_mode & HCI_LM_MASTER)) 80 if (lmp_rswitch_capable(hdev) && !(hdev->link_mode & HCI_LM_MASTER))
81 cp.role_switch = 0x01; 81 cp.role_switch = 0x01;
82 else 82 else
83 cp.role_switch = 0x00; 83 cp.role_switch = 0x00;
84 84
85 hci_send_cmd(hdev, OGF_LINK_CTL, OCF_CREATE_CONN, sizeof(cp), &cp); 85 hci_send_cmd(hdev, HCI_OP_CREATE_CONN, sizeof(cp), &cp);
86} 86}
87 87
88static void hci_acl_connect_cancel(struct hci_conn *conn) 88static void hci_acl_connect_cancel(struct hci_conn *conn)
@@ -95,8 +95,7 @@ static void hci_acl_connect_cancel(struct hci_conn *conn)
95 return; 95 return;
96 96
97 bacpy(&cp.bdaddr, &conn->dst); 97 bacpy(&cp.bdaddr, &conn->dst);
98 hci_send_cmd(conn->hdev, OGF_LINK_CTL, 98 hci_send_cmd(conn->hdev, HCI_OP_CREATE_CONN_CANCEL, sizeof(cp), &cp);
99 OCF_CREATE_CONN_CANCEL, sizeof(cp), &cp);
100} 99}
101 100
102void hci_acl_disconn(struct hci_conn *conn, __u8 reason) 101void hci_acl_disconn(struct hci_conn *conn, __u8 reason)
@@ -109,8 +108,7 @@ void hci_acl_disconn(struct hci_conn *conn, __u8 reason)
109 108
110 cp.handle = cpu_to_le16(conn->handle); 109 cp.handle = cpu_to_le16(conn->handle);
111 cp.reason = reason; 110 cp.reason = reason;
112 hci_send_cmd(conn->hdev, OGF_LINK_CTL, 111 hci_send_cmd(conn->hdev, HCI_OP_DISCONNECT, sizeof(cp), &cp);
113 OCF_DISCONNECT, sizeof(cp), &cp);
114} 112}
115 113
116void hci_add_sco(struct hci_conn *conn, __u16 handle) 114void hci_add_sco(struct hci_conn *conn, __u16 handle)
@@ -126,7 +124,29 @@ void hci_add_sco(struct hci_conn *conn, __u16 handle)
126 cp.handle = cpu_to_le16(handle); 124 cp.handle = cpu_to_le16(handle);
127 cp.pkt_type = cpu_to_le16(hdev->pkt_type & SCO_PTYPE_MASK); 125 cp.pkt_type = cpu_to_le16(hdev->pkt_type & SCO_PTYPE_MASK);
128 126
129 hci_send_cmd(hdev, OGF_LINK_CTL, OCF_ADD_SCO, sizeof(cp), &cp); 127 hci_send_cmd(hdev, HCI_OP_ADD_SCO, sizeof(cp), &cp);
128}
129
130void hci_setup_sync(struct hci_conn *conn, __u16 handle)
131{
132 struct hci_dev *hdev = conn->hdev;
133 struct hci_cp_setup_sync_conn cp;
134
135 BT_DBG("%p", conn);
136
137 conn->state = BT_CONNECT;
138 conn->out = 1;
139
140 cp.handle = cpu_to_le16(handle);
141 cp.pkt_type = cpu_to_le16(hdev->esco_type);
142
143 cp.tx_bandwidth = cpu_to_le32(0x00001f40);
144 cp.rx_bandwidth = cpu_to_le32(0x00001f40);
145 cp.max_latency = cpu_to_le16(0xffff);
146 cp.voice_setting = cpu_to_le16(hdev->voice_setting);
147 cp.retrans_effort = 0xff;
148
149 hci_send_cmd(hdev, HCI_OP_SETUP_SYNC_CONN, sizeof(cp), &cp);
130} 150}
131 151
132static void hci_conn_timeout(unsigned long arg) 152static void hci_conn_timeout(unsigned long arg)
@@ -143,7 +163,10 @@ static void hci_conn_timeout(unsigned long arg)
143 163
144 switch (conn->state) { 164 switch (conn->state) {
145 case BT_CONNECT: 165 case BT_CONNECT:
146 hci_acl_connect_cancel(conn); 166 if (conn->type == ACL_LINK)
167 hci_acl_connect_cancel(conn);
168 else
169 hci_acl_disconn(conn, 0x13);
147 break; 170 break;
148 case BT_CONNECTED: 171 case BT_CONNECTED:
149 hci_acl_disconn(conn, 0x13); 172 hci_acl_disconn(conn, 0x13);
@@ -330,8 +353,12 @@ struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst)
330 hci_conn_hold(sco); 353 hci_conn_hold(sco);
331 354
332 if (acl->state == BT_CONNECTED && 355 if (acl->state == BT_CONNECTED &&
333 (sco->state == BT_OPEN || sco->state == BT_CLOSED)) 356 (sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
334 hci_add_sco(sco, acl->handle); 357 if (lmp_esco_capable(hdev))
358 hci_setup_sync(sco, acl->handle);
359 else
360 hci_add_sco(sco, acl->handle);
361 }
335 362
336 return sco; 363 return sco;
337} 364}
@@ -348,7 +375,7 @@ int hci_conn_auth(struct hci_conn *conn)
348 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) { 375 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) {
349 struct hci_cp_auth_requested cp; 376 struct hci_cp_auth_requested cp;
350 cp.handle = cpu_to_le16(conn->handle); 377 cp.handle = cpu_to_le16(conn->handle);
351 hci_send_cmd(conn->hdev, OGF_LINK_CTL, OCF_AUTH_REQUESTED, sizeof(cp), &cp); 378 hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
352 } 379 }
353 return 0; 380 return 0;
354} 381}
@@ -369,7 +396,7 @@ int hci_conn_encrypt(struct hci_conn *conn)
369 struct hci_cp_set_conn_encrypt cp; 396 struct hci_cp_set_conn_encrypt cp;
370 cp.handle = cpu_to_le16(conn->handle); 397 cp.handle = cpu_to_le16(conn->handle);
371 cp.encrypt = 1; 398 cp.encrypt = 1;
372 hci_send_cmd(conn->hdev, OGF_LINK_CTL, OCF_SET_CONN_ENCRYPT, sizeof(cp), &cp); 399 hci_send_cmd(conn->hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp), &cp);
373 } 400 }
374 return 0; 401 return 0;
375} 402}
@@ -383,7 +410,7 @@ int hci_conn_change_link_key(struct hci_conn *conn)
383 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) { 410 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) {
384 struct hci_cp_change_conn_link_key cp; 411 struct hci_cp_change_conn_link_key cp;
385 cp.handle = cpu_to_le16(conn->handle); 412 cp.handle = cpu_to_le16(conn->handle);
386 hci_send_cmd(conn->hdev, OGF_LINK_CTL, OCF_CHANGE_CONN_LINK_KEY, sizeof(cp), &cp); 413 hci_send_cmd(conn->hdev, HCI_OP_CHANGE_CONN_LINK_KEY, sizeof(cp), &cp);
387 } 414 }
388 return 0; 415 return 0;
389} 416}
@@ -401,7 +428,7 @@ int hci_conn_switch_role(struct hci_conn *conn, uint8_t role)
401 struct hci_cp_switch_role cp; 428 struct hci_cp_switch_role cp;
402 bacpy(&cp.bdaddr, &conn->dst); 429 bacpy(&cp.bdaddr, &conn->dst);
403 cp.role = role; 430 cp.role = role;
404 hci_send_cmd(conn->hdev, OGF_LINK_POLICY, OCF_SWITCH_ROLE, sizeof(cp), &cp); 431 hci_send_cmd(conn->hdev, HCI_OP_SWITCH_ROLE, sizeof(cp), &cp);
405 } 432 }
406 return 0; 433 return 0;
407} 434}
@@ -423,8 +450,7 @@ void hci_conn_enter_active_mode(struct hci_conn *conn)
423 if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) { 450 if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) {
424 struct hci_cp_exit_sniff_mode cp; 451 struct hci_cp_exit_sniff_mode cp;
425 cp.handle = cpu_to_le16(conn->handle); 452 cp.handle = cpu_to_le16(conn->handle);
426 hci_send_cmd(hdev, OGF_LINK_POLICY, 453 hci_send_cmd(hdev, HCI_OP_EXIT_SNIFF_MODE, sizeof(cp), &cp);
427 OCF_EXIT_SNIFF_MODE, sizeof(cp), &cp);
428 } 454 }
429 455
430timer: 456timer:
@@ -455,8 +481,7 @@ void hci_conn_enter_sniff_mode(struct hci_conn *conn)
455 cp.max_latency = cpu_to_le16(0); 481 cp.max_latency = cpu_to_le16(0);
456 cp.min_remote_timeout = cpu_to_le16(0); 482 cp.min_remote_timeout = cpu_to_le16(0);
457 cp.min_local_timeout = cpu_to_le16(0); 483 cp.min_local_timeout = cpu_to_le16(0);
458 hci_send_cmd(hdev, OGF_LINK_POLICY, 484 hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp);
459 OCF_SNIFF_SUBRATE, sizeof(cp), &cp);
460 } 485 }
461 486
462 if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) { 487 if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) {
@@ -466,8 +491,7 @@ void hci_conn_enter_sniff_mode(struct hci_conn *conn)
466 cp.min_interval = cpu_to_le16(hdev->sniff_min_interval); 491 cp.min_interval = cpu_to_le16(hdev->sniff_min_interval);
467 cp.attempt = cpu_to_le16(4); 492 cp.attempt = cpu_to_le16(4);
468 cp.timeout = cpu_to_le16(1); 493 cp.timeout = cpu_to_le16(1);
469 hci_send_cmd(hdev, OGF_LINK_POLICY, 494 hci_send_cmd(hdev, HCI_OP_SNIFF_MODE, sizeof(cp), &cp);
470 OCF_SNIFF_MODE, sizeof(cp), &cp);
471 } 495 }
472} 496}
473 497
@@ -493,6 +517,22 @@ void hci_conn_hash_flush(struct hci_dev *hdev)
493 } 517 }
494} 518}
495 519
520/* Check pending connect attempts */
521void hci_conn_check_pending(struct hci_dev *hdev)
522{
523 struct hci_conn *conn;
524
525 BT_DBG("hdev %s", hdev->name);
526
527 hci_dev_lock(hdev);
528
529 conn = hci_conn_hash_lookup_state(hdev, ACL_LINK, BT_CONNECT2);
530 if (conn)
531 hci_acl_connect(conn);
532
533 hci_dev_unlock(hdev);
534}
535
496int hci_get_conn_list(void __user *arg) 536int hci_get_conn_list(void __user *arg)
497{ 537{
498 struct hci_conn_list_req req, *cl; 538 struct hci_conn_list_req req, *cl;
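
The hci_conn.c changes above (and the hci_core.c/hci_event.c changes that follow) drop the separate OGF/OCF arguments in favour of a single pre-packed opcode. In the HCI command header the opcode is a 16-bit field holding a 6-bit Opcode Group Field above a 10-bit Opcode Command Field, which is exactly what the existing hci_opcode_pack() macro encodes; the HCI_OP_* constants are those packed values. A standalone sketch of the packing (the macro bodies mirror include/net/bluetooth/hci.h; the constant values shown follow the HCI specification):

	#include <stdio.h>

	#define OGF_LINK_CTL		0x01
	#define OCF_CREATE_CONN		0x0005

	/* Same shape as the kernel's hci_opcode_pack()/_ogf()/_ocf() macros. */
	#define hci_opcode_pack(ogf, ocf)	((unsigned short)(((ocf) & 0x03ff) | ((ogf) << 10)))
	#define hci_opcode_ogf(op)		((op) >> 10)
	#define hci_opcode_ocf(op)		((op) & 0x03ff)

	#define HCI_OP_CREATE_CONN	hci_opcode_pack(OGF_LINK_CTL, OCF_CREATE_CONN)

	int main(void)
	{
		unsigned short op = HCI_OP_CREATE_CONN;

		/* Prints 0x0405: ogf 0x01 in the top 6 bits, ocf 0x0005 below. */
		printf("opcode 0x%04x (ogf 0x%02x, ocf 0x%04x)\n",
		       op, hci_opcode_ogf(op), hci_opcode_ocf(op));
		return 0;
	}
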
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 18e3afc964df..372b0d3b75a8 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -176,7 +176,7 @@ static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
176 BT_DBG("%s %ld", hdev->name, opt); 176 BT_DBG("%s %ld", hdev->name, opt);
177 177
178 /* Reset device */ 178 /* Reset device */
179 hci_send_cmd(hdev, OGF_HOST_CTL, OCF_RESET, 0, NULL); 179 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
180} 180}
181 181
182static void hci_init_req(struct hci_dev *hdev, unsigned long opt) 182static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
@@ -202,16 +202,16 @@ static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
202 202
203 /* Reset */ 203 /* Reset */
204 if (test_bit(HCI_QUIRK_RESET_ON_INIT, &hdev->quirks)) 204 if (test_bit(HCI_QUIRK_RESET_ON_INIT, &hdev->quirks))
205 hci_send_cmd(hdev, OGF_HOST_CTL, OCF_RESET, 0, NULL); 205 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
206 206
207 /* Read Local Supported Features */ 207 /* Read Local Supported Features */
208 hci_send_cmd(hdev, OGF_INFO_PARAM, OCF_READ_LOCAL_FEATURES, 0, NULL); 208 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
209 209
210 /* Read Local Version */ 210 /* Read Local Version */
211 hci_send_cmd(hdev, OGF_INFO_PARAM, OCF_READ_LOCAL_VERSION, 0, NULL); 211 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
212 212
213 /* Read Buffer Size (ACL mtu, max pkt, etc.) */ 213 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
214 hci_send_cmd(hdev, OGF_INFO_PARAM, OCF_READ_BUFFER_SIZE, 0, NULL); 214 hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
215 215
216#if 0 216#if 0
217 /* Host buffer size */ 217 /* Host buffer size */
@@ -221,29 +221,35 @@ static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
221 cp.sco_mtu = HCI_MAX_SCO_SIZE; 221 cp.sco_mtu = HCI_MAX_SCO_SIZE;
222 cp.acl_max_pkt = cpu_to_le16(0xffff); 222 cp.acl_max_pkt = cpu_to_le16(0xffff);
223 cp.sco_max_pkt = cpu_to_le16(0xffff); 223 cp.sco_max_pkt = cpu_to_le16(0xffff);
224 hci_send_cmd(hdev, OGF_HOST_CTL, OCF_HOST_BUFFER_SIZE, sizeof(cp), &cp); 224 hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp);
225 } 225 }
226#endif 226#endif
227 227
228 /* Read BD Address */ 228 /* Read BD Address */
229 hci_send_cmd(hdev, OGF_INFO_PARAM, OCF_READ_BD_ADDR, 0, NULL); 229 hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
230
231 /* Read Class of Device */
232 hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
233
234 /* Read Local Name */
235 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);
230 236
231 /* Read Voice Setting */ 237 /* Read Voice Setting */
232 hci_send_cmd(hdev, OGF_HOST_CTL, OCF_READ_VOICE_SETTING, 0, NULL); 238 hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);
233 239
234 /* Optional initialization */ 240 /* Optional initialization */
235 241
236 /* Clear Event Filters */ 242 /* Clear Event Filters */
237 flt_type = HCI_FLT_CLEAR_ALL; 243 flt_type = HCI_FLT_CLEAR_ALL;
238 hci_send_cmd(hdev, OGF_HOST_CTL, OCF_SET_EVENT_FLT, 1, &flt_type); 244 hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
239 245
240 /* Page timeout ~20 secs */ 246 /* Page timeout ~20 secs */
241 param = cpu_to_le16(0x8000); 247 param = cpu_to_le16(0x8000);
242 hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_PG_TIMEOUT, 2, &param); 248 hci_send_cmd(hdev, HCI_OP_WRITE_PG_TIMEOUT, 2, &param);
243 249
244 /* Connection accept timeout ~20 secs */ 250 /* Connection accept timeout ~20 secs */
245 param = cpu_to_le16(0x7d00); 251 param = cpu_to_le16(0x7d00);
246 hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_CA_TIMEOUT, 2, &param); 252 hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
247} 253}
248 254
249static void hci_scan_req(struct hci_dev *hdev, unsigned long opt) 255static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
@@ -253,7 +259,7 @@ static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
253 BT_DBG("%s %x", hdev->name, scan); 259 BT_DBG("%s %x", hdev->name, scan);
254 260
255 /* Inquiry and Page scans */ 261 /* Inquiry and Page scans */
256 hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_SCAN_ENABLE, 1, &scan); 262 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
257} 263}
258 264
259static void hci_auth_req(struct hci_dev *hdev, unsigned long opt) 265static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
@@ -263,7 +269,7 @@ static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
263 BT_DBG("%s %x", hdev->name, auth); 269 BT_DBG("%s %x", hdev->name, auth);
264 270
265 /* Authentication */ 271 /* Authentication */
266 hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_AUTH_ENABLE, 1, &auth); 272 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
267} 273}
268 274
269static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt) 275static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
@@ -273,7 +279,7 @@ static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
273 BT_DBG("%s %x", hdev->name, encrypt); 279 BT_DBG("%s %x", hdev->name, encrypt);
274 280
275 /* Authentication */ 281 /* Authentication */
276 hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_ENCRYPT_MODE, 1, &encrypt); 282 hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
277} 283}
278 284
279/* Get HCI device by index. 285/* Get HCI device by index.
@@ -384,7 +390,7 @@ static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
384 memcpy(&cp.lap, &ir->lap, 3); 390 memcpy(&cp.lap, &ir->lap, 3);
385 cp.length = ir->length; 391 cp.length = ir->length;
386 cp.num_rsp = ir->num_rsp; 392 cp.num_rsp = ir->num_rsp;
387 hci_send_cmd(hdev, OGF_LINK_CTL, OCF_INQUIRY, sizeof(cp), &cp); 393 hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
388} 394}
389 395
390int hci_inquiry(void __user *arg) 396int hci_inquiry(void __user *arg)
@@ -1111,13 +1117,13 @@ static int hci_send_frame(struct sk_buff *skb)
1111} 1117}
1112 1118
1113/* Send HCI command */ 1119/* Send HCI command */
1114int hci_send_cmd(struct hci_dev *hdev, __u16 ogf, __u16 ocf, __u32 plen, void *param) 1120int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
1115{ 1121{
1116 int len = HCI_COMMAND_HDR_SIZE + plen; 1122 int len = HCI_COMMAND_HDR_SIZE + plen;
1117 struct hci_command_hdr *hdr; 1123 struct hci_command_hdr *hdr;
1118 struct sk_buff *skb; 1124 struct sk_buff *skb;
1119 1125
1120 BT_DBG("%s ogf 0x%x ocf 0x%x plen %d", hdev->name, ogf, ocf, plen); 1126 BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);
1121 1127
1122 skb = bt_skb_alloc(len, GFP_ATOMIC); 1128 skb = bt_skb_alloc(len, GFP_ATOMIC);
1123 if (!skb) { 1129 if (!skb) {
@@ -1126,7 +1132,7 @@ int hci_send_cmd(struct hci_dev *hdev, __u16 ogf, __u16 ocf, __u32 plen, void *p
1126 } 1132 }
1127 1133
1128 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE); 1134 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
1129 hdr->opcode = cpu_to_le16(hci_opcode_pack(ogf, ocf)); 1135 hdr->opcode = cpu_to_le16(opcode);
1130 hdr->plen = plen; 1136 hdr->plen = plen;
1131 1137
1132 if (plen) 1138 if (plen)
@@ -1143,7 +1149,7 @@ int hci_send_cmd(struct hci_dev *hdev, __u16 ogf, __u16 ocf, __u32 plen, void *p
1143} 1149}
1144 1150
1145/* Get data from the previously sent command */ 1151/* Get data from the previously sent command */
1146void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 ogf, __u16 ocf) 1152void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
1147{ 1153{
1148 struct hci_command_hdr *hdr; 1154 struct hci_command_hdr *hdr;
1149 1155
@@ -1152,10 +1158,10 @@ void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 ogf, __u16 ocf)
1152 1158
1153 hdr = (void *) hdev->sent_cmd->data; 1159 hdr = (void *) hdev->sent_cmd->data;
1154 1160
1155 if (hdr->opcode != cpu_to_le16(hci_opcode_pack(ogf, ocf))) 1161 if (hdr->opcode != cpu_to_le16(opcode))
1156 return NULL; 1162 return NULL;
1157 1163
1158 BT_DBG("%s ogf 0x%x ocf 0x%x", hdev->name, ogf, ocf); 1164 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
1159 1165
1160 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE; 1166 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
1161} 1167}
@@ -1355,6 +1361,26 @@ static inline void hci_sched_sco(struct hci_dev *hdev)
1355 } 1361 }
1356} 1362}
1357 1363
1364static inline void hci_sched_esco(struct hci_dev *hdev)
1365{
1366 struct hci_conn *conn;
1367 struct sk_buff *skb;
1368 int quote;
1369
1370 BT_DBG("%s", hdev->name);
1371
1372 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
1373 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
1374 BT_DBG("skb %p len %d", skb, skb->len);
1375 hci_send_frame(skb);
1376
1377 conn->sent++;
1378 if (conn->sent == ~0)
1379 conn->sent = 0;
1380 }
1381 }
1382}
1383
1358static void hci_tx_task(unsigned long arg) 1384static void hci_tx_task(unsigned long arg)
1359{ 1385{
1360 struct hci_dev *hdev = (struct hci_dev *) arg; 1386 struct hci_dev *hdev = (struct hci_dev *) arg;
@@ -1370,6 +1396,8 @@ static void hci_tx_task(unsigned long arg)
1370 1396
1371 hci_sched_sco(hdev); 1397 hci_sched_sco(hdev);
1372 1398
1399 hci_sched_esco(hdev);
1400
1373 /* Send next queued raw (unknown type) packet */ 1401 /* Send next queued raw (unknown type) packet */
1374 while ((skb = skb_dequeue(&hdev->raw_q))) 1402 while ((skb = skb_dequeue(&hdev->raw_q)))
1375 hci_send_frame(skb); 1403 hci_send_frame(skb);
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 4baea1e38652..46df2e403df8 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -52,234 +52,273 @@
52 52
53/* Handle HCI Event packets */ 53/* Handle HCI Event packets */
54 54
55/* Command Complete OGF LINK_CTL */ 55static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
56static void hci_cc_link_ctl(struct hci_dev *hdev, __u16 ocf, struct sk_buff *skb)
57{ 56{
58 __u8 status; 57 __u8 status = *((__u8 *) skb->data);
59 struct hci_conn *pend;
60 58
61 BT_DBG("%s ocf 0x%x", hdev->name, ocf); 59 BT_DBG("%s status 0x%x", hdev->name, status);
62 60
63 switch (ocf) { 61 if (status)
64 case OCF_INQUIRY_CANCEL: 62 return;
65 case OCF_EXIT_PERIODIC_INQ:
66 status = *((__u8 *) skb->data);
67 63
68 if (status) { 64 clear_bit(HCI_INQUIRY, &hdev->flags);
69 BT_DBG("%s Inquiry cancel error: status 0x%x", hdev->name, status);
70 } else {
71 clear_bit(HCI_INQUIRY, &hdev->flags);
72 hci_req_complete(hdev, status);
73 }
74 65
75 hci_dev_lock(hdev); 66 hci_req_complete(hdev, status);
76 67
77 pend = hci_conn_hash_lookup_state(hdev, ACL_LINK, BT_CONNECT2); 68 hci_conn_check_pending(hdev);
78 if (pend) 69}
79 hci_acl_connect(pend);
80 70
81 hci_dev_unlock(hdev); 71static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
72{
73 __u8 status = *((__u8 *) skb->data);
82 74
83 break; 75 BT_DBG("%s status 0x%x", hdev->name, status);
84 76
85 default: 77 if (status)
86 BT_DBG("%s Command complete: ogf LINK_CTL ocf %x", hdev->name, ocf); 78 return;
87 break; 79
80 clear_bit(HCI_INQUIRY, &hdev->flags);
81
82 hci_conn_check_pending(hdev);
83}
84
85static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev, struct sk_buff *skb)
86{
87 BT_DBG("%s", hdev->name);
88}
89
90static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
91{
92 struct hci_rp_role_discovery *rp = (void *) skb->data;
93 struct hci_conn *conn;
94
95 BT_DBG("%s status 0x%x", hdev->name, rp->status);
96
97 if (rp->status)
98 return;
99
100 hci_dev_lock(hdev);
101
102 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
103 if (conn) {
104 if (rp->role)
105 conn->link_mode &= ~HCI_LM_MASTER;
106 else
107 conn->link_mode |= HCI_LM_MASTER;
88 } 108 }
109
110 hci_dev_unlock(hdev);
89} 111}
90 112
91/* Command Complete OGF LINK_POLICY */ 113static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
92static void hci_cc_link_policy(struct hci_dev *hdev, __u16 ocf, struct sk_buff *skb)
93{ 114{
115 struct hci_rp_write_link_policy *rp = (void *) skb->data;
94 struct hci_conn *conn; 116 struct hci_conn *conn;
95 struct hci_rp_role_discovery *rd;
96 struct hci_rp_write_link_policy *lp;
97 void *sent; 117 void *sent;
98 118
99 BT_DBG("%s ocf 0x%x", hdev->name, ocf); 119 BT_DBG("%s status 0x%x", hdev->name, rp->status);
100 120
101 switch (ocf) { 121 if (rp->status)
102 case OCF_ROLE_DISCOVERY: 122 return;
103 rd = (void *) skb->data;
104 123
105 if (rd->status) 124 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
106 break; 125 if (!sent)
126 return;
107 127
108 hci_dev_lock(hdev); 128 hci_dev_lock(hdev);
109 129
110 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rd->handle)); 130 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
111 if (conn) { 131 if (conn) {
112 if (rd->role) 132 __le16 policy = get_unaligned((__le16 *) (sent + 2));
113 conn->link_mode &= ~HCI_LM_MASTER; 133 conn->link_policy = __le16_to_cpu(policy);
114 else 134 }
115 conn->link_mode |= HCI_LM_MASTER;
116 }
117 135
118 hci_dev_unlock(hdev); 136 hci_dev_unlock(hdev);
119 break; 137}
120 138
121 case OCF_WRITE_LINK_POLICY: 139static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
122 sent = hci_sent_cmd_data(hdev, OGF_LINK_POLICY, OCF_WRITE_LINK_POLICY); 140{
123 if (!sent) 141 __u8 status = *((__u8 *) skb->data);
124 break;
125 142
126 lp = (struct hci_rp_write_link_policy *) skb->data; 143 BT_DBG("%s status 0x%x", hdev->name, status);
127 144
128 if (lp->status) 145 hci_req_complete(hdev, status);
129 break; 146}
130 147
131 hci_dev_lock(hdev); 148static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
149{
150 __u8 status = *((__u8 *) skb->data);
151 void *sent;
132 152
133 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(lp->handle)); 153 BT_DBG("%s status 0x%x", hdev->name, status);
134 if (conn) {
135 __le16 policy = get_unaligned((__le16 *) (sent + 2));
136 conn->link_policy = __le16_to_cpu(policy);
137 }
138 154
139 hci_dev_unlock(hdev); 155 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
140 break; 156 if (!sent)
157 return;
141 158
142 default: 159 if (!status)
143 BT_DBG("%s: Command complete: ogf LINK_POLICY ocf %x", 160 memcpy(hdev->dev_name, sent, 248);
144 hdev->name, ocf); 161}
145 break; 162
163static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
164{
165 struct hci_rp_read_local_name *rp = (void *) skb->data;
166
167 BT_DBG("%s status 0x%x", hdev->name, rp->status);
168
169 if (rp->status)
170 return;
171
172 memcpy(hdev->dev_name, rp->name, 248);
173}
174
175static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
176{
177 __u8 status = *((__u8 *) skb->data);
178 void *sent;
179
180 BT_DBG("%s status 0x%x", hdev->name, status);
181
182 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
183 if (!sent)
184 return;
185
186 if (!status) {
187 __u8 param = *((__u8 *) sent);
188
189 if (param == AUTH_ENABLED)
190 set_bit(HCI_AUTH, &hdev->flags);
191 else
192 clear_bit(HCI_AUTH, &hdev->flags);
146 } 193 }
194
195 hci_req_complete(hdev, status);
147} 196}
148 197
149/* Command Complete OGF HOST_CTL */ 198static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
150static void hci_cc_host_ctl(struct hci_dev *hdev, __u16 ocf, struct sk_buff *skb)
151{ 199{
152 __u8 status, param; 200 __u8 status = *((__u8 *) skb->data);
153 __u16 setting;
154 struct hci_rp_read_voice_setting *vs;
155 void *sent; 201 void *sent;
156 202
157 BT_DBG("%s ocf 0x%x", hdev->name, ocf); 203 BT_DBG("%s status 0x%x", hdev->name, status);
158 204
159 switch (ocf) { 205 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
160 case OCF_RESET: 206 if (!sent)
161 status = *((__u8 *) skb->data); 207 return;
162 hci_req_complete(hdev, status);
163 break;
164 208
165 case OCF_SET_EVENT_FLT: 209 if (!status) {
166 status = *((__u8 *) skb->data); 210 __u8 param = *((__u8 *) sent);
167 if (status) {
168 BT_DBG("%s SET_EVENT_FLT failed %d", hdev->name, status);
169 } else {
170 BT_DBG("%s SET_EVENT_FLT succeseful", hdev->name);
171 }
172 break;
173 211
174 case OCF_WRITE_AUTH_ENABLE: 212 if (param)
175 sent = hci_sent_cmd_data(hdev, OGF_HOST_CTL, OCF_WRITE_AUTH_ENABLE); 213 set_bit(HCI_ENCRYPT, &hdev->flags);
176 if (!sent) 214 else
177 break; 215 clear_bit(HCI_ENCRYPT, &hdev->flags);
216 }
178 217
179 status = *((__u8 *) skb->data); 218 hci_req_complete(hdev, status);
180 param = *((__u8 *) sent); 219}
181 220
182 if (!status) { 221static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
183 if (param == AUTH_ENABLED) 222{
184 set_bit(HCI_AUTH, &hdev->flags); 223 __u8 status = *((__u8 *) skb->data);
185 else 224 void *sent;
186 clear_bit(HCI_AUTH, &hdev->flags);
187 }
188 hci_req_complete(hdev, status);
189 break;
190 225
191 case OCF_WRITE_ENCRYPT_MODE: 226 BT_DBG("%s status 0x%x", hdev->name, status);
192 sent = hci_sent_cmd_data(hdev, OGF_HOST_CTL, OCF_WRITE_ENCRYPT_MODE);
193 if (!sent)
194 break;
195 227
196 status = *((__u8 *) skb->data); 228 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
197 param = *((__u8 *) sent); 229 if (!sent)
230 return;
198 231
199 if (!status) { 232 if (!status) {
200 if (param) 233 __u8 param = *((__u8 *) sent);
201 set_bit(HCI_ENCRYPT, &hdev->flags);
202 else
203 clear_bit(HCI_ENCRYPT, &hdev->flags);
204 }
205 hci_req_complete(hdev, status);
206 break;
207 234
208 case OCF_WRITE_CA_TIMEOUT: 235 clear_bit(HCI_PSCAN, &hdev->flags);
209 status = *((__u8 *) skb->data); 236 clear_bit(HCI_ISCAN, &hdev->flags);
210 if (status) {
211 BT_DBG("%s OCF_WRITE_CA_TIMEOUT failed %d", hdev->name, status);
212 } else {
213 BT_DBG("%s OCF_WRITE_CA_TIMEOUT succeseful", hdev->name);
214 }
215 break;
216 237
217 case OCF_WRITE_PG_TIMEOUT: 238 if (param & SCAN_INQUIRY)
218 status = *((__u8 *) skb->data); 239 set_bit(HCI_ISCAN, &hdev->flags);
219 if (status) {
220 BT_DBG("%s OCF_WRITE_PG_TIMEOUT failed %d", hdev->name, status);
221 } else {
222 BT_DBG("%s: OCF_WRITE_PG_TIMEOUT succeseful", hdev->name);
223 }
224 break;
225 240
226 case OCF_WRITE_SCAN_ENABLE: 241 if (param & SCAN_PAGE)
227 sent = hci_sent_cmd_data(hdev, OGF_HOST_CTL, OCF_WRITE_SCAN_ENABLE); 242 set_bit(HCI_PSCAN, &hdev->flags);
228 if (!sent) 243 }
229 break;
230 244
231 status = *((__u8 *) skb->data); 245 hci_req_complete(hdev, status);
232 param = *((__u8 *) sent); 246}
233 247
234 BT_DBG("param 0x%x", param); 248static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
249{
250 struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
235 251
236 if (!status) { 252 BT_DBG("%s status 0x%x", hdev->name, rp->status);
237 clear_bit(HCI_PSCAN, &hdev->flags);
238 clear_bit(HCI_ISCAN, &hdev->flags);
239 if (param & SCAN_INQUIRY)
240 set_bit(HCI_ISCAN, &hdev->flags);
241 253
242 if (param & SCAN_PAGE) 254 if (rp->status)
243 set_bit(HCI_PSCAN, &hdev->flags); 255 return;
244 }
245 hci_req_complete(hdev, status);
246 break;
247 256
248 case OCF_READ_VOICE_SETTING: 257 memcpy(hdev->dev_class, rp->dev_class, 3);
249 vs = (struct hci_rp_read_voice_setting *) skb->data;
250 258
251 if (vs->status) { 259 BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
252 BT_DBG("%s READ_VOICE_SETTING failed %d", hdev->name, vs->status); 260 hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
253 break; 261}
254 }
255 262
256 setting = __le16_to_cpu(vs->voice_setting); 263static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
264{
265 __u8 status = *((__u8 *) skb->data);
266 void *sent;
257 267
258 if (hdev->voice_setting != setting ) { 268 BT_DBG("%s status 0x%x", hdev->name, status);
259 hdev->voice_setting = setting;
260 269
261 BT_DBG("%s: voice setting 0x%04x", hdev->name, setting); 270 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
271 if (!sent)
272 return;
262 273
263 if (hdev->notify) { 274 if (!status)
264 tasklet_disable(&hdev->tx_task); 275 memcpy(hdev->dev_class, sent, 3);
265 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING); 276}
266 tasklet_enable(&hdev->tx_task);
267 }
268 }
269 break;
270 277
271 case OCF_WRITE_VOICE_SETTING: 278static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
272 sent = hci_sent_cmd_data(hdev, OGF_HOST_CTL, OCF_WRITE_VOICE_SETTING); 279{
273 if (!sent) 280 struct hci_rp_read_voice_setting *rp = (void *) skb->data;
274 break; 281 __u16 setting;
282
283 BT_DBG("%s status 0x%x", hdev->name, rp->status);
284
285 if (rp->status)
286 return;
287
288 setting = __le16_to_cpu(rp->voice_setting);
289
290 if (hdev->voice_setting == setting )
291 return;
292
293 hdev->voice_setting = setting;
275 294
276 status = *((__u8 *) skb->data); 295 BT_DBG("%s voice setting 0x%04x", hdev->name, setting);
277 setting = __le16_to_cpu(get_unaligned((__le16 *) sent));
278 296
279 if (!status && hdev->voice_setting != setting) { 297 if (hdev->notify) {
298 tasklet_disable(&hdev->tx_task);
299 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
300 tasklet_enable(&hdev->tx_task);
301 }
302}
303
304static void hci_cc_write_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
305{
306 __u8 status = *((__u8 *) skb->data);
307 void *sent;
308
309 BT_DBG("%s status 0x%x", hdev->name, status);
310
311 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
312 if (!sent)
313 return;
314
315 if (!status) {
316 __u16 setting = __le16_to_cpu(get_unaligned((__le16 *) sent));
317
318 if (hdev->voice_setting != setting) {
280 hdev->voice_setting = setting; 319 hdev->voice_setting = setting;
281 320
282 BT_DBG("%s: voice setting 0x%04x", hdev->name, setting); 321 BT_DBG("%s voice setting 0x%04x", hdev->name, setting);
283 322
284 if (hdev->notify) { 323 if (hdev->notify) {
285 tasklet_disable(&hdev->tx_task); 324 tasklet_disable(&hdev->tx_task);
@@ -287,143 +326,153 @@ static void hci_cc_host_ctl(struct hci_dev *hdev, __u16 ocf, struct sk_buff *skb
287 tasklet_enable(&hdev->tx_task); 326 tasklet_enable(&hdev->tx_task);
288 } 327 }
289 } 328 }
290 hci_req_complete(hdev, status);
291 break;
292
293 case OCF_HOST_BUFFER_SIZE:
294 status = *((__u8 *) skb->data);
295 if (status) {
296 BT_DBG("%s OCF_BUFFER_SIZE failed %d", hdev->name, status);
297 hci_req_complete(hdev, status);
298 }
299 break;
300
301 default:
302 BT_DBG("%s Command complete: ogf HOST_CTL ocf %x", hdev->name, ocf);
303 break;
304 } 329 }
305} 330}
306 331
307/* Command Complete OGF INFO_PARAM */ 332static void hci_cc_host_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
308static void hci_cc_info_param(struct hci_dev *hdev, __u16 ocf, struct sk_buff *skb)
309{ 333{
310 struct hci_rp_read_loc_version *lv; 334 __u8 status = *((__u8 *) skb->data);
311 struct hci_rp_read_local_features *lf;
312 struct hci_rp_read_buffer_size *bs;
313 struct hci_rp_read_bd_addr *ba;
314 335
315 BT_DBG("%s ocf 0x%x", hdev->name, ocf); 336 BT_DBG("%s status 0x%x", hdev->name, status);
316 337
317 switch (ocf) { 338 hci_req_complete(hdev, status);
318 case OCF_READ_LOCAL_VERSION: 339}
319 lv = (struct hci_rp_read_loc_version *) skb->data;
320 340
321 if (lv->status) { 341static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
322 BT_DBG("%s READ_LOCAL_VERSION failed %d", hdev->name, lf->status); 342{
323 break; 343 struct hci_rp_read_local_version *rp = (void *) skb->data;
324 }
325 344
326 hdev->hci_ver = lv->hci_ver; 345 BT_DBG("%s status 0x%x", hdev->name, rp->status);
327 hdev->hci_rev = btohs(lv->hci_rev);
328 hdev->manufacturer = btohs(lv->manufacturer);
329 346
330 BT_DBG("%s: manufacturer %d hci_ver %d hci_rev %d", hdev->name, 347 if (rp->status)
331 hdev->manufacturer, hdev->hci_ver, hdev->hci_rev); 348 return;
332 349
333 break; 350 hdev->hci_ver = rp->hci_ver;
351 hdev->hci_rev = btohs(rp->hci_rev);
352 hdev->manufacturer = btohs(rp->manufacturer);
334 353
335 case OCF_READ_LOCAL_FEATURES: 354 BT_DBG("%s manufacturer %d hci ver %d:%d", hdev->name,
336 lf = (struct hci_rp_read_local_features *) skb->data; 355 hdev->manufacturer,
356 hdev->hci_ver, hdev->hci_rev);
357}
337 358
338 if (lf->status) { 359static void hci_cc_read_local_commands(struct hci_dev *hdev, struct sk_buff *skb)
339 BT_DBG("%s READ_LOCAL_FEATURES failed %d", hdev->name, lf->status); 360{
340 break; 361 struct hci_rp_read_local_commands *rp = (void *) skb->data;
341 }
342 362
343 memcpy(hdev->features, lf->features, sizeof(hdev->features)); 363 BT_DBG("%s status 0x%x", hdev->name, rp->status);
344 364
345 /* Adjust default settings according to features 365 if (rp->status)
346 * supported by device. */ 366 return;
347 if (hdev->features[0] & LMP_3SLOT)
348 hdev->pkt_type |= (HCI_DM3 | HCI_DH3);
349 367
350 if (hdev->features[0] & LMP_5SLOT) 368 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
351 hdev->pkt_type |= (HCI_DM5 | HCI_DH5); 369}
352 370
353 if (hdev->features[1] & LMP_HV2) { 371static void hci_cc_read_local_features(struct hci_dev *hdev, struct sk_buff *skb)
354 hdev->pkt_type |= (HCI_HV2); 372{
355 hdev->esco_type |= (ESCO_HV2); 373 struct hci_rp_read_local_features *rp = (void *) skb->data;
356 }
357 374
358 if (hdev->features[1] & LMP_HV3) { 375 BT_DBG("%s status 0x%x", hdev->name, rp->status);
359 hdev->pkt_type |= (HCI_HV3);
360 hdev->esco_type |= (ESCO_HV3);
361 }
362 376
363 if (hdev->features[3] & LMP_ESCO) 377 if (rp->status)
364 hdev->esco_type |= (ESCO_EV3); 378 return;
365 379
366 if (hdev->features[4] & LMP_EV4) 380 memcpy(hdev->features, rp->features, 8);
367 hdev->esco_type |= (ESCO_EV4);
368 381
369 if (hdev->features[4] & LMP_EV5) 382 /* Adjust default settings according to features
370 hdev->esco_type |= (ESCO_EV5); 383 * supported by device. */
371 384
372 BT_DBG("%s: features 0x%x 0x%x 0x%x", hdev->name, 385 if (hdev->features[0] & LMP_3SLOT)
373 lf->features[0], lf->features[1], lf->features[2]); 386 hdev->pkt_type |= (HCI_DM3 | HCI_DH3);
374 387
375 break; 388 if (hdev->features[0] & LMP_5SLOT)
389 hdev->pkt_type |= (HCI_DM5 | HCI_DH5);
376 390
377 case OCF_READ_BUFFER_SIZE: 391 if (hdev->features[1] & LMP_HV2) {
378 bs = (struct hci_rp_read_buffer_size *) skb->data; 392 hdev->pkt_type |= (HCI_HV2);
393 hdev->esco_type |= (ESCO_HV2);
394 }
379 395
380 if (bs->status) { 396 if (hdev->features[1] & LMP_HV3) {
381 BT_DBG("%s READ_BUFFER_SIZE failed %d", hdev->name, bs->status); 397 hdev->pkt_type |= (HCI_HV3);
382 hci_req_complete(hdev, bs->status); 398 hdev->esco_type |= (ESCO_HV3);
383 break; 399 }
384 }
385 400
386 hdev->acl_mtu = __le16_to_cpu(bs->acl_mtu); 401 if (hdev->features[3] & LMP_ESCO)
387 hdev->sco_mtu = bs->sco_mtu; 402 hdev->esco_type |= (ESCO_EV3);
388 hdev->acl_pkts = __le16_to_cpu(bs->acl_max_pkt);
389 hdev->sco_pkts = __le16_to_cpu(bs->sco_max_pkt);
390 403
391 if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) { 404 if (hdev->features[4] & LMP_EV4)
392 hdev->sco_mtu = 64; 405 hdev->esco_type |= (ESCO_EV4);
393 hdev->sco_pkts = 8;
394 }
395 406
396 hdev->acl_cnt = hdev->acl_pkts; 407 if (hdev->features[4] & LMP_EV5)
397 hdev->sco_cnt = hdev->sco_pkts; 408 hdev->esco_type |= (ESCO_EV5);
398 409
399 BT_DBG("%s mtu: acl %d, sco %d max_pkt: acl %d, sco %d", hdev->name, 410 BT_DBG("%s features 0x%.2x%.2x%.2x%.2x%.2x%.2x%.2x%.2x", hdev->name,
400 hdev->acl_mtu, hdev->sco_mtu, hdev->acl_pkts, hdev->sco_pkts); 411 hdev->features[0], hdev->features[1],
401 break; 412 hdev->features[2], hdev->features[3],
413 hdev->features[4], hdev->features[5],
414 hdev->features[6], hdev->features[7]);
415}
402 416
403 case OCF_READ_BD_ADDR: 417static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
404 ba = (struct hci_rp_read_bd_addr *) skb->data; 418{
419 struct hci_rp_read_buffer_size *rp = (void *) skb->data;
405 420
406 if (!ba->status) { 421 BT_DBG("%s status 0x%x", hdev->name, rp->status);
407 bacpy(&hdev->bdaddr, &ba->bdaddr);
408 } else {
409 BT_DBG("%s: READ_BD_ADDR failed %d", hdev->name, ba->status);
410 }
411 422
412 hci_req_complete(hdev, ba->status); 423 if (rp->status)
413 break; 424 return;
414 425
415 default: 426 hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
416 BT_DBG("%s Command complete: ogf INFO_PARAM ocf %x", hdev->name, ocf); 427 hdev->sco_mtu = rp->sco_mtu;
417 break; 428 hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
429 hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
430
431 if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
432 hdev->sco_mtu = 64;
433 hdev->sco_pkts = 8;
418 } 434 }
435
436 hdev->acl_cnt = hdev->acl_pkts;
437 hdev->sco_cnt = hdev->sco_pkts;
438
439 BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name,
440 hdev->acl_mtu, hdev->acl_pkts,
441 hdev->sco_mtu, hdev->sco_pkts);
442}
443
444static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
445{
446 struct hci_rp_read_bd_addr *rp = (void *) skb->data;
447
448 BT_DBG("%s status 0x%x", hdev->name, rp->status);
449
450 if (!rp->status)
451 bacpy(&hdev->bdaddr, &rp->bdaddr);
452
453 hci_req_complete(hdev, rp->status);
454}
455
456static inline void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
457{
458 BT_DBG("%s status 0x%x", hdev->name, status);
459
460 if (status) {
461 hci_req_complete(hdev, status);
462
463 hci_conn_check_pending(hdev);
464 } else
465 set_bit(HCI_INQUIRY, &hdev->flags);
419} 466}
420 467
421/* Command Status OGF LINK_CTL */
422static inline void hci_cs_create_conn(struct hci_dev *hdev, __u8 status) 468static inline void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
423{ 469{
470 struct hci_cp_create_conn *cp;
424 struct hci_conn *conn; 471 struct hci_conn *conn;
425 struct hci_cp_create_conn *cp = hci_sent_cmd_data(hdev, OGF_LINK_CTL, OCF_CREATE_CONN);
426 472
473 BT_DBG("%s status 0x%x", hdev->name, status);
474
475 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
427 if (!cp) 476 if (!cp)
428 return; 477 return;
429 478
@@ -431,8 +480,7 @@ static inline void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
431 480
432 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr); 481 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
433 482
434 BT_DBG("%s status 0x%x bdaddr %s conn %p", hdev->name, 483 BT_DBG("%s bdaddr %s conn %p", hdev->name, batostr(&cp->bdaddr), conn);
435 status, batostr(&cp->bdaddr), conn);
436 484
437 if (status) { 485 if (status) {
438 if (conn && conn->state == BT_CONNECT) { 486 if (conn && conn->state == BT_CONNECT) {
@@ -457,234 +505,138 @@ static inline void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
457 hci_dev_unlock(hdev); 505 hci_dev_unlock(hdev);
458} 506}
459 507
460static void hci_cs_link_ctl(struct hci_dev *hdev, __u16 ocf, __u8 status) 508static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
461{ 509{
462 BT_DBG("%s ocf 0x%x", hdev->name, ocf); 510 struct hci_cp_add_sco *cp;
511 struct hci_conn *acl, *sco;
512 __u16 handle;
463 513
464 switch (ocf) { 514 BT_DBG("%s status 0x%x", hdev->name, status);
465 case OCF_CREATE_CONN:
466 hci_cs_create_conn(hdev, status);
467 break;
468
469 case OCF_ADD_SCO:
470 if (status) {
471 struct hci_conn *acl, *sco;
472 struct hci_cp_add_sco *cp = hci_sent_cmd_data(hdev, OGF_LINK_CTL, OCF_ADD_SCO);
473 __u16 handle;
474
475 if (!cp)
476 break;
477 515
478 handle = __le16_to_cpu(cp->handle); 516 if (!status)
479 517 return;
480 BT_DBG("%s Add SCO error: handle %d status 0x%x", hdev->name, handle, status);
481 518
482 hci_dev_lock(hdev); 519 cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
520 if (!cp)
521 return;
483 522
484 acl = hci_conn_hash_lookup_handle(hdev, handle); 523 handle = __le16_to_cpu(cp->handle);
485 if (acl && (sco = acl->link)) {
486 sco->state = BT_CLOSED;
487 524
488 hci_proto_connect_cfm(sco, status); 525 BT_DBG("%s handle %d", hdev->name, handle);
489 hci_conn_del(sco);
490 }
491 526
492 hci_dev_unlock(hdev); 527 hci_dev_lock(hdev);
493 }
494 break;
495 528
496 case OCF_INQUIRY: 529 acl = hci_conn_hash_lookup_handle(hdev, handle);
497 if (status) { 530 if (acl && (sco = acl->link)) {
498 BT_DBG("%s Inquiry error: status 0x%x", hdev->name, status); 531 sco->state = BT_CLOSED;
499 hci_req_complete(hdev, status);
500 } else {
501 set_bit(HCI_INQUIRY, &hdev->flags);
502 }
503 break;
504 532
505 default: 533 hci_proto_connect_cfm(sco, status);
506 BT_DBG("%s Command status: ogf LINK_CTL ocf %x status %d", 534 hci_conn_del(sco);
507 hdev->name, ocf, status);
508 break;
509 } 535 }
536
537 hci_dev_unlock(hdev);
510} 538}
511 539
512/* Command Status OGF LINK_POLICY */ 540static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
513static void hci_cs_link_policy(struct hci_dev *hdev, __u16 ocf, __u8 status)
514{ 541{
515 BT_DBG("%s ocf 0x%x", hdev->name, ocf); 542 BT_DBG("%s status 0x%x", hdev->name, status);
516 543}
517 switch (ocf) {
518 case OCF_SNIFF_MODE:
519 if (status) {
520 struct hci_conn *conn;
521 struct hci_cp_sniff_mode *cp = hci_sent_cmd_data(hdev, OGF_LINK_POLICY, OCF_SNIFF_MODE);
522 544
523 if (!cp) 545static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
524 break; 546{
547 struct hci_cp_setup_sync_conn *cp;
548 struct hci_conn *acl, *sco;
549 __u16 handle;
525 550
526 hci_dev_lock(hdev); 551 BT_DBG("%s status 0x%x", hdev->name, status);
527 552
528 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); 553 if (!status)
529 if (conn) { 554 return;
530 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend);
531 }
532
533 hci_dev_unlock(hdev);
534 }
535 break;
536 555
537 case OCF_EXIT_SNIFF_MODE: 556 cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
538 if (status) { 557 if (!cp)
539 struct hci_conn *conn; 558 return;
540 struct hci_cp_exit_sniff_mode *cp = hci_sent_cmd_data(hdev, OGF_LINK_POLICY, OCF_EXIT_SNIFF_MODE);
541 559
542 if (!cp) 560 handle = __le16_to_cpu(cp->handle);
543 break;
544 561
545 hci_dev_lock(hdev); 562 BT_DBG("%s handle %d", hdev->name, handle);
546 563
547 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); 564 hci_dev_lock(hdev);
548 if (conn) {
549 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend);
550 }
551 565
552 hci_dev_unlock(hdev); 566 acl = hci_conn_hash_lookup_handle(hdev, handle);
553 } 567 if (acl && (sco = acl->link)) {
554 break; 568 sco->state = BT_CLOSED;
555 569
556 default: 570 hci_proto_connect_cfm(sco, status);
557 BT_DBG("%s Command status: ogf LINK_POLICY ocf %x", hdev->name, ocf); 571 hci_conn_del(sco);
558 break;
559 } 572 }
560}
561 573
562/* Command Status OGF HOST_CTL */ 574 hci_dev_unlock(hdev);
563static void hci_cs_host_ctl(struct hci_dev *hdev, __u16 ocf, __u8 status)
564{
565 BT_DBG("%s ocf 0x%x", hdev->name, ocf);
566
567 switch (ocf) {
568 default:
569 BT_DBG("%s Command status: ogf HOST_CTL ocf %x", hdev->name, ocf);
570 break;
571 }
572} 575}
573 576
574/* Command Status OGF INFO_PARAM */ 577static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
575static void hci_cs_info_param(struct hci_dev *hdev, __u16 ocf, __u8 status)
576{ 578{
577 BT_DBG("%s: hci_cs_info_param: ocf 0x%x", hdev->name, ocf); 579 struct hci_cp_sniff_mode *cp;
578 580 struct hci_conn *conn;
579 switch (ocf) {
580 default:
581 BT_DBG("%s Command status: ogf INFO_PARAM ocf %x", hdev->name, ocf);
582 break;
583 }
584}
585 581
586/* Inquiry Complete */ 582 BT_DBG("%s status 0x%x", hdev->name, status);
587static inline void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
588{
589 __u8 status = *((__u8 *) skb->data);
590 struct hci_conn *pend;
591 583
592 BT_DBG("%s status %d", hdev->name, status); 584 if (!status)
585 return;
593 586
594 clear_bit(HCI_INQUIRY, &hdev->flags); 587 cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
595 hci_req_complete(hdev, status); 588 if (!cp)
589 return;
596 590
597 hci_dev_lock(hdev); 591 hci_dev_lock(hdev);
598 592
599 pend = hci_conn_hash_lookup_state(hdev, ACL_LINK, BT_CONNECT2); 593 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
600 if (pend) 594 if (conn)
601 hci_acl_connect(pend); 595 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend);
602 596
603 hci_dev_unlock(hdev); 597 hci_dev_unlock(hdev);
604} 598}
605 599
606/* Inquiry Result */ 600static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
607static inline void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
608{ 601{
609 struct inquiry_data data; 602 struct hci_cp_exit_sniff_mode *cp;
610 struct inquiry_info *info = (struct inquiry_info *) (skb->data + 1); 603 struct hci_conn *conn;
611 int num_rsp = *((__u8 *) skb->data);
612 604
613 BT_DBG("%s num_rsp %d", hdev->name, num_rsp); 605 BT_DBG("%s status 0x%x", hdev->name, status);
614 606
615 if (!num_rsp) 607 if (!status)
608 return;
609
610 cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
611 if (!cp)
616 return; 612 return;
617 613
618 hci_dev_lock(hdev); 614 hci_dev_lock(hdev);
619 615
620 for (; num_rsp; num_rsp--) { 616 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
621 bacpy(&data.bdaddr, &info->bdaddr); 617 if (conn)
622 data.pscan_rep_mode = info->pscan_rep_mode; 618 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend);
623 data.pscan_period_mode = info->pscan_period_mode;
624 data.pscan_mode = info->pscan_mode;
625 memcpy(data.dev_class, info->dev_class, 3);
626 data.clock_offset = info->clock_offset;
627 data.rssi = 0x00;
628 info++;
629 hci_inquiry_cache_update(hdev, &data);
630 }
631 619
632 hci_dev_unlock(hdev); 620 hci_dev_unlock(hdev);
633} 621}
634 622
635/* Inquiry Result With RSSI */ 623static inline void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
636static inline void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, struct sk_buff *skb)
637{ 624{
638 struct inquiry_data data; 625 __u8 status = *((__u8 *) skb->data);
639 int num_rsp = *((__u8 *) skb->data);
640
641 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
642
643 if (!num_rsp)
644 return;
645
646 hci_dev_lock(hdev);
647 626
648 if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) { 627 BT_DBG("%s status %d", hdev->name, status);
649 struct inquiry_info_with_rssi_and_pscan_mode *info =
650 (struct inquiry_info_with_rssi_and_pscan_mode *) (skb->data + 1);
651 628
652 for (; num_rsp; num_rsp--) { 629 clear_bit(HCI_INQUIRY, &hdev->flags);
653 bacpy(&data.bdaddr, &info->bdaddr);
654 data.pscan_rep_mode = info->pscan_rep_mode;
655 data.pscan_period_mode = info->pscan_period_mode;
656 data.pscan_mode = info->pscan_mode;
657 memcpy(data.dev_class, info->dev_class, 3);
658 data.clock_offset = info->clock_offset;
659 data.rssi = info->rssi;
660 info++;
661 hci_inquiry_cache_update(hdev, &data);
662 }
663 } else {
664 struct inquiry_info_with_rssi *info =
665 (struct inquiry_info_with_rssi *) (skb->data + 1);
666 630
667 for (; num_rsp; num_rsp--) { 631 hci_req_complete(hdev, status);
668 bacpy(&data.bdaddr, &info->bdaddr);
669 data.pscan_rep_mode = info->pscan_rep_mode;
670 data.pscan_period_mode = info->pscan_period_mode;
671 data.pscan_mode = 0x00;
672 memcpy(data.dev_class, info->dev_class, 3);
673 data.clock_offset = info->clock_offset;
674 data.rssi = info->rssi;
675 info++;
676 hci_inquiry_cache_update(hdev, &data);
677 }
678 }
679 632
680 hci_dev_unlock(hdev); 633 hci_conn_check_pending(hdev);
681} 634}
682 635
683/* Extended Inquiry Result */ 636static inline void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
684static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
685{ 637{
686 struct inquiry_data data; 638 struct inquiry_data data;
687 struct extended_inquiry_info *info = (struct extended_inquiry_info *) (skb->data + 1); 639 struct inquiry_info *info = (void *) (skb->data + 1);
688 int num_rsp = *((__u8 *) skb->data); 640 int num_rsp = *((__u8 *) skb->data);
689 641
690 BT_DBG("%s num_rsp %d", hdev->name, num_rsp); 642 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
@@ -696,12 +648,12 @@ static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct
696 648
697 for (; num_rsp; num_rsp--) { 649 for (; num_rsp; num_rsp--) {
698 bacpy(&data.bdaddr, &info->bdaddr); 650 bacpy(&data.bdaddr, &info->bdaddr);
699 data.pscan_rep_mode = info->pscan_rep_mode; 651 data.pscan_rep_mode = info->pscan_rep_mode;
700 data.pscan_period_mode = info->pscan_period_mode; 652 data.pscan_period_mode = info->pscan_period_mode;
701 data.pscan_mode = 0x00; 653 data.pscan_mode = info->pscan_mode;
702 memcpy(data.dev_class, info->dev_class, 3); 654 memcpy(data.dev_class, info->dev_class, 3);
703 data.clock_offset = info->clock_offset; 655 data.clock_offset = info->clock_offset;
704 data.rssi = info->rssi; 656 data.rssi = 0x00;
705 info++; 657 info++;
706 hci_inquiry_cache_update(hdev, &data); 658 hci_inquiry_cache_update(hdev, &data);
707 } 659 }
@@ -709,70 +661,18 @@ static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct
709 hci_dev_unlock(hdev); 661 hci_dev_unlock(hdev);
710} 662}
711 663
712/* Connect Request */
713static inline void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
714{
715 struct hci_ev_conn_request *ev = (struct hci_ev_conn_request *) skb->data;
716 int mask = hdev->link_mode;
717
718 BT_DBG("%s Connection request: %s type 0x%x", hdev->name,
719 batostr(&ev->bdaddr), ev->link_type);
720
721 mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type);
722
723 if (mask & HCI_LM_ACCEPT) {
724 /* Connection accepted */
725 struct hci_conn *conn;
726 struct hci_cp_accept_conn_req cp;
727
728 hci_dev_lock(hdev);
729 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
730 if (!conn) {
731 if (!(conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr))) {
732 BT_ERR("No memmory for new connection");
733 hci_dev_unlock(hdev);
734 return;
735 }
736 }
737 memcpy(conn->dev_class, ev->dev_class, 3);
738 conn->state = BT_CONNECT;
739 hci_dev_unlock(hdev);
740
741 bacpy(&cp.bdaddr, &ev->bdaddr);
742
743 if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
744 cp.role = 0x00; /* Become master */
745 else
746 cp.role = 0x01; /* Remain slave */
747
748 hci_send_cmd(hdev, OGF_LINK_CTL,
749 OCF_ACCEPT_CONN_REQ, sizeof(cp), &cp);
750 } else {
751 /* Connection rejected */
752 struct hci_cp_reject_conn_req cp;
753
754 bacpy(&cp.bdaddr, &ev->bdaddr);
755 cp.reason = 0x0f;
756 hci_send_cmd(hdev, OGF_LINK_CTL,
757 OCF_REJECT_CONN_REQ, sizeof(cp), &cp);
758 }
759}
760
761/* Connect Complete */
762static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) 664static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
763{ 665{
764 struct hci_ev_conn_complete *ev = (struct hci_ev_conn_complete *) skb->data; 666 struct hci_ev_conn_complete *ev = (void *) skb->data;
765 struct hci_conn *conn, *pend; 667 struct hci_conn *conn;
766 668
767 BT_DBG("%s", hdev->name); 669 BT_DBG("%s", hdev->name);
768 670
769 hci_dev_lock(hdev); 671 hci_dev_lock(hdev);
770 672
771 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr); 673 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
772 if (!conn) { 674 if (!conn)
773 hci_dev_unlock(hdev); 675 goto unlock;
774 return;
775 }
776 676
777 if (!ev->status) { 677 if (!ev->status) {
778 conn->handle = __le16_to_cpu(ev->handle); 678 conn->handle = __le16_to_cpu(ev->handle);
@@ -788,8 +688,7 @@ static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *s
788 if (conn->type == ACL_LINK) { 688 if (conn->type == ACL_LINK) {
789 struct hci_cp_read_remote_features cp; 689 struct hci_cp_read_remote_features cp;
790 cp.handle = ev->handle; 690 cp.handle = ev->handle;
791 hci_send_cmd(hdev, OGF_LINK_CTL, 691 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES, sizeof(cp), &cp);
792 OCF_READ_REMOTE_FEATURES, sizeof(cp), &cp);
793 } 692 }
794 693
795 /* Set link policy */ 694 /* Set link policy */
@@ -797,8 +696,7 @@ static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *s
797 struct hci_cp_write_link_policy cp; 696 struct hci_cp_write_link_policy cp;
798 cp.handle = ev->handle; 697 cp.handle = ev->handle;
799 cp.policy = cpu_to_le16(hdev->link_policy); 698 cp.policy = cpu_to_le16(hdev->link_policy);
800 hci_send_cmd(hdev, OGF_LINK_POLICY, 699 hci_send_cmd(hdev, HCI_OP_WRITE_LINK_POLICY, sizeof(cp), &cp);
801 OCF_WRITE_LINK_POLICY, sizeof(cp), &cp);
802 } 700 }
803 701
804 /* Set packet type for incoming connection */ 702 /* Set packet type for incoming connection */
@@ -809,8 +707,7 @@ static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *s
809 cpu_to_le16(hdev->pkt_type & ACL_PTYPE_MASK): 707 cpu_to_le16(hdev->pkt_type & ACL_PTYPE_MASK):
810 cpu_to_le16(hdev->pkt_type & SCO_PTYPE_MASK); 708 cpu_to_le16(hdev->pkt_type & SCO_PTYPE_MASK);
811 709
812 hci_send_cmd(hdev, OGF_LINK_CTL, 710 hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp), &cp);
813 OCF_CHANGE_CONN_PTYPE, sizeof(cp), &cp);
814 } else { 711 } else {
815 /* Update disconnect timer */ 712 /* Update disconnect timer */
816 hci_conn_hold(conn); 713 hci_conn_hold(conn);
@@ -822,9 +719,12 @@ static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *s
822 if (conn->type == ACL_LINK) { 719 if (conn->type == ACL_LINK) {
823 struct hci_conn *sco = conn->link; 720 struct hci_conn *sco = conn->link;
824 if (sco) { 721 if (sco) {
825 if (!ev->status) 722 if (!ev->status) {
826 hci_add_sco(sco, conn->handle); 723 if (lmp_esco_capable(hdev))
827 else { 724 hci_setup_sync(sco, conn->handle);
725 else
726 hci_add_sco(sco, conn->handle);
727 } else {
828 hci_proto_connect_cfm(sco, ev->status); 728 hci_proto_connect_cfm(sco, ev->status);
829 hci_conn_del(sco); 729 hci_conn_del(sco);
830 } 730 }
@@ -835,136 +735,104 @@ static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *s
835 if (ev->status) 735 if (ev->status)
836 hci_conn_del(conn); 736 hci_conn_del(conn);
837 737
838 pend = hci_conn_hash_lookup_state(hdev, ACL_LINK, BT_CONNECT2); 738unlock:
839 if (pend)
840 hci_acl_connect(pend);
841
842 hci_dev_unlock(hdev); 739 hci_dev_unlock(hdev);
843}
844
845/* Disconnect Complete */
846static inline void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
847{
848 struct hci_ev_disconn_complete *ev = (struct hci_ev_disconn_complete *) skb->data;
849 struct hci_conn *conn;
850
851 BT_DBG("%s status %d", hdev->name, ev->status);
852
853 if (ev->status)
854 return;
855 740
856 hci_dev_lock(hdev); 741 hci_conn_check_pending(hdev);
857
858 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
859 if (conn) {
860 conn->state = BT_CLOSED;
861 hci_proto_disconn_ind(conn, ev->reason);
862 hci_conn_del(conn);
863 }
864
865 hci_dev_unlock(hdev);
866} 742}
867 743
868/* Number of completed packets */ 744static inline void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
869static inline void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
870{ 745{
871 struct hci_ev_num_comp_pkts *ev = (struct hci_ev_num_comp_pkts *) skb->data; 746 struct hci_ev_conn_request *ev = (void *) skb->data;
872 __le16 *ptr; 747 int mask = hdev->link_mode;
873 int i;
874
875 skb_pull(skb, sizeof(*ev));
876
877 BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);
878 748
879 if (skb->len < ev->num_hndl * 4) { 749 BT_DBG("%s bdaddr %s type 0x%x", hdev->name,
880 BT_DBG("%s bad parameters", hdev->name); 750 batostr(&ev->bdaddr), ev->link_type);
881 return;
882 }
883 751
884 tasklet_disable(&hdev->tx_task); 752 mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type);
885 753
886 for (i = 0, ptr = (__le16 *) skb->data; i < ev->num_hndl; i++) { 754 if (mask & HCI_LM_ACCEPT) {
755 /* Connection accepted */
887 struct hci_conn *conn; 756 struct hci_conn *conn;
888 __u16 handle, count;
889
890 handle = __le16_to_cpu(get_unaligned(ptr++));
891 count = __le16_to_cpu(get_unaligned(ptr++));
892 757
893 conn = hci_conn_hash_lookup_handle(hdev, handle); 758 hci_dev_lock(hdev);
894 if (conn) {
895 conn->sent -= count;
896 759
897 if (conn->type == ACL_LINK) { 760 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
898 if ((hdev->acl_cnt += count) > hdev->acl_pkts) 761 if (!conn) {
899 hdev->acl_cnt = hdev->acl_pkts; 762 if (!(conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr))) {
900 } else { 763 BT_ERR("No memory for new connection");
901 if ((hdev->sco_cnt += count) > hdev->sco_pkts) 764 hci_dev_unlock(hdev);
902 hdev->sco_cnt = hdev->sco_pkts; 765 return;
903 } 766 }
904 } 767 }
905 }
906 hci_sched_tx(hdev);
907 768
908 tasklet_enable(&hdev->tx_task); 769 memcpy(conn->dev_class, ev->dev_class, 3);
909} 770 conn->state = BT_CONNECT;
910 771
911/* Role Change */ 772 hci_dev_unlock(hdev);
912static inline void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
913{
914 struct hci_ev_role_change *ev = (struct hci_ev_role_change *) skb->data;
915 struct hci_conn *conn;
916 773
917 BT_DBG("%s status %d", hdev->name, ev->status); 774 if (ev->link_type == ACL_LINK || !lmp_esco_capable(hdev)) {
775 struct hci_cp_accept_conn_req cp;
918 776
919 hci_dev_lock(hdev); 777 bacpy(&cp.bdaddr, &ev->bdaddr);
920 778
921 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 779 if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
922 if (conn) { 780 cp.role = 0x00; /* Become master */
923 if (!ev->status) {
924 if (ev->role)
925 conn->link_mode &= ~HCI_LM_MASTER;
926 else 781 else
927 conn->link_mode |= HCI_LM_MASTER; 782 cp.role = 0x01; /* Remain slave */
928 }
929 783
930 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->pend); 784 hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ,
785 sizeof(cp), &cp);
786 } else {
787 struct hci_cp_accept_sync_conn_req cp;
931 788
932 hci_role_switch_cfm(conn, ev->status, ev->role); 789 bacpy(&cp.bdaddr, &ev->bdaddr);
933 } 790 cp.pkt_type = cpu_to_le16(hdev->esco_type);
934 791
935 hci_dev_unlock(hdev); 792 cp.tx_bandwidth = cpu_to_le32(0x00001f40);
793 cp.rx_bandwidth = cpu_to_le32(0x00001f40);
794 cp.max_latency = cpu_to_le16(0xffff);
795 cp.content_format = cpu_to_le16(hdev->voice_setting);
796 cp.retrans_effort = 0xff;
797
798 hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ,
799 sizeof(cp), &cp);
800 }
801 } else {
802 /* Connection rejected */
803 struct hci_cp_reject_conn_req cp;
804
805 bacpy(&cp.bdaddr, &ev->bdaddr);
806 cp.reason = 0x0f;
807 hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
808 }
936} 809}
937 810
938/* Mode Change */ 811static inline void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
939static inline void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
940{ 812{
941 struct hci_ev_mode_change *ev = (struct hci_ev_mode_change *) skb->data; 813 struct hci_ev_disconn_complete *ev = (void *) skb->data;
942 struct hci_conn *conn; 814 struct hci_conn *conn;
943 815
944 BT_DBG("%s status %d", hdev->name, ev->status); 816 BT_DBG("%s status %d", hdev->name, ev->status);
945 817
818 if (ev->status)
819 return;
820
946 hci_dev_lock(hdev); 821 hci_dev_lock(hdev);
947 822
948 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 823 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
949 if (conn) { 824 if (conn) {
950 conn->mode = ev->mode; 825 conn->state = BT_CLOSED;
951 conn->interval = __le16_to_cpu(ev->interval); 826 hci_proto_disconn_ind(conn, ev->reason);
952 827 hci_conn_del(conn);
953 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) {
954 if (conn->mode == HCI_CM_ACTIVE)
955 conn->power_save = 1;
956 else
957 conn->power_save = 0;
958 }
959 } 828 }
960 829
961 hci_dev_unlock(hdev); 830 hci_dev_unlock(hdev);
962} 831}
963 832
964/* Authentication Complete */
965static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) 833static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
966{ 834{
967 struct hci_ev_auth_complete *ev = (struct hci_ev_auth_complete *) skb->data; 835 struct hci_ev_auth_complete *ev = (void *) skb->data;
968 struct hci_conn *conn; 836 struct hci_conn *conn;
969 837
970 BT_DBG("%s status %d", hdev->name, ev->status); 838 BT_DBG("%s status %d", hdev->name, ev->status);
@@ -985,8 +853,8 @@ static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *s
985 struct hci_cp_set_conn_encrypt cp; 853 struct hci_cp_set_conn_encrypt cp;
986 cp.handle = cpu_to_le16(conn->handle); 854 cp.handle = cpu_to_le16(conn->handle);
987 cp.encrypt = 1; 855 cp.encrypt = 1;
988 hci_send_cmd(conn->hdev, OGF_LINK_CTL, 856 hci_send_cmd(conn->hdev,
989 OCF_SET_CONN_ENCRYPT, sizeof(cp), &cp); 857 HCI_OP_SET_CONN_ENCRYPT, sizeof(cp), &cp);
990 } else { 858 } else {
991 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend); 859 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend);
992 hci_encrypt_cfm(conn, ev->status, 0x00); 860 hci_encrypt_cfm(conn, ev->status, 0x00);
@@ -997,10 +865,16 @@ static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *s
997 hci_dev_unlock(hdev); 865 hci_dev_unlock(hdev);
998} 866}
999 867
1000/* Encryption Change */ 868static inline void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
869{
870 BT_DBG("%s", hdev->name);
871
872 hci_conn_check_pending(hdev);
873}
874
1001static inline void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb) 875static inline void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
1002{ 876{
1003 struct hci_ev_encrypt_change *ev = (struct hci_ev_encrypt_change *) skb->data; 877 struct hci_ev_encrypt_change *ev = (void *) skb->data;
1004 struct hci_conn *conn; 878 struct hci_conn *conn;
1005 879
1006 BT_DBG("%s status %d", hdev->name, ev->status); 880 BT_DBG("%s status %d", hdev->name, ev->status);
@@ -1024,10 +898,9 @@ static inline void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *
1024 hci_dev_unlock(hdev); 898 hci_dev_unlock(hdev);
1025} 899}
1026 900
1027/* Change Connection Link Key Complete */ 901static inline void hci_change_link_key_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1028static inline void hci_change_conn_link_key_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1029{ 902{
1030 struct hci_ev_change_conn_link_key_complete *ev = (struct hci_ev_change_conn_link_key_complete *) skb->data; 903 struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
1031 struct hci_conn *conn; 904 struct hci_conn *conn;
1032 905
1033 BT_DBG("%s status %d", hdev->name, ev->status); 906 BT_DBG("%s status %d", hdev->name, ev->status);
@@ -1047,25 +920,263 @@ static inline void hci_change_conn_link_key_complete_evt(struct hci_dev *hdev, s
1047 hci_dev_unlock(hdev); 920 hci_dev_unlock(hdev);
1048} 921}
1049 922
1050/* Pin Code Request*/ 923static inline void hci_remote_features_evt(struct hci_dev *hdev, struct sk_buff *skb)
1051static inline void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
1052{ 924{
925 struct hci_ev_remote_features *ev = (void *) skb->data;
926 struct hci_conn *conn;
927
928 BT_DBG("%s status %d", hdev->name, ev->status);
929
930 if (ev->status)
931 return;
932
933 hci_dev_lock(hdev);
934
935 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
936 if (conn)
937 memcpy(conn->features, ev->features, 8);
938
939 hci_dev_unlock(hdev);
1053} 940}
1054 941
1055/* Link Key Request */ 942static inline void hci_remote_version_evt(struct hci_dev *hdev, struct sk_buff *skb)
1056static inline void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
1057{ 943{
944 BT_DBG("%s", hdev->name);
1058} 945}
1059 946
1060/* Link Key Notification */ 947static inline void hci_qos_setup_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1061static inline void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
1062{ 948{
949 BT_DBG("%s", hdev->name);
1063} 950}
1064 951
1065/* Remote Features */ 952static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1066static inline void hci_remote_features_evt(struct hci_dev *hdev, struct sk_buff *skb)
1067{ 953{
1068 struct hci_ev_remote_features *ev = (struct hci_ev_remote_features *) skb->data; 954 struct hci_ev_cmd_complete *ev = (void *) skb->data;
955 __u16 opcode;
956
957 skb_pull(skb, sizeof(*ev));
958
959 opcode = __le16_to_cpu(ev->opcode);
960
961 switch (opcode) {
962 case HCI_OP_INQUIRY_CANCEL:
963 hci_cc_inquiry_cancel(hdev, skb);
964 break;
965
966 case HCI_OP_EXIT_PERIODIC_INQ:
967 hci_cc_exit_periodic_inq(hdev, skb);
968 break;
969
970 case HCI_OP_REMOTE_NAME_REQ_CANCEL:
971 hci_cc_remote_name_req_cancel(hdev, skb);
972 break;
973
974 case HCI_OP_ROLE_DISCOVERY:
975 hci_cc_role_discovery(hdev, skb);
976 break;
977
978 case HCI_OP_WRITE_LINK_POLICY:
979 hci_cc_write_link_policy(hdev, skb);
980 break;
981
982 case HCI_OP_RESET:
983 hci_cc_reset(hdev, skb);
984 break;
985
986 case HCI_OP_WRITE_LOCAL_NAME:
987 hci_cc_write_local_name(hdev, skb);
988 break;
989
990 case HCI_OP_READ_LOCAL_NAME:
991 hci_cc_read_local_name(hdev, skb);
992 break;
993
994 case HCI_OP_WRITE_AUTH_ENABLE:
995 hci_cc_write_auth_enable(hdev, skb);
996 break;
997
998 case HCI_OP_WRITE_ENCRYPT_MODE:
999 hci_cc_write_encrypt_mode(hdev, skb);
1000 break;
1001
1002 case HCI_OP_WRITE_SCAN_ENABLE:
1003 hci_cc_write_scan_enable(hdev, skb);
1004 break;
1005
1006 case HCI_OP_READ_CLASS_OF_DEV:
1007 hci_cc_read_class_of_dev(hdev, skb);
1008 break;
1009
1010 case HCI_OP_WRITE_CLASS_OF_DEV:
1011 hci_cc_write_class_of_dev(hdev, skb);
1012 break;
1013
1014 case HCI_OP_READ_VOICE_SETTING:
1015 hci_cc_read_voice_setting(hdev, skb);
1016 break;
1017
1018 case HCI_OP_WRITE_VOICE_SETTING:
1019 hci_cc_write_voice_setting(hdev, skb);
1020 break;
1021
1022 case HCI_OP_HOST_BUFFER_SIZE:
1023 hci_cc_host_buffer_size(hdev, skb);
1024 break;
1025
1026 case HCI_OP_READ_LOCAL_VERSION:
1027 hci_cc_read_local_version(hdev, skb);
1028 break;
1029
1030 case HCI_OP_READ_LOCAL_COMMANDS:
1031 hci_cc_read_local_commands(hdev, skb);
1032 break;
1033
1034 case HCI_OP_READ_LOCAL_FEATURES:
1035 hci_cc_read_local_features(hdev, skb);
1036 break;
1037
1038 case HCI_OP_READ_BUFFER_SIZE:
1039 hci_cc_read_buffer_size(hdev, skb);
1040 break;
1041
1042 case HCI_OP_READ_BD_ADDR:
1043 hci_cc_read_bd_addr(hdev, skb);
1044 break;
1045
1046 default:
1047 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
1048 break;
1049 }
1050
1051 if (ev->ncmd) {
1052 atomic_set(&hdev->cmd_cnt, 1);
1053 if (!skb_queue_empty(&hdev->cmd_q))
1054 hci_sched_cmd(hdev);
1055 }
1056}
1057
1058static inline void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
1059{
1060 struct hci_ev_cmd_status *ev = (void *) skb->data;
1061 __u16 opcode;
1062
1063 skb_pull(skb, sizeof(*ev));
1064
1065 opcode = __le16_to_cpu(ev->opcode);
1066
1067 switch (opcode) {
1068 case HCI_OP_INQUIRY:
1069 hci_cs_inquiry(hdev, ev->status);
1070 break;
1071
1072 case HCI_OP_CREATE_CONN:
1073 hci_cs_create_conn(hdev, ev->status);
1074 break;
1075
1076 case HCI_OP_ADD_SCO:
1077 hci_cs_add_sco(hdev, ev->status);
1078 break;
1079
1080 case HCI_OP_REMOTE_NAME_REQ:
1081 hci_cs_remote_name_req(hdev, ev->status);
1082 break;
1083
1084 case HCI_OP_SETUP_SYNC_CONN:
1085 hci_cs_setup_sync_conn(hdev, ev->status);
1086 break;
1087
1088 case HCI_OP_SNIFF_MODE:
1089 hci_cs_sniff_mode(hdev, ev->status);
1090 break;
1091
1092 case HCI_OP_EXIT_SNIFF_MODE:
1093 hci_cs_exit_sniff_mode(hdev, ev->status);
1094 break;
1095
1096 default:
1097 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
1098 break;
1099 }
1100
1101 if (ev->ncmd) {
1102 atomic_set(&hdev->cmd_cnt, 1);
1103 if (!skb_queue_empty(&hdev->cmd_q))
1104 hci_sched_cmd(hdev);
1105 }
1106}
1107
1108static inline void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
1109{
1110 struct hci_ev_role_change *ev = (void *) skb->data;
1111 struct hci_conn *conn;
1112
1113 BT_DBG("%s status %d", hdev->name, ev->status);
1114
1115 hci_dev_lock(hdev);
1116
1117 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
1118 if (conn) {
1119 if (!ev->status) {
1120 if (ev->role)
1121 conn->link_mode &= ~HCI_LM_MASTER;
1122 else
1123 conn->link_mode |= HCI_LM_MASTER;
1124 }
1125
1126 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->pend);
1127
1128 hci_role_switch_cfm(conn, ev->status, ev->role);
1129 }
1130
1131 hci_dev_unlock(hdev);
1132}
1133
1134static inline void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
1135{
1136 struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
1137 __le16 *ptr;
1138 int i;
1139
1140 skb_pull(skb, sizeof(*ev));
1141
1142 BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);
1143
1144 if (skb->len < ev->num_hndl * 4) {
1145 BT_DBG("%s bad parameters", hdev->name);
1146 return;
1147 }
1148
1149 tasklet_disable(&hdev->tx_task);
1150
1151 for (i = 0, ptr = (__le16 *) skb->data; i < ev->num_hndl; i++) {
1152 struct hci_conn *conn;
1153 __u16 handle, count;
1154
1155 handle = __le16_to_cpu(get_unaligned(ptr++));
1156 count = __le16_to_cpu(get_unaligned(ptr++));
1157
1158 conn = hci_conn_hash_lookup_handle(hdev, handle);
1159 if (conn) {
1160 conn->sent -= count;
1161
1162 if (conn->type == ACL_LINK) {
1163 if ((hdev->acl_cnt += count) > hdev->acl_pkts)
1164 hdev->acl_cnt = hdev->acl_pkts;
1165 } else {
1166 if ((hdev->sco_cnt += count) > hdev->sco_pkts)
1167 hdev->sco_cnt = hdev->sco_pkts;
1168 }
1169 }
1170 }
1171
1172 hci_sched_tx(hdev);
1173
1174 tasklet_enable(&hdev->tx_task);
1175}
1176
1177static inline void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
1178{
1179 struct hci_ev_mode_change *ev = (void *) skb->data;
1069 struct hci_conn *conn; 1180 struct hci_conn *conn;
1070 1181
1071 BT_DBG("%s status %d", hdev->name, ev->status); 1182 BT_DBG("%s status %d", hdev->name, ev->status);
@@ -1073,17 +1184,39 @@ static inline void hci_remote_features_evt(struct hci_dev *hdev, struct sk_buff
1073 hci_dev_lock(hdev); 1184 hci_dev_lock(hdev);
1074 1185
1075 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 1186 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1076 if (conn && !ev->status) { 1187 if (conn) {
1077 memcpy(conn->features, ev->features, sizeof(conn->features)); 1188 conn->mode = ev->mode;
1189 conn->interval = __le16_to_cpu(ev->interval);
1190
1191 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) {
1192 if (conn->mode == HCI_CM_ACTIVE)
1193 conn->power_save = 1;
1194 else
1195 conn->power_save = 0;
1196 }
1078 } 1197 }
1079 1198
1080 hci_dev_unlock(hdev); 1199 hci_dev_unlock(hdev);
1081} 1200}
1082 1201
1083/* Clock Offset */ 1202static inline void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
1203{
1204 BT_DBG("%s", hdev->name);
1205}
1206
1207static inline void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
1208{
1209 BT_DBG("%s", hdev->name);
1210}
1211
1212static inline void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
1213{
1214 BT_DBG("%s", hdev->name);
1215}
1216
1084static inline void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb) 1217static inline void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
1085{ 1218{
1086 struct hci_ev_clock_offset *ev = (struct hci_ev_clock_offset *) skb->data; 1219 struct hci_ev_clock_offset *ev = (void *) skb->data;
1087 struct hci_conn *conn; 1220 struct hci_conn *conn;
1088 1221
1089 BT_DBG("%s status %d", hdev->name, ev->status); 1222 BT_DBG("%s status %d", hdev->name, ev->status);
@@ -1103,10 +1236,9 @@ static inline void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *sk
1103 hci_dev_unlock(hdev); 1236 hci_dev_unlock(hdev);
1104} 1237}
1105 1238
1106/* Page Scan Repetition Mode */
1107static inline void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb) 1239static inline void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
1108{ 1240{
1109 struct hci_ev_pscan_rep_mode *ev = (struct hci_ev_pscan_rep_mode *) skb->data; 1241 struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
1110 struct inquiry_entry *ie; 1242 struct inquiry_entry *ie;
1111 1243
1112 BT_DBG("%s", hdev->name); 1244 BT_DBG("%s", hdev->name);
@@ -1121,10 +1253,91 @@ static inline void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *
1121 hci_dev_unlock(hdev); 1253 hci_dev_unlock(hdev);
1122} 1254}
1123 1255
1124/* Sniff Subrate */ 1256static inline void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, struct sk_buff *skb)
1257{
1258 struct inquiry_data data;
1259 int num_rsp = *((__u8 *) skb->data);
1260
1261 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
1262
1263 if (!num_rsp)
1264 return;
1265
1266 hci_dev_lock(hdev);
1267
1268 if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
1269 struct inquiry_info_with_rssi_and_pscan_mode *info = (void *) (skb->data + 1);
1270
1271 for (; num_rsp; num_rsp--) {
1272 bacpy(&data.bdaddr, &info->bdaddr);
1273 data.pscan_rep_mode = info->pscan_rep_mode;
1274 data.pscan_period_mode = info->pscan_period_mode;
1275 data.pscan_mode = info->pscan_mode;
1276 memcpy(data.dev_class, info->dev_class, 3);
1277 data.clock_offset = info->clock_offset;
1278 data.rssi = info->rssi;
1279 info++;
1280 hci_inquiry_cache_update(hdev, &data);
1281 }
1282 } else {
1283 struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
1284
1285 for (; num_rsp; num_rsp--) {
1286 bacpy(&data.bdaddr, &info->bdaddr);
1287 data.pscan_rep_mode = info->pscan_rep_mode;
1288 data.pscan_period_mode = info->pscan_period_mode;
1289 data.pscan_mode = 0x00;
1290 memcpy(data.dev_class, info->dev_class, 3);
1291 data.clock_offset = info->clock_offset;
1292 data.rssi = info->rssi;
1293 info++;
1294 hci_inquiry_cache_update(hdev, &data);
1295 }
1296 }
1297
1298 hci_dev_unlock(hdev);
1299}
1300
1301static inline void hci_remote_ext_features_evt(struct hci_dev *hdev, struct sk_buff *skb)
1302{
1303 BT_DBG("%s", hdev->name);
1304}
1305
1306static inline void hci_sync_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1307{
1308 struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
1309 struct hci_conn *conn;
1310
1311 BT_DBG("%s status %d", hdev->name, ev->status);
1312
1313 hci_dev_lock(hdev);
1314
1315 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
1316 if (!conn)
1317 goto unlock;
1318
1319 if (!ev->status) {
1320 conn->handle = __le16_to_cpu(ev->handle);
1321 conn->state = BT_CONNECTED;
1322 } else
1323 conn->state = BT_CLOSED;
1324
1325 hci_proto_connect_cfm(conn, ev->status);
1326 if (ev->status)
1327 hci_conn_del(conn);
1328
1329unlock:
1330 hci_dev_unlock(hdev);
1331}
1332
1333static inline void hci_sync_conn_changed_evt(struct hci_dev *hdev, struct sk_buff *skb)
1334{
1335 BT_DBG("%s", hdev->name);
1336}
1337
1125static inline void hci_sniff_subrate_evt(struct hci_dev *hdev, struct sk_buff *skb) 1338static inline void hci_sniff_subrate_evt(struct hci_dev *hdev, struct sk_buff *skb)
1126{ 1339{
1127 struct hci_ev_sniff_subrate *ev = (struct hci_ev_sniff_subrate *) skb->data; 1340 struct hci_ev_sniff_subrate *ev = (void *) skb->data;
1128 struct hci_conn *conn; 1341 struct hci_conn *conn;
1129 1342
1130 BT_DBG("%s status %d", hdev->name, ev->status); 1343 BT_DBG("%s status %d", hdev->name, ev->status);
@@ -1138,22 +1351,42 @@ static inline void hci_sniff_subrate_evt(struct hci_dev *hdev, struct sk_buff *s
1138 hci_dev_unlock(hdev); 1351 hci_dev_unlock(hdev);
1139} 1352}
1140 1353
1141void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb) 1354static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
1142{ 1355{
1143 struct hci_event_hdr *hdr = (struct hci_event_hdr *) skb->data; 1356 struct inquiry_data data;
1144 struct hci_ev_cmd_complete *ec; 1357 struct extended_inquiry_info *info = (void *) (skb->data + 1);
1145 struct hci_ev_cmd_status *cs; 1358 int num_rsp = *((__u8 *) skb->data);
1146 u16 opcode, ocf, ogf;
1147 1359
1148 skb_pull(skb, HCI_EVENT_HDR_SIZE); 1360 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
1149 1361
1150 BT_DBG("%s evt 0x%x", hdev->name, hdr->evt); 1362 if (!num_rsp)
1363 return;
1151 1364
1152 switch (hdr->evt) { 1365 hci_dev_lock(hdev);
1153 case HCI_EV_NUM_COMP_PKTS: 1366
1154 hci_num_comp_pkts_evt(hdev, skb); 1367 for (; num_rsp; num_rsp--) {
1155 break; 1368 bacpy(&data.bdaddr, &info->bdaddr);
1369 data.pscan_rep_mode = info->pscan_rep_mode;
1370 data.pscan_period_mode = info->pscan_period_mode;
1371 data.pscan_mode = 0x00;
1372 memcpy(data.dev_class, info->dev_class, 3);
1373 data.clock_offset = info->clock_offset;
1374 data.rssi = info->rssi;
1375 info++;
1376 hci_inquiry_cache_update(hdev, &data);
1377 }
1156 1378
1379 hci_dev_unlock(hdev);
1380}
1381
1382void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
1383{
1384 struct hci_event_hdr *hdr = (void *) skb->data;
1385 __u8 event = hdr->evt;
1386
1387 skb_pull(skb, HCI_EVENT_HDR_SIZE);
1388
1389 switch (event) {
1157 case HCI_EV_INQUIRY_COMPLETE: 1390 case HCI_EV_INQUIRY_COMPLETE:
1158 hci_inquiry_complete_evt(hdev, skb); 1391 hci_inquiry_complete_evt(hdev, skb);
1159 break; 1392 break;
@@ -1162,44 +1395,64 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
1162 hci_inquiry_result_evt(hdev, skb); 1395 hci_inquiry_result_evt(hdev, skb);
1163 break; 1396 break;
1164 1397
1165 case HCI_EV_INQUIRY_RESULT_WITH_RSSI: 1398 case HCI_EV_CONN_COMPLETE:
1166 hci_inquiry_result_with_rssi_evt(hdev, skb); 1399 hci_conn_complete_evt(hdev, skb);
1167 break;
1168
1169 case HCI_EV_EXTENDED_INQUIRY_RESULT:
1170 hci_extended_inquiry_result_evt(hdev, skb);
1171 break; 1400 break;
1172 1401
1173 case HCI_EV_CONN_REQUEST: 1402 case HCI_EV_CONN_REQUEST:
1174 hci_conn_request_evt(hdev, skb); 1403 hci_conn_request_evt(hdev, skb);
1175 break; 1404 break;
1176 1405
1177 case HCI_EV_CONN_COMPLETE:
1178 hci_conn_complete_evt(hdev, skb);
1179 break;
1180
1181 case HCI_EV_DISCONN_COMPLETE: 1406 case HCI_EV_DISCONN_COMPLETE:
1182 hci_disconn_complete_evt(hdev, skb); 1407 hci_disconn_complete_evt(hdev, skb);
1183 break; 1408 break;
1184 1409
1185 case HCI_EV_ROLE_CHANGE:
1186 hci_role_change_evt(hdev, skb);
1187 break;
1188
1189 case HCI_EV_MODE_CHANGE:
1190 hci_mode_change_evt(hdev, skb);
1191 break;
1192
1193 case HCI_EV_AUTH_COMPLETE: 1410 case HCI_EV_AUTH_COMPLETE:
1194 hci_auth_complete_evt(hdev, skb); 1411 hci_auth_complete_evt(hdev, skb);
1195 break; 1412 break;
1196 1413
1414 case HCI_EV_REMOTE_NAME:
1415 hci_remote_name_evt(hdev, skb);
1416 break;
1417
1197 case HCI_EV_ENCRYPT_CHANGE: 1418 case HCI_EV_ENCRYPT_CHANGE:
1198 hci_encrypt_change_evt(hdev, skb); 1419 hci_encrypt_change_evt(hdev, skb);
1199 break; 1420 break;
1200 1421
1201 case HCI_EV_CHANGE_CONN_LINK_KEY_COMPLETE: 1422 case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
1202 hci_change_conn_link_key_complete_evt(hdev, skb); 1423 hci_change_link_key_complete_evt(hdev, skb);
1424 break;
1425
1426 case HCI_EV_REMOTE_FEATURES:
1427 hci_remote_features_evt(hdev, skb);
1428 break;
1429
1430 case HCI_EV_REMOTE_VERSION:
1431 hci_remote_version_evt(hdev, skb);
1432 break;
1433
1434 case HCI_EV_QOS_SETUP_COMPLETE:
1435 hci_qos_setup_complete_evt(hdev, skb);
1436 break;
1437
1438 case HCI_EV_CMD_COMPLETE:
1439 hci_cmd_complete_evt(hdev, skb);
1440 break;
1441
1442 case HCI_EV_CMD_STATUS:
1443 hci_cmd_status_evt(hdev, skb);
1444 break;
1445
1446 case HCI_EV_ROLE_CHANGE:
1447 hci_role_change_evt(hdev, skb);
1448 break;
1449
1450 case HCI_EV_NUM_COMP_PKTS:
1451 hci_num_comp_pkts_evt(hdev, skb);
1452 break;
1453
1454 case HCI_EV_MODE_CHANGE:
1455 hci_mode_change_evt(hdev, skb);
1203 break; 1456 break;
1204 1457
1205 case HCI_EV_PIN_CODE_REQ: 1458 case HCI_EV_PIN_CODE_REQ:
@@ -1214,10 +1467,6 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
1214 hci_link_key_notify_evt(hdev, skb); 1467 hci_link_key_notify_evt(hdev, skb);
1215 break; 1468 break;
1216 1469
1217 case HCI_EV_REMOTE_FEATURES:
1218 hci_remote_features_evt(hdev, skb);
1219 break;
1220
1221 case HCI_EV_CLOCK_OFFSET: 1470 case HCI_EV_CLOCK_OFFSET:
1222 hci_clock_offset_evt(hdev, skb); 1471 hci_clock_offset_evt(hdev, skb);
1223 break; 1472 break;
@@ -1226,82 +1475,32 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
1226 hci_pscan_rep_mode_evt(hdev, skb); 1475 hci_pscan_rep_mode_evt(hdev, skb);
1227 break; 1476 break;
1228 1477
1229 case HCI_EV_SNIFF_SUBRATE: 1478 case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
1230 hci_sniff_subrate_evt(hdev, skb); 1479 hci_inquiry_result_with_rssi_evt(hdev, skb);
1231 break; 1480 break;
1232 1481
1233 case HCI_EV_CMD_STATUS: 1482 case HCI_EV_REMOTE_EXT_FEATURES:
1234 cs = (struct hci_ev_cmd_status *) skb->data; 1483 hci_remote_ext_features_evt(hdev, skb);
1235 skb_pull(skb, sizeof(cs));
1236
1237 opcode = __le16_to_cpu(cs->opcode);
1238 ogf = hci_opcode_ogf(opcode);
1239 ocf = hci_opcode_ocf(opcode);
1240
1241 switch (ogf) {
1242 case OGF_INFO_PARAM:
1243 hci_cs_info_param(hdev, ocf, cs->status);
1244 break;
1245
1246 case OGF_HOST_CTL:
1247 hci_cs_host_ctl(hdev, ocf, cs->status);
1248 break;
1249
1250 case OGF_LINK_CTL:
1251 hci_cs_link_ctl(hdev, ocf, cs->status);
1252 break;
1253
1254 case OGF_LINK_POLICY:
1255 hci_cs_link_policy(hdev, ocf, cs->status);
1256 break;
1257
1258 default:
1259 BT_DBG("%s Command Status OGF %x", hdev->name, ogf);
1260 break;
1261 }
1262
1263 if (cs->ncmd) {
1264 atomic_set(&hdev->cmd_cnt, 1);
1265 if (!skb_queue_empty(&hdev->cmd_q))
1266 hci_sched_cmd(hdev);
1267 }
1268 break; 1484 break;
1269 1485
1270 case HCI_EV_CMD_COMPLETE: 1486 case HCI_EV_SYNC_CONN_COMPLETE:
1271 ec = (struct hci_ev_cmd_complete *) skb->data; 1487 hci_sync_conn_complete_evt(hdev, skb);
1272 skb_pull(skb, sizeof(*ec)); 1488 break;
1273
1274 opcode = __le16_to_cpu(ec->opcode);
1275 ogf = hci_opcode_ogf(opcode);
1276 ocf = hci_opcode_ocf(opcode);
1277
1278 switch (ogf) {
1279 case OGF_INFO_PARAM:
1280 hci_cc_info_param(hdev, ocf, skb);
1281 break;
1282
1283 case OGF_HOST_CTL:
1284 hci_cc_host_ctl(hdev, ocf, skb);
1285 break;
1286 1489
1287 case OGF_LINK_CTL: 1490 case HCI_EV_SYNC_CONN_CHANGED:
1288 hci_cc_link_ctl(hdev, ocf, skb); 1491 hci_sync_conn_changed_evt(hdev, skb);
1289 break; 1492 break;
1290 1493
1291 case OGF_LINK_POLICY: 1494 case HCI_EV_SNIFF_SUBRATE:
1292 hci_cc_link_policy(hdev, ocf, skb); 1495 hci_sniff_subrate_evt(hdev, skb);
1293 break; 1496 break;
1294 1497
1295 default: 1498 case HCI_EV_EXTENDED_INQUIRY_RESULT:
1296 BT_DBG("%s Command Completed OGF %x", hdev->name, ogf); 1499 hci_extended_inquiry_result_evt(hdev, skb);
1297 break; 1500 break;
1298 }
1299 1501
1300 if (ec->ncmd) { 1502 default:
1301 atomic_set(&hdev->cmd_cnt, 1); 1503 BT_DBG("%s event 0x%x", hdev->name, event);
1302 if (!skb_queue_empty(&hdev->cmd_q))
1303 hci_sched_cmd(hdev);
1304 }
1305 break; 1504 break;
1306 } 1505 }
1307 1506
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index 43dd6373bff9..8825102c517c 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -451,7 +451,7 @@ static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
451 goto drop; 451 goto drop;
452 } 452 }
453 453
454 if (test_bit(HCI_RAW, &hdev->flags) || (ogf == OGF_VENDOR_CMD)) { 454 if (test_bit(HCI_RAW, &hdev->flags) || (ogf == 0x3f)) {
455 skb_queue_tail(&hdev->raw_q, skb); 455 skb_queue_tail(&hdev->raw_q, skb);
456 hci_sched_tx(hdev); 456 hci_sched_tx(hdev);
457 } else { 457 } else {
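
In the hci_sock.c hunk the symbolic OGF_VENDOR_CMD goes away with the rest of the OGF/OCF constants, so the raw-queue check now compares against the literal 0x3f, the vendor-specific opcode group. A minimal check of that equivalence, reusing the same kind of local macro as in the sketch above (the example opcode is arbitrary):

#include <assert.h>
#include <stdint.h>

#define OPCODE_OGF(op)	((op) >> 10)

int main(void)
{
	uint16_t vendor_op = 0xfc01;	/* an arbitrary vendor-specific command */

	assert(OPCODE_OGF(vendor_op) == 0x3f);	/* 0x3f is the vendor-specific OGF */
	return 0;
}
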
diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c
index 25835403d659..cef1e3e1881c 100644
--- a/net/bluetooth/hci_sysfs.c
+++ b/net/bluetooth/hci_sysfs.c
@@ -41,6 +41,26 @@ static ssize_t show_type(struct device *dev, struct device_attribute *attr, char
41 return sprintf(buf, "%s\n", typetostr(hdev->type)); 41 return sprintf(buf, "%s\n", typetostr(hdev->type));
42} 42}
43 43
44static ssize_t show_name(struct device *dev, struct device_attribute *attr, char *buf)
45{
46 struct hci_dev *hdev = dev_get_drvdata(dev);
47 char name[249];
48 int i;
49
50 for (i = 0; i < 248; i++)
51 name[i] = hdev->dev_name[i];
52
53 name[248] = '\0';
54 return sprintf(buf, "%s\n", name);
55}
56
57static ssize_t show_class(struct device *dev, struct device_attribute *attr, char *buf)
58{
59 struct hci_dev *hdev = dev_get_drvdata(dev);
60 return sprintf(buf, "0x%.2x%.2x%.2x\n",
61 hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
62}
63
44static ssize_t show_address(struct device *dev, struct device_attribute *attr, char *buf) 64static ssize_t show_address(struct device *dev, struct device_attribute *attr, char *buf)
45{ 65{
46 struct hci_dev *hdev = dev_get_drvdata(dev); 66 struct hci_dev *hdev = dev_get_drvdata(dev);
@@ -49,6 +69,17 @@ static ssize_t show_address(struct device *dev, struct device_attribute *attr, c
49 return sprintf(buf, "%s\n", batostr(&bdaddr)); 69 return sprintf(buf, "%s\n", batostr(&bdaddr));
50} 70}
51 71
72static ssize_t show_features(struct device *dev, struct device_attribute *attr, char *buf)
73{
74 struct hci_dev *hdev = dev_get_drvdata(dev);
75
76 return sprintf(buf, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
77 hdev->features[0], hdev->features[1],
78 hdev->features[2], hdev->features[3],
79 hdev->features[4], hdev->features[5],
80 hdev->features[6], hdev->features[7]);
81}
82
52static ssize_t show_manufacturer(struct device *dev, struct device_attribute *attr, char *buf) 83static ssize_t show_manufacturer(struct device *dev, struct device_attribute *attr, char *buf)
53{ 84{
54 struct hci_dev *hdev = dev_get_drvdata(dev); 85 struct hci_dev *hdev = dev_get_drvdata(dev);
@@ -170,7 +201,10 @@ static ssize_t store_sniff_min_interval(struct device *dev, struct device_attrib
170} 201}
171 202
172static DEVICE_ATTR(type, S_IRUGO, show_type, NULL); 203static DEVICE_ATTR(type, S_IRUGO, show_type, NULL);
204static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
205static DEVICE_ATTR(class, S_IRUGO, show_class, NULL);
173static DEVICE_ATTR(address, S_IRUGO, show_address, NULL); 206static DEVICE_ATTR(address, S_IRUGO, show_address, NULL);
207static DEVICE_ATTR(features, S_IRUGO, show_features, NULL);
174static DEVICE_ATTR(manufacturer, S_IRUGO, show_manufacturer, NULL); 208static DEVICE_ATTR(manufacturer, S_IRUGO, show_manufacturer, NULL);
175static DEVICE_ATTR(hci_version, S_IRUGO, show_hci_version, NULL); 209static DEVICE_ATTR(hci_version, S_IRUGO, show_hci_version, NULL);
176static DEVICE_ATTR(hci_revision, S_IRUGO, show_hci_revision, NULL); 210static DEVICE_ATTR(hci_revision, S_IRUGO, show_hci_revision, NULL);
@@ -185,7 +219,10 @@ static DEVICE_ATTR(sniff_min_interval, S_IRUGO | S_IWUSR,
185 219
186static struct device_attribute *bt_attrs[] = { 220static struct device_attribute *bt_attrs[] = {
187 &dev_attr_type, 221 &dev_attr_type,
222 &dev_attr_name,
223 &dev_attr_class,
188 &dev_attr_address, 224 &dev_attr_address,
225 &dev_attr_features,
189 &dev_attr_manufacturer, 226 &dev_attr_manufacturer,
190 &dev_attr_hci_version, 227 &dev_attr_hci_version,
191 &dev_attr_hci_revision, 228 &dev_attr_hci_revision,
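
The hci_sysfs.c additions export read-only name, class and features attributes alongside the existing address and version entries, so userspace can read them without issuing raw HCI commands. A small illustrative reader follows; the /sys/class/bluetooth/hci0 path is an assumption about where the device is registered, not something stated in the patch.

#include <stdio.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/sys/class/bluetooth/hci0/features", "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fgets(line, sizeof(line), f))
		printf("hci0 features: %s", line);	/* the 8-byte LMP feature mask as hex */
	fclose(f);
	return 0;
}
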
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index 66c736953cfe..4bbacddeb49d 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -247,7 +247,7 @@ static inline int hidp_queue_report(struct hidp_session *session, unsigned char
247{ 247{
248 struct sk_buff *skb; 248 struct sk_buff *skb;
249 249
250 BT_DBG("session %p hid %p data %p size %d", session, device, data, size); 250 BT_DBG("session %p hid %p data %p size %d", session, session->hid, data, size);
251 251
252 if (!(skb = alloc_skb(size + 1, GFP_ATOMIC))) { 252 if (!(skb = alloc_skb(size + 1, GFP_ATOMIC))) {
253 BT_ERR("Can't allocate memory for new frame"); 253 BT_ERR("Can't allocate memory for new frame");
diff --git a/net/bluetooth/l2cap.c b/net/bluetooth/l2cap.c
index 36ef27b625db..6fbbae78b304 100644
--- a/net/bluetooth/l2cap.c
+++ b/net/bluetooth/l2cap.c
@@ -55,7 +55,9 @@
55#define BT_DBG(D...) 55#define BT_DBG(D...)
56#endif 56#endif
57 57
58#define VERSION "2.8" 58#define VERSION "2.9"
59
60static u32 l2cap_feat_mask = 0x0000;
59 61
60static const struct proto_ops l2cap_sock_ops; 62static const struct proto_ops l2cap_sock_ops;
61 63
@@ -258,7 +260,119 @@ static void l2cap_chan_del(struct sock *sk, int err)
258 sk->sk_state_change(sk); 260 sk->sk_state_change(sk);
259} 261}
260 262
263static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
264{
265 u8 id;
266
267 /* Get next available identifier.
268 * 1 - 128 are used by kernel.
269 * 129 - 199 are reserved.
270 * 200 - 254 are used by utilities like l2ping, etc.
271 */
272
273 spin_lock_bh(&conn->lock);
274
275 if (++conn->tx_ident > 128)
276 conn->tx_ident = 1;
277
278 id = conn->tx_ident;
279
280 spin_unlock_bh(&conn->lock);
281
282 return id;
283}
284
285static inline int l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
286{
287 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
288
289 BT_DBG("code 0x%2.2x", code);
290
291 if (!skb)
292 return -ENOMEM;
293
294 return hci_send_acl(conn->hcon, skb, 0);
295}
296
261/* ---- L2CAP connections ---- */ 297/* ---- L2CAP connections ---- */
298static void l2cap_conn_start(struct l2cap_conn *conn)
299{
300 struct l2cap_chan_list *l = &conn->chan_list;
301 struct sock *sk;
302
303 BT_DBG("conn %p", conn);
304
305 read_lock(&l->lock);
306
307 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
308 bh_lock_sock(sk);
309
310 if (sk->sk_type != SOCK_SEQPACKET) {
311 l2cap_sock_clear_timer(sk);
312 sk->sk_state = BT_CONNECTED;
313 sk->sk_state_change(sk);
314 } else if (sk->sk_state == BT_CONNECT) {
315 struct l2cap_conn_req req;
316 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
317 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
318 req.psm = l2cap_pi(sk)->psm;
319 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
320 L2CAP_CONN_REQ, sizeof(req), &req);
321 }
322
323 bh_unlock_sock(sk);
324 }
325
326 read_unlock(&l->lock);
327}
328
329static void l2cap_conn_ready(struct l2cap_conn *conn)
330{
331 BT_DBG("conn %p", conn);
332
333 if (conn->chan_list.head || !hlist_empty(&l2cap_sk_list.head)) {
334 struct l2cap_info_req req;
335
336 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
337
338 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
339 conn->info_ident = l2cap_get_ident(conn);
340
341 mod_timer(&conn->info_timer,
342 jiffies + msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
343
344 l2cap_send_cmd(conn, conn->info_ident,
345 L2CAP_INFO_REQ, sizeof(req), &req);
346 }
347}
348
349/* Notify sockets that we cannot guarantee reliability anymore */
350static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
351{
352 struct l2cap_chan_list *l = &conn->chan_list;
353 struct sock *sk;
354
355 BT_DBG("conn %p", conn);
356
357 read_lock(&l->lock);
358
359 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
360 if (l2cap_pi(sk)->link_mode & L2CAP_LM_RELIABLE)
361 sk->sk_err = err;
362 }
363
364 read_unlock(&l->lock);
365}
366
367static void l2cap_info_timeout(unsigned long arg)
368{
369 struct l2cap_conn *conn = (void *) arg;
370
371 conn->info_ident = 0;
372
373 l2cap_conn_start(conn);
374}
375
262static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status) 376static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
263{ 377{
264 struct l2cap_conn *conn = hcon->l2cap_data; 378 struct l2cap_conn *conn = hcon->l2cap_data;
@@ -279,6 +393,12 @@ static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
279 conn->src = &hcon->hdev->bdaddr; 393 conn->src = &hcon->hdev->bdaddr;
280 conn->dst = &hcon->dst; 394 conn->dst = &hcon->dst;
281 395
396 conn->feat_mask = 0;
397
398 init_timer(&conn->info_timer);
399 conn->info_timer.function = l2cap_info_timeout;
400 conn->info_timer.data = (unsigned long) conn;
401
282 spin_lock_init(&conn->lock); 402 spin_lock_init(&conn->lock);
283 rwlock_init(&conn->chan_list.lock); 403 rwlock_init(&conn->chan_list.lock);
284 404
@@ -318,40 +438,6 @@ static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, stru
318 write_unlock_bh(&l->lock); 438 write_unlock_bh(&l->lock);
319} 439}
320 440
321static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
322{
323 u8 id;
324
325 /* Get next available identifier.
326 * 1 - 128 are used by kernel.
327 * 129 - 199 are reserved.
328 * 200 - 254 are used by utilities like l2ping, etc.
329 */
330
331 spin_lock_bh(&conn->lock);
332
333 if (++conn->tx_ident > 128)
334 conn->tx_ident = 1;
335
336 id = conn->tx_ident;
337
338 spin_unlock_bh(&conn->lock);
339
340 return id;
341}
342
343static inline int l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
344{
345 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
346
347 BT_DBG("code 0x%2.2x", code);
348
349 if (!skb)
350 return -ENOMEM;
351
352 return hci_send_acl(conn->hcon, skb, 0);
353}
354
355/* ---- Socket interface ---- */ 441/* ---- Socket interface ---- */
356static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src) 442static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
357{ 443{
@@ -508,7 +594,6 @@ static void l2cap_sock_init(struct sock *sk, struct sock *parent)
508 594
509 /* Default config options */ 595 /* Default config options */
510 pi->conf_len = 0; 596 pi->conf_len = 0;
511 pi->conf_mtu = L2CAP_DEFAULT_MTU;
512 pi->flush_to = L2CAP_DEFAULT_FLUSH_TO; 597 pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
513} 598}
514 599
@@ -530,7 +615,7 @@ static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int p
530 INIT_LIST_HEAD(&bt_sk(sk)->accept_q); 615 INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
531 616
532 sk->sk_destruct = l2cap_sock_destruct; 617 sk->sk_destruct = l2cap_sock_destruct;
533 sk->sk_sndtimeo = L2CAP_CONN_TIMEOUT; 618 sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);
534 619
535 sock_reset_flag(sk, SOCK_ZAPPED); 620 sock_reset_flag(sk, SOCK_ZAPPED);
536 621
@@ -650,6 +735,11 @@ static int l2cap_do_connect(struct sock *sk)
650 l2cap_sock_set_timer(sk, sk->sk_sndtimeo); 735 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
651 736
652 if (hcon->state == BT_CONNECTED) { 737 if (hcon->state == BT_CONNECTED) {
738 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)) {
739 l2cap_conn_ready(conn);
740 goto done;
741 }
742
653 if (sk->sk_type == SOCK_SEQPACKET) { 743 if (sk->sk_type == SOCK_SEQPACKET) {
654 struct l2cap_conn_req req; 744 struct l2cap_conn_req req;
655 l2cap_pi(sk)->ident = l2cap_get_ident(conn); 745 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
@@ -958,7 +1048,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch
958 opts.imtu = l2cap_pi(sk)->imtu; 1048 opts.imtu = l2cap_pi(sk)->imtu;
959 opts.omtu = l2cap_pi(sk)->omtu; 1049 opts.omtu = l2cap_pi(sk)->omtu;
960 opts.flush_to = l2cap_pi(sk)->flush_to; 1050 opts.flush_to = l2cap_pi(sk)->flush_to;
961 opts.mode = 0x00; 1051 opts.mode = L2CAP_MODE_BASIC;
962 1052
963 len = min_t(unsigned int, sizeof(opts), optlen); 1053 len = min_t(unsigned int, sizeof(opts), optlen);
964 if (copy_from_user((char *) &opts, optval, len)) { 1054 if (copy_from_user((char *) &opts, optval, len)) {
@@ -1007,7 +1097,7 @@ static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, ch
1007 opts.imtu = l2cap_pi(sk)->imtu; 1097 opts.imtu = l2cap_pi(sk)->imtu;
1008 opts.omtu = l2cap_pi(sk)->omtu; 1098 opts.omtu = l2cap_pi(sk)->omtu;
1009 opts.flush_to = l2cap_pi(sk)->flush_to; 1099 opts.flush_to = l2cap_pi(sk)->flush_to;
1010 opts.mode = 0x00; 1100 opts.mode = L2CAP_MODE_BASIC;
1011 1101
1012 len = min_t(unsigned int, len, sizeof(opts)); 1102 len = min_t(unsigned int, len, sizeof(opts));
1013 if (copy_to_user(optval, (char *) &opts, len)) 1103 if (copy_to_user(optval, (char *) &opts, len))
@@ -1084,52 +1174,6 @@ static int l2cap_sock_release(struct socket *sock)
1084 return err; 1174 return err;
1085} 1175}
1086 1176
1087static void l2cap_conn_ready(struct l2cap_conn *conn)
1088{
1089 struct l2cap_chan_list *l = &conn->chan_list;
1090 struct sock *sk;
1091
1092 BT_DBG("conn %p", conn);
1093
1094 read_lock(&l->lock);
1095
1096 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
1097 bh_lock_sock(sk);
1098
1099 if (sk->sk_type != SOCK_SEQPACKET) {
1100 l2cap_sock_clear_timer(sk);
1101 sk->sk_state = BT_CONNECTED;
1102 sk->sk_state_change(sk);
1103 } else if (sk->sk_state == BT_CONNECT) {
1104 struct l2cap_conn_req req;
1105 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
1106 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
1107 req.psm = l2cap_pi(sk)->psm;
1108 l2cap_send_cmd(conn, l2cap_pi(sk)->ident, L2CAP_CONN_REQ, sizeof(req), &req);
1109 }
1110
1111 bh_unlock_sock(sk);
1112 }
1113
1114 read_unlock(&l->lock);
1115}
1116
1117/* Notify sockets that we cannot guarantee reliability anymore */
1118static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1119{
1120 struct l2cap_chan_list *l = &conn->chan_list;
1121 struct sock *sk;
1122
1123 BT_DBG("conn %p", conn);
1124
1125 read_lock(&l->lock);
1126 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
1127 if (l2cap_pi(sk)->link_mode & L2CAP_LM_RELIABLE)
1128 sk->sk_err = err;
1129 }
1130 read_unlock(&l->lock);
1131}
1132
1133static void l2cap_chan_ready(struct sock *sk) 1177static void l2cap_chan_ready(struct sock *sk)
1134{ 1178{
1135 struct sock *parent = bt_sk(sk)->parent; 1179 struct sock *parent = bt_sk(sk)->parent;
@@ -1256,11 +1300,11 @@ static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned
1256 break; 1300 break;
1257 1301
1258 case 2: 1302 case 2:
1259 *val = __le16_to_cpu(*((__le16 *)opt->val)); 1303 *val = __le16_to_cpu(*((__le16 *) opt->val));
1260 break; 1304 break;
1261 1305
1262 case 4: 1306 case 4:
1263 *val = __le32_to_cpu(*((__le32 *)opt->val)); 1307 *val = __le32_to_cpu(*((__le32 *) opt->val));
1264 break; 1308 break;
1265 1309
1266 default: 1310 default:
@@ -1332,6 +1376,8 @@ static int l2cap_parse_conf_req(struct sock *sk, void *data)
1332 int len = pi->conf_len; 1376 int len = pi->conf_len;
1333 int type, hint, olen; 1377 int type, hint, olen;
1334 unsigned long val; 1378 unsigned long val;
1379 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
1380 u16 mtu = L2CAP_DEFAULT_MTU;
1335 u16 result = L2CAP_CONF_SUCCESS; 1381 u16 result = L2CAP_CONF_SUCCESS;
1336 1382
1337 BT_DBG("sk %p", sk); 1383 BT_DBG("sk %p", sk);
@@ -1344,7 +1390,7 @@ static int l2cap_parse_conf_req(struct sock *sk, void *data)
1344 1390
1345 switch (type) { 1391 switch (type) {
1346 case L2CAP_CONF_MTU: 1392 case L2CAP_CONF_MTU:
1347 pi->conf_mtu = val; 1393 mtu = val;
1348 break; 1394 break;
1349 1395
1350 case L2CAP_CONF_FLUSH_TO: 1396 case L2CAP_CONF_FLUSH_TO:
@@ -1354,6 +1400,11 @@ static int l2cap_parse_conf_req(struct sock *sk, void *data)
1354 case L2CAP_CONF_QOS: 1400 case L2CAP_CONF_QOS:
1355 break; 1401 break;
1356 1402
1403 case L2CAP_CONF_RFC:
1404 if (olen == sizeof(rfc))
1405 memcpy(&rfc, (void *) val, olen);
1406 break;
1407
1357 default: 1408 default:
1358 if (hint) 1409 if (hint)
1359 break; 1410 break;
@@ -1368,12 +1419,24 @@ static int l2cap_parse_conf_req(struct sock *sk, void *data)
1368 /* Configure output options and let the other side know 1419 /* Configure output options and let the other side know
1369 * which ones we don't like. */ 1420 * which ones we don't like. */
1370 1421
1371 if (pi->conf_mtu < pi->omtu) 1422 if (rfc.mode == L2CAP_MODE_BASIC) {
1423 if (mtu < pi->omtu)
1424 result = L2CAP_CONF_UNACCEPT;
1425 else {
1426 pi->omtu = mtu;
1427 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
1428 }
1429
1430 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
1431 } else {
1372 result = L2CAP_CONF_UNACCEPT; 1432 result = L2CAP_CONF_UNACCEPT;
1373 else
1374 pi->omtu = pi->conf_mtu;
1375 1433
1376 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu); 1434 memset(&rfc, 0, sizeof(rfc));
1435 rfc.mode = L2CAP_MODE_BASIC;
1436
1437 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1438 sizeof(rfc), (unsigned long) &rfc);
1439 }
1377 } 1440 }
1378 1441
1379 rsp->scid = cpu_to_le16(pi->dcid); 1442 rsp->scid = cpu_to_le16(pi->dcid);
@@ -1397,6 +1460,23 @@ static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 fla
1397 return ptr - data; 1460 return ptr - data;
1398} 1461}
1399 1462
1463static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1464{
1465 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
1466
1467 if (rej->reason != 0x0000)
1468 return 0;
1469
1470 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
1471 cmd->ident == conn->info_ident) {
1472 conn->info_ident = 0;
1473 del_timer(&conn->info_timer);
1474 l2cap_conn_start(conn);
1475 }
1476
1477 return 0;
1478}
1479
1400static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data) 1480static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1401{ 1481{
1402 struct l2cap_chan_list *list = &conn->chan_list; 1482 struct l2cap_chan_list *list = &conn->chan_list;
@@ -1577,16 +1657,19 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr
1577 1657
1578 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp); 1658 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
1579 1659
1580 /* Output config done. */
1581 l2cap_pi(sk)->conf_state |= L2CAP_CONF_OUTPUT_DONE;
1582
1583 /* Reset config buffer. */ 1660 /* Reset config buffer. */
1584 l2cap_pi(sk)->conf_len = 0; 1661 l2cap_pi(sk)->conf_len = 0;
1585 1662
1663 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
1664 goto unlock;
1665
1586 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) { 1666 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
1587 sk->sk_state = BT_CONNECTED; 1667 sk->sk_state = BT_CONNECTED;
1588 l2cap_chan_ready(sk); 1668 l2cap_chan_ready(sk);
1589 } else if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) { 1669 goto unlock;
1670 }
1671
1672 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
1590 u8 req[64]; 1673 u8 req[64];
1591 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ, 1674 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1592 l2cap_build_conf_req(sk, req), req); 1675 l2cap_build_conf_req(sk, req), req);
@@ -1646,7 +1729,6 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr
1646 if (flags & 0x01) 1729 if (flags & 0x01)
1647 goto done; 1730 goto done;
1648 1731
1649 /* Input config done */
1650 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE; 1732 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
1651 1733
1652 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) { 1734 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
@@ -1711,16 +1793,27 @@ static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd
1711static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data) 1793static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1712{ 1794{
1713 struct l2cap_info_req *req = (struct l2cap_info_req *) data; 1795 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
1714 struct l2cap_info_rsp rsp;
1715 u16 type; 1796 u16 type;
1716 1797
1717 type = __le16_to_cpu(req->type); 1798 type = __le16_to_cpu(req->type);
1718 1799
1719 BT_DBG("type 0x%4.4x", type); 1800 BT_DBG("type 0x%4.4x", type);
1720 1801
1721 rsp.type = cpu_to_le16(type); 1802 if (type == L2CAP_IT_FEAT_MASK) {
1722 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP); 1803 u8 buf[8];
1723 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp), &rsp); 1804 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
1805 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
1806 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
1807 put_unaligned(cpu_to_le32(l2cap_feat_mask), (__le32 *) rsp->data);
1808 l2cap_send_cmd(conn, cmd->ident,
1809 L2CAP_INFO_RSP, sizeof(buf), buf);
1810 } else {
1811 struct l2cap_info_rsp rsp;
1812 rsp.type = cpu_to_le16(type);
1813 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
1814 l2cap_send_cmd(conn, cmd->ident,
1815 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
1816 }
1724 1817
1725 return 0; 1818 return 0;
1726} 1819}
@@ -1735,6 +1828,15 @@ static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cm
1735 1828
1736 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result); 1829 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
1737 1830
1831 conn->info_ident = 0;
1832
1833 del_timer(&conn->info_timer);
1834
1835 if (type == L2CAP_IT_FEAT_MASK)
1836 conn->feat_mask = __le32_to_cpu(get_unaligned((__le32 *) rsp->data));
1837
1838 l2cap_conn_start(conn);
1839
1738 return 0; 1840 return 0;
1739} 1841}
1740 1842
@@ -1764,7 +1866,7 @@ static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *sk
1764 1866
1765 switch (cmd.code) { 1867 switch (cmd.code) {
1766 case L2CAP_COMMAND_REJ: 1868 case L2CAP_COMMAND_REJ:
1767 /* FIXME: We should process this */ 1869 l2cap_command_rej(conn, &cmd, data);
1768 break; 1870 break;
1769 1871
1770 case L2CAP_CONN_REQ: 1872 case L2CAP_CONN_REQ:
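[Illustrative sketch, not part of the patch] The reworked L2CAP information request/response handlers above read and write the 32-bit feature mask through put_unaligned()/get_unaligned(), since the mask sits in a byte buffer with no alignment guarantee. A minimal sketch of that idiom, assuming the era's <asm/unaligned.h> macros; buffer layout and values here are placeholders.

    #include <linux/types.h>
    #include <asm/byteorder.h>
    #include <asm/unaligned.h>

    /* Illustrative only: store a 32-bit feature mask, little-endian, at an
     * address that may not be 4-byte aligned. */
    static void pack_feat_mask(u8 *data, u32 feat_mask)
    {
            put_unaligned(cpu_to_le32(feat_mask), (__le32 *) data);
    }

    /* Illustrative only: read it back in CPU byte order. */
    static u32 unpack_feat_mask(u8 *data)
    {
            return __le32_to_cpu(get_unaligned((__le32 *) data));
    }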
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index bb7220770f2c..e7ac6ba7ecab 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -33,11 +33,11 @@
33#include <linux/sched.h> 33#include <linux/sched.h>
34#include <linux/signal.h> 34#include <linux/signal.h>
35#include <linux/init.h> 35#include <linux/init.h>
36#include <linux/freezer.h>
37#include <linux/wait.h> 36#include <linux/wait.h>
38#include <linux/device.h> 37#include <linux/device.h>
39#include <linux/net.h> 38#include <linux/net.h>
40#include <linux/mutex.h> 39#include <linux/mutex.h>
40#include <linux/kthread.h>
41 41
42#include <net/sock.h> 42#include <net/sock.h>
43#include <asm/uaccess.h> 43#include <asm/uaccess.h>
@@ -68,7 +68,6 @@ static DEFINE_MUTEX(rfcomm_mutex);
68static unsigned long rfcomm_event; 68static unsigned long rfcomm_event;
69 69
70static LIST_HEAD(session_list); 70static LIST_HEAD(session_list);
71static atomic_t terminate, running;
72 71
73static int rfcomm_send_frame(struct rfcomm_session *s, u8 *data, int len); 72static int rfcomm_send_frame(struct rfcomm_session *s, u8 *data, int len);
74static int rfcomm_send_sabm(struct rfcomm_session *s, u8 dlci); 73static int rfcomm_send_sabm(struct rfcomm_session *s, u8 dlci);
@@ -1850,26 +1849,6 @@ static inline void rfcomm_process_sessions(void)
1850 rfcomm_unlock(); 1849 rfcomm_unlock();
1851} 1850}
1852 1851
1853static void rfcomm_worker(void)
1854{
1855 BT_DBG("");
1856
1857 while (!atomic_read(&terminate)) {
1858 set_current_state(TASK_INTERRUPTIBLE);
1859 if (!test_bit(RFCOMM_SCHED_WAKEUP, &rfcomm_event)) {
1860 /* No pending events. Let's sleep.
1861 * Incoming connections and data will wake us up. */
1862 schedule();
1863 }
1864 set_current_state(TASK_RUNNING);
1865
1866 /* Process stuff */
1867 clear_bit(RFCOMM_SCHED_WAKEUP, &rfcomm_event);
1868 rfcomm_process_sessions();
1869 }
1870 return;
1871}
1872
1873static int rfcomm_add_listener(bdaddr_t *ba) 1852static int rfcomm_add_listener(bdaddr_t *ba)
1874{ 1853{
1875 struct sockaddr_l2 addr; 1854 struct sockaddr_l2 addr;
@@ -1935,22 +1914,28 @@ static void rfcomm_kill_listener(void)
1935 1914
1936static int rfcomm_run(void *unused) 1915static int rfcomm_run(void *unused)
1937{ 1916{
1938 rfcomm_thread = current; 1917 BT_DBG("");
1939
1940 atomic_inc(&running);
1941 1918
1942 daemonize("krfcommd");
1943 set_user_nice(current, -10); 1919 set_user_nice(current, -10);
1944 1920
1945 BT_DBG("");
1946
1947 rfcomm_add_listener(BDADDR_ANY); 1921 rfcomm_add_listener(BDADDR_ANY);
1948 1922
1949 rfcomm_worker(); 1923 while (!kthread_should_stop()) {
1924 set_current_state(TASK_INTERRUPTIBLE);
1925 if (!test_bit(RFCOMM_SCHED_WAKEUP, &rfcomm_event)) {
1926 /* No pending events. Let's sleep.
1927 * Incoming connections and data will wake us up. */
1928 schedule();
1929 }
1930 set_current_state(TASK_RUNNING);
1931
1932 /* Process stuff */
1933 clear_bit(RFCOMM_SCHED_WAKEUP, &rfcomm_event);
1934 rfcomm_process_sessions();
1935 }
1950 1936
1951 rfcomm_kill_listener(); 1937 rfcomm_kill_listener();
1952 1938
1953 atomic_dec(&running);
1954 return 0; 1939 return 0;
1955} 1940}
1956 1941
@@ -2059,7 +2044,11 @@ static int __init rfcomm_init(void)
2059 2044
2060 hci_register_cb(&rfcomm_cb); 2045 hci_register_cb(&rfcomm_cb);
2061 2046
2062 kernel_thread(rfcomm_run, NULL, CLONE_KERNEL); 2047 rfcomm_thread = kthread_run(rfcomm_run, NULL, "krfcommd");
2048 if (IS_ERR(rfcomm_thread)) {
2049 hci_unregister_cb(&rfcomm_cb);
2050 return PTR_ERR(rfcomm_thread);
2051 }
2063 2052
2064 if (class_create_file(bt_class, &class_attr_rfcomm_dlc) < 0) 2053 if (class_create_file(bt_class, &class_attr_rfcomm_dlc) < 0)
2065 BT_ERR("Failed to create RFCOMM info file"); 2054 BT_ERR("Failed to create RFCOMM info file");
@@ -2081,14 +2070,7 @@ static void __exit rfcomm_exit(void)
2081 2070
2082 hci_unregister_cb(&rfcomm_cb); 2071 hci_unregister_cb(&rfcomm_cb);
2083 2072
2084 /* Terminate working thread. 2073 kthread_stop(rfcomm_thread);
2085 * ie. Set terminate flag and wake it up */
2086 atomic_inc(&terminate);
2087 rfcomm_schedule(RFCOMM_SCHED_STATE);
2088
2089 /* Wait until thread is running */
2090 while (atomic_read(&running))
2091 schedule();
2092 2074
2093#ifdef CONFIG_BT_RFCOMM_TTY 2075#ifdef CONFIG_BT_RFCOMM_TTY
2094 rfcomm_cleanup_ttys(); 2076 rfcomm_cleanup_ttys();
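[Illustrative sketch, not part of the patch] The rfcomm/core.c hunks above replace kernel_thread()/daemonize() plus hand-rolled terminate/running counters with the kthread API. A minimal sketch of that pattern, assuming kthread_run()/kthread_should_stop()/kthread_stop(); the thread name and work loop below are placeholders.

    #include <linux/kthread.h>
    #include <linux/sched.h>
    #include <linux/err.h>

    static struct task_struct *example_thread;

    static int example_run(void *unused)
    {
            while (!kthread_should_stop()) {
                    /* do pending work, then sleep until woken or told to stop */
                    set_current_state(TASK_INTERRUPTIBLE);
                    schedule();
                    set_current_state(TASK_RUNNING);
            }
            return 0;
    }

    static int example_start(void)
    {
            example_thread = kthread_run(example_run, NULL, "kexampled");
            if (IS_ERR(example_thread))
                    return PTR_ERR(example_thread);  /* creation failed, no thread to stop */
            return 0;
    }

    static void example_stop(void)
    {
            kthread_stop(example_thread);  /* wakes the thread and waits for it to exit */
    }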
diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
index 22a832098d44..e447651a2dbe 100644
--- a/net/bluetooth/rfcomm/tty.c
+++ b/net/bluetooth/rfcomm/tty.c
@@ -189,6 +189,23 @@ static struct device *rfcomm_get_device(struct rfcomm_dev *dev)
189 return conn ? &conn->dev : NULL; 189 return conn ? &conn->dev : NULL;
190} 190}
191 191
192static ssize_t show_address(struct device *tty_dev, struct device_attribute *attr, char *buf)
193{
194 struct rfcomm_dev *dev = dev_get_drvdata(tty_dev);
195 bdaddr_t bdaddr;
196 baswap(&bdaddr, &dev->dst);
197 return sprintf(buf, "%s\n", batostr(&bdaddr));
198}
199
200static ssize_t show_channel(struct device *tty_dev, struct device_attribute *attr, char *buf)
201{
202 struct rfcomm_dev *dev = dev_get_drvdata(tty_dev);
203 return sprintf(buf, "%d\n", dev->channel);
204}
205
206static DEVICE_ATTR(address, S_IRUGO, show_address, NULL);
207static DEVICE_ATTR(channel, S_IRUGO, show_channel, NULL);
208
192static int rfcomm_dev_add(struct rfcomm_dev_req *req, struct rfcomm_dlc *dlc) 209static int rfcomm_dev_add(struct rfcomm_dev_req *req, struct rfcomm_dlc *dlc)
193{ 210{
194 struct rfcomm_dev *dev; 211 struct rfcomm_dev *dev;
@@ -281,6 +298,14 @@ out:
281 return err; 298 return err;
282 } 299 }
283 300
301 dev_set_drvdata(dev->tty_dev, dev);
302
303 if (device_create_file(dev->tty_dev, &dev_attr_address) < 0)
304 BT_ERR("Failed to create address attribute");
305
306 if (device_create_file(dev->tty_dev, &dev_attr_channel) < 0)
307 BT_ERR("Failed to create channel attribute");
308
284 return dev->id; 309 return dev->id;
285} 310}
286 311
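[Illustrative sketch, not part of the patch] The tty.c hunk above exposes per-device values through sysfs: DEVICE_ATTR() declares the attribute, the show callback formats one value from drvdata, and device_create_file() registers it on an existing struct device. A minimal sketch of the same pattern; the attribute name and drvdata type are placeholders.

    #include <linux/device.h>
    #include <linux/stat.h>
    #include <linux/kernel.h>

    struct example_dev {
            int channel;  /* illustrative field exposed via sysfs */
    };

    static ssize_t show_channel(struct device *dev, struct device_attribute *attr,
                                char *buf)
    {
            struct example_dev *edev = dev_get_drvdata(dev);
            return sprintf(buf, "%d\n", edev->channel);
    }

    static DEVICE_ATTR(channel, S_IRUGO, show_channel, NULL);

    static int example_register_attr(struct device *dev, struct example_dev *edev)
    {
            dev_set_drvdata(dev, edev);  /* make edev reachable from the callback */
            return device_create_file(dev, &dev_attr_channel);
    }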
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index 65b6fb1c4154..82d0dfdfa7e2 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -189,7 +189,7 @@ static int sco_connect(struct sock *sk)
189 struct sco_conn *conn; 189 struct sco_conn *conn;
190 struct hci_conn *hcon; 190 struct hci_conn *hcon;
191 struct hci_dev *hdev; 191 struct hci_dev *hdev;
192 int err = 0; 192 int err, type;
193 193
194 BT_DBG("%s -> %s", batostr(src), batostr(dst)); 194 BT_DBG("%s -> %s", batostr(src), batostr(dst));
195 195
@@ -200,7 +200,9 @@ static int sco_connect(struct sock *sk)
200 200
201 err = -ENOMEM; 201 err = -ENOMEM;
202 202
203 hcon = hci_connect(hdev, SCO_LINK, dst); 203 type = lmp_esco_capable(hdev) ? ESCO_LINK : SCO_LINK;
204
205 hcon = hci_connect(hdev, type, dst);
204 if (!hcon) 206 if (!hcon)
205 goto done; 207 goto done;
206 208
@@ -224,6 +226,7 @@ static int sco_connect(struct sock *sk)
224 sk->sk_state = BT_CONNECT; 226 sk->sk_state = BT_CONNECT;
225 sco_sock_set_timer(sk, sk->sk_sndtimeo); 227 sco_sock_set_timer(sk, sk->sk_sndtimeo);
226 } 228 }
229
227done: 230done:
228 hci_dev_unlock_bh(hdev); 231 hci_dev_unlock_bh(hdev);
229 hci_dev_put(hdev); 232 hci_dev_put(hdev);
@@ -846,7 +849,7 @@ static int sco_connect_cfm(struct hci_conn *hcon, __u8 status)
846{ 849{
847 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status); 850 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
848 851
849 if (hcon->type != SCO_LINK) 852 if (hcon->type != SCO_LINK && hcon->type != ESCO_LINK)
850 return 0; 853 return 0;
851 854
852 if (!status) { 855 if (!status) {
@@ -865,10 +868,11 @@ static int sco_disconn_ind(struct hci_conn *hcon, __u8 reason)
865{ 868{
866 BT_DBG("hcon %p reason %d", hcon, reason); 869 BT_DBG("hcon %p reason %d", hcon, reason);
867 870
868 if (hcon->type != SCO_LINK) 871 if (hcon->type != SCO_LINK && hcon->type != ESCO_LINK)
869 return 0; 872 return 0;
870 873
871 sco_conn_del(hcon, bt_err(reason)); 874 sco_conn_del(hcon, bt_err(reason));
875
872 return 0; 876 return 0;
873} 877}
874 878
diff --git a/net/core/dev.c b/net/core/dev.c
index 38b03da5c1ca..872658927e47 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1553,7 +1553,7 @@ gso:
1553 return rc; 1553 return rc;
1554 } 1554 }
1555 if (unlikely((netif_queue_stopped(dev) || 1555 if (unlikely((netif_queue_stopped(dev) ||
1556 netif_subqueue_stopped(dev, skb->queue_mapping)) && 1556 netif_subqueue_stopped(dev, skb)) &&
1557 skb->next)) 1557 skb->next))
1558 return NETDEV_TX_BUSY; 1558 return NETDEV_TX_BUSY;
1559 } while (skb->next); 1559 } while (skb->next);
@@ -1661,7 +1661,7 @@ gso:
1661 q = dev->qdisc; 1661 q = dev->qdisc;
1662 if (q->enqueue) { 1662 if (q->enqueue) {
1663 /* reset queue_mapping to zero */ 1663 /* reset queue_mapping to zero */
1664 skb->queue_mapping = 0; 1664 skb_set_queue_mapping(skb, 0);
1665 rc = q->enqueue(skb, q); 1665 rc = q->enqueue(skb, q);
1666 qdisc_run(dev); 1666 qdisc_run(dev);
1667 spin_unlock(&dev->queue_lock); 1667 spin_unlock(&dev->queue_lock);
@@ -1692,7 +1692,7 @@ gso:
1692 HARD_TX_LOCK(dev, cpu); 1692 HARD_TX_LOCK(dev, cpu);
1693 1693
1694 if (!netif_queue_stopped(dev) && 1694 if (!netif_queue_stopped(dev) &&
1695 !netif_subqueue_stopped(dev, skb->queue_mapping)) { 1695 !netif_subqueue_stopped(dev, skb)) {
1696 rc = 0; 1696 rc = 0;
1697 if (!dev_hard_start_xmit(skb, dev)) { 1697 if (!dev_hard_start_xmit(skb, dev)) {
1698 HARD_TX_UNLOCK(dev); 1698 HARD_TX_UNLOCK(dev);
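[Illustrative sketch, not part of the patch] The dev.c hunks above (and the netpoll, pktgen and sch_teql hunks that follow) stop touching skb->queue_mapping directly: skb_set_queue_mapping()/skb_get_queue_mapping() wrap the field, netif_subqueue_stopped() now takes the skb, and __netif_subqueue_stopped() keeps the raw queue-index form. A minimal sketch of the accessor-based checks, with placeholder device and queue values.

    #include <linux/skbuff.h>
    #include <linux/netdevice.h>

    /* Illustrative only: can this packet be handed to the driver right now? */
    static int example_can_xmit(struct net_device *dev, struct sk_buff *skb)
    {
            u16 subq = skb_get_queue_mapping(skb);

            if (netif_queue_stopped(dev) ||
                __netif_subqueue_stopped(dev, subq))  /* same test as netif_subqueue_stopped(dev, skb) */
                    return 0;
            return 1;
    }

    /* Illustrative only: e.g. reset the mapping to queue 0 before enqueue. */
    static void example_tag_skb(struct sk_buff *skb)
    {
            skb_set_queue_mapping(skb, 0);
    }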
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 67ba9914e52e..05979e356963 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -1438,6 +1438,9 @@ int neigh_table_clear(struct neigh_table *tbl)
1438 free_percpu(tbl->stats); 1438 free_percpu(tbl->stats);
1439 tbl->stats = NULL; 1439 tbl->stats = NULL;
1440 1440
1441 kmem_cache_destroy(tbl->kmem_cachep);
1442 tbl->kmem_cachep = NULL;
1443
1441 return 0; 1444 return 0;
1442} 1445}
1443 1446
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 95daba624967..bf8d18f1b013 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -67,7 +67,7 @@ static void queue_process(struct work_struct *work)
67 local_irq_save(flags); 67 local_irq_save(flags);
68 netif_tx_lock(dev); 68 netif_tx_lock(dev);
69 if ((netif_queue_stopped(dev) || 69 if ((netif_queue_stopped(dev) ||
70 netif_subqueue_stopped(dev, skb->queue_mapping)) || 70 netif_subqueue_stopped(dev, skb)) ||
71 dev->hard_start_xmit(skb, dev) != NETDEV_TX_OK) { 71 dev->hard_start_xmit(skb, dev) != NETDEV_TX_OK) {
72 skb_queue_head(&npinfo->txq, skb); 72 skb_queue_head(&npinfo->txq, skb);
73 netif_tx_unlock(dev); 73 netif_tx_unlock(dev);
@@ -269,7 +269,7 @@ static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
269 tries > 0; --tries) { 269 tries > 0; --tries) {
270 if (netif_tx_trylock(dev)) { 270 if (netif_tx_trylock(dev)) {
271 if (!netif_queue_stopped(dev) && 271 if (!netif_queue_stopped(dev) &&
272 !netif_subqueue_stopped(dev, skb->queue_mapping)) 272 !netif_subqueue_stopped(dev, skb))
273 status = dev->hard_start_xmit(skb, dev); 273 status = dev->hard_start_xmit(skb, dev);
274 netif_tx_unlock(dev); 274 netif_tx_unlock(dev);
275 275
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index c4719edb55c0..de33f36947e9 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -2603,8 +2603,7 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
2603 skb->network_header = skb->tail; 2603 skb->network_header = skb->tail;
2604 skb->transport_header = skb->network_header + sizeof(struct iphdr); 2604 skb->transport_header = skb->network_header + sizeof(struct iphdr);
2605 skb_put(skb, sizeof(struct iphdr) + sizeof(struct udphdr)); 2605 skb_put(skb, sizeof(struct iphdr) + sizeof(struct udphdr));
2606 skb->queue_mapping = pkt_dev->cur_queue_map; 2606 skb_set_queue_mapping(skb, pkt_dev->cur_queue_map);
2607
2608 iph = ip_hdr(skb); 2607 iph = ip_hdr(skb);
2609 udph = udp_hdr(skb); 2608 udph = udp_hdr(skb);
2610 2609
@@ -2941,8 +2940,7 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
2941 skb->network_header = skb->tail; 2940 skb->network_header = skb->tail;
2942 skb->transport_header = skb->network_header + sizeof(struct ipv6hdr); 2941 skb->transport_header = skb->network_header + sizeof(struct ipv6hdr);
2943 skb_put(skb, sizeof(struct ipv6hdr) + sizeof(struct udphdr)); 2942 skb_put(skb, sizeof(struct ipv6hdr) + sizeof(struct udphdr));
2944 skb->queue_mapping = pkt_dev->cur_queue_map; 2943 skb_set_queue_mapping(skb, pkt_dev->cur_queue_map);
2945
2946 iph = ipv6_hdr(skb); 2944 iph = ipv6_hdr(skb);
2947 udph = udp_hdr(skb); 2945 udph = udp_hdr(skb);
2948 2946
@@ -3385,7 +3383,7 @@ static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev)
3385 3383
3386 if ((netif_queue_stopped(odev) || 3384 if ((netif_queue_stopped(odev) ||
3387 (pkt_dev->skb && 3385 (pkt_dev->skb &&
3388 netif_subqueue_stopped(odev, pkt_dev->skb->queue_mapping))) || 3386 netif_subqueue_stopped(odev, pkt_dev->skb))) ||
3389 need_resched()) { 3387 need_resched()) {
3390 idle_start = getCurUs(); 3388 idle_start = getCurUs();
3391 3389
@@ -3402,7 +3400,7 @@ static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev)
3402 pkt_dev->idle_acc += getCurUs() - idle_start; 3400 pkt_dev->idle_acc += getCurUs() - idle_start;
3403 3401
3404 if (netif_queue_stopped(odev) || 3402 if (netif_queue_stopped(odev) ||
3405 netif_subqueue_stopped(odev, pkt_dev->skb->queue_mapping)) { 3403 netif_subqueue_stopped(odev, pkt_dev->skb)) {
3406 pkt_dev->next_tx_us = getCurUs(); /* TODO */ 3404 pkt_dev->next_tx_us = getCurUs(); /* TODO */
3407 pkt_dev->next_tx_ns = 0; 3405 pkt_dev->next_tx_ns = 0;
3408 goto out; /* Try the next interface */ 3406 goto out; /* Try the next interface */
@@ -3431,7 +3429,7 @@ static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev)
3431 3429
3432 netif_tx_lock_bh(odev); 3430 netif_tx_lock_bh(odev);
3433 if (!netif_queue_stopped(odev) && 3431 if (!netif_queue_stopped(odev) &&
3434 !netif_subqueue_stopped(odev, pkt_dev->skb->queue_mapping)) { 3432 !netif_subqueue_stopped(odev, pkt_dev->skb)) {
3435 3433
3436 atomic_inc(&(pkt_dev->skb->users)); 3434 atomic_inc(&(pkt_dev->skb->users));
3437 retry_now: 3435 retry_now:
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 70d9b5da96ae..4e2c84fcf276 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -2045,7 +2045,7 @@ skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
2045 if (copy > 0) { 2045 if (copy > 0) {
2046 if (copy > len) 2046 if (copy > len)
2047 copy = len; 2047 copy = len;
2048 sg[elt].page = virt_to_page(skb->data + offset); 2048 sg_set_page(&sg[elt], virt_to_page(skb->data + offset));
2049 sg[elt].offset = (unsigned long)(skb->data + offset) % PAGE_SIZE; 2049 sg[elt].offset = (unsigned long)(skb->data + offset) % PAGE_SIZE;
2050 sg[elt].length = copy; 2050 sg[elt].length = copy;
2051 elt++; 2051 elt++;
@@ -2065,7 +2065,7 @@ skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
2065 2065
2066 if (copy > len) 2066 if (copy > len)
2067 copy = len; 2067 copy = len;
2068 sg[elt].page = frag->page; 2068 sg_set_page(&sg[elt], frag->page);
2069 sg[elt].offset = frag->page_offset+offset-start; 2069 sg[elt].offset = frag->page_offset+offset-start;
2070 sg[elt].length = copy; 2070 sg[elt].length = copy;
2071 elt++; 2071 elt++;
diff --git a/net/dccp/diag.c b/net/dccp/diag.c
index 0f3745585a94..d8a3509b26f6 100644
--- a/net/dccp/diag.c
+++ b/net/dccp/diag.c
@@ -68,3 +68,4 @@ module_exit(dccp_diag_fini);
68MODULE_LICENSE("GPL"); 68MODULE_LICENSE("GPL");
69MODULE_AUTHOR("Arnaldo Carvalho de Melo <acme@mandriva.com>"); 69MODULE_AUTHOR("Arnaldo Carvalho de Melo <acme@mandriva.com>");
70MODULE_DESCRIPTION("DCCP inet_diag handler"); 70MODULE_DESCRIPTION("DCCP inet_diag handler");
71MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_INET_DIAG, DCCPDIAG_GETSOCK);
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index 44f6e17e105f..222549ab274a 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -1037,8 +1037,8 @@ module_exit(dccp_v4_exit);
1037 * values directly, Also cover the case where the protocol is not specified, 1037 * values directly, Also cover the case where the protocol is not specified,
1038 * i.e. net-pf-PF_INET-proto-0-type-SOCK_DCCP 1038 * i.e. net-pf-PF_INET-proto-0-type-SOCK_DCCP
1039 */ 1039 */
1040MODULE_ALIAS("net-pf-" __stringify(PF_INET) "-proto-33-type-6"); 1040MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET, 33, 6);
1041MODULE_ALIAS("net-pf-" __stringify(PF_INET) "-proto-0-type-6"); 1041MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET, 0, 6);
1042MODULE_LICENSE("GPL"); 1042MODULE_LICENSE("GPL");
1043MODULE_AUTHOR("Arnaldo Carvalho de Melo <acme@mandriva.com>"); 1043MODULE_AUTHOR("Arnaldo Carvalho de Melo <acme@mandriva.com>");
1044MODULE_DESCRIPTION("DCCP - Datagram Congestion Controlled Protocol"); 1044MODULE_DESCRIPTION("DCCP - Datagram Congestion Controlled Protocol");
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index cac53548c2d8..bbadd6681b83 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -1219,8 +1219,8 @@ module_exit(dccp_v6_exit);
1219 * values directly, Also cover the case where the protocol is not specified, 1219 * values directly, Also cover the case where the protocol is not specified,
1220 * i.e. net-pf-PF_INET6-proto-0-type-SOCK_DCCP 1220 * i.e. net-pf-PF_INET6-proto-0-type-SOCK_DCCP
1221 */ 1221 */
1222MODULE_ALIAS("net-pf-" __stringify(PF_INET6) "-proto-33-type-6"); 1222MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET6, 33, 6);
1223MODULE_ALIAS("net-pf-" __stringify(PF_INET6) "-proto-0-type-6"); 1223MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET6, 0, 6);
1224MODULE_LICENSE("GPL"); 1224MODULE_LICENSE("GPL");
1225MODULE_AUTHOR("Arnaldo Carvalho de Melo <acme@mandriva.com>"); 1225MODULE_AUTHOR("Arnaldo Carvalho de Melo <acme@mandriva.com>");
1226MODULE_DESCRIPTION("DCCPv6 - Datagram Congestion Controlled Protocol"); 1226MODULE_DESCRIPTION("DCCPv6 - Datagram Congestion Controlled Protocol");
diff --git a/net/ieee80211/ieee80211_crypt_tkip.c b/net/ieee80211/ieee80211_crypt_tkip.c
index 72e6ab66834f..c796661a021b 100644
--- a/net/ieee80211/ieee80211_crypt_tkip.c
+++ b/net/ieee80211/ieee80211_crypt_tkip.c
@@ -390,9 +390,7 @@ static int ieee80211_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
390 icv[3] = crc >> 24; 390 icv[3] = crc >> 24;
391 391
392 crypto_blkcipher_setkey(tkey->tx_tfm_arc4, rc4key, 16); 392 crypto_blkcipher_setkey(tkey->tx_tfm_arc4, rc4key, 16);
393 sg.page = virt_to_page(pos); 393 sg_init_one(&sg, pos, len + 4);
394 sg.offset = offset_in_page(pos);
395 sg.length = len + 4;
396 return crypto_blkcipher_encrypt(&desc, &sg, &sg, len + 4); 394 return crypto_blkcipher_encrypt(&desc, &sg, &sg, len + 4);
397} 395}
398 396
@@ -485,9 +483,7 @@ static int ieee80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
485 plen = skb->len - hdr_len - 12; 483 plen = skb->len - hdr_len - 12;
486 484
487 crypto_blkcipher_setkey(tkey->rx_tfm_arc4, rc4key, 16); 485 crypto_blkcipher_setkey(tkey->rx_tfm_arc4, rc4key, 16);
488 sg.page = virt_to_page(pos); 486 sg_init_one(&sg, pos, plen + 4);
489 sg.offset = offset_in_page(pos);
490 sg.length = plen + 4;
491 if (crypto_blkcipher_decrypt(&desc, &sg, &sg, plen + 4)) { 487 if (crypto_blkcipher_decrypt(&desc, &sg, &sg, plen + 4)) {
492 if (net_ratelimit()) { 488 if (net_ratelimit()) {
493 printk(KERN_DEBUG ": TKIP: failed to decrypt " 489 printk(KERN_DEBUG ": TKIP: failed to decrypt "
@@ -539,11 +535,12 @@ static int michael_mic(struct crypto_hash *tfm_michael, u8 * key, u8 * hdr,
539 printk(KERN_WARNING "michael_mic: tfm_michael == NULL\n"); 535 printk(KERN_WARNING "michael_mic: tfm_michael == NULL\n");
540 return -1; 536 return -1;
541 } 537 }
542 sg[0].page = virt_to_page(hdr); 538 sg_init_table(sg, 2);
539 sg_set_page(&sg[0], virt_to_page(hdr));
543 sg[0].offset = offset_in_page(hdr); 540 sg[0].offset = offset_in_page(hdr);
544 sg[0].length = 16; 541 sg[0].length = 16;
545 542
546 sg[1].page = virt_to_page(data); 543 sg_set_page(&sg[1], virt_to_page(data));
547 sg[1].offset = offset_in_page(data); 544 sg[1].offset = offset_in_page(data);
548 sg[1].length = data_len; 545 sg[1].length = data_len;
549 546
diff --git a/net/ieee80211/ieee80211_crypt_wep.c b/net/ieee80211/ieee80211_crypt_wep.c
index 8d182459344e..0af6103d715c 100644
--- a/net/ieee80211/ieee80211_crypt_wep.c
+++ b/net/ieee80211/ieee80211_crypt_wep.c
@@ -170,9 +170,7 @@ static int prism2_wep_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
170 icv[3] = crc >> 24; 170 icv[3] = crc >> 24;
171 171
172 crypto_blkcipher_setkey(wep->tx_tfm, key, klen); 172 crypto_blkcipher_setkey(wep->tx_tfm, key, klen);
173 sg.page = virt_to_page(pos); 173 sg_init_one(&sg, pos, len + 4);
174 sg.offset = offset_in_page(pos);
175 sg.length = len + 4;
176 return crypto_blkcipher_encrypt(&desc, &sg, &sg, len + 4); 174 return crypto_blkcipher_encrypt(&desc, &sg, &sg, len + 4);
177} 175}
178 176
@@ -212,9 +210,7 @@ static int prism2_wep_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
212 plen = skb->len - hdr_len - 8; 210 plen = skb->len - hdr_len - 8;
213 211
214 crypto_blkcipher_setkey(wep->rx_tfm, key, klen); 212 crypto_blkcipher_setkey(wep->rx_tfm, key, klen);
215 sg.page = virt_to_page(pos); 213 sg_init_one(&sg, pos, plen + 4);
216 sg.offset = offset_in_page(pos);
217 sg.length = plen + 4;
218 if (crypto_blkcipher_decrypt(&desc, &sg, &sg, plen + 4)) 214 if (crypto_blkcipher_decrypt(&desc, &sg, &sg, plen + 4))
219 return -7; 215 return -7;
220 216
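[Illustrative sketch, not part of the patch] The crypto hunks here (TKIP, WEP, and the scatterlist changes elsewhere in this series) converge on sg_init_one(), which replaces the open-coded page/offset/length assignments for a virtually contiguous buffer. A minimal sketch, assuming 'desc' already holds a keyed block cipher; the buffer is a placeholder.

    #include <linux/scatterlist.h>
    #include <linux/crypto.h>

    /* Illustrative only: encrypt 'len' bytes of 'buf' in place.  'desc' is
     * assumed to reference an already-keyed blkcipher transform. */
    static int example_encrypt_inplace(struct blkcipher_desc *desc,
                                       void *buf, unsigned int len)
    {
            struct scatterlist sg;

            sg_init_one(&sg, buf, len);  /* sets page, offset and length in one call */
            return crypto_blkcipher_encrypt(desc, &sg, &sg, len);
    }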
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index 7eb83ebed2ec..dc429b6b0ba6 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -815,6 +815,12 @@ static int inet_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
815 nlmsg_len(nlh) < hdrlen) 815 nlmsg_len(nlh) < hdrlen)
816 return -EINVAL; 816 return -EINVAL;
817 817
818#ifdef CONFIG_KMOD
819 if (inet_diag_table[nlh->nlmsg_type] == NULL)
820 request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK,
821 NETLINK_INET_DIAG, nlh->nlmsg_type);
822#endif
823
818 if (inet_diag_table[nlh->nlmsg_type] == NULL) 824 if (inet_diag_table[nlh->nlmsg_type] == NULL)
819 return -ENOENT; 825 return -ENOENT;
820 826
@@ -914,3 +920,4 @@ static void __exit inet_diag_exit(void)
914module_init(inet_diag_init); 920module_init(inet_diag_init);
915module_exit(inet_diag_exit); 921module_exit(inet_diag_exit);
916MODULE_LICENSE("GPL"); 922MODULE_LICENSE("GPL");
923MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_INET_DIAG);
diff --git a/net/ipv4/tcp_diag.c b/net/ipv4/tcp_diag.c
index 3904d2158a92..2fbcc7d1b1a0 100644
--- a/net/ipv4/tcp_diag.c
+++ b/net/ipv4/tcp_diag.c
@@ -56,3 +56,4 @@ static void __exit tcp_diag_exit(void)
56module_init(tcp_diag_init); 56module_init(tcp_diag_init);
57module_exit(tcp_diag_exit); 57module_exit(tcp_diag_exit);
58MODULE_LICENSE("GPL"); 58MODULE_LICENSE("GPL");
59MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_INET_DIAG, TCPDIAG_GETSOCK);
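[Illustrative sketch, not part of the patch; the exact macro expansion is an assumption] The MODULE_ALIAS_NET_PF_PROTO_TYPE() lines added to the diag modules and the CONFIG_KMOD request_module() call added to inet_diag_rcv_msg() are two halves of one mechanism: the macro is presumed to stringify its arguments into the canonical "net-pf-<pf>-proto-<proto>-type-<type>" alias, and the requesting side builds the same string so modprobe can load the handler on demand.

    #include <linux/module.h>
    #include <linux/kmod.h>
    #include <linux/net.h>
    #include <linux/netlink.h>
    #include <linux/inet_diag.h>

    /* In the handler module: declare the alias (presumed to expand to a
     * MODULE_ALIAS("net-pf-...-proto-...-type-...") string). */
    MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_INET_DIAG, TCPDIAG_GETSOCK);

    /* In the requesting code: build the matching string, mirroring the
     * CONFIG_KMOD hunk in inet_diag_rcv_msg(). */
    static void example_request_diag_handler(int nlmsg_type)
    {
            request_module("net-pf-%d-proto-%d-type-%d",
                           PF_NETLINK, NETLINK_INET_DIAG, nlmsg_type);
    }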
diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c
index 67cd06613a25..66a9139d46e9 100644
--- a/net/ipv6/ah6.c
+++ b/net/ipv6/ah6.c
@@ -483,6 +483,7 @@ static int ah6_init_state(struct xfrm_state *x)
483 break; 483 break;
484 case XFRM_MODE_TUNNEL: 484 case XFRM_MODE_TUNNEL:
485 x->props.header_len += sizeof(struct ipv6hdr); 485 x->props.header_len += sizeof(struct ipv6hdr);
486 break;
486 default: 487 default:
487 goto error; 488 goto error;
488 } 489 }
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index b0715432e454..72a659806cad 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -360,6 +360,7 @@ static int esp6_init_state(struct xfrm_state *x)
360 break; 360 break;
361 case XFRM_MODE_TUNNEL: 361 case XFRM_MODE_TUNNEL:
362 x->props.header_len += sizeof(struct ipv6hdr); 362 x->props.header_len += sizeof(struct ipv6hdr);
363 break;
363 default: 364 default:
364 goto error; 365 goto error;
365 } 366 }
diff --git a/net/mac80211/wep.c b/net/mac80211/wep.c
index 6675261e958f..cc806d640f7a 100644
--- a/net/mac80211/wep.c
+++ b/net/mac80211/wep.c
@@ -138,9 +138,7 @@ void ieee80211_wep_encrypt_data(struct crypto_blkcipher *tfm, u8 *rc4key,
138 *icv = cpu_to_le32(~crc32_le(~0, data, data_len)); 138 *icv = cpu_to_le32(~crc32_le(~0, data, data_len));
139 139
140 crypto_blkcipher_setkey(tfm, rc4key, klen); 140 crypto_blkcipher_setkey(tfm, rc4key, klen);
141 sg.page = virt_to_page(data); 141 sg_init_one(&sg, data, data_len + WEP_ICV_LEN);
142 sg.offset = offset_in_page(data);
143 sg.length = data_len + WEP_ICV_LEN;
144 crypto_blkcipher_encrypt(&desc, &sg, &sg, sg.length); 142 crypto_blkcipher_encrypt(&desc, &sg, &sg, sg.length);
145} 143}
146 144
@@ -204,9 +202,7 @@ int ieee80211_wep_decrypt_data(struct crypto_blkcipher *tfm, u8 *rc4key,
204 __le32 crc; 202 __le32 crc;
205 203
206 crypto_blkcipher_setkey(tfm, rc4key, klen); 204 crypto_blkcipher_setkey(tfm, rc4key, klen);
207 sg.page = virt_to_page(data); 205 sg_init_one(&sg, data, data_len + WEP_ICV_LEN);
208 sg.offset = offset_in_page(data);
209 sg.length = data_len + WEP_ICV_LEN;
210 crypto_blkcipher_decrypt(&desc, &sg, &sg, sg.length); 206 crypto_blkcipher_decrypt(&desc, &sg, &sg, sg.length);
211 207
212 crc = cpu_to_le32(~crc32_le(~0, data, data_len)); 208 crc = cpu_to_le32(~crc32_le(~0, data, data_len));
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index be57cf317a7f..421281d9dd1d 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -266,7 +266,7 @@ static int teql_master_xmit(struct sk_buff *skb, struct net_device *dev)
266 int busy; 266 int busy;
267 int nores; 267 int nores;
268 int len = skb->len; 268 int len = skb->len;
269 int subq = skb->queue_mapping; 269 int subq = skb_get_queue_mapping(skb);
270 struct sk_buff *skb_res = NULL; 270 struct sk_buff *skb_res = NULL;
271 271
272 start = master->slaves; 272 start = master->slaves;
@@ -284,7 +284,7 @@ restart:
284 if (slave->qdisc_sleeping != q) 284 if (slave->qdisc_sleeping != q)
285 continue; 285 continue;
286 if (netif_queue_stopped(slave) || 286 if (netif_queue_stopped(slave) ||
287 netif_subqueue_stopped(slave, subq) || 287 __netif_subqueue_stopped(slave, subq) ||
288 !netif_running(slave)) { 288 !netif_running(slave)) {
289 busy = 1; 289 busy = 1;
290 continue; 290 continue;
@@ -294,7 +294,7 @@ restart:
294 case 0: 294 case 0:
295 if (netif_tx_trylock(slave)) { 295 if (netif_tx_trylock(slave)) {
296 if (!netif_queue_stopped(slave) && 296 if (!netif_queue_stopped(slave) &&
297 !netif_subqueue_stopped(slave, subq) && 297 !__netif_subqueue_stopped(slave, subq) &&
298 slave->hard_start_xmit(skb, slave) == 0) { 298 slave->hard_start_xmit(skb, slave) == 0) {
299 netif_tx_unlock(slave); 299 netif_tx_unlock(slave);
300 master->slaves = NEXT_SLAVE(q); 300 master->slaves = NEXT_SLAVE(q);
diff --git a/net/sctp/auth.c b/net/sctp/auth.c
index 781810724714..cbd64b216cce 100644
--- a/net/sctp/auth.c
+++ b/net/sctp/auth.c
@@ -726,7 +726,8 @@ void sctp_auth_calculate_hmac(const struct sctp_association *asoc,
726 726
727 /* set up scatter list */ 727 /* set up scatter list */
728 end = skb_tail_pointer(skb); 728 end = skb_tail_pointer(skb);
729 sg.page = virt_to_page(auth); 729 sg_init_table(&sg, 1);
730 sg_set_page(&sg, virt_to_page(auth));
730 sg.offset = (unsigned long)(auth) % PAGE_SIZE; 731 sg.offset = (unsigned long)(auth) % PAGE_SIZE;
731 sg.length = end - (unsigned char *)auth; 732 sg.length = end - (unsigned char *)auth;
732 733
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index f983a369d4e2..658476c4d587 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -56,7 +56,7 @@
56#include <linux/ipv6.h> 56#include <linux/ipv6.h>
57#include <linux/net.h> 57#include <linux/net.h>
58#include <linux/inet.h> 58#include <linux/inet.h>
59#include <asm/scatterlist.h> 59#include <linux/scatterlist.h>
60#include <linux/crypto.h> 60#include <linux/crypto.h>
61#include <net/sock.h> 61#include <net/sock.h>
62 62
@@ -1513,7 +1513,8 @@ static sctp_cookie_param_t *sctp_pack_cookie(const struct sctp_endpoint *ep,
1513 struct hash_desc desc; 1513 struct hash_desc desc;
1514 1514
1515 /* Sign the message. */ 1515 /* Sign the message. */
1516 sg.page = virt_to_page(&cookie->c); 1516 sg_init_table(&sg, 1);
1517 sg_set_page(&sg, virt_to_page(&cookie->c));
1517 sg.offset = (unsigned long)(&cookie->c) % PAGE_SIZE; 1518 sg.offset = (unsigned long)(&cookie->c) % PAGE_SIZE;
1518 sg.length = bodysize; 1519 sg.length = bodysize;
1519 keylen = SCTP_SECRET_SIZE; 1520 keylen = SCTP_SECRET_SIZE;
@@ -1585,7 +1586,8 @@ struct sctp_association *sctp_unpack_cookie(
1585 1586
1586 /* Check the signature. */ 1587 /* Check the signature. */
1587 keylen = SCTP_SECRET_SIZE; 1588 keylen = SCTP_SECRET_SIZE;
1588 sg.page = virt_to_page(bear_cookie); 1589 sg_init_table(&sg, 1);
1590 sg_set_page(&sg, virt_to_page(bear_cookie));
1589 sg.offset = (unsigned long)(bear_cookie) % PAGE_SIZE; 1591 sg.offset = (unsigned long)(bear_cookie) % PAGE_SIZE;
1590 sg.length = bodysize; 1592 sg.length = bodysize;
1591 key = (char *)ep->secret_key[ep->current_key]; 1593 key = (char *)ep->secret_key[ep->current_key];
diff --git a/net/sunrpc/auth_gss/gss_krb5_crypto.c b/net/sunrpc/auth_gss/gss_krb5_crypto.c
index bfb6a29633dd..32be431affcf 100644
--- a/net/sunrpc/auth_gss/gss_krb5_crypto.c
+++ b/net/sunrpc/auth_gss/gss_krb5_crypto.c
@@ -197,9 +197,9 @@ encryptor(struct scatterlist *sg, void *data)
197 int i = (page_pos + outbuf->page_base) >> PAGE_CACHE_SHIFT; 197 int i = (page_pos + outbuf->page_base) >> PAGE_CACHE_SHIFT;
198 in_page = desc->pages[i]; 198 in_page = desc->pages[i];
199 } else { 199 } else {
200 in_page = sg->page; 200 in_page = sg_page(sg);
201 } 201 }
202 desc->infrags[desc->fragno].page = in_page; 202 sg_set_page(&desc->infrags[desc->fragno], in_page);
203 desc->fragno++; 203 desc->fragno++;
204 desc->fraglen += sg->length; 204 desc->fraglen += sg->length;
205 desc->pos += sg->length; 205 desc->pos += sg->length;
@@ -215,11 +215,11 @@ encryptor(struct scatterlist *sg, void *data)
215 if (ret) 215 if (ret)
216 return ret; 216 return ret;
217 if (fraglen) { 217 if (fraglen) {
218 desc->outfrags[0].page = sg->page; 218 sg_set_page(&desc->outfrags[0], sg_page(sg));
219 desc->outfrags[0].offset = sg->offset + sg->length - fraglen; 219 desc->outfrags[0].offset = sg->offset + sg->length - fraglen;
220 desc->outfrags[0].length = fraglen; 220 desc->outfrags[0].length = fraglen;
221 desc->infrags[0] = desc->outfrags[0]; 221 desc->infrags[0] = desc->outfrags[0];
222 desc->infrags[0].page = in_page; 222 sg_set_page(&desc->infrags[0], in_page);
223 desc->fragno = 1; 223 desc->fragno = 1;
224 desc->fraglen = fraglen; 224 desc->fraglen = fraglen;
225 } else { 225 } else {
@@ -287,7 +287,7 @@ decryptor(struct scatterlist *sg, void *data)
287 if (ret) 287 if (ret)
288 return ret; 288 return ret;
289 if (fraglen) { 289 if (fraglen) {
290 desc->frags[0].page = sg->page; 290 sg_set_page(&desc->frags[0], sg_page(sg));
291 desc->frags[0].offset = sg->offset + sg->length - fraglen; 291 desc->frags[0].offset = sg->offset + sg->length - fraglen;
292 desc->frags[0].length = fraglen; 292 desc->frags[0].length = fraglen;
293 desc->fragno = 1; 293 desc->fragno = 1;
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
index 6a59180e1667..3d1f7cdf9dd0 100644
--- a/net/sunrpc/xdr.c
+++ b/net/sunrpc/xdr.c
@@ -1059,7 +1059,7 @@ xdr_process_buf(struct xdr_buf *buf, unsigned int offset, unsigned int len,
1059 do { 1059 do {
1060 if (thislen > page_len) 1060 if (thislen > page_len)
1061 thislen = page_len; 1061 thislen = page_len;
1062 sg->page = buf->pages[i]; 1062 sg_set_page(sg, buf->pages[i]);
1063 sg->offset = page_offset; 1063 sg->offset = page_offset;
1064 sg->length = thislen; 1064 sg->length = thislen;
1065 ret = actor(sg, data); 1065 ret = actor(sg, data);
diff --git a/net/xfrm/xfrm_algo.c b/net/xfrm/xfrm_algo.c
index 5ced62c19c63..313d4bed3aa9 100644
--- a/net/xfrm/xfrm_algo.c
+++ b/net/xfrm/xfrm_algo.c
@@ -13,6 +13,7 @@
13#include <linux/kernel.h> 13#include <linux/kernel.h>
14#include <linux/pfkeyv2.h> 14#include <linux/pfkeyv2.h>
15#include <linux/crypto.h> 15#include <linux/crypto.h>
16#include <linux/scatterlist.h>
16#include <net/xfrm.h> 17#include <net/xfrm.h>
17#if defined(CONFIG_INET_AH) || defined(CONFIG_INET_AH_MODULE) || defined(CONFIG_INET6_AH) || defined(CONFIG_INET6_AH_MODULE) 18#if defined(CONFIG_INET_AH) || defined(CONFIG_INET_AH_MODULE) || defined(CONFIG_INET6_AH) || defined(CONFIG_INET6_AH_MODULE)
18#include <net/ah.h> 19#include <net/ah.h>
@@ -552,7 +553,7 @@ int skb_icv_walk(const struct sk_buff *skb, struct hash_desc *desc,
552 if (copy > len) 553 if (copy > len)
553 copy = len; 554 copy = len;
554 555
555 sg.page = virt_to_page(skb->data + offset); 556 sg_set_page(&sg, virt_to_page(skb->data + offset));
556 sg.offset = (unsigned long)(skb->data + offset) % PAGE_SIZE; 557 sg.offset = (unsigned long)(skb->data + offset) % PAGE_SIZE;
557 sg.length = copy; 558 sg.length = copy;
558 559
@@ -577,7 +578,7 @@ int skb_icv_walk(const struct sk_buff *skb, struct hash_desc *desc,
577 if (copy > len) 578 if (copy > len)
578 copy = len; 579 copy = len;
579 580
580 sg.page = frag->page; 581 sg_set_page(&sg, frag->page);
581 sg.offset = frag->page_offset + offset-start; 582 sg.offset = frag->page_offset + offset-start;
582 sg.length = copy; 583 sg.length = copy;
583 584
diff --git a/security/commoncap.c b/security/commoncap.c
index 43f902750a1b..bf67871173ef 100644
--- a/security/commoncap.c
+++ b/security/commoncap.c
@@ -190,7 +190,8 @@ int cap_inode_killpriv(struct dentry *dentry)
190 return inode->i_op->removexattr(dentry, XATTR_NAME_CAPS); 190 return inode->i_op->removexattr(dentry, XATTR_NAME_CAPS);
191} 191}
192 192
193static inline int cap_from_disk(__le32 *caps, struct linux_binprm *bprm, 193static inline int cap_from_disk(struct vfs_cap_data *caps,
194 struct linux_binprm *bprm,
194 int size) 195 int size)
195{ 196{
196 __u32 magic_etc; 197 __u32 magic_etc;
@@ -198,7 +199,7 @@ static inline int cap_from_disk(__le32 *caps, struct linux_binprm *bprm,
198 if (size != XATTR_CAPS_SZ) 199 if (size != XATTR_CAPS_SZ)
199 return -EINVAL; 200 return -EINVAL;
200 201
201 magic_etc = le32_to_cpu(caps[0]); 202 magic_etc = le32_to_cpu(caps->magic_etc);
202 203
203 switch ((magic_etc & VFS_CAP_REVISION_MASK)) { 204 switch ((magic_etc & VFS_CAP_REVISION_MASK)) {
204 case VFS_CAP_REVISION: 205 case VFS_CAP_REVISION:
@@ -206,8 +207,8 @@ static inline int cap_from_disk(__le32 *caps, struct linux_binprm *bprm,
206 bprm->cap_effective = true; 207 bprm->cap_effective = true;
207 else 208 else
208 bprm->cap_effective = false; 209 bprm->cap_effective = false;
209 bprm->cap_permitted = to_cap_t( le32_to_cpu(caps[1]) ); 210 bprm->cap_permitted = to_cap_t(le32_to_cpu(caps->permitted));
210 bprm->cap_inheritable = to_cap_t( le32_to_cpu(caps[2]) ); 211 bprm->cap_inheritable = to_cap_t(le32_to_cpu(caps->inheritable));
211 return 0; 212 return 0;
212 default: 213 default:
213 return -EINVAL; 214 return -EINVAL;
@@ -219,7 +220,7 @@ static int get_file_caps(struct linux_binprm *bprm)
219{ 220{
220 struct dentry *dentry; 221 struct dentry *dentry;
221 int rc = 0; 222 int rc = 0;
222 __le32 v1caps[XATTR_CAPS_SZ]; 223 struct vfs_cap_data incaps;
223 struct inode *inode; 224 struct inode *inode;
224 225
225 if (bprm->file->f_vfsmnt->mnt_flags & MNT_NOSUID) { 226 if (bprm->file->f_vfsmnt->mnt_flags & MNT_NOSUID) {
@@ -232,8 +233,14 @@ static int get_file_caps(struct linux_binprm *bprm)
232 if (!inode->i_op || !inode->i_op->getxattr) 233 if (!inode->i_op || !inode->i_op->getxattr)
233 goto out; 234 goto out;
234 235
235 rc = inode->i_op->getxattr(dentry, XATTR_NAME_CAPS, &v1caps, 236 rc = inode->i_op->getxattr(dentry, XATTR_NAME_CAPS, NULL, 0);
236 XATTR_CAPS_SZ); 237 if (rc > 0) {
238 if (rc == XATTR_CAPS_SZ)
239 rc = inode->i_op->getxattr(dentry, XATTR_NAME_CAPS,
240 &incaps, XATTR_CAPS_SZ);
241 else
242 rc = -EINVAL;
243 }
237 if (rc == -ENODATA || rc == -EOPNOTSUPP) { 244 if (rc == -ENODATA || rc == -EOPNOTSUPP) {
238 /* no data, that's ok */ 245 /* no data, that's ok */
239 rc = 0; 246 rc = 0;
@@ -242,7 +249,7 @@ static int get_file_caps(struct linux_binprm *bprm)
242 if (rc < 0) 249 if (rc < 0)
243 goto out; 250 goto out;
244 251
245 rc = cap_from_disk(v1caps, bprm, rc); 252 rc = cap_from_disk(&incaps, bprm, rc);
246 if (rc) 253 if (rc)
247 printk(KERN_NOTICE "%s: cap_from_disk returned %d for %s\n", 254 printk(KERN_NOTICE "%s: cap_from_disk returned %d for %s\n",
248 __FUNCTION__, rc, bprm->filename); 255 __FUNCTION__, rc, bprm->filename);
diff --git a/sound/core/control.c b/sound/core/control.c
index 4c3aa8e10378..df0774c76f6f 100644
--- a/sound/core/control.c
+++ b/sound/core/control.c
@@ -93,15 +93,16 @@ static int snd_ctl_open(struct inode *inode, struct file *file)
93 93
94static void snd_ctl_empty_read_queue(struct snd_ctl_file * ctl) 94static void snd_ctl_empty_read_queue(struct snd_ctl_file * ctl)
95{ 95{
96 unsigned long flags;
96 struct snd_kctl_event *cread; 97 struct snd_kctl_event *cread;
97 98
98 spin_lock(&ctl->read_lock); 99 spin_lock_irqsave(&ctl->read_lock, flags);
99 while (!list_empty(&ctl->events)) { 100 while (!list_empty(&ctl->events)) {
100 cread = snd_kctl_event(ctl->events.next); 101 cread = snd_kctl_event(ctl->events.next);
101 list_del(&cread->list); 102 list_del(&cread->list);
102 kfree(cread); 103 kfree(cread);
103 } 104 }
104 spin_unlock(&ctl->read_lock); 105 spin_unlock_irqrestore(&ctl->read_lock, flags);
105} 106}
106 107
107static int snd_ctl_release(struct inode *inode, struct file *file) 108static int snd_ctl_release(struct inode *inode, struct file *file)
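[Illustrative sketch, not part of the patch] The control.c fix above switches the read-queue drain to the irq-safe lock form: spin_lock_irqsave() saves the interrupt state in a local flags word so the critical section is safe even when the same lock can be taken from interrupt context. A minimal sketch; the lock and list are placeholders.

    #include <linux/spinlock.h>
    #include <linux/list.h>

    static DEFINE_SPINLOCK(example_lock);
    static LIST_HEAD(example_events);

    /* Illustrative only: drain a list that an interrupt handler may also touch. */
    static void example_empty_queue(void)
    {
            unsigned long flags;

            spin_lock_irqsave(&example_lock, flags);
            while (!list_empty(&example_events))
                    list_del(example_events.next);  /* unlink the first entry each pass */
            spin_unlock_irqrestore(&example_lock, flags);
    }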
diff --git a/sound/i2c/other/tea575x-tuner.c b/sound/i2c/other/tea575x-tuner.c
index fe31bb5cffb8..37c47fb95aca 100644
--- a/sound/i2c/other/tea575x-tuner.c
+++ b/sound/i2c/other/tea575x-tuner.c
@@ -189,7 +189,6 @@ void snd_tea575x_init(struct snd_tea575x *tea)
189 tea->vd.owner = tea->card->module; 189 tea->vd.owner = tea->card->module;
190 strcpy(tea->vd.name, tea->tea5759 ? "TEA5759 radio" : "TEA5757 radio"); 190 strcpy(tea->vd.name, tea->tea5759 ? "TEA5759 radio" : "TEA5757 radio");
191 tea->vd.type = VID_TYPE_TUNER; 191 tea->vd.type = VID_TYPE_TUNER;
192 tea->vd.hardware = VID_HARDWARE_RTRACK; /* FIXME: assign new number */
193 tea->vd.release = snd_tea575x_release; 192 tea->vd.release = snd_tea575x_release;
194 video_set_drvdata(&tea->vd, tea); 193 video_set_drvdata(&tea->vd, tea);
195 tea->vd.fops = &tea->fops; 194 tea->vd.fops = &tea->fops;
diff --git a/sound/pci/bt87x.c b/sound/pci/bt87x.c
index 91f9e6a112ff..2dba752faf4e 100644
--- a/sound/pci/bt87x.c
+++ b/sound/pci/bt87x.c
@@ -165,7 +165,7 @@ struct snd_bt87x_board {
165 unsigned no_digital:1; /* No digital input */ 165 unsigned no_digital:1; /* No digital input */
166}; 166};
167 167
168static const __devinitdata struct snd_bt87x_board snd_bt87x_boards[] = { 168static __devinitdata struct snd_bt87x_board snd_bt87x_boards[] = {
169 [SND_BT87X_BOARD_UNKNOWN] = { 169 [SND_BT87X_BOARD_UNKNOWN] = {
170 .dig_rate = 32000, /* just a guess */ 170 .dig_rate = 32000, /* just a guess */
171 }, 171 },
@@ -848,7 +848,7 @@ static int __devinit snd_bt87x_detect_card(struct pci_dev *pci)
848 int i; 848 int i;
849 const struct pci_device_id *supported; 849 const struct pci_device_id *supported;
850 850
851 supported = pci_match_device(&driver, pci); 851 supported = pci_match_id(snd_bt87x_ids, pci);
852 if (supported && supported->driver_data > 0) 852 if (supported && supported->driver_data > 0)
853 return supported->driver_data; 853 return supported->driver_data;
854 854
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index 187533e477c6..ad4cb38109fc 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -626,24 +626,19 @@ int __devinit snd_hda_codec_new(struct hda_bus *bus, unsigned int codec_addr,
626 snd_hda_get_codec_name(codec, bus->card->mixername, 626 snd_hda_get_codec_name(codec, bus->card->mixername,
627 sizeof(bus->card->mixername)); 627 sizeof(bus->card->mixername));
628 628
629#ifdef CONFIG_SND_HDA_GENERIC
630 if (is_generic_config(codec)) { 629 if (is_generic_config(codec)) {
631 err = snd_hda_parse_generic_codec(codec); 630 err = snd_hda_parse_generic_codec(codec);
632 goto patched; 631 goto patched;
633 } 632 }
634#endif
635 if (codec->preset && codec->preset->patch) { 633 if (codec->preset && codec->preset->patch) {
636 err = codec->preset->patch(codec); 634 err = codec->preset->patch(codec);
637 goto patched; 635 goto patched;
638 } 636 }
639 637
640 /* call the default parser */ 638 /* call the default parser */
641#ifdef CONFIG_SND_HDA_GENERIC
642 err = snd_hda_parse_generic_codec(codec); 639 err = snd_hda_parse_generic_codec(codec);
643#else 640 if (err < 0)
644 printk(KERN_ERR "hda-codec: No codec parser is available\n"); 641 printk(KERN_ERR "hda-codec: No codec parser is available\n");
645 err = -ENODEV;
646#endif
647 642
648 patched: 643 patched:
649 if (err < 0) { 644 if (err < 0) {
diff --git a/sound/pci/hda/hda_local.h b/sound/pci/hda/hda_local.h
index a79d0ed5469c..20c5e6250374 100644
--- a/sound/pci/hda/hda_local.h
+++ b/sound/pci/hda/hda_local.h
@@ -245,7 +245,14 @@ int snd_hda_multi_out_analog_cleanup(struct hda_codec *codec,
245/* 245/*
246 * generic codec parser 246 * generic codec parser
247 */ 247 */
248#ifdef CONFIG_SND_HDA_GENERIC
248int snd_hda_parse_generic_codec(struct hda_codec *codec); 249int snd_hda_parse_generic_codec(struct hda_codec *codec);
250#else
251static inline int snd_hda_parse_generic_codec(struct hda_codec *codec)
252{
253 return -ENODEV;
254}
255#endif
249 256
250/* 257/*
251 * generic proc interface 258 * generic proc interface
@@ -303,16 +310,17 @@ enum {
303 310
304extern const char *auto_pin_cfg_labels[AUTO_PIN_LAST]; 311extern const char *auto_pin_cfg_labels[AUTO_PIN_LAST];
305 312
313#define AUTO_CFG_MAX_OUTS 5
314
306struct auto_pin_cfg { 315struct auto_pin_cfg {
307 int line_outs; 316 int line_outs;
308 hda_nid_t line_out_pins[5]; /* sorted in the order of 317 /* sorted in the order of Front/Surr/CLFE/Side */
309 * Front/Surr/CLFE/Side 318 hda_nid_t line_out_pins[AUTO_CFG_MAX_OUTS];
310 */
311 int speaker_outs; 319 int speaker_outs;
312 hda_nid_t speaker_pins[5]; 320 hda_nid_t speaker_pins[AUTO_CFG_MAX_OUTS];
313 int hp_outs; 321 int hp_outs;
314 int line_out_type; /* AUTO_PIN_XXX_OUT */ 322 int line_out_type; /* AUTO_PIN_XXX_OUT */
315 hda_nid_t hp_pins[5]; 323 hda_nid_t hp_pins[AUTO_CFG_MAX_OUTS];
316 hda_nid_t input_pins[AUTO_PIN_LAST]; 324 hda_nid_t input_pins[AUTO_PIN_LAST];
317 hda_nid_t dig_out_pin; 325 hda_nid_t dig_out_pin;
318 hda_nid_t dig_in_pin; 326 hda_nid_t dig_in_pin;
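[Illustrative sketch, not part of the patch] The hda_local.h hunk above uses a common stub pattern: when the config option is off, a static inline that returns -ENODEV stands in for the real function, so callers (as in hda_codec.c earlier in this patch) can drop their own #ifdef blocks. A minimal sketch; the option and names are placeholders.

    #include <linux/errno.h>

    struct example_ctx;  /* opaque context type for the sketch */

    #ifdef CONFIG_EXAMPLE_FEATURE
    int example_parse(struct example_ctx *ctx);  /* real implementation built elsewhere */
    #else
    static inline int example_parse(struct example_ctx *ctx)
    {
            return -ENODEV;  /* callers simply see "not available" */
    }
    #endif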
diff --git a/sound/pci/hda/patch_analog.c b/sound/pci/hda/patch_analog.c
index 54cfd4526d20..0ee8ae4d4410 100644
--- a/sound/pci/hda/patch_analog.c
+++ b/sound/pci/hda/patch_analog.c
@@ -72,7 +72,7 @@ struct ad198x_spec {
72 unsigned int num_kctl_alloc, num_kctl_used; 72 unsigned int num_kctl_alloc, num_kctl_used;
73 struct snd_kcontrol_new *kctl_alloc; 73 struct snd_kcontrol_new *kctl_alloc;
74 struct hda_input_mux private_imux; 74 struct hda_input_mux private_imux;
75 hda_nid_t private_dac_nids[4]; 75 hda_nid_t private_dac_nids[AUTO_CFG_MAX_OUTS];
76 76
77 unsigned int jack_present :1; 77 unsigned int jack_present :1;
78 78
@@ -612,7 +612,8 @@ static void ad1986a_hp_automute(struct hda_codec *codec)
612 unsigned int present; 612 unsigned int present;
613 613
614 present = snd_hda_codec_read(codec, 0x1a, 0, AC_VERB_GET_PIN_SENSE, 0); 614 present = snd_hda_codec_read(codec, 0x1a, 0, AC_VERB_GET_PIN_SENSE, 0);
615 spec->jack_present = (present & 0x80000000) != 0; 615 /* Lenovo N100 seems to report the reversed bit for HP jack-sensing */
616 spec->jack_present = !(present & 0x80000000);
616 ad1986a_update_hp(codec); 617 ad1986a_update_hp(codec);
617} 618}
618 619
diff --git a/sound/pci/hda/patch_cmedia.c b/sound/pci/hda/patch_cmedia.c
index 2468f3171222..6c54793bf424 100644
--- a/sound/pci/hda/patch_cmedia.c
+++ b/sound/pci/hda/patch_cmedia.c
@@ -50,7 +50,7 @@ struct cmi_spec {
50 50
51 /* playback */ 51 /* playback */
52 struct hda_multi_out multiout; 52 struct hda_multi_out multiout;
53 hda_nid_t dac_nids[4]; /* NID for each DAC */ 53 hda_nid_t dac_nids[AUTO_CFG_MAX_OUTS]; /* NID for each DAC */
54 int num_dacs; 54 int num_dacs;
55 55
56 /* capture */ 56 /* capture */
@@ -73,7 +73,6 @@ struct cmi_spec {
73 unsigned int pin_def_confs; 73 unsigned int pin_def_confs;
74 74
75 /* multichannel pins */ 75 /* multichannel pins */
76 hda_nid_t multich_pin[4]; /* max 8-channel */
77 struct hda_verb multi_init[9]; /* 2 verbs for each pin + terminator */ 76 struct hda_verb multi_init[9]; /* 2 verbs for each pin + terminator */
78}; 77};
79 78
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index 080e3001d9c5..6aa073986747 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -85,7 +85,7 @@ struct conexant_spec {
85 unsigned int num_kctl_alloc, num_kctl_used; 85 unsigned int num_kctl_alloc, num_kctl_used;
86 struct snd_kcontrol_new *kctl_alloc; 86 struct snd_kcontrol_new *kctl_alloc;
87 struct hda_input_mux private_imux; 87 struct hda_input_mux private_imux;
88 hda_nid_t private_dac_nids[4]; 88 hda_nid_t private_dac_nids[AUTO_CFG_MAX_OUTS];
89 89
90}; 90};
91 91
@@ -554,10 +554,16 @@ static struct snd_kcontrol_new cxt5045_mixers[] = {
554 .get = conexant_mux_enum_get, 554 .get = conexant_mux_enum_get,
555 .put = conexant_mux_enum_put 555 .put = conexant_mux_enum_put
556 }, 556 },
557 HDA_CODEC_VOLUME("Int Mic Volume", 0x1a, 0x01, HDA_INPUT), 557 HDA_CODEC_VOLUME("Int Mic Capture Volume", 0x1a, 0x01, HDA_INPUT),
558 HDA_CODEC_MUTE("Int Mic Switch", 0x1a, 0x01, HDA_INPUT), 558 HDA_CODEC_MUTE("Int Mic Capture Switch", 0x1a, 0x01, HDA_INPUT),
559 HDA_CODEC_VOLUME("Ext Mic Volume", 0x1a, 0x02, HDA_INPUT), 559 HDA_CODEC_VOLUME("Ext Mic Capture Volume", 0x1a, 0x02, HDA_INPUT),
560 HDA_CODEC_MUTE("Ext Mic Switch", 0x1a, 0x02, HDA_INPUT), 560 HDA_CODEC_MUTE("Ext Mic Capture Switch", 0x1a, 0x02, HDA_INPUT),
561 HDA_CODEC_VOLUME("PCM Playback Volume", 0x17, 0x0, HDA_INPUT),
562 HDA_CODEC_MUTE("PCM Playback Switch", 0x17, 0x0, HDA_INPUT),
563 HDA_CODEC_VOLUME("Int Mic Playback Volume", 0x17, 0x1, HDA_INPUT),
564 HDA_CODEC_MUTE("Int Mic Playback Switch", 0x17, 0x1, HDA_INPUT),
565 HDA_CODEC_VOLUME("Ext Mic Playback Volume", 0x17, 0x2, HDA_INPUT),
566 HDA_CODEC_MUTE("Ext Mic Playback Switch", 0x17, 0x2, HDA_INPUT),
561 HDA_BIND_VOL("Master Playback Volume", &cxt5045_hp_bind_master_vol), 567 HDA_BIND_VOL("Master Playback Volume", &cxt5045_hp_bind_master_vol),
562 { 568 {
563 .iface = SNDRV_CTL_ELEM_IFACE_MIXER, 569 .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
@@ -576,16 +582,15 @@ static struct hda_verb cxt5045_init_verbs[] = {
576 {0x12, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN }, 582 {0x12, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN },
577 {0x14, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN|AC_PINCTL_VREF_80 }, 583 {0x14, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN|AC_PINCTL_VREF_80 },
578 /* HP, Amp */ 584 /* HP, Amp */
579 {0x11, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP }, 585 {0x10, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
580 {0x17, AC_VERB_SET_CONNECT_SEL,0x01}, 586 {0x10, AC_VERB_SET_CONNECT_SEL, 0x1},
581 {0x17, AC_VERB_SET_AMP_GAIN_MUTE, 587 {0x11, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP},
582 AC_AMP_SET_OUTPUT|AC_AMP_SET_RIGHT|AC_AMP_SET_LEFT|0x01}, 588 {0x11, AC_VERB_SET_CONNECT_SEL, 0x1},
583 {0x17, AC_VERB_SET_AMP_GAIN_MUTE, 589 {0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
584 AC_AMP_SET_OUTPUT|AC_AMP_SET_RIGHT|AC_AMP_SET_LEFT|0x02}, 590 {0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
585 {0x17, AC_VERB_SET_AMP_GAIN_MUTE, 591 {0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(2)},
586 AC_AMP_SET_OUTPUT|AC_AMP_SET_RIGHT|AC_AMP_SET_LEFT|0x03}, 592 {0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(3)},
587 {0x17, AC_VERB_SET_AMP_GAIN_MUTE, 593 {0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(4)},
588 AC_AMP_SET_OUTPUT|AC_AMP_SET_RIGHT|AC_AMP_SET_LEFT|0x04},
589 /* Record selector: Int mic */ 594 /* Record selector: Int mic */
590 {0x1a, AC_VERB_SET_CONNECT_SEL,0x1}, 595 {0x1a, AC_VERB_SET_CONNECT_SEL,0x1},
591 {0x1a, AC_VERB_SET_AMP_GAIN_MUTE, 596 {0x1a, AC_VERB_SET_AMP_GAIN_MUTE,
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 53b0428abfc2..d9f78c809ee9 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -238,7 +238,7 @@ struct alc_spec {
238 unsigned int num_kctl_alloc, num_kctl_used; 238 unsigned int num_kctl_alloc, num_kctl_used;
239 struct snd_kcontrol_new *kctl_alloc; 239 struct snd_kcontrol_new *kctl_alloc;
240 struct hda_input_mux private_imux; 240 struct hda_input_mux private_imux;
241 hda_nid_t private_dac_nids[5]; 241 hda_nid_t private_dac_nids[AUTO_CFG_MAX_OUTS];
242 242
243 /* hooks */ 243 /* hooks */
244 void (*init_hook)(struct hda_codec *codec); 244 void (*init_hook)(struct hda_codec *codec);
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
index bf950195107c..f9b2c435a130 100644
--- a/sound/pci/hda/patch_sigmatel.c
+++ b/sound/pci/hda/patch_sigmatel.c
@@ -111,6 +111,7 @@ struct sigmatel_spec {
111 unsigned int alt_switch: 1; 111 unsigned int alt_switch: 1;
112 unsigned int hp_detect: 1; 112 unsigned int hp_detect: 1;
113 unsigned int gpio_mute: 1; 113 unsigned int gpio_mute: 1;
114 unsigned int no_vol_knob :1;
114 115
115 unsigned int gpio_mask, gpio_data; 116 unsigned int gpio_mask, gpio_data;
116 117
@@ -1930,7 +1931,8 @@ static int stac92xx_auto_create_hp_ctls(struct hda_codec *codec,
1930 } 1931 }
1931 if (spec->multiout.hp_nid) { 1932 if (spec->multiout.hp_nid) {
1932 const char *pfx; 1933 const char *pfx;
1933 if (old_num_dacs == spec->multiout.num_dacs) 1934 if (old_num_dacs == spec->multiout.num_dacs &&
1935 spec->no_vol_knob)
1934 pfx = "Master"; 1936 pfx = "Master";
1935 else 1937 else
1936 pfx = "Headphone"; 1938 pfx = "Headphone";
@@ -2487,6 +2489,7 @@ static int patch_stac9200(struct hda_codec *codec)
2487 codec->spec = spec; 2489 codec->spec = spec;
2488 spec->num_pins = ARRAY_SIZE(stac9200_pin_nids); 2490 spec->num_pins = ARRAY_SIZE(stac9200_pin_nids);
2489 spec->pin_nids = stac9200_pin_nids; 2491 spec->pin_nids = stac9200_pin_nids;
2492 spec->no_vol_knob = 1;
2490 spec->board_config = snd_hda_check_board_config(codec, STAC_9200_MODELS, 2493 spec->board_config = snd_hda_check_board_config(codec, STAC_9200_MODELS,
2491 stac9200_models, 2494 stac9200_models,
2492 stac9200_cfg_tbl); 2495 stac9200_cfg_tbl);
@@ -2541,6 +2544,7 @@ static int patch_stac925x(struct hda_codec *codec)
2541 codec->spec = spec; 2544 codec->spec = spec;
2542 spec->num_pins = ARRAY_SIZE(stac925x_pin_nids); 2545 spec->num_pins = ARRAY_SIZE(stac925x_pin_nids);
2543 spec->pin_nids = stac925x_pin_nids; 2546 spec->pin_nids = stac925x_pin_nids;
2547 spec->no_vol_knob = 1;
2544 spec->board_config = snd_hda_check_board_config(codec, STAC_925x_MODELS, 2548 spec->board_config = snd_hda_check_board_config(codec, STAC_925x_MODELS,
2545 stac925x_models, 2549 stac925x_models,
2546 stac925x_cfg_tbl); 2550 stac925x_cfg_tbl);
diff --git a/sound/pci/hda/patch_via.c b/sound/pci/hda/patch_via.c
index 33b5e1ffa817..4cdf3e6df4ba 100644
--- a/sound/pci/hda/patch_via.c
+++ b/sound/pci/hda/patch_via.c
@@ -114,7 +114,7 @@ struct via_spec {
114 unsigned int num_kctl_alloc, num_kctl_used; 114 unsigned int num_kctl_alloc, num_kctl_used;
115 struct snd_kcontrol_new *kctl_alloc; 115 struct snd_kcontrol_new *kctl_alloc;
116 struct hda_input_mux private_imux; 116 struct hda_input_mux private_imux;
117 hda_nid_t private_dac_nids[4]; 117 hda_nid_t private_dac_nids[AUTO_CFG_MAX_OUTS];
118 118
119#ifdef CONFIG_SND_HDA_POWER_SAVE 119#ifdef CONFIG_SND_HDA_POWER_SAVE
120 struct hda_loopback_check loopback; 120 struct hda_loopback_check loopback;
diff --git a/sound/sh/aica.c b/sound/sh/aica.c
index 131ec4812288..88dc840152ce 100644
--- a/sound/sh/aica.c
+++ b/sound/sh/aica.c
@@ -106,11 +106,14 @@ static void spu_write_wait(void)
106static void spu_memset(u32 toi, u32 what, int length) 106static void spu_memset(u32 toi, u32 what, int length)
107{ 107{
108 int i; 108 int i;
109 unsigned long flags;
109 snd_assert(length % 4 == 0, return); 110 snd_assert(length % 4 == 0, return);
110 for (i = 0; i < length; i++) { 111 for (i = 0; i < length; i++) {
111 if (!(i % 8)) 112 if (!(i % 8))
112 spu_write_wait(); 113 spu_write_wait();
114 local_irq_save(flags);
113 writel(what, toi + SPU_MEMORY_BASE); 115 writel(what, toi + SPU_MEMORY_BASE);
116 local_irq_restore(flags);
114 toi++; 117 toi++;
115 } 118 }
116} 119}
@@ -118,6 +121,7 @@ static void spu_memset(u32 toi, u32 what, int length)
118/* spu_memload - write to SPU address space */ 121/* spu_memload - write to SPU address space */
 static void spu_memload(u32 toi, void *from, int length)
 {
+	unsigned long flags;
 	u32 *froml = from;
 	u32 __iomem *to = (u32 __iomem *) (SPU_MEMORY_BASE + toi);
 	int i;
@@ -128,7 +132,9 @@ static void spu_memload(u32 toi, void *from, int length)
 		if (!(i % 8))
 			spu_write_wait();
 		val = *froml;
+		local_irq_save(flags);
 		writel(val, to);
+		local_irq_restore(flags);
 		froml++;
 		to++;
 	}
@@ -138,28 +144,36 @@ static void spu_memload(u32 toi, void *from, int length)
 static void spu_disable(void)
 {
 	int i;
+	unsigned long flags;
 	u32 regval;
 	spu_write_wait();
 	regval = readl(ARM_RESET_REGISTER);
 	regval |= 1;
 	spu_write_wait();
+	local_irq_save(flags);
 	writel(regval, ARM_RESET_REGISTER);
+	local_irq_restore(flags);
 	for (i = 0; i < 64; i++) {
 		spu_write_wait();
 		regval = readl(SPU_REGISTER_BASE + (i * 0x80));
 		regval = (regval & ~0x4000) | 0x8000;
 		spu_write_wait();
+		local_irq_save(flags);
 		writel(regval, SPU_REGISTER_BASE + (i * 0x80));
+		local_irq_restore(flags);
 	}
 }
 
 /* spu_enable - set spu registers to enable sound output */
 static void spu_enable(void)
 {
+	unsigned long flags;
 	u32 regval = readl(ARM_RESET_REGISTER);
 	regval &= ~1;
 	spu_write_wait();
+	local_irq_save(flags);
 	writel(regval, ARM_RESET_REGISTER);
+	local_irq_restore(flags);
 }
 
 /*
@@ -168,25 +182,34 @@ static void spu_enable(void)
 */
 static void spu_reset(void)
 {
+	unsigned long flags;
 	spu_disable();
 	spu_memset(0, 0, 0x200000 / 4);
 	/* Put ARM7 in endless loop */
+	local_irq_save(flags);
 	ctrl_outl(0xea000002, SPU_MEMORY_BASE);
+	local_irq_restore(flags);
 	spu_enable();
 }
 
 /* aica_chn_start - write to spu to start playback */
 static void aica_chn_start(void)
 {
+	unsigned long flags;
 	spu_write_wait();
+	local_irq_save(flags);
 	writel(AICA_CMD_KICK | AICA_CMD_START, (u32 *) AICA_CONTROL_POINT);
+	local_irq_restore(flags);
 }
 
 /* aica_chn_halt - write to spu to halt playback */
 static void aica_chn_halt(void)
 {
+	unsigned long flags;
 	spu_write_wait();
+	local_irq_save(flags);
 	writel(AICA_CMD_KICK | AICA_CMD_STOP, (u32 *) AICA_CONTROL_POINT);
+	local_irq_restore(flags);
 }
 
 /* ALSA code below */
@@ -213,12 +236,13 @@ static int aica_dma_transfer(int channels, int buffer_size,
 	int q, err, period_offset;
 	struct snd_card_aica *dreamcastcard;
 	struct snd_pcm_runtime *runtime;
-	err = 0;
+	unsigned long flags;
 	dreamcastcard = substream->pcm->private_data;
 	period_offset = dreamcastcard->clicks;
 	period_offset %= (AICA_PERIOD_NUMBER / channels);
 	runtime = substream->runtime;
 	for (q = 0; q < channels; q++) {
+		local_irq_save(flags);
 		err = dma_xfer(AICA_DMA_CHANNEL,
 			       (unsigned long) (runtime->dma_area +
 						(AICA_BUFFER_SIZE * q) /
@@ -228,9 +252,12 @@ static int aica_dma_transfer(int channels, int buffer_size,
 			       AICA_CHANNEL0_OFFSET + q * CHANNEL_OFFSET +
 			       AICA_PERIOD_SIZE * period_offset,
 			       buffer_size / channels, AICA_DMA_MODE);
-		if (unlikely(err < 0))
+		if (unlikely(err < 0)) {
+			local_irq_restore(flags);
 			break;
+		}
 		dma_wait_for_completion(AICA_DMA_CHANNEL);
+		local_irq_restore(flags);
 	}
 	return err;
 }
diff --git a/sound/sparc/cs4231.c b/sound/sparc/cs4231.c
index 9785382a5f39..f8c7a120ccbb 100644
--- a/sound/sparc/cs4231.c
+++ b/sound/sparc/cs4231.c
@@ -400,65 +400,44 @@ static void snd_cs4231_mce_up(struct snd_cs4231 *chip)
 
 static void snd_cs4231_mce_down(struct snd_cs4231 *chip)
 {
-	unsigned long flags;
-	unsigned long end_time;
-	int timeout;
+	unsigned long flags, timeout;
+	int reg;
 
-	spin_lock_irqsave(&chip->lock, flags);
 	snd_cs4231_busy_wait(chip);
+	spin_lock_irqsave(&chip->lock, flags);
 #ifdef CONFIG_SND_DEBUG
 	if (__cs4231_readb(chip, CS4231U(chip, REGSEL)) & CS4231_INIT)
 		snd_printdd("mce_down [%p] - auto calibration time out (0)\n",
 			    CS4231U(chip, REGSEL));
 #endif
 	chip->mce_bit &= ~CS4231_MCE;
-	timeout = __cs4231_readb(chip, CS4231U(chip, REGSEL));
-	__cs4231_writeb(chip, chip->mce_bit | (timeout & 0x1f),
+	reg = __cs4231_readb(chip, CS4231U(chip, REGSEL));
+	__cs4231_writeb(chip, chip->mce_bit | (reg & 0x1f),
 			CS4231U(chip, REGSEL));
-	if (timeout == 0x80)
-		snd_printdd("mce_down [%p]: serious init problem - "
-			    "codec still busy\n",
-			    chip->port);
-	if ((timeout & CS4231_MCE) == 0) {
+	if (reg == 0x80)
+		snd_printdd("mce_down [%p]: serious init problem "
+			    "- codec still busy\n", chip->port);
+	if ((reg & CS4231_MCE) == 0) {
 		spin_unlock_irqrestore(&chip->lock, flags);
 		return;
 	}
 
 	/*
-	 * Wait for (possible -- during init auto-calibration may not be set)
-	 * calibration process to start. Needs upto 5 sample periods on AD1848
-	 * which at the slowest possible rate of 5.5125 kHz means 907 us.
+	 * Wait for auto-calibration (AC) process to finish, i.e. ACI to go low.
 	 */
-	msleep(1);
-
-	/* check condition up to 250ms */
-	end_time = jiffies + msecs_to_jiffies(250);
-	while (snd_cs4231_in(chip, CS4231_TEST_INIT) &
-	       CS4231_CALIB_IN_PROGRESS) {
-
+	timeout = jiffies + msecs_to_jiffies(250);
+	do {
 		spin_unlock_irqrestore(&chip->lock, flags);
-		if (time_after(jiffies, end_time)) {
-			snd_printk("mce_down - "
-				   "auto calibration time out (2)\n");
-			return;
-		}
-		msleep(1);
-		spin_lock_irqsave(&chip->lock, flags);
-	}
-
-	/* check condition up to 100ms */
-	end_time = jiffies + msecs_to_jiffies(100);
-	while (__cs4231_readb(chip, CS4231U(chip, REGSEL)) & CS4231_INIT) {
-		spin_unlock_irqrestore(&chip->lock, flags);
-		if (time_after(jiffies, end_time)) {
-			snd_printk("mce_down - "
-				   "auto calibration time out (3)\n");
-			return;
-		}
 		msleep(1);
 		spin_lock_irqsave(&chip->lock, flags);
-	}
+		reg = snd_cs4231_in(chip, CS4231_TEST_INIT);
+		reg &= CS4231_CALIB_IN_PROGRESS;
+	} while (reg && time_before(jiffies, timeout));
 	spin_unlock_irqrestore(&chip->lock, flags);
+
+	if (reg)
+		snd_printk(KERN_ERR
+			   "mce_down - auto calibration time out (2)\n");
 }
 
 static void snd_cs4231_advance_dma(struct cs4231_dma_control *dma_cont,
diff --git a/sound/usb/usbquirks.h b/sound/usb/usbquirks.h
index 743568f89907..59410f437705 100644
--- a/sound/usb/usbquirks.h
+++ b/sound/usb/usbquirks.h
@@ -84,6 +84,15 @@
 		       USB_DEVICE_ID_MATCH_INT_CLASS |
 		       USB_DEVICE_ID_MATCH_INT_SUBCLASS,
 	.idVendor = 0x046d,
+	.idProduct = 0x08f5,
+	.bInterfaceClass = USB_CLASS_AUDIO,
+	.bInterfaceSubClass = USB_SUBCLASS_AUDIO_CONTROL
+},
+{
+	.match_flags = USB_DEVICE_ID_MATCH_DEVICE |
+		       USB_DEVICE_ID_MATCH_INT_CLASS |
+		       USB_DEVICE_ID_MATCH_INT_SUBCLASS,
+	.idVendor = 0x046d,
 	.idProduct = 0x08f6,
 	.bInterfaceClass = USB_CLASS_AUDIO,
 	.bInterfaceSubClass = USB_SUBCLASS_AUDIO_CONTROL