author    Ingo Molnar <mingo@elte.hu>    2009-03-03 20:29:19 -0500
committer Ingo Molnar <mingo@elte.hu>    2009-03-03 20:29:19 -0500
commit    91d75e209bd59695f0708d66964d928d45b3b2f3 (patch)
tree      32cab1359d951e4193bebb181a0f0319824a2b95
parent    9976b39b5031bbf76f715893cf080b6a17683881 (diff)
parent    8b0e5860cb099d7958d13b00ffbc35ad02735700 (diff)
Merge branch 'x86/core' into core/percpu
-rw-r--r--Documentation/ABI/testing/sysfs-bus-pci43
-rw-r--r--Documentation/dvb/README.flexcop205
-rw-r--r--Documentation/dvb/technisat.txt34
-rw-r--r--Documentation/kernel-parameters.txt13
-rw-r--r--Documentation/scsi/cxgb3i.txt11
-rw-r--r--Documentation/x86/boot.txt5
-rw-r--r--MAINTAINERS2
-rw-r--r--arch/arm/mach-davinci/board-evm.c6
-rw-r--r--arch/arm/mach-davinci/clock.c5
-rw-r--r--arch/arm/mach-davinci/usb.c1
-rw-r--r--arch/arm/mach-rpc/riscpc.c6
-rw-r--r--arch/ia64/Kconfig11
-rw-r--r--arch/ia64/kernel/iosapic.c2
-rw-r--r--arch/ia64/kernel/unwind.c2
-rw-r--r--arch/mips/Kconfig9
-rw-r--r--arch/mips/alchemy/common/time.c6
-rw-r--r--arch/mips/include/asm/seccomp.h1
-rw-r--r--arch/mips/kernel/irq.c1
-rw-r--r--arch/mips/kernel/linux32.c69
-rw-r--r--arch/mips/kernel/scall32-o32.S4
-rw-r--r--arch/mips/kernel/scall64-64.S2
-rw-r--r--arch/mips/kernel/scall64-n32.S28
-rw-r--r--arch/mips/kernel/scall64-o32.S40
-rw-r--r--arch/mips/kernel/signal.c5
-rw-r--r--arch/mips/kernel/signal32.c28
-rw-r--r--arch/mips/kernel/syscall.c26
-rw-r--r--arch/mips/mm/cache.c5
-rw-r--r--arch/powerpc/include/asm/compat.h5
-rw-r--r--arch/powerpc/include/asm/seccomp.h4
-rw-r--r--arch/powerpc/kernel/align.c29
-rw-r--r--arch/powerpc/lib/copyuser_64.S38
-rw-r--r--arch/powerpc/lib/memcpy_64.S26
-rw-r--r--arch/powerpc/sysdev/ppc4xx_pci.c17
-rw-r--r--arch/sh/boards/board-ap325rxa.c53
-rw-r--r--arch/sh/kernel/cpu/sh2a/clock-sh7201.c4
-rw-r--r--arch/sparc/include/asm/compat.h5
-rw-r--r--arch/sparc/include/asm/seccomp.h6
-rw-r--r--arch/sparc/kernel/chmc.c1
-rw-r--r--arch/x86/Kconfig5
-rw-r--r--arch/x86/boot/compressed/Makefile21
-rw-r--r--arch/x86/boot/compressed/misc.c118
-rw-r--r--arch/x86/configs/i386_defconfig6
-rw-r--r--arch/x86/configs/x86_64_defconfig6
-rw-r--r--arch/x86/include/asm/apic.h26
-rw-r--r--arch/x86/include/asm/boot.h16
-rw-r--r--arch/x86/include/asm/fixmap.h149
-rw-r--r--arch/x86/include/asm/fixmap_32.h115
-rw-r--r--arch/x86/include/asm/fixmap_64.h79
-rw-r--r--arch/x86/include/asm/iomap.h3
-rw-r--r--arch/x86/include/asm/numa_32.h6
-rw-r--r--arch/x86/include/asm/pat.h3
-rw-r--r--arch/x86/include/asm/processor.h6
-rw-r--r--arch/x86/include/asm/seccomp_32.h6
-rw-r--r--arch/x86/include/asm/seccomp_64.h8
-rw-r--r--arch/x86/include/asm/setup.h7
-rw-r--r--arch/x86/include/asm/system.h3
-rw-r--r--arch/x86/include/asm/uaccess_64.h26
-rw-r--r--arch/x86/include/asm/uv/uv.h3
-rw-r--r--arch/x86/kernel/Makefile2
-rw-r--r--arch/x86/kernel/alternative.c6
-rw-r--r--arch/x86/kernel/apic/apic_flat_64.c2
-rw-r--r--arch/x86/kernel/apic/bigsmp_32.c41
-rw-r--r--arch/x86/kernel/apic/es7000_32.c221
-rw-r--r--arch/x86/kernel/apic/numaq_32.c12
-rw-r--r--arch/x86/kernel/apic/probe_32.c15
-rw-r--r--arch/x86/kernel/apic/probe_64.c3
-rw-r--r--arch/x86/kernel/apic/summit_32.c102
-rw-r--r--arch/x86/kernel/apic/x2apic_cluster.c1
-rw-r--r--arch/x86/kernel/apic/x2apic_phys.c1
-rw-r--r--arch/x86/kernel/apic/x2apic_uv_x.c44
-rw-r--r--arch/x86/kernel/cpu/proc.c20
-rw-r--r--arch/x86/kernel/e820.c3
-rw-r--r--arch/x86/kernel/ioport.c11
-rw-r--r--arch/x86/kernel/process.c191
-rw-r--r--arch/x86/kernel/process_32.c190
-rw-r--r--arch/x86/kernel/process_64.c188
-rw-r--r--arch/x86/kernel/ptrace.c2
-rw-r--r--arch/x86/kernel/setup.c16
-rw-r--r--arch/x86/kernel/signal.c117
-rw-r--r--arch/x86/kernel/smpboot.c19
-rw-r--r--arch/x86/kernel/traps.c46
-rw-r--r--arch/x86/kernel/vsmp_64.c12
-rw-r--r--arch/x86/mm/Makefile2
-rw-r--r--arch/x86/mm/highmem_32.c34
-rw-r--r--arch/x86/mm/init.c49
-rw-r--r--arch/x86/mm/init_32.c61
-rw-r--r--arch/x86/mm/init_64.c39
-rw-r--r--arch/x86/mm/iomap_32.c11
-rw-r--r--arch/x86/mm/memtest.c156
-rw-r--r--arch/x86/mm/numa_32.c26
-rw-r--r--arch/x86/mm/pat.c48
-rw-r--r--arch/x86/mm/pgtable.c18
-rw-r--r--arch/x86/mm/pgtable_32.c18
-rw-r--r--arch/x86/oprofile/op_model_ppro.c14
-rw-r--r--arch/x86/xen/enlighten.c3
-rw-r--r--block/blk-merge.c94
-rw-r--r--block/genhd.c16
-rw-r--r--crypto/ahash.c2
-rw-r--r--drivers/ata/pata_amd.c76
-rw-r--r--drivers/ata/pata_it821x.c3
-rw-r--r--drivers/ata/pata_legacy.c7
-rw-r--r--drivers/ata/sata_mv.c20
-rw-r--r--drivers/atm/lanai.c2
-rw-r--r--drivers/block/cciss.c10
-rw-r--r--drivers/block/xen-blkfront.c30
-rw-r--r--drivers/gpu/drm/drm_bufs.c2
-rw-r--r--drivers/gpu/drm/drm_crtc_helper.c76
-rw-r--r--drivers/gpu/drm/drm_edid.c6
-rw-r--r--drivers/gpu/drm/drm_fops.c14
-rw-r--r--drivers/gpu/drm/drm_irq.c14
-rw-r--r--drivers/gpu/drm/drm_lock.c3
-rw-r--r--drivers/gpu/drm/drm_stub.c8
-rw-r--r--drivers/gpu/drm/i915/i915_dma.c11
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c10
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c5
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c5
-rw-r--r--drivers/gpu/drm/i915/intel_bios.c6
-rw-r--r--drivers/gpu/drm/i915/intel_display.c2
-rw-r--r--drivers/i2c/busses/i2c-acorn.c5
-rw-r--r--drivers/i2c/busses/i2c-amd8111.c4
-rw-r--r--drivers/i2c/busses/i2c-ixp2000.c2
-rw-r--r--drivers/i2c/busses/i2c-pxa.c2
-rw-r--r--drivers/i2c/busses/scx200_i2c.c2
-rw-r--r--drivers/i2c/i2c-core.c3
-rw-r--r--drivers/i2c/i2c-dev.c6
-rw-r--r--drivers/ide/Kconfig2
-rw-r--r--drivers/ide/amd74xx.c2
-rw-r--r--drivers/ide/atiixp.c4
-rw-r--r--drivers/ide/ide-cd.c35
-rw-r--r--drivers/ide/ide-cd.h2
-rw-r--r--drivers/ide/ide-gd.c26
-rw-r--r--drivers/ide/ide-gd.h2
-rw-r--r--drivers/ide/ide-tape.c29
-rw-r--r--drivers/ide/ide.c11
-rw-r--r--drivers/ide/it821x.c5
-rw-r--r--drivers/ieee1394/dma.h1
-rw-r--r--drivers/ieee1394/ieee1394_core.c3
-rw-r--r--drivers/ieee1394/ieee1394_transactions.c31
-rw-r--r--drivers/ieee1394/ieee1394_transactions.h2
-rw-r--r--drivers/ieee1394/iso.h1
-rw-r--r--drivers/ieee1394/nodemgr.c10
-rw-r--r--drivers/ieee1394/nodemgr.h18
-rw-r--r--drivers/input/keyboard/atkbd.c4
-rw-r--r--drivers/input/keyboard/bf54x-keys.c4
-rw-r--r--drivers/input/keyboard/corgikbd.c8
-rw-r--r--drivers/input/keyboard/omap-keypad.c8
-rw-r--r--drivers/input/keyboard/spitzkbd.c8
-rw-r--r--drivers/input/mouse/Kconfig2
-rw-r--r--drivers/input/mouse/elantech.c32
-rw-r--r--drivers/input/mouse/pxa930_trkball.c2
-rw-r--r--drivers/input/mouse/synaptics.c9
-rw-r--r--drivers/input/serio/ambakmi.c6
-rw-r--r--drivers/input/serio/gscps2.c2
-rw-r--r--drivers/input/serio/sa1111ps2.c4
-rw-r--r--drivers/input/touchscreen/atmel_tsadcc.c2
-rw-r--r--drivers/input/touchscreen/corgi_ts.c9
-rw-r--r--drivers/input/touchscreen/tsc2007.c3
-rw-r--r--drivers/input/touchscreen/usbtouchscreen.c20
-rw-r--r--drivers/isdn/sc/shmem.c2
-rw-r--r--drivers/md/raid1.c3
-rw-r--r--drivers/md/raid10.c19
-rw-r--r--drivers/media/dvb/Kconfig4
-rw-r--r--drivers/media/dvb/Makefile2
-rw-r--r--drivers/media/dvb/b2c2/flexcop-hw-filter.c1
-rw-r--r--drivers/media/dvb/b2c2/flexcop-pci.c65
-rw-r--r--drivers/media/dvb/b2c2/flexcop.c3
-rw-r--r--drivers/media/dvb/firewire/Kconfig22
-rw-r--r--drivers/media/dvb/firewire/Makefile8
-rw-r--r--drivers/media/dvb/firewire/firedtv-1394.c285
-rw-r--r--drivers/media/dvb/firewire/firedtv-avc.c1315
-rw-r--r--drivers/media/dvb/firewire/firedtv-ci.c260
-rw-r--r--drivers/media/dvb/firewire/firedtv-dvb.c364
-rw-r--r--drivers/media/dvb/firewire/firedtv-fe.c247
-rw-r--r--drivers/media/dvb/firewire/firedtv-rc.c190
-rw-r--r--drivers/media/dvb/firewire/firedtv.h182
-rw-r--r--drivers/media/video/em28xx/em28xx-audio.c2
-rw-r--r--drivers/media/video/pxa_camera.c26
-rw-r--r--drivers/media/video/sh_mobile_ceu_camera.c13
-rw-r--r--drivers/media/video/uvc/uvc_status.c10
-rw-r--r--drivers/message/fusion/mptbase.c4
-rw-r--r--drivers/misc/hpilo.c3
-rw-r--r--drivers/mmc/host/sdhci-pci.c1
-rw-r--r--drivers/mmc/host/sdhci.c5
-rw-r--r--drivers/mmc/host/sdhci.h2
-rw-r--r--drivers/mtd/chips/map_rom.c8
-rw-r--r--drivers/mtd/devices/slram.c14
-rw-r--r--drivers/mtd/lpddr/Kconfig1
-rw-r--r--drivers/mtd/maps/Kconfig2
-rw-r--r--drivers/mtd/maps/bfin-async-flash.c6
-rw-r--r--drivers/mtd/maps/ck804xrom.c2
-rw-r--r--drivers/mtd/maps/physmap.c38
-rw-r--r--drivers/net/Kconfig11
-rw-r--r--drivers/net/Makefile1
-rw-r--r--drivers/net/atl1c/Makefile2
-rw-r--r--drivers/net/atl1c/atl1c.h606
-rw-r--r--drivers/net/atl1c/atl1c_ethtool.c317
-rw-r--r--drivers/net/atl1c/atl1c_hw.c527
-rw-r--r--drivers/net/atl1c/atl1c_hw.h859
-rw-r--r--drivers/net/atl1c/atl1c_main.c2797
-rw-r--r--drivers/net/b44.c13
-rw-r--r--drivers/net/cxgb3/cxgb3_main.c1
-rw-r--r--drivers/net/cxgb3/t3_hw.c7
-rw-r--r--drivers/net/forcedeth.c13
-rw-r--r--drivers/net/gianfar.c2
-rw-r--r--drivers/net/hp-plus.c2
-rw-r--r--drivers/net/mv643xx_eth.c9
-rw-r--r--drivers/net/netxen/netxen_nic_main.c16
-rw-r--r--drivers/net/r8169.c114
-rw-r--r--drivers/net/smsc911x.c2
-rw-r--r--drivers/net/smsc9420.c6
-rw-r--r--drivers/net/smsc9420.h1
-rw-r--r--drivers/net/sundance.c2
-rw-r--r--drivers/net/sungem.c2
-rw-r--r--drivers/net/sunlance.c4
-rw-r--r--drivers/net/tg3.c4
-rw-r--r--drivers/net/usb/asix.c8
-rw-r--r--drivers/net/usb/cdc_ether.c5
-rw-r--r--drivers/net/usb/usbnet.c4
-rw-r--r--drivers/net/usb/zaurus.c5
-rw-r--r--drivers/net/veth.c60
-rw-r--r--drivers/net/wimax/i2400m/i2400m.h2
-rw-r--r--drivers/net/wireless/ath9k/main.c24
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-tx.c8
-rw-r--r--drivers/net/wireless/libertas/ethtool.c12
-rw-r--r--drivers/net/wireless/libertas/if_usb.c4
-rw-r--r--drivers/net/wireless/libertas/main.c31
-rw-r--r--drivers/net/wireless/libertas/persistcfg.c16
-rw-r--r--drivers/net/wireless/libertas/scan.c4
-rw-r--r--drivers/net/wireless/libertas/tx.c2
-rw-r--r--drivers/net/wireless/libertas/wext.c72
-rw-r--r--drivers/net/wireless/orinoco/orinoco.c19
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187_dev.c12
-rw-r--r--drivers/pci/dmar.c73
-rw-r--r--drivers/pci/hotplug/pciehp.h2
-rw-r--r--drivers/pci/hotplug/pciehp_core.c7
-rw-r--r--drivers/pci/hotplug/pciehp_hpc.c15
-rw-r--r--drivers/pci/intr_remapping.c21
-rw-r--r--drivers/pci/pcie/aer/aerdrv_core.c48
-rw-r--r--drivers/pci/pcie/portdrv_pci.c2
-rw-r--r--drivers/pci/quirks.c122
-rw-r--r--drivers/scsi/cxgb3i/cxgb3i.h21
-rw-r--r--drivers/scsi/cxgb3i/cxgb3i_ddp.c19
-rw-r--r--drivers/scsi/cxgb3i/cxgb3i_ddp.h5
-rw-r--r--drivers/scsi/cxgb3i/cxgb3i_init.c4
-rw-r--r--drivers/scsi/cxgb3i/cxgb3i_iscsi.c22
-rw-r--r--drivers/scsi/cxgb3i/cxgb3i_offload.c146
-rw-r--r--drivers/scsi/cxgb3i/cxgb3i_offload.h29
-rw-r--r--drivers/scsi/cxgb3i/cxgb3i_pdu.c275
-rw-r--r--drivers/scsi/cxgb3i/cxgb3i_pdu.h2
-rw-r--r--drivers/scsi/hptiop.c1
-rw-r--r--drivers/scsi/scsi_lib.c5
-rw-r--r--drivers/scsi/sd.c7
-rw-r--r--drivers/serial/sh-sci.h2
-rw-r--r--drivers/staging/panel/panel.c23
-rw-r--r--drivers/staging/rtl8187se/Kconfig1
-rw-r--r--drivers/staging/rtl8187se/ieee80211/ieee80211_crypt.c19
-rw-r--r--drivers/staging/rtl8187se/r8180_core.c2
-rw-r--r--drivers/staging/winbond/wbusb.c20
-rw-r--r--drivers/usb/class/cdc-acm.c9
-rw-r--r--drivers/usb/core/message.c11
-rw-r--r--drivers/usb/gadget/Kconfig1
-rw-r--r--drivers/usb/gadget/f_obex.c4
-rw-r--r--drivers/usb/gadget/file_storage.c6
-rw-r--r--drivers/usb/gadget/fsl_usb2_udc.c3
-rw-r--r--drivers/usb/host/ehci-hcd.c2
-rw-r--r--drivers/usb/host/ehci-mem.c1
-rw-r--r--drivers/usb/host/ehci-sched.c56
-rw-r--r--drivers/usb/host/ehci.h6
-rw-r--r--drivers/usb/musb/davinci.c15
-rw-r--r--drivers/usb/musb/musb_core.c13
-rw-r--r--drivers/usb/musb/musb_gadget.c4
-rw-r--r--drivers/usb/musb/musb_host.c93
-rw-r--r--drivers/usb/serial/option.c11
-rw-r--r--drivers/usb/storage/unusual_devs.h4
-rw-r--r--drivers/w1/slaves/Kconfig6
-rw-r--r--drivers/w1/slaves/Makefile1
-rw-r--r--drivers/w1/slaves/w1_ds2433.c7
-rw-r--r--fs/Makefile6
-rw-r--r--fs/bio.c2
-rw-r--r--fs/btrfs/btrfs_inode.h8
-rw-r--r--fs/btrfs/ctree.h40
-rw-r--r--fs/btrfs/extent-tree.c252
-rw-r--r--fs/btrfs/file.c16
-rw-r--r--fs/btrfs/inode.c62
-rw-r--r--fs/btrfs/ioctl.c6
-rw-r--r--fs/compat_ioctl.c3
-rw-r--r--fs/dcache.c2
-rw-r--r--fs/ext4/balloc.c4
-rw-r--r--fs/ext4/ialloc.c7
-rw-r--r--fs/ext4/inode.c11
-rw-r--r--fs/ext4/super.c1
-rw-r--r--fs/jffs2/background.c18
-rw-r--r--fs/jffs2/readinode.c42
-rw-r--r--fs/ocfs2/alloc.c27
-rw-r--r--fs/ocfs2/dlm/dlmmaster.c12
-rw-r--r--fs/ocfs2/dlm/dlmthread.c3
-rw-r--r--fs/ocfs2/dlm/dlmunlock.c4
-rw-r--r--fs/ocfs2/dlmglue.c11
-rw-r--r--fs/ocfs2/ocfs2.h3
-rw-r--r--fs/ocfs2/super.c8
-rw-r--r--fs/ocfs2/xattr.c27
-rw-r--r--fs/proc/inode.c4
-rw-r--r--fs/proc/page.c2
-rw-r--r--include/drm/drm_crtc_helper.h1
-rw-r--r--include/drm/drm_edid.h4
-rw-r--r--include/linux/Kbuild1
-rw-r--r--include/linux/blkdev.h2
-rw-r--r--include/linux/dcbnl.h4
-rw-r--r--include/linux/decompress/bunzip2.h10
-rw-r--r--include/linux/decompress/generic.h33
-rw-r--r--include/linux/decompress/inflate.h13
-rw-r--r--include/linux/decompress/mm.h87
-rw-r--r--include/linux/decompress/unlzma.h12
-rw-r--r--include/linux/i2c-dev.h2
-rw-r--r--include/linux/i2c.h2
-rw-r--r--include/linux/ide.h2
-rw-r--r--include/linux/if_vlan.h1
-rw-r--r--include/linux/intel-iommu.h3
-rw-r--r--include/linux/io-mapping.h49
-rw-r--r--include/linux/netfilter/xt_NFLOG.h2
-rw-r--r--include/linux/skbuff.h9
-rw-r--r--include/linux/user_namespace.h1
-rw-r--r--include/net/netfilter/nf_conntrack_core.h2
-rw-r--r--include/net/sock.h1
-rw-r--r--init/Kconfig60
-rw-r--r--init/do_mounts_rd.c178
-rw-r--r--init/initramfs.c122
-rw-r--r--kernel/seccomp.c7
-rw-r--r--kernel/user_namespace.c21
-rw-r--r--lib/Kconfig14
-rw-r--r--lib/Makefile7
-rw-r--r--lib/decompress.c54
-rw-r--r--lib/decompress_bunzip2.c735
-rw-r--r--lib/decompress_inflate.c167
-rw-r--r--lib/decompress_unlzma.c647
-rw-r--r--lib/zlib_inflate/inflate.h4
-rw-r--r--lib/zlib_inflate/inftrees.h4
-rw-r--r--mm/filemap.c7
-rw-r--r--mm/shmem.c43
-rw-r--r--mm/vmalloc.c13
-rw-r--r--net/8021q/vlan_core.c10
-rw-r--r--net/core/dev.c6
-rw-r--r--net/core/net_namespace.c86
-rw-r--r--net/core/skbuff.c8
-rw-r--r--net/core/sock.c3
-rw-r--r--net/ipv4/tcp_input.c9
-rw-r--r--net/ipv4/tcp_output.c1
-rw-r--r--net/ipv4/tcp_scalable.c2
-rw-r--r--net/ipv6/inet6_hashtables.c4
-rw-r--r--net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c5
-rw-r--r--net/netfilter/nfnetlink_log.c8
-rw-r--r--net/netfilter/x_tables.c199
-rw-r--r--net/netfilter/xt_recent.c2
-rw-r--r--net/sched/sch_drr.c6
-rw-r--r--scripts/Makefile.lib14
-rw-r--r--scripts/bin_size10
-rwxr-xr-xscripts/checkpatch.pl26
-rw-r--r--scripts/gen_initramfs_list.sh18
-rw-r--r--security/selinux/netlabel.c5
-rw-r--r--sound/core/oss/rate.c2
-rw-r--r--sound/pci/aw2/aw2-alsa.c2
-rw-r--r--sound/pci/emu10k1/emu10k1_main.c1
-rw-r--r--sound/pci/hda/hda_hwdep.c15
-rw-r--r--sound/pci/hda/hda_intel.c2
-rw-r--r--sound/pci/hda/patch_realtek.c4
-rw-r--r--sound/pci/hda/patch_sigmatel.c2
-rw-r--r--sound/pci/pcxhr/pcxhr.h12
-rw-r--r--usr/Kconfig89
-rw-r--r--usr/Makefile36
-rw-r--r--usr/initramfs_data.S2
-rw-r--r--usr/initramfs_data.bz2.S29
-rw-r--r--usr/initramfs_data.gz.S29
-rw-r--r--usr/initramfs_data.lzma.S29
373 files changed, 14119 insertions, 3190 deletions
diff --git a/Documentation/ABI/testing/sysfs-bus-pci b/Documentation/ABI/testing/sysfs-bus-pci
index ceddcff4082a..e638e15a8895 100644
--- a/Documentation/ABI/testing/sysfs-bus-pci
+++ b/Documentation/ABI/testing/sysfs-bus-pci
@@ -1,3 +1,46 @@
+What:           /sys/bus/pci/drivers/.../bind
+Date:           December 2003
+Contact:        linux-pci@vger.kernel.org
+Description:
+                Writing a device location to this file will cause
+                the driver to attempt to bind to the device found at
+                this location. This is useful for overriding default
+                bindings. The format for the location is: DDDD:BB:DD.F.
+                That is Domain:Bus:Device.Function and is the same as
+                found in /sys/bus/pci/devices/. For example:
+                # echo 0000:00:19.0 > /sys/bus/pci/drivers/foo/bind
+                (Note: kernels before 2.6.28 may require echo -n).
+
+What:           /sys/bus/pci/drivers/.../unbind
+Date:           December 2003
+Contact:        linux-pci@vger.kernel.org
+Description:
+                Writing a device location to this file will cause the
+                driver to attempt to unbind from the device found at
+                this location. This may be useful when overriding default
+                bindings. The format for the location is: DDDD:BB:DD.F.
+                That is Domain:Bus:Device.Function and is the same as
+                found in /sys/bus/pci/devices/. For example:
+                # echo 0000:00:19.0 > /sys/bus/pci/drivers/foo/unbind
+                (Note: kernels before 2.6.28 may require echo -n).
+
+What:           /sys/bus/pci/drivers/.../new_id
+Date:           December 2003
+Contact:        linux-pci@vger.kernel.org
+Description:
+                Writing a device ID to this file will attempt to
+                dynamically add a new device ID to a PCI device driver.
+                This may allow the driver to support more hardware than
+                was included in the driver's static device ID support
+                table at compile time. The format for the device ID is:
+                VVVV DDDD SVVV SDDD CCCC MMMM PPPP. That is Vendor ID,
+                Device ID, Subsystem Vendor ID, Subsystem Device ID,
+                Class, Class Mask, and Private Driver Data. The Vendor ID
+                and Device ID fields are required, the rest are optional.
+                Upon successfully adding an ID, the driver will probe
+                for the device and attempt to bind to it. For example:
+                # echo "8086 10f5" > /sys/bus/pci/drivers/foo/new_id
+
 What:           /sys/bus/pci/devices/.../vpd
 Date:           February 2008
 Contact:        Ben Hutchings <bhutchings@solarflare.com>
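
The bind/unbind/new_id files documented above can be driven from any program that can write to sysfs, not just echo. A minimal userspace C sketch, illustrative only and not part of this patch; the driver name "foo", the ID pair "8086 10f5" and the device location 0000:00:19.0 are simply the placeholder values from the examples above:

/* sysfs_bind_example.c -- illustrative only */
#include <stdio.h>

static int write_sysfs(const char *path, const char *val)
{
        FILE *f = fopen(path, "w");

        if (!f) {
                perror(path);
                return -1;
        }
        fputs(val, f);          /* no trailing newline: matches the "echo -n" note */
        return fclose(f);
}

int main(void)
{
        const char *dev = "0000:00:19.0";   /* Domain:Bus:Device.Function */

        /* Teach driver "foo" about an extra Vendor/Device ID pair ... */
        write_sysfs("/sys/bus/pci/drivers/foo/new_id", "8086 10f5");
        /* ... then explicitly release and rebind the device. */
        write_sysfs("/sys/bus/pci/drivers/foo/unbind", dev);
        write_sysfs("/sys/bus/pci/drivers/foo/bind", dev);
        return 0;
}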
diff --git a/Documentation/dvb/README.flexcop b/Documentation/dvb/README.flexcop
deleted file mode 100644
index 5515469de7cf..000000000000
--- a/Documentation/dvb/README.flexcop
+++ /dev/null
@@ -1,205 +0,0 @@
1This README escorted the skystar2-driver rewriting procedure. It describes the
2state of the new flexcop-driver set and some internals are written down here
3too.
4
5This document hopefully describes things about the flexcop and its
6device-offsprings. Goal was to write an easy-to-write and easy-to-read set of
7drivers based on the skystar2.c and other information.
8
9Remark: flexcop-pci.c was a copy of skystar2.c, but every line has been
10touched and rewritten.
11
12History & News
13==============
14 2005-04-01 - correct USB ISOC transfers (thanks to Vadim Catana)
15
16
17
18
19General coding processing
20=========================
21
22We should proceed as follows (as long as no one complains):
23
240) Think before start writing code!
25
261) rewriting the skystar2.c with the help of the flexcop register descriptions
27and splitting up the files to a pci-bus-part and a flexcop-part.
28The new driver will be called b2c2-flexcop-pci.ko/b2c2-flexcop-usb.ko for the
29device-specific part and b2c2-flexcop.ko for the common flexcop-functions.
30
312) Search for errors in the leftover of flexcop-pci.c (compare with pluto2.c
32and other pci drivers)
33
343) make some beautification (see 'Improvements when rewriting (refactoring) is
35done')
36
374) Testing the new driver and maybe substitute the skystar2.c with it, to reach
38a wider tester audience.
39
405) creating an usb-bus-part using the already written flexcop code for the pci
41card.
42
43Idea: create a kernel-object for the flexcop and export all important
44functions. This option saves kernel-memory, but maybe a lot of functions have
45to be exported to kernel namespace.
46
47
48Current situation
49=================
50
510) Done :)
521) Done (some minor issues left)
532) Done
543) Not ready yet, more information is necessary
554) next to be done (see the table below)
565) USB driver is working (yes, there are some minor issues)
57
58What seems to be ready?
59-----------------------
60
611) Rewriting
621a) i2c is cut off from the flexcop-pci.c and seems to work
631b) moved tuner and demod stuff from flexcop-pci.c to flexcop-tuner-fe.c
641c) moved lnb and diseqc stuff from flexcop-pci.c to flexcop-tuner-fe.c
651e) eeprom (reading MAC address)
661d) sram (no dynamic sll size detection (commented out) (using default as JJ told me))
671f) misc. register accesses for reading parameters (e.g. resetting, revision)
681g) pid/mac filter (flexcop-hw-filter.c)
691i) dvb-stuff initialization in flexcop.c (done)
701h) dma stuff (now just using the size-irq, instead of all-together, to be done)
711j) remove flexcop initialization from flexcop-pci.c completely (done)
721l) use a well working dma IRQ method (done, see 'Known bugs and problems and TODO')
731k) cleanup flexcop-files (remove unused EXPORT_SYMBOLs, make static from
74non-static where possible, moved code to proper places)
75
762) Search for errors in the leftover of flexcop-pci.c (partially done)
775a) add MAC address reading
785c) feeding of ISOC data to the software demux (format of the isochronous data
79and speed optimization, no real error) (thanks to Vadim Catana)
80
81What to do in the near future?
82--------------------------------------
83(no special order here)
84
855) USB driver
865b) optimize isoc-transfer (submitting/killing isoc URBs when transfer is starting)
87
88Testing changes
89---------------
90
91O = item is working
92P = item is partially working
93X = item is not working
94N = item does not apply here
95<empty field> = item need to be examined
96
97 | PCI | USB
98item | mt352 | nxt2002 | stv0299 | mt312 | mt352 | nxt2002 | stv0299 | mt312
99-------+-------+---------+---------+-------+-------+---------+---------+-------
1001a) | O | | | | N | N | N | N
1011b) | O | | | | | | O |
1021c) | N | N | | | N | N | O |
1031d) | O | O
1041e) | O | O
1051f) | P
1061g) | O
1071h) | P |
1081i) | O | N
1091j) | O | N
1101l) | O | N
1112) | O | N
1125a) | N | O
1135b)* | N |
1145c) | N | O
115
116* - not done yet
117
118Known bugs and problems and TODO
119--------------------------------
120
1211g/h/l) when pid filtering is enabled on the pci card
122
123DMA usage currently:
124 The DMA is splitted in 2 equal-sized subbuffers. The Flexcop writes to first
125 address and triggers an IRQ when it's full and starts writing to the second
126 address. When the second address is full, the IRQ is triggered again, and
127 the flexcop writes to first address again, and so on.
128 The buffersize of each address is currently 640*188 bytes.
129
130 Problem is, when using hw-pid-filtering and doing some low-bandwidth
131 operation (like scanning) the buffers won't be filled enough to trigger
132 the IRQ. That's why:
133
134 When PID filtering is activated, the timer IRQ is used. Every 1.97 ms the IRQ
135 is triggered. Is the current write address of DMA1 different to the one
136 during the last IRQ, then the data is passed to the demuxer.
137
138 There is an additional DMA-IRQ-method: packet count IRQ. This isn't
139 implemented correctly yet.
140
141 The solution is to disable HW PID filtering, but I don't know how the DVB
142 API software demux behaves on slow systems with 45MBit/s TS.
143
144Solved bugs :)
145--------------
1461g) pid-filtering (somehow pid index 4 and 5 (EMM_PID and ECM_PID) aren't
147working)
148SOLUTION: also index 0 was affected, because net_translation is done for
149these indexes by default
150
1515b) isochronous transfer does only work in the first attempt (for the Sky2PC
152USB, Air2PC is working) SOLUTION: the flexcop was going asleep and never really
153woke up again (don't know if this need fixes, see
154flexcop-fe-tuner.c:flexcop_sleep)
155
156NEWS: when the driver is loaded and unloaded and loaded again (w/o doing
157anything in the while the driver is loaded the first time), no transfers take
158place anymore.
159
160Improvements when rewriting (refactoring) is done
161=================================================
162
163- split sleeping of the flexcop (misc_204.ACPI3_sig = 1;) from lnb_control
164 (enable sleeping for other demods than dvb-s)
165- add support for CableStar (stv0297 Microtune 203x/ALPS) (almost done, incompatibilities with the Nexus-CA)
166
167Debugging
168---------
169- add verbose debugging to skystar2.c (dump the reg_dw_data) and compare it
170 with this flexcop, this is important, because i2c is now using the
171 flexcop_ibi_value union from flexcop-reg.h (do you have a better idea for
172 that, please tell us so).
173
174Everything which is identical in the following table, can be put into a common
175flexcop-module.
176
177 PCI USB
178-------------------------------------------------------------------------------
179Different:
180Register access: accessing IO memory USB control message
181I2C bus: I2C bus of the FC USB control message
182Data transfer: DMA isochronous transfer
183EEPROM transfer: through i2c bus not clear yet
184
185Identical:
186Streaming: accessing registers
187PID Filtering: accessing registers
188Sram destinations: accessing registers
189Tuner/Demod: I2C bus
190DVB-stuff: can be written for common use
191
192Acknowledgements (just for the rewriting part)
193================
194
195Bjarne Steinsbo thought a lot in the first place of the pci part for this code
196sharing idea.
197
198Andreas Oberritter for providing a recent PCI initialization template
199(pluto2.c).
200
201Boleslaw Ciesielski for pointing out a problem with firmware loader.
202
203Vadim Catana for correcting the USB transfer.
204
205comments, critics and ideas to linux-dvb@linuxtv.org.
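
For reference, the double-buffered DMA scheme the deleted README describes under "DMA usage currently" boils down to the ping-pong pattern sketched below. All names here are hypothetical and only illustrate the idea; this is not the real flexcop driver code, and the timer-IRQ fallback used when PID filtering is enabled is not shown.

/* Hypothetical sketch: two equal subbuffers of 640*188 bytes, an IRQ
 * whenever one half fills, the chip already writing into the other half. */
#include <stddef.h>

#define SUBBUF_SIZE (640 * 188)

struct dma_ring {
        unsigned char *half[2];   /* the two equal-sized subbuffers    */
        int hw_writes;            /* index the hardware is filling now */
};

/* IRQ path: called once per "half full" interrupt. */
static void dma_half_full(struct dma_ring *ring,
                          void (*feed_demux)(const unsigned char *, size_t))
{
        int done = ring->hw_writes;     /* this half just completed ...      */

        ring->hw_writes ^= 1;           /* ... hardware moved to the other   */
        feed_demux(ring->half[done], SUBBUF_SIZE);
}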
diff --git a/Documentation/dvb/technisat.txt b/Documentation/dvb/technisat.txt
index cdf6ee4b2da1..3f435ffb289c 100644
--- a/Documentation/dvb/technisat.txt
+++ b/Documentation/dvb/technisat.txt
@@ -1,5 +1,5 @@
-How to set up the Technisat devices
-===================================
+How to set up the Technisat/B2C2 Flexcop devices
+================================================
 
 1) Find out what device you have
 ================================
@@ -16,54 +16,60 @@ DVB: registering frontend 0 (Conexant CX24123/CX24109)...
 
 If the Technisat is the only TV device in your box get rid of unnecessary modules and check this one:
 "Multimedia devices" => "Customise analog and hybrid tuner modules to build"
-In this directory uncheck every driver which is activated there.
+In this directory uncheck every driver which is activated there (except "Simple tuner support" for case 9 only).
 
 Then please activate:
 2a) Main module part:
 
 a.)"Multimedia devices" => "DVB/ATSC adapters" => "Technisat/B2C2 FlexcopII(b) and FlexCopIII adapters"
-b.)"Multimedia devices" => "DVB/ATSC adapters" => "Technisat/B2C2 FlexcopII(b) and FlexCopIII adapters" => "Technisat/B2C2 Air/Sky/Cable2PC PCI" in case of a PCI card OR
+b.)"Multimedia devices" => "DVB/ATSC adapters" => "Technisat/B2C2 FlexcopII(b) and FlexCopIII adapters" => "Technisat/B2C2 Air/Sky/Cable2PC PCI" in case of a PCI card
+OR
 c.)"Multimedia devices" => "DVB/ATSC adapters" => "Technisat/B2C2 FlexcopII(b) and FlexCopIII adapters" => "Technisat/B2C2 Air/Sky/Cable2PC USB" in case of an USB 1.1 adapter
 d.)"Multimedia devices" => "DVB/ATSC adapters" => "Technisat/B2C2 FlexcopII(b) and FlexCopIII adapters" => "Enable debug for the B2C2 FlexCop drivers"
 Notice: d.) is helpful for troubleshooting
 
 2b) Frontend module part:
 
-1.) Revision 2.3:
+1.) SkyStar DVB-S Revision 2.3:
 a.)"Multimedia devices" => "Customise DVB frontends" => "Customise the frontend modules to build"
 b.)"Multimedia devices" => "Customise DVB frontends" => "Zarlink VP310/MT312/ZL10313 based"
 
-2.) Revision 2.6:
+2.) SkyStar DVB-S Revision 2.6:
 a.)"Multimedia devices" => "Customise DVB frontends" => "Customise the frontend modules to build"
 b.)"Multimedia devices" => "Customise DVB frontends" => "ST STV0299 based"
 
-3.) Revision 2.7:
+3.) SkyStar DVB-S Revision 2.7:
 a.)"Multimedia devices" => "Customise DVB frontends" => "Customise the frontend modules to build"
 b.)"Multimedia devices" => "Customise DVB frontends" => "Samsung S5H1420 based"
 c.)"Multimedia devices" => "Customise DVB frontends" => "Integrant ITD1000 Zero IF tuner for DVB-S/DSS"
 d.)"Multimedia devices" => "Customise DVB frontends" => "ISL6421 SEC controller"
 
-4.) Revision 2.8:
+4.) SkyStar DVB-S Revision 2.8:
 a.)"Multimedia devices" => "Customise DVB frontends" => "Customise the frontend modules to build"
 b.)"Multimedia devices" => "Customise DVB frontends" => "Conexant CX24113/CX24128 tuner for DVB-S/DSS"
 c.)"Multimedia devices" => "Customise DVB frontends" => "Conexant CX24123 based"
 d.)"Multimedia devices" => "Customise DVB frontends" => "ISL6421 SEC controller"
 
-5.) DVB-T card:
+5.) AirStar DVB-T card:
 a.)"Multimedia devices" => "Customise DVB frontends" => "Customise the frontend modules to build"
 b.)"Multimedia devices" => "Customise DVB frontends" => "Zarlink MT352 based"
 
-6.) DVB-C card:
+6.) CableStar DVB-C card:
 a.)"Multimedia devices" => "Customise DVB frontends" => "Customise the frontend modules to build"
 b.)"Multimedia devices" => "Customise DVB frontends" => "ST STV0297 based"
 
-7.) ATSC card 1st generation:
+7.) AirStar ATSC card 1st generation:
 a.)"Multimedia devices" => "Customise DVB frontends" => "Customise the frontend modules to build"
 b.)"Multimedia devices" => "Customise DVB frontends" => "Broadcom BCM3510"
 
-8.) ATSC card 2nd generation:
+8.) AirStar ATSC card 2nd generation:
 a.)"Multimedia devices" => "Customise DVB frontends" => "Customise the frontend modules to build"
 b.)"Multimedia devices" => "Customise DVB frontends" => "NxtWave Communications NXT2002/NXT2004 based"
-c.)"Multimedia devices" => "Customise DVB frontends" => "LG Electronics LGDT3302/LGDT3303 based"
+c.)"Multimedia devices" => "Customise DVB frontends" => "Generic I2C PLL based tuners"
 
-Author: Uwe Bugla <uwe.bugla@gmx.de> December 2008
+9.) AirStar ATSC card 3rd generation:
+a.)"Multimedia devices" => "Customise DVB frontends" => "Customise the frontend modules to build"
+b.)"Multimedia devices" => "Customise DVB frontends" => "LG Electronics LGDT3302/LGDT3303 based"
+c.)"Multimedia devices" => "Customise analog and hybrid tuner modules to build" => "Simple tuner support"
+
+Author: Uwe Bugla <uwe.bugla@gmx.de> February 2009
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index f6d5d5b9b2b1..28de395fa096 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -868,8 +868,10 @@ and is between 256 and 4096 characters. It is defined in the file
        icn=            [HW,ISDN]
                        Format: <io>[,<membase>[,<icn_id>[,<icn_id2>]]]
 
-       ide=            [HW] (E)IDE subsystem
-                       Format: ide=nodma or ide=doubler
+       ide-core.nodma= [HW] (E)IDE subsystem
+                       Format: =0.0 to prevent dma on hda, =0.1 hdb =1.0 hdc
+                       .vlb_clock .pci_clock .noflush .noprobe .nowerr .cdrom
+                       .chs .ignore_cable are additional options
                        See Documentation/ide/ide.txt.
 
        idebus=         [HW] (E)IDE subsystem - VLB/PCI bus speed
@@ -1308,8 +1310,13 @@ and is between 256 and 4096 characters. It is defined in the file
 
        memtest=        [KNL,X86] Enable memtest
                        Format: <integer>
-                       range: 0,4 : pattern number
                        default : 0 <disable>
+                       Specifies the number of memtest passes to be
+                       performed. Each pass selects another test
+                       pattern from a given set of patterns. Memtest
+                       fills the memory with this pattern, validates
+                       memory contents and reserves bad memory
+                       regions that are detected.
 
        meye.*=         [HW] Set MotionEye Camera parameters
                        See Documentation/video4linux/meye.txt.
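
The new memtest= description amounts to one pass of the following per-range logic. This is a schematic C sketch, not the actual arch/x86/mm/memtest.c; reserve_bad_mem() is a stand-in for the kernel's reservation call.

/* Schematic single memtest pass: fill, validate, reserve bad ranges. */
#include <stdint.h>
#include <stddef.h>

extern void reserve_bad_mem(uint64_t pattern, uint64_t *start, uint64_t *end);

static void memtest_pass(uint64_t *start, uint64_t *end, uint64_t pattern)
{
        uint64_t *p, *bad_start = NULL;

        for (p = start; p < end; p++)           /* fill ...                */
                *p = pattern;

        for (p = start; p < end; p++) {         /* ... then validate       */
                if (*p == pattern) {
                        if (bad_start) {        /* close an open bad range */
                                reserve_bad_mem(pattern, bad_start, p);
                                bad_start = NULL;
                        }
                } else if (!bad_start) {
                        bad_start = p;          /* start a new bad range   */
                }
        }
        if (bad_start)                          /* range ran to the end    */
                reserve_bad_mem(pattern, bad_start, end);
}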
diff --git a/Documentation/scsi/cxgb3i.txt b/Documentation/scsi/cxgb3i.txt
index 8141fa01978e..7ac8032ee9b2 100644
--- a/Documentation/scsi/cxgb3i.txt
+++ b/Documentation/scsi/cxgb3i.txt
@@ -4,7 +4,7 @@ Introduction
 ============
 
 The Chelsio T3 ASIC based Adapters (S310, S320, S302, S304, Mezz cards, etc.
-series of products) supports iSCSI acceleration and iSCSI Direct Data Placement
+series of products) support iSCSI acceleration and iSCSI Direct Data Placement
 (DDP) where the hardware handles the expensive byte touching operations, such
 as CRC computation and verification, and direct DMA to the final host memory
 destination:
@@ -31,9 +31,9 @@ destination:
        the TCP segments onto the wire. It handles TCP retransmission if
        needed.
 
-       On receving, S3 h/w recovers the iSCSI PDU by reassembling TCP
+       On receiving, S3 h/w recovers the iSCSI PDU by reassembling TCP
        segments, separating the header and data, calculating and verifying
-       the digests, then forwards the header to the host. The payload data,
+       the digests, then forwarding the header to the host. The payload data,
        if possible, will be directly placed into the pre-posted host DDP
        buffer. Otherwise, the payload data will be sent to the host too.
 
@@ -68,9 +68,8 @@ The following steps need to be taken to accelerates the open-iscsi initiator:
        sure the ip address is unique in the network.
 
 3. edit /etc/iscsi/iscsid.conf
-       The default setting for MaxRecvDataSegmentLength (131072) is too big,
-       replace "node.conn[0].iscsi.MaxRecvDataSegmentLength" to be a value no
-       bigger than 15360 (for example 8192):
+       The default setting for MaxRecvDataSegmentLength (131072) is too big;
+       replace with a value no bigger than 15360 (for example 8192):
 
        node.conn[0].iscsi.MaxRecvDataSegmentLength = 8192
 
diff --git a/Documentation/x86/boot.txt b/Documentation/x86/boot.txt
index 12299697b7cd..e0203662f9e9 100644
--- a/Documentation/x86/boot.txt
+++ b/Documentation/x86/boot.txt
@@ -543,7 +543,10 @@ Protocol: 2.08+
 
   The payload may be compressed. The format of both the compressed and
   uncompressed data should be determined using the standard magic
-  numbers. Currently only gzip compressed ELF is used.
+  numbers. The currently supported compression formats are gzip
+  (magic numbers 1F 8B or 1F 9E), bzip2 (magic number 42 5A) and LZMA
+  (magic number 5D 00). The uncompressed payload is currently always ELF
+  (magic number 7F 45 4C 46).
 
 Field name:     payload_length
 Type:           read
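
The magic numbers listed in the new boot.txt text can be distinguished with a few byte comparisons. A small C sketch, illustrative only and not the decompressor's actual probe code:

#include <stddef.h>

enum payload_fmt { FMT_UNKNOWN, FMT_GZIP, FMT_BZIP2, FMT_LZMA, FMT_ELF };

static enum payload_fmt payload_format(const unsigned char *p, size_t len)
{
        if (len >= 2 && p[0] == 0x1f && (p[1] == 0x8b || p[1] == 0x9e))
                return FMT_GZIP;        /* gzip: 1F 8B or 1F 9E            */
        if (len >= 2 && p[0] == 0x42 && p[1] == 0x5a)
                return FMT_BZIP2;       /* bzip2: 42 5A                    */
        if (len >= 2 && p[0] == 0x5d && p[1] == 0x00)
                return FMT_LZMA;        /* LZMA: 5D 00                     */
        if (len >= 4 && p[0] == 0x7f && p[1] == 0x45 &&
            p[2] == 0x4c && p[3] == 0x46)
                return FMT_ELF;         /* uncompressed ELF: 7F 45 4C 46   */
        return FMT_UNKNOWN;
}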
diff --git a/MAINTAINERS b/MAINTAINERS
index 59fd2d1d94a7..1c2ca1dc66f2 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2464,7 +2464,7 @@ S: Maintained
 
 ISDN SUBSYSTEM
 P:      Karsten Keil
-M:      kkeil@suse.de
+M:      isdn@linux-pingi.de
 L:      isdn4linux@listserv.isdn4linux.de (subscribers-only)
 W:      http://www.isdn4linux.de
 T:      git kernel.org:/pub/scm/linux/kernel/kkeil/isdn-2.6.git
diff --git a/arch/arm/mach-davinci/board-evm.c b/arch/arm/mach-davinci/board-evm.c
index a957d239a683..38b6a9ce2a93 100644
--- a/arch/arm/mach-davinci/board-evm.c
+++ b/arch/arm/mach-davinci/board-evm.c
@@ -311,6 +311,9 @@ evm_u35_setup(struct i2c_client *client, int gpio, unsigned ngpio, void *c)
311 gpio_request(gpio + 7, "nCF_SEL"); 311 gpio_request(gpio + 7, "nCF_SEL");
312 gpio_direction_output(gpio + 7, 1); 312 gpio_direction_output(gpio + 7, 1);
313 313
314 /* irlml6401 sustains over 3A, switches 5V in under 8 msec */
315 setup_usb(500, 8);
316
314 return 0; 317 return 0;
315} 318}
316 319
@@ -417,9 +420,6 @@ static __init void davinci_evm_init(void)
417 platform_add_devices(davinci_evm_devices, 420 platform_add_devices(davinci_evm_devices,
418 ARRAY_SIZE(davinci_evm_devices)); 421 ARRAY_SIZE(davinci_evm_devices));
419 evm_init_i2c(); 422 evm_init_i2c();
420
421 /* irlml6401 sustains over 3A, switches 5V in under 8 msec */
422 setup_usb(500, 8);
423} 423}
424 424
425static __init void davinci_evm_irq_init(void) 425static __init void davinci_evm_irq_init(void)
diff --git a/arch/arm/mach-davinci/clock.c b/arch/arm/mach-davinci/clock.c
index 28f6dbc95bd7..abb92b7eca0c 100644
--- a/arch/arm/mach-davinci/clock.c
+++ b/arch/arm/mach-davinci/clock.c
@@ -231,6 +231,11 @@ static struct clk davinci_clks[] = {
231 .lpsc = DAVINCI_LPSC_GPIO, 231 .lpsc = DAVINCI_LPSC_GPIO,
232 }, 232 },
233 { 233 {
234 .name = "usb",
235 .rate = &commonrate,
236 .lpsc = DAVINCI_LPSC_USB,
237 },
238 {
234 .name = "AEMIFCLK", 239 .name = "AEMIFCLK",
235 .rate = &commonrate, 240 .rate = &commonrate,
236 .lpsc = DAVINCI_LPSC_AEMIF, 241 .lpsc = DAVINCI_LPSC_AEMIF,
diff --git a/arch/arm/mach-davinci/usb.c b/arch/arm/mach-davinci/usb.c
index 867ead2559ad..69680784448a 100644
--- a/arch/arm/mach-davinci/usb.c
+++ b/arch/arm/mach-davinci/usb.c
@@ -47,6 +47,7 @@ static struct musb_hdrc_platform_data usb_data = {
47#elif defined(CONFIG_USB_MUSB_HOST) 47#elif defined(CONFIG_USB_MUSB_HOST)
48 .mode = MUSB_HOST, 48 .mode = MUSB_HOST,
49#endif 49#endif
50 .clock = "usb",
50 .config = &musb_config, 51 .config = &musb_config,
51}; 52};
52 53
diff --git a/arch/arm/mach-rpc/riscpc.c b/arch/arm/mach-rpc/riscpc.c
index e88d417736af..c7fc01e9d1f6 100644
--- a/arch/arm/mach-rpc/riscpc.c
+++ b/arch/arm/mach-rpc/riscpc.c
@@ -19,6 +19,7 @@
19#include <linux/serial_8250.h> 19#include <linux/serial_8250.h>
20#include <linux/ata_platform.h> 20#include <linux/ata_platform.h>
21#include <linux/io.h> 21#include <linux/io.h>
22#include <linux/i2c.h>
22 23
23#include <asm/elf.h> 24#include <asm/elf.h>
24#include <asm/mach-types.h> 25#include <asm/mach-types.h>
@@ -201,8 +202,13 @@ static struct platform_device *devs[] __initdata = {
201 &pata_device, 202 &pata_device,
202}; 203};
203 204
205static struct i2c_board_info i2c_rtc = {
206 I2C_BOARD_INFO("pcf8583", 0x50)
207};
208
204static int __init rpc_init(void) 209static int __init rpc_init(void)
205{ 210{
211 i2c_register_board_info(0, &i2c_rtc, 1);
206 return platform_add_devices(devs, ARRAY_SIZE(devs)); 212 return platform_add_devices(devs, ARRAY_SIZE(devs));
207} 213}
208 214
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 4eb45c012498..153e727a6e8e 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -638,6 +638,17 @@ config DMAR
638 and include PCI device scope covered by these DMA 638 and include PCI device scope covered by these DMA
639 remapping devices. 639 remapping devices.
640 640
641config DMAR_DEFAULT_ON
642 def_bool y
643 prompt "Enable DMA Remapping Devices by default"
644 depends on DMAR
645 help
646 Selecting this option will enable a DMAR device at boot time if
647 one is found. If this option is not selected, DMAR support can
648 be enabled by passing intel_iommu=on to the kernel. It is
649 recommended you say N here while the DMAR code remains
650 experimental.
651
641endmenu 652endmenu
642 653
643endif 654endif
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c
index 006ad366a454..166e0d839fa0 100644
--- a/arch/ia64/kernel/iosapic.c
+++ b/arch/ia64/kernel/iosapic.c
@@ -507,7 +507,7 @@ static int iosapic_find_sharable_irq(unsigned long trigger, unsigned long pol)
507 if (trigger == IOSAPIC_EDGE) 507 if (trigger == IOSAPIC_EDGE)
508 return -EINVAL; 508 return -EINVAL;
509 509
510 for (i = 0; i <= NR_IRQS; i++) { 510 for (i = 0; i < NR_IRQS; i++) {
511 info = &iosapic_intr_info[i]; 511 info = &iosapic_intr_info[i];
512 if (info->trigger == trigger && info->polarity == pol && 512 if (info->trigger == trigger && info->polarity == pol &&
513 (info->dmode == IOSAPIC_FIXED || 513 (info->dmode == IOSAPIC_FIXED ||
diff --git a/arch/ia64/kernel/unwind.c b/arch/ia64/kernel/unwind.c
index 67810b77d998..b6c0e63a0bf6 100644
--- a/arch/ia64/kernel/unwind.c
+++ b/arch/ia64/kernel/unwind.c
@@ -2149,7 +2149,7 @@ unw_remove_unwind_table (void *handle)
2149 2149
2150 /* next, remove hash table entries for this table */ 2150 /* next, remove hash table entries for this table */
2151 2151
2152 for (index = 0; index <= UNW_HASH_SIZE; ++index) { 2152 for (index = 0; index < UNW_HASH_SIZE; ++index) {
2153 tmp = unw.cache + unw.hash[index]; 2153 tmp = unw.cache + unw.hash[index];
2154 if (unw.hash[index] >= UNW_CACHE_SIZE 2154 if (unw.hash[index] >= UNW_CACHE_SIZE
2155 || tmp->ip < table->start || tmp->ip >= table->end) 2155 || tmp->ip < table->start || tmp->ip >= table->end)
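
Both ia64 hunks above fix the same class of bug: an inclusive upper bound walks one element past the end of a fixed-size array. A minimal C illustration of the pattern being corrected:

#define NR_ENTRIES 16
static int table[NR_ENTRIES];

static int sum_entries(void)
{
        int i, sum = 0;

        /* The buggy form "i <= NR_ENTRIES" also reads table[NR_ENTRIES],
         * which is out of bounds; the correct bound is exclusive. */
        for (i = 0; i < NR_ENTRIES; i++)
                sum += table[i];
        return sum;
}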
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 600eef3f3ac7..e61465a18c7e 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -603,7 +603,7 @@ config CAVIUM_OCTEON_SIMULATOR
603 select SYS_SUPPORTS_64BIT_KERNEL 603 select SYS_SUPPORTS_64BIT_KERNEL
604 select SYS_SUPPORTS_BIG_ENDIAN 604 select SYS_SUPPORTS_BIG_ENDIAN
605 select SYS_SUPPORTS_HIGHMEM 605 select SYS_SUPPORTS_HIGHMEM
606 select CPU_CAVIUM_OCTEON 606 select SYS_HAS_CPU_CAVIUM_OCTEON
607 help 607 help
608 The Octeon simulator is software performance model of the Cavium 608 The Octeon simulator is software performance model of the Cavium
609 Octeon Processor. It supports simulating Octeon processors on x86 609 Octeon Processor. It supports simulating Octeon processors on x86
@@ -618,7 +618,7 @@ config CAVIUM_OCTEON_REFERENCE_BOARD
618 select SYS_SUPPORTS_BIG_ENDIAN 618 select SYS_SUPPORTS_BIG_ENDIAN
619 select SYS_SUPPORTS_HIGHMEM 619 select SYS_SUPPORTS_HIGHMEM
620 select SYS_HAS_EARLY_PRINTK 620 select SYS_HAS_EARLY_PRINTK
621 select CPU_CAVIUM_OCTEON 621 select SYS_HAS_CPU_CAVIUM_OCTEON
622 select SWAP_IO_SPACE 622 select SWAP_IO_SPACE
623 help 623 help
624 This option supports all of the Octeon reference boards from Cavium 624 This option supports all of the Octeon reference boards from Cavium
@@ -1234,6 +1234,7 @@ config CPU_SB1
1234 1234
1235config CPU_CAVIUM_OCTEON 1235config CPU_CAVIUM_OCTEON
1236 bool "Cavium Octeon processor" 1236 bool "Cavium Octeon processor"
1237 depends on SYS_HAS_CPU_CAVIUM_OCTEON
1237 select IRQ_CPU 1238 select IRQ_CPU
1238 select IRQ_CPU_OCTEON 1239 select IRQ_CPU_OCTEON
1239 select CPU_HAS_PREFETCH 1240 select CPU_HAS_PREFETCH
@@ -1314,6 +1315,9 @@ config SYS_HAS_CPU_RM9000
1314config SYS_HAS_CPU_SB1 1315config SYS_HAS_CPU_SB1
1315 bool 1316 bool
1316 1317
1318config SYS_HAS_CPU_CAVIUM_OCTEON
1319 bool
1320
1317# 1321#
1318# CPU may reorder R->R, R->W, W->R, W->W 1322# CPU may reorder R->R, R->W, W->R, W->W
1319# Reordering beyond LL and SC is handled in WEAK_REORDERING_BEYOND_LLSC 1323# Reordering beyond LL and SC is handled in WEAK_REORDERING_BEYOND_LLSC
@@ -1387,6 +1391,7 @@ config 32BIT
1387config 64BIT 1391config 64BIT
1388 bool "64-bit kernel" 1392 bool "64-bit kernel"
1389 depends on CPU_SUPPORTS_64BIT_KERNEL && SYS_SUPPORTS_64BIT_KERNEL 1393 depends on CPU_SUPPORTS_64BIT_KERNEL && SYS_SUPPORTS_64BIT_KERNEL
1394 select HAVE_SYSCALL_WRAPPERS
1390 help 1395 help
1391 Select this option if you want to build a 64-bit kernel. 1396 Select this option if you want to build a 64-bit kernel.
1392 1397
diff --git a/arch/mips/alchemy/common/time.c b/arch/mips/alchemy/common/time.c
index 6fd441d16af5..f58d4ffb8945 100644
--- a/arch/mips/alchemy/common/time.c
+++ b/arch/mips/alchemy/common/time.c
@@ -118,7 +118,7 @@ void __init plat_time_init(void)
118 * setup counter 1 (RTC) to tick at full speed 118 * setup counter 1 (RTC) to tick at full speed
119 */ 119 */
120 t = 0xffffff; 120 t = 0xffffff;
121 while ((au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_T1S) && t--) 121 while ((au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_T1S) && --t)
122 asm volatile ("nop"); 122 asm volatile ("nop");
123 if (!t) 123 if (!t)
124 goto cntr_err; 124 goto cntr_err;
@@ -127,7 +127,7 @@ void __init plat_time_init(void)
127 au_sync(); 127 au_sync();
128 128
129 t = 0xffffff; 129 t = 0xffffff;
130 while ((au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_C1S) && t--) 130 while ((au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_C1S) && --t)
131 asm volatile ("nop"); 131 asm volatile ("nop");
132 if (!t) 132 if (!t)
133 goto cntr_err; 133 goto cntr_err;
@@ -135,7 +135,7 @@ void __init plat_time_init(void)
135 au_sync(); 135 au_sync();
136 136
137 t = 0xffffff; 137 t = 0xffffff;
138 while ((au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_C1S) && t--) 138 while ((au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_C1S) && --t)
139 asm volatile ("nop"); 139 asm volatile ("nop");
140 if (!t) 140 if (!t)
141 goto cntr_err; 141 goto cntr_err;
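
The three timeout loops above change t-- to --t because of how the loop exits on timeout: with post-decrement, the counter is decremented once more after the final (false) test, so it ends up at -1 rather than 0 and the following if (!t) check never detects the timeout. Illustrative pattern with hypothetical register and mask names:

static int wait_for_bit_clear(volatile unsigned int *reg, unsigned int mask)
{
        unsigned int t = 0xffffff;

        while ((*reg & mask) && --t)    /* pre-decrement: t == 0 on timeout */
                ;                       /* busy-wait                        */

        return t ? 0 : -1;              /* -1 now reliably means timeout    */
}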
diff --git a/arch/mips/include/asm/seccomp.h b/arch/mips/include/asm/seccomp.h
index 36ed44070256..a6772e9507f5 100644
--- a/arch/mips/include/asm/seccomp.h
+++ b/arch/mips/include/asm/seccomp.h
@@ -1,6 +1,5 @@
1#ifndef __ASM_SECCOMP_H 1#ifndef __ASM_SECCOMP_H
2 2
3#include <linux/thread_info.h>
4#include <linux/unistd.h> 3#include <linux/unistd.h>
5 4
6#define __NR_seccomp_read __NR_read 5#define __NR_seccomp_read __NR_read
diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c
index a0ff2b66e22b..4b4007b3083a 100644
--- a/arch/mips/kernel/irq.c
+++ b/arch/mips/kernel/irq.c
@@ -111,7 +111,6 @@ int show_interrupts(struct seq_file *p, void *v)
111 seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]); 111 seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
112#endif 112#endif
113 seq_printf(p, " %14s", irq_desc[i].chip->name); 113 seq_printf(p, " %14s", irq_desc[i].chip->name);
114 seq_printf(p, "-%-8s", irq_desc[i].name);
115 seq_printf(p, " %s", action->name); 114 seq_printf(p, " %s", action->name);
116 115
117 for (action=action->next; action; action = action->next) 116 for (action=action->next; action; action = action->next)
diff --git a/arch/mips/kernel/linux32.c b/arch/mips/kernel/linux32.c
index aa2c55e3b55f..2f8452b404c7 100644
--- a/arch/mips/kernel/linux32.c
+++ b/arch/mips/kernel/linux32.c
@@ -32,6 +32,7 @@
32#include <linux/module.h> 32#include <linux/module.h>
33#include <linux/binfmts.h> 33#include <linux/binfmts.h>
34#include <linux/security.h> 34#include <linux/security.h>
35#include <linux/syscalls.h>
35#include <linux/compat.h> 36#include <linux/compat.h>
36#include <linux/vfs.h> 37#include <linux/vfs.h>
37#include <linux/ipc.h> 38#include <linux/ipc.h>
@@ -63,9 +64,9 @@
63#define merge_64(r1, r2) ((((r2) & 0xffffffffUL) << 32) + ((r1) & 0xffffffffUL)) 64#define merge_64(r1, r2) ((((r2) & 0xffffffffUL) << 32) + ((r1) & 0xffffffffUL))
64#endif 65#endif
65 66
66asmlinkage unsigned long 67SYSCALL_DEFINE6(32_mmap2, unsigned long, addr, unsigned long, len,
67sys32_mmap2(unsigned long addr, unsigned long len, unsigned long prot, 68 unsigned long, prot, unsigned long, flags, unsigned long, fd,
68 unsigned long flags, unsigned long fd, unsigned long pgoff) 69 unsigned long, pgoff)
69{ 70{
70 struct file * file = NULL; 71 struct file * file = NULL;
71 unsigned long error; 72 unsigned long error;
@@ -121,21 +122,21 @@ struct rlimit32 {
121 int rlim_max; 122 int rlim_max;
122}; 123};
123 124
124asmlinkage long sys32_truncate64(const char __user * path, 125SYSCALL_DEFINE4(32_truncate64, const char __user *, path,
125 unsigned long __dummy, int a2, int a3) 126 unsigned long, __dummy, unsigned long, a2, unsigned long, a3)
126{ 127{
127 return sys_truncate(path, merge_64(a2, a3)); 128 return sys_truncate(path, merge_64(a2, a3));
128} 129}
129 130
130asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long __dummy, 131SYSCALL_DEFINE4(32_ftruncate64, unsigned long, fd, unsigned long, __dummy,
131 int a2, int a3) 132 unsigned long, a2, unsigned long, a3)
132{ 133{
133 return sys_ftruncate(fd, merge_64(a2, a3)); 134 return sys_ftruncate(fd, merge_64(a2, a3));
134} 135}
135 136
136asmlinkage int sys32_llseek(unsigned int fd, unsigned int offset_high, 137SYSCALL_DEFINE5(32_llseek, unsigned long, fd, unsigned long, offset_high,
137 unsigned int offset_low, loff_t __user * result, 138 unsigned long, offset_low, loff_t __user *, result,
138 unsigned int origin) 139 unsigned long, origin)
139{ 140{
140 return sys_llseek(fd, offset_high, offset_low, result, origin); 141 return sys_llseek(fd, offset_high, offset_low, result, origin);
141} 142}
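
These compat wrappers rebuild a 64-bit argument that the 32-bit ABI hands over in two registers; merge_64(), quoted as context in the first hunk of this file (with one variant per endianness), glues the halves back together. A worked example using the quoted variant, assuming a 64-bit unsigned long as on the 64-bit kernels this code targets:

#include <stdio.h>

#define merge_64(r1, r2) ((((r2) & 0xffffffffUL) << 32) + ((r1) & 0xffffffffUL))

int main(void)
{
        unsigned long a2 = 0x89abcdefUL;   /* low half, from one register   */
        unsigned long a3 = 0x01234567UL;   /* high half, from the next one  */

        /* prints 0123456789abcdef: the reassembled 64-bit length/offset */
        printf("%016lx\n", merge_64(a2, a3));
        return 0;
}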
@@ -144,20 +145,20 @@ asmlinkage int sys32_llseek(unsigned int fd, unsigned int offset_high,
144 lseek back to original location. They fail just like lseek does on 145 lseek back to original location. They fail just like lseek does on
145 non-seekable files. */ 146 non-seekable files. */
146 147
147asmlinkage ssize_t sys32_pread(unsigned int fd, char __user * buf, 148SYSCALL_DEFINE6(32_pread, unsigned long, fd, char __user *, buf, size_t, count,
148 size_t count, u32 unused, u64 a4, u64 a5) 149 unsigned long, unused, unsigned long, a4, unsigned long, a5)
149{ 150{
150 return sys_pread64(fd, buf, count, merge_64(a4, a5)); 151 return sys_pread64(fd, buf, count, merge_64(a4, a5));
151} 152}
152 153
153asmlinkage ssize_t sys32_pwrite(unsigned int fd, const char __user * buf, 154SYSCALL_DEFINE6(32_pwrite, unsigned int, fd, const char __user *, buf,
154 size_t count, u32 unused, u64 a4, u64 a5) 155 size_t, count, u32, unused, u64, a4, u64, a5)
155{ 156{
156 return sys_pwrite64(fd, buf, count, merge_64(a4, a5)); 157 return sys_pwrite64(fd, buf, count, merge_64(a4, a5));
157} 158}
158 159
159asmlinkage int sys32_sched_rr_get_interval(compat_pid_t pid, 160SYSCALL_DEFINE2(32_sched_rr_get_interval, compat_pid_t, pid,
160 struct compat_timespec __user *interval) 161 struct compat_timespec __user *, interval)
161{ 162{
162 struct timespec t; 163 struct timespec t;
163 int ret; 164 int ret;
@@ -174,8 +175,8 @@ asmlinkage int sys32_sched_rr_get_interval(compat_pid_t pid,
174 175
175#ifdef CONFIG_SYSVIPC 176#ifdef CONFIG_SYSVIPC
176 177
177asmlinkage long 178SYSCALL_DEFINE6(32_ipc, u32, call, long, first, long, second, long, third,
178sys32_ipc(u32 call, int first, int second, int third, u32 ptr, u32 fifth) 179 unsigned long, ptr, unsigned long, fifth)
179{ 180{
180 int version, err; 181 int version, err;
181 182
@@ -233,8 +234,8 @@ sys32_ipc(u32 call, int first, int second, int third, u32 ptr, u32 fifth)
233 234
234#else 235#else
235 236
236asmlinkage long 237SYSCALL_DEFINE6(32_ipc, u32, call, int, first, int, second, int, third,
237sys32_ipc(u32 call, int first, int second, int third, u32 ptr, u32 fifth) 238 u32, ptr, u32 fifth)
238{ 239{
239 return -ENOSYS; 240 return -ENOSYS;
240} 241}
@@ -242,7 +243,7 @@ sys32_ipc(u32 call, int first, int second, int third, u32 ptr, u32 fifth)
242#endif /* CONFIG_SYSVIPC */ 243#endif /* CONFIG_SYSVIPC */
243 244
244#ifdef CONFIG_MIPS32_N32 245#ifdef CONFIG_MIPS32_N32
245asmlinkage long sysn32_semctl(int semid, int semnum, int cmd, u32 arg) 246SYSCALL_DEFINE4(n32_semctl, int, semid, int, semnum, int, cmd, u32, arg)
246{ 247{
247 /* compat_sys_semctl expects a pointer to union semun */ 248 /* compat_sys_semctl expects a pointer to union semun */
248 u32 __user *uptr = compat_alloc_user_space(sizeof(u32)); 249 u32 __user *uptr = compat_alloc_user_space(sizeof(u32));
@@ -251,13 +252,14 @@ asmlinkage long sysn32_semctl(int semid, int semnum, int cmd, u32 arg)
251 return compat_sys_semctl(semid, semnum, cmd, uptr); 252 return compat_sys_semctl(semid, semnum, cmd, uptr);
252} 253}
253 254
254asmlinkage long sysn32_msgsnd(int msqid, u32 msgp, unsigned msgsz, int msgflg) 255SYSCALL_DEFINE4(n32_msgsnd, int, msqid, u32, msgp, unsigned int, msgsz,
256 int, msgflg)
255{ 257{
256 return compat_sys_msgsnd(msqid, msgsz, msgflg, compat_ptr(msgp)); 258 return compat_sys_msgsnd(msqid, msgsz, msgflg, compat_ptr(msgp));
257} 259}
258 260
259asmlinkage long sysn32_msgrcv(int msqid, u32 msgp, size_t msgsz, int msgtyp, 261SYSCALL_DEFINE5(n32_msgrcv, int, msqid, u32, msgp, size_t, msgsz,
260 int msgflg) 262 int, msgtyp, int, msgflg)
261{ 263{
262 return compat_sys_msgrcv(msqid, msgsz, msgtyp, msgflg, IPC_64, 264 return compat_sys_msgrcv(msqid, msgsz, msgtyp, msgflg, IPC_64,
263 compat_ptr(msgp)); 265 compat_ptr(msgp));
@@ -277,7 +279,7 @@ struct sysctl_args32
277 279
278#ifdef CONFIG_SYSCTL_SYSCALL 280#ifdef CONFIG_SYSCTL_SYSCALL
279 281
280asmlinkage long sys32_sysctl(struct sysctl_args32 __user *args) 282SYSCALL_DEFINE1(32_sysctl, struct sysctl_args32 __user *, args)
281{ 283{
282 struct sysctl_args32 tmp; 284 struct sysctl_args32 tmp;
283 int error; 285 int error;
@@ -316,9 +318,16 @@ asmlinkage long sys32_sysctl(struct sysctl_args32 __user *args)
316 return error; 318 return error;
317} 319}
318 320
321#else
322
323SYSCALL_DEFINE1(32_sysctl, struct sysctl_args32 __user *, args)
324{
325 return -ENOSYS;
326}
327
319#endif /* CONFIG_SYSCTL_SYSCALL */ 328#endif /* CONFIG_SYSCTL_SYSCALL */
320 329
321asmlinkage long sys32_newuname(struct new_utsname __user * name) 330SYSCALL_DEFINE1(32_newuname, struct new_utsname __user *, name)
322{ 331{
323 int ret = 0; 332 int ret = 0;
324 333
@@ -334,7 +343,7 @@ asmlinkage long sys32_newuname(struct new_utsname __user * name)
334 return ret; 343 return ret;
335} 344}
336 345
337asmlinkage int sys32_personality(unsigned long personality) 346SYSCALL_DEFINE1(32_personality, unsigned long, personality)
338{ 347{
339 int ret; 348 int ret;
340 personality &= 0xffffffff; 349 personality &= 0xffffffff;
@@ -357,7 +366,7 @@ struct ustat32 {
357 366
358extern asmlinkage long sys_ustat(dev_t dev, struct ustat __user * ubuf); 367extern asmlinkage long sys_ustat(dev_t dev, struct ustat __user * ubuf);
359 368
360asmlinkage int sys32_ustat(dev_t dev, struct ustat32 __user * ubuf32) 369SYSCALL_DEFINE2(32_ustat, dev_t, dev, struct ustat32 __user *, ubuf32)
361{ 370{
362 int err; 371 int err;
363 struct ustat tmp; 372 struct ustat tmp;
@@ -381,8 +390,8 @@ out:
381 return err; 390 return err;
382} 391}
383 392
384asmlinkage int sys32_sendfile(int out_fd, int in_fd, compat_off_t __user *offset, 393SYSCALL_DEFINE4(32_sendfile, long, out_fd, long, in_fd,
385 s32 count) 394 compat_off_t __user *, offset, s32, count)
386{ 395{
387 mm_segment_t old_fs = get_fs(); 396 mm_segment_t old_fs = get_fs();
388 int ret; 397 int ret;
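
The hunks above convert the MIPS compat entry points from open-coded asmlinkage definitions to the SYSCALL_DEFINEn() macros. A minimal sketch of the idea follows; it is not the kernel's real macro (which also registers syscall metadata and, on some architectures, adds argument sign-extension wrappers), and the SKETCH_ names are invented for illustration.

/*
 * Minimal sketch only: SYSCALL_DEFINEn pairs each parameter type with its
 * name and emits an ordinary definition called sys_<name>.
 */
#define asmlinkage			/* linkage attribute, stubbed for the sketch */
#define SKETCH_SYSCALL_DEFINE2(name, t1, a1, t2, a2)	\
	asmlinkage long sys_##name(t1 a1, t2 a2)

SKETCH_SYSCALL_DEFINE2(32_example, int, fd, unsigned long, arg)
{
	(void)fd; (void)arg;
	return 0;		/* the function body is untouched by the conversion */
}

So SYSCALL_DEFINE4(32_sendfile, ...) above ends up defining sys_32_sendfile(), which is the symbol the updated assembly tables reference below.
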
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
index 51d1ba415b90..9ab70c3b5be6 100644
--- a/arch/mips/kernel/scall32-o32.S
+++ b/arch/mips/kernel/scall32-o32.S
@@ -399,7 +399,7 @@ einval: li v0, -ENOSYS
399 sys sys_swapon 2 399 sys sys_swapon 2
400 sys sys_reboot 3 400 sys sys_reboot 3
401 sys sys_old_readdir 3 401 sys sys_old_readdir 3
402 sys old_mmap 6 /* 4090 */ 402 sys sys_mips_mmap 6 /* 4090 */
403 sys sys_munmap 2 403 sys sys_munmap 2
404 sys sys_truncate 2 404 sys sys_truncate 2
405 sys sys_ftruncate 2 405 sys sys_ftruncate 2
@@ -519,7 +519,7 @@ einval: li v0, -ENOSYS
519 sys sys_sendfile 4 519 sys sys_sendfile 4
520 sys sys_ni_syscall 0 520 sys sys_ni_syscall 0
521 sys sys_ni_syscall 0 521 sys sys_ni_syscall 0
522 sys sys_mmap2 6 /* 4210 */ 522 sys sys_mips_mmap2 6 /* 4210 */
523 sys sys_truncate64 4 523 sys sys_truncate64 4
524 sys sys_ftruncate64 4 524 sys sys_ftruncate64 4
525 sys sys_stat64 2 525 sys sys_stat64 2
diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
index a9e171618994..9b4698667154 100644
--- a/arch/mips/kernel/scall64-64.S
+++ b/arch/mips/kernel/scall64-64.S
@@ -207,7 +207,7 @@ sys_call_table:
207 PTR sys_newlstat 207 PTR sys_newlstat
208 PTR sys_poll 208 PTR sys_poll
209 PTR sys_lseek 209 PTR sys_lseek
210 PTR old_mmap 210 PTR sys_mips_mmap
211 PTR sys_mprotect /* 5010 */ 211 PTR sys_mprotect /* 5010 */
212 PTR sys_munmap 212 PTR sys_munmap
213 PTR sys_brk 213 PTR sys_brk
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index 30f3b6317a83..7438e92f8a01 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -129,12 +129,12 @@ EXPORT(sysn32_call_table)
129 PTR sys_newlstat 129 PTR sys_newlstat
130 PTR sys_poll 130 PTR sys_poll
131 PTR sys_lseek 131 PTR sys_lseek
132 PTR old_mmap 132 PTR sys_mips_mmap
133 PTR sys_mprotect /* 6010 */ 133 PTR sys_mprotect /* 6010 */
134 PTR sys_munmap 134 PTR sys_munmap
135 PTR sys_brk 135 PTR sys_brk
136 PTR sys32_rt_sigaction 136 PTR sys_32_rt_sigaction
137 PTR sys32_rt_sigprocmask 137 PTR sys_32_rt_sigprocmask
138 PTR compat_sys_ioctl /* 6015 */ 138 PTR compat_sys_ioctl /* 6015 */
139 PTR sys_pread64 139 PTR sys_pread64
140 PTR sys_pwrite64 140 PTR sys_pwrite64
@@ -159,7 +159,7 @@ EXPORT(sysn32_call_table)
159 PTR compat_sys_setitimer 159 PTR compat_sys_setitimer
160 PTR sys_alarm 160 PTR sys_alarm
161 PTR sys_getpid 161 PTR sys_getpid
162 PTR sys32_sendfile 162 PTR sys_32_sendfile
163 PTR sys_socket /* 6040 */ 163 PTR sys_socket /* 6040 */
164 PTR sys_connect 164 PTR sys_connect
165 PTR sys_accept 165 PTR sys_accept
@@ -181,14 +181,14 @@ EXPORT(sysn32_call_table)
181 PTR sys_exit 181 PTR sys_exit
182 PTR compat_sys_wait4 182 PTR compat_sys_wait4
183 PTR sys_kill /* 6060 */ 183 PTR sys_kill /* 6060 */
184 PTR sys32_newuname 184 PTR sys_32_newuname
185 PTR sys_semget 185 PTR sys_semget
186 PTR sys_semop 186 PTR sys_semop
187 PTR sysn32_semctl 187 PTR sys_n32_semctl
188 PTR sys_shmdt /* 6065 */ 188 PTR sys_shmdt /* 6065 */
189 PTR sys_msgget 189 PTR sys_msgget
190 PTR sysn32_msgsnd 190 PTR sys_n32_msgsnd
191 PTR sysn32_msgrcv 191 PTR sys_n32_msgrcv
192 PTR compat_sys_msgctl 192 PTR compat_sys_msgctl
193 PTR compat_sys_fcntl /* 6070 */ 193 PTR compat_sys_fcntl /* 6070 */
194 PTR sys_flock 194 PTR sys_flock
@@ -245,15 +245,15 @@ EXPORT(sysn32_call_table)
245 PTR sys_getsid 245 PTR sys_getsid
246 PTR sys_capget 246 PTR sys_capget
247 PTR sys_capset 247 PTR sys_capset
248 PTR sys32_rt_sigpending /* 6125 */ 248 PTR sys_32_rt_sigpending /* 6125 */
249 PTR compat_sys_rt_sigtimedwait 249 PTR compat_sys_rt_sigtimedwait
250 PTR sys32_rt_sigqueueinfo 250 PTR sys_32_rt_sigqueueinfo
251 PTR sysn32_rt_sigsuspend 251 PTR sysn32_rt_sigsuspend
252 PTR sys32_sigaltstack 252 PTR sys32_sigaltstack
253 PTR compat_sys_utime /* 6130 */ 253 PTR compat_sys_utime /* 6130 */
254 PTR sys_mknod 254 PTR sys_mknod
255 PTR sys32_personality 255 PTR sys_32_personality
256 PTR sys32_ustat 256 PTR sys_32_ustat
257 PTR compat_sys_statfs 257 PTR compat_sys_statfs
258 PTR compat_sys_fstatfs /* 6135 */ 258 PTR compat_sys_fstatfs /* 6135 */
259 PTR sys_sysfs 259 PTR sys_sysfs
@@ -265,14 +265,14 @@ EXPORT(sysn32_call_table)
265 PTR sys_sched_getscheduler 265 PTR sys_sched_getscheduler
266 PTR sys_sched_get_priority_max 266 PTR sys_sched_get_priority_max
267 PTR sys_sched_get_priority_min 267 PTR sys_sched_get_priority_min
268 PTR sys32_sched_rr_get_interval /* 6145 */ 268 PTR sys_32_sched_rr_get_interval /* 6145 */
269 PTR sys_mlock 269 PTR sys_mlock
270 PTR sys_munlock 270 PTR sys_munlock
271 PTR sys_mlockall 271 PTR sys_mlockall
272 PTR sys_munlockall 272 PTR sys_munlockall
273 PTR sys_vhangup /* 6150 */ 273 PTR sys_vhangup /* 6150 */
274 PTR sys_pivot_root 274 PTR sys_pivot_root
275 PTR sys32_sysctl 275 PTR sys_32_sysctl
276 PTR sys_prctl 276 PTR sys_prctl
277 PTR compat_sys_adjtimex 277 PTR compat_sys_adjtimex
278 PTR compat_sys_setrlimit /* 6155 */ 278 PTR compat_sys_setrlimit /* 6155 */
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index fefef4af8595..b0fef4ff9827 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -265,12 +265,12 @@ sys_call_table:
265 PTR sys_olduname 265 PTR sys_olduname
266 PTR sys_umask /* 4060 */ 266 PTR sys_umask /* 4060 */
267 PTR sys_chroot 267 PTR sys_chroot
268 PTR sys32_ustat 268 PTR sys_32_ustat
269 PTR sys_dup2 269 PTR sys_dup2
270 PTR sys_getppid 270 PTR sys_getppid
271 PTR sys_getpgrp /* 4065 */ 271 PTR sys_getpgrp /* 4065 */
272 PTR sys_setsid 272 PTR sys_setsid
273 PTR sys32_sigaction 273 PTR sys_32_sigaction
274 PTR sys_sgetmask 274 PTR sys_sgetmask
275 PTR sys_ssetmask 275 PTR sys_ssetmask
276 PTR sys_setreuid /* 4070 */ 276 PTR sys_setreuid /* 4070 */
@@ -293,7 +293,7 @@ sys_call_table:
293 PTR sys_swapon 293 PTR sys_swapon
294 PTR sys_reboot 294 PTR sys_reboot
295 PTR compat_sys_old_readdir 295 PTR compat_sys_old_readdir
296 PTR old_mmap /* 4090 */ 296 PTR sys_mips_mmap /* 4090 */
297 PTR sys_munmap 297 PTR sys_munmap
298 PTR sys_truncate 298 PTR sys_truncate
299 PTR sys_ftruncate 299 PTR sys_ftruncate
@@ -320,12 +320,12 @@ sys_call_table:
320 PTR compat_sys_wait4 320 PTR compat_sys_wait4
321 PTR sys_swapoff /* 4115 */ 321 PTR sys_swapoff /* 4115 */
322 PTR compat_sys_sysinfo 322 PTR compat_sys_sysinfo
323 PTR sys32_ipc 323 PTR sys_32_ipc
324 PTR sys_fsync 324 PTR sys_fsync
325 PTR sys32_sigreturn 325 PTR sys32_sigreturn
326 PTR sys32_clone /* 4120 */ 326 PTR sys32_clone /* 4120 */
327 PTR sys_setdomainname 327 PTR sys_setdomainname
328 PTR sys32_newuname 328 PTR sys_32_newuname
329 PTR sys_ni_syscall /* sys_modify_ldt */ 329 PTR sys_ni_syscall /* sys_modify_ldt */
330 PTR compat_sys_adjtimex 330 PTR compat_sys_adjtimex
331 PTR sys_mprotect /* 4125 */ 331 PTR sys_mprotect /* 4125 */
@@ -339,11 +339,11 @@ sys_call_table:
339 PTR sys_fchdir 339 PTR sys_fchdir
340 PTR sys_bdflush 340 PTR sys_bdflush
341 PTR sys_sysfs /* 4135 */ 341 PTR sys_sysfs /* 4135 */
342 PTR sys32_personality 342 PTR sys_32_personality
343 PTR sys_ni_syscall /* for afs_syscall */ 343 PTR sys_ni_syscall /* for afs_syscall */
344 PTR sys_setfsuid 344 PTR sys_setfsuid
345 PTR sys_setfsgid 345 PTR sys_setfsgid
346 PTR sys32_llseek /* 4140 */ 346 PTR sys_32_llseek /* 4140 */
347 PTR compat_sys_getdents 347 PTR compat_sys_getdents
348 PTR compat_sys_select 348 PTR compat_sys_select
349 PTR sys_flock 349 PTR sys_flock
@@ -356,7 +356,7 @@ sys_call_table:
356 PTR sys_ni_syscall /* 4150 */ 356 PTR sys_ni_syscall /* 4150 */
357 PTR sys_getsid 357 PTR sys_getsid
358 PTR sys_fdatasync 358 PTR sys_fdatasync
359 PTR sys32_sysctl 359 PTR sys_32_sysctl
360 PTR sys_mlock 360 PTR sys_mlock
361 PTR sys_munlock /* 4155 */ 361 PTR sys_munlock /* 4155 */
362 PTR sys_mlockall 362 PTR sys_mlockall
@@ -368,7 +368,7 @@ sys_call_table:
368 PTR sys_sched_yield 368 PTR sys_sched_yield
369 PTR sys_sched_get_priority_max 369 PTR sys_sched_get_priority_max
370 PTR sys_sched_get_priority_min 370 PTR sys_sched_get_priority_min
371 PTR sys32_sched_rr_get_interval /* 4165 */ 371 PTR sys_32_sched_rr_get_interval /* 4165 */
372 PTR compat_sys_nanosleep 372 PTR compat_sys_nanosleep
373 PTR sys_mremap 373 PTR sys_mremap
374 PTR sys_accept 374 PTR sys_accept
@@ -397,25 +397,25 @@ sys_call_table:
397 PTR sys_getresgid 397 PTR sys_getresgid
398 PTR sys_prctl 398 PTR sys_prctl
399 PTR sys32_rt_sigreturn 399 PTR sys32_rt_sigreturn
400 PTR sys32_rt_sigaction 400 PTR sys_32_rt_sigaction
401 PTR sys32_rt_sigprocmask /* 4195 */ 401 PTR sys_32_rt_sigprocmask /* 4195 */
402 PTR sys32_rt_sigpending 402 PTR sys_32_rt_sigpending
403 PTR compat_sys_rt_sigtimedwait 403 PTR compat_sys_rt_sigtimedwait
404 PTR sys32_rt_sigqueueinfo 404 PTR sys_32_rt_sigqueueinfo
405 PTR sys32_rt_sigsuspend 405 PTR sys32_rt_sigsuspend
406 PTR sys32_pread /* 4200 */ 406 PTR sys_32_pread /* 4200 */
407 PTR sys32_pwrite 407 PTR sys_32_pwrite
408 PTR sys_chown 408 PTR sys_chown
409 PTR sys_getcwd 409 PTR sys_getcwd
410 PTR sys_capget 410 PTR sys_capget
411 PTR sys_capset /* 4205 */ 411 PTR sys_capset /* 4205 */
412 PTR sys32_sigaltstack 412 PTR sys32_sigaltstack
413 PTR sys32_sendfile 413 PTR sys_32_sendfile
414 PTR sys_ni_syscall 414 PTR sys_ni_syscall
415 PTR sys_ni_syscall 415 PTR sys_ni_syscall
416 PTR sys32_mmap2 /* 4210 */ 416 PTR sys_mips_mmap2 /* 4210 */
417 PTR sys32_truncate64 417 PTR sys_32_truncate64
418 PTR sys32_ftruncate64 418 PTR sys_32_ftruncate64
419 PTR sys_newstat 419 PTR sys_newstat
420 PTR sys_newlstat 420 PTR sys_newlstat
421 PTR sys_newfstat /* 4215 */ 421 PTR sys_newfstat /* 4215 */
@@ -481,7 +481,7 @@ sys_call_table:
481 PTR compat_sys_mq_notify /* 4275 */ 481 PTR compat_sys_mq_notify /* 4275 */
482 PTR compat_sys_mq_getsetattr 482 PTR compat_sys_mq_getsetattr
483 PTR sys_ni_syscall /* sys_vserver */ 483 PTR sys_ni_syscall /* sys_vserver */
484 PTR sys32_waitid 484 PTR sys_32_waitid
485 PTR sys_ni_syscall /* available, was setaltroot */ 485 PTR sys_ni_syscall /* available, was setaltroot */
486 PTR sys_add_key /* 4280 */ 486 PTR sys_add_key /* 4280 */
487 PTR sys_request_key 487 PTR sys_request_key
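
For readers following the renames: each PTR line above is one slot in a flat array of handlers indexed by syscall number minus the ABI base (4000 for o32, 6000 for n32), so every rename in these tables has to match the symbol emitted by SYSCALL_DEFINEn() in the C files. A rough C view, as a sketch only (the real tables stay in assembly, and the prototypes are collapsed here):

typedef long (*syscall_fn)(void);

extern long sys_capset(void);		/* prototypes collapsed for the sketch */
extern long sys32_sigaltstack(void);
extern long sys_32_sendfile(void);

static const syscall_fn o32_table_sketch[] = {
	sys_capset,			/* 4205 */
	sys32_sigaltstack,		/* 4206 */
	sys_32_sendfile,		/* 4207 -- was sys32_sendfile */
};
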
diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c
index a4e106c56ab5..830c5ef9932b 100644
--- a/arch/mips/kernel/signal.c
+++ b/arch/mips/kernel/signal.c
@@ -19,6 +19,7 @@
19#include <linux/ptrace.h> 19#include <linux/ptrace.h>
20#include <linux/unistd.h> 20#include <linux/unistd.h>
21#include <linux/compiler.h> 21#include <linux/compiler.h>
22#include <linux/syscalls.h>
22#include <linux/uaccess.h> 23#include <linux/uaccess.h>
23 24
24#include <asm/abi.h> 25#include <asm/abi.h>
@@ -338,8 +339,8 @@ asmlinkage int sys_rt_sigsuspend(nabi_no_regargs struct pt_regs regs)
338} 339}
339 340
340#ifdef CONFIG_TRAD_SIGNALS 341#ifdef CONFIG_TRAD_SIGNALS
341asmlinkage int sys_sigaction(int sig, const struct sigaction __user *act, 342SYSCALL_DEFINE3(sigaction, int, sig, const struct sigaction __user *, act,
342 struct sigaction __user *oact) 343 struct sigaction __user *, oact)
343{ 344{
344 struct k_sigaction new_ka, old_ka; 345 struct k_sigaction new_ka, old_ka;
345 int ret; 346 int ret;
diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c
index 652709b353ad..2e74075ac0ca 100644
--- a/arch/mips/kernel/signal32.c
+++ b/arch/mips/kernel/signal32.c
@@ -349,8 +349,8 @@ asmlinkage int sys32_rt_sigsuspend(nabi_no_regargs struct pt_regs regs)
349 return -ERESTARTNOHAND; 349 return -ERESTARTNOHAND;
350} 350}
351 351
352asmlinkage int sys32_sigaction(int sig, const struct sigaction32 __user *act, 352SYSCALL_DEFINE3(32_sigaction, long, sig, const struct sigaction32 __user *, act,
353 struct sigaction32 __user *oact) 353 struct sigaction32 __user *, oact)
354{ 354{
355 struct k_sigaction new_ka, old_ka; 355 struct k_sigaction new_ka, old_ka;
356 int ret; 356 int ret;
@@ -704,9 +704,9 @@ struct mips_abi mips_abi_32 = {
704 .restart = __NR_O32_restart_syscall 704 .restart = __NR_O32_restart_syscall
705}; 705};
706 706
707asmlinkage int sys32_rt_sigaction(int sig, const struct sigaction32 __user *act, 707SYSCALL_DEFINE4(32_rt_sigaction, int, sig,
708 struct sigaction32 __user *oact, 708 const struct sigaction32 __user *, act,
709 unsigned int sigsetsize) 709 struct sigaction32 __user *, oact, unsigned int, sigsetsize)
710{ 710{
711 struct k_sigaction new_sa, old_sa; 711 struct k_sigaction new_sa, old_sa;
712 int ret = -EINVAL; 712 int ret = -EINVAL;
@@ -748,8 +748,8 @@ out:
748 return ret; 748 return ret;
749} 749}
750 750
751asmlinkage int sys32_rt_sigprocmask(int how, compat_sigset_t __user *set, 751SYSCALL_DEFINE4(32_rt_sigprocmask, int, how, compat_sigset_t __user *, set,
752 compat_sigset_t __user *oset, unsigned int sigsetsize) 752 compat_sigset_t __user *, oset, unsigned int, sigsetsize)
753{ 753{
754 sigset_t old_set, new_set; 754 sigset_t old_set, new_set;
755 int ret; 755 int ret;
@@ -770,8 +770,8 @@ asmlinkage int sys32_rt_sigprocmask(int how, compat_sigset_t __user *set,
770 return ret; 770 return ret;
771} 771}
772 772
773asmlinkage int sys32_rt_sigpending(compat_sigset_t __user *uset, 773SYSCALL_DEFINE2(32_rt_sigpending, compat_sigset_t __user *, uset,
774 unsigned int sigsetsize) 774 unsigned int, sigsetsize)
775{ 775{
776 int ret; 776 int ret;
777 sigset_t set; 777 sigset_t set;
@@ -787,7 +787,8 @@ asmlinkage int sys32_rt_sigpending(compat_sigset_t __user *uset,
787 return ret; 787 return ret;
788} 788}
789 789
790asmlinkage int sys32_rt_sigqueueinfo(int pid, int sig, compat_siginfo_t __user *uinfo) 790SYSCALL_DEFINE3(32_rt_sigqueueinfo, int, pid, int, sig,
791 compat_siginfo_t __user *, uinfo)
791{ 792{
792 siginfo_t info; 793 siginfo_t info;
793 int ret; 794 int ret;
@@ -802,10 +803,9 @@ asmlinkage int sys32_rt_sigqueueinfo(int pid, int sig, compat_siginfo_t __user *
802 return ret; 803 return ret;
803} 804}
804 805
805asmlinkage long 806SYSCALL_DEFINE5(32_waitid, int, which, compat_pid_t, pid,
806sys32_waitid(int which, compat_pid_t pid, 807 compat_siginfo_t __user *, uinfo, int, options,
807 compat_siginfo_t __user *uinfo, int options, 808 struct compat_rusage __user *, uru)
808 struct compat_rusage __user *uru)
809{ 809{
810 siginfo_t info; 810 siginfo_t info;
811 struct rusage ru; 811 struct rusage ru;
diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c
index 37970d9b2186..8cf384644040 100644
--- a/arch/mips/kernel/syscall.c
+++ b/arch/mips/kernel/syscall.c
@@ -152,9 +152,9 @@ out:
152 return error; 152 return error;
153} 153}
154 154
155asmlinkage unsigned long 155SYSCALL_DEFINE6(mips_mmap, unsigned long, addr, unsigned long, len,
156old_mmap(unsigned long addr, unsigned long len, int prot, 156 unsigned long, prot, unsigned long, flags, unsigned long,
157 int flags, int fd, off_t offset) 157 fd, off_t, offset)
158{ 158{
159 unsigned long result; 159 unsigned long result;
160 160
@@ -168,9 +168,9 @@ out:
168 return result; 168 return result;
169} 169}
170 170
171asmlinkage unsigned long 171SYSCALL_DEFINE6(mips_mmap2, unsigned long, addr, unsigned long, len,
172sys_mmap2(unsigned long addr, unsigned long len, unsigned long prot, 172 unsigned long, prot, unsigned long, flags, unsigned long, fd,
173 unsigned long flags, unsigned long fd, unsigned long pgoff) 173 unsigned long, pgoff)
174{ 174{
175 if (pgoff & (~PAGE_MASK >> 12)) 175 if (pgoff & (~PAGE_MASK >> 12))
176 return -EINVAL; 176 return -EINVAL;
@@ -240,7 +240,7 @@ out:
240/* 240/*
241 * Compacrapability ... 241 * Compacrapability ...
242 */ 242 */
243asmlinkage int sys_uname(struct old_utsname __user * name) 243SYSCALL_DEFINE1(uname, struct old_utsname __user *, name)
244{ 244{
245 if (name && !copy_to_user(name, utsname(), sizeof (*name))) 245 if (name && !copy_to_user(name, utsname(), sizeof (*name)))
246 return 0; 246 return 0;
@@ -250,7 +250,7 @@ asmlinkage int sys_uname(struct old_utsname __user * name)
250/* 250/*
251 * Compacrapability ... 251 * Compacrapability ...
252 */ 252 */
253asmlinkage int sys_olduname(struct oldold_utsname __user * name) 253SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
254{ 254{
255 int error; 255 int error;
256 256
@@ -279,7 +279,7 @@ asmlinkage int sys_olduname(struct oldold_utsname __user * name)
279 return error; 279 return error;
280} 280}
281 281
282asmlinkage int sys_set_thread_area(unsigned long addr) 282SYSCALL_DEFINE1(set_thread_area, unsigned long, addr)
283{ 283{
284 struct thread_info *ti = task_thread_info(current); 284 struct thread_info *ti = task_thread_info(current);
285 285
@@ -290,7 +290,7 @@ asmlinkage int sys_set_thread_area(unsigned long addr)
290 return 0; 290 return 0;
291} 291}
292 292
293asmlinkage int _sys_sysmips(int cmd, long arg1, int arg2, int arg3) 293asmlinkage int _sys_sysmips(long cmd, long arg1, long arg2, long arg3)
294{ 294{
295 switch (cmd) { 295 switch (cmd) {
296 case MIPS_ATOMIC_SET: 296 case MIPS_ATOMIC_SET:
@@ -325,8 +325,8 @@ asmlinkage int _sys_sysmips(int cmd, long arg1, int arg2, int arg3)
325 * 325 *
326 * This is really horribly ugly. 326 * This is really horribly ugly.
327 */ 327 */
328asmlinkage int sys_ipc(unsigned int call, int first, int second, 328SYSCALL_DEFINE6(ipc, unsigned int, call, int, first, int, second,
329 unsigned long third, void __user *ptr, long fifth) 329 unsigned long, third, void __user *, ptr, long, fifth)
330{ 330{
331 int version, ret; 331 int version, ret;
332 332
@@ -411,7 +411,7 @@ asmlinkage int sys_ipc(unsigned int call, int first, int second,
411/* 411/*
412 * Not implemented yet ... 412 * Not implemented yet ...
413 */ 413 */
414asmlinkage int sys_cachectl(char *addr, int nbytes, int op) 414SYSCALL_DEFINE3(cachectl, char *, addr, int, nbytes, int, op)
415{ 415{
416 return -ENOSYS; 416 return -ENOSYS;
417} 417}
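
One detail worth spelling out from sys_mips_mmap2() above: mmap2 passes its file offset in 4096-byte units, and the "pgoff & (~PAGE_MASK >> 12)" test rejects offsets that are not a whole number of kernel pages when PAGE_SIZE is larger than 4 KB. A small stand-alone illustration; the values below are made up for the example:

#include <stdio.h>

int main(void)
{
	unsigned long page_size = 16384;		/* e.g. 16 KB pages      */
	unsigned long page_mask = ~(page_size - 1);	/* PAGE_MASK analogue    */
	unsigned long pgoff = 3;			/* 3 * 4 KB = 12 KB      */

	if (pgoff & (~page_mask >> 12))
		printf("rejected: 12 KB is not a multiple of the page size\n");
	else
		printf("accepted\n");
	return 0;
}
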
diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c
index 98ad0a82c29e..694d51f523d1 100644
--- a/arch/mips/mm/cache.c
+++ b/arch/mips/mm/cache.c
@@ -13,6 +13,7 @@
13#include <linux/linkage.h> 13#include <linux/linkage.h>
14#include <linux/module.h> 14#include <linux/module.h>
15#include <linux/sched.h> 15#include <linux/sched.h>
16#include <linux/syscalls.h>
16#include <linux/mm.h> 17#include <linux/mm.h>
17 18
18#include <asm/cacheflush.h> 19#include <asm/cacheflush.h>
@@ -58,8 +59,8 @@ EXPORT_SYMBOL(_dma_cache_wback_inv);
58 * We could optimize the case where the cache argument is not BCACHE but 59 * We could optimize the case where the cache argument is not BCACHE but
59 * that seems very atypical use ... 60 * that seems very atypical use ...
60 */ 61 */
61asmlinkage int sys_cacheflush(unsigned long addr, 62SYSCALL_DEFINE3(cacheflush, unsigned long, addr, unsigned long, bytes,
62 unsigned long bytes, unsigned int cache) 63 unsigned int, cache)
63{ 64{
64 if (bytes == 0) 65 if (bytes == 0)
65 return 0; 66 return 0;
diff --git a/arch/powerpc/include/asm/compat.h b/arch/powerpc/include/asm/compat.h
index d811a8cd7b58..4774c2f92232 100644
--- a/arch/powerpc/include/asm/compat.h
+++ b/arch/powerpc/include/asm/compat.h
@@ -210,5 +210,10 @@ struct compat_shmid64_ds {
210 compat_ulong_t __unused6; 210 compat_ulong_t __unused6;
211}; 211};
212 212
213static inline int is_compat_task(void)
214{
215 return test_thread_flag(TIF_32BIT);
216}
217
213#endif /* __KERNEL__ */ 218#endif /* __KERNEL__ */
214#endif /* _ASM_POWERPC_COMPAT_H */ 219#endif /* _ASM_POWERPC_COMPAT_H */
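
The new is_compat_task() helper (added identically for sparc further down) moves the TIF_32BIT test out of the seccomp header, so callers no longer need thread-info details just to ask whether the current task runs the 32-bit ABI. A hedged usage sketch; the table names are invented and not part of this patch:

static const int mode1_syscalls_sketch[]    = { 0 /* native __NR_read, ... */ };
static const int mode1_syscalls_32_sketch[] = { 0 /* 32-bit ABI numbers    */ };

static const int *pick_mode1_table(void)
{
	if (is_compat_task())		/* helper added in <asm/compat.h> above */
		return mode1_syscalls_32_sketch;
	return mode1_syscalls_sketch;
}
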
diff --git a/arch/powerpc/include/asm/seccomp.h b/arch/powerpc/include/asm/seccomp.h
index 853765eb1f65..00c1d9133cfe 100644
--- a/arch/powerpc/include/asm/seccomp.h
+++ b/arch/powerpc/include/asm/seccomp.h
@@ -1,10 +1,6 @@
1#ifndef _ASM_POWERPC_SECCOMP_H 1#ifndef _ASM_POWERPC_SECCOMP_H
2#define _ASM_POWERPC_SECCOMP_H 2#define _ASM_POWERPC_SECCOMP_H
3 3
4#ifdef __KERNEL__
5#include <linux/thread_info.h>
6#endif
7
8#include <linux/unistd.h> 4#include <linux/unistd.h>
9 5
10#define __NR_seccomp_read __NR_read 6#define __NR_seccomp_read __NR_read
diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c
index ada06924a423..73cb6a3229ae 100644
--- a/arch/powerpc/kernel/align.c
+++ b/arch/powerpc/kernel/align.c
@@ -367,27 +367,24 @@ static int emulate_multiple(struct pt_regs *regs, unsigned char __user *addr,
367static int emulate_fp_pair(unsigned char __user *addr, unsigned int reg, 367static int emulate_fp_pair(unsigned char __user *addr, unsigned int reg,
368 unsigned int flags) 368 unsigned int flags)
369{ 369{
370 char *ptr = (char *) &current->thread.TS_FPR(reg); 370 char *ptr0 = (char *) &current->thread.TS_FPR(reg);
371 int i, ret; 371 char *ptr1 = (char *) &current->thread.TS_FPR(reg+1);
372 int i, ret, sw = 0;
372 373
373 if (!(flags & F)) 374 if (!(flags & F))
374 return 0; 375 return 0;
375 if (reg & 1) 376 if (reg & 1)
376 return 0; /* invalid form: FRS/FRT must be even */ 377 return 0; /* invalid form: FRS/FRT must be even */
377 if (!(flags & SW)) { 378 if (flags & SW)
378 /* not byte-swapped - easy */ 379 sw = 7;
379 if (!(flags & ST)) 380 ret = 0;
380 ret = __copy_from_user(ptr, addr, 16); 381 for (i = 0; i < 8; ++i) {
381 else 382 if (!(flags & ST)) {
382 ret = __copy_to_user(addr, ptr, 16); 383 ret |= __get_user(ptr0[i^sw], addr + i);
383 } else { 384 ret |= __get_user(ptr1[i^sw], addr + i + 8);
384 /* each FPR value is byte-swapped separately */ 385 } else {
385 ret = 0; 386 ret |= __put_user(ptr0[i^sw], addr + i);
386 for (i = 0; i < 16; ++i) { 387 ret |= __put_user(ptr1[i^sw], addr + i + 8);
387 if (!(flags & ST))
388 ret |= __get_user(ptr[i^7], addr + i);
389 else
390 ret |= __put_user(ptr[i^7], addr + i);
391 } 388 }
392 } 389 }
393 if (ret) 390 if (ret)
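
The rewritten emulate_fp_pair() above folds the straight and byte-swapped paths into one loop over the two FPRs; the "i ^ sw" index does the work, since sw is 7 in the byte-swapped case and 0 otherwise. A stand-alone demonstration of that indexing trick:

#include <stdio.h>

int main(void)
{
	unsigned char src[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };
	unsigned char dst[8];
	int i, sw = 7;			/* 7 = byte-swap, 0 = straight copy */

	for (i = 0; i < 8; i++)
		dst[i ^ sw] = src[i];	/* same index trick as emulate_fp_pair() */

	for (i = 0; i < 8; i++)
		printf("%d ", dst[i]);	/* prints: 7 6 5 4 3 2 1 0 */
	printf("\n");
	return 0;
}
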
diff --git a/arch/powerpc/lib/copyuser_64.S b/arch/powerpc/lib/copyuser_64.S
index 70693a5c12a1..693b14a778fa 100644
--- a/arch/powerpc/lib/copyuser_64.S
+++ b/arch/powerpc/lib/copyuser_64.S
@@ -62,18 +62,19 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
6272: std r8,8(r3) 6272: std r8,8(r3)
63 beq+ 3f 63 beq+ 3f
64 addi r3,r3,16 64 addi r3,r3,16
6523: ld r9,8(r4)
66.Ldo_tail: 65.Ldo_tail:
67 bf cr7*4+1,1f 66 bf cr7*4+1,1f
68 rotldi r9,r9,32 6723: lwz r9,8(r4)
68 addi r4,r4,4
6973: stw r9,0(r3) 6973: stw r9,0(r3)
70 addi r3,r3,4 70 addi r3,r3,4
711: bf cr7*4+2,2f 711: bf cr7*4+2,2f
72 rotldi r9,r9,16 7244: lhz r9,8(r4)
73 addi r4,r4,2
7374: sth r9,0(r3) 7474: sth r9,0(r3)
74 addi r3,r3,2 75 addi r3,r3,2
752: bf cr7*4+3,3f 762: bf cr7*4+3,3f
76 rotldi r9,r9,8 7745: lbz r9,8(r4)
7775: stb r9,0(r3) 7875: stb r9,0(r3)
783: li r3,0 793: li r3,0
79 blr 80 blr
@@ -141,11 +142,24 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
1416: cmpwi cr1,r5,8 1426: cmpwi cr1,r5,8
142 addi r3,r3,32 143 addi r3,r3,32
143 sld r9,r9,r10 144 sld r9,r9,r10
144 ble cr1,.Ldo_tail 145 ble cr1,7f
14534: ld r0,8(r4) 14634: ld r0,8(r4)
146 srd r7,r0,r11 147 srd r7,r0,r11
147 or r9,r7,r9 148 or r9,r7,r9
148 b .Ldo_tail 1497:
150 bf cr7*4+1,1f
151 rotldi r9,r9,32
15294: stw r9,0(r3)
153 addi r3,r3,4
1541: bf cr7*4+2,2f
155 rotldi r9,r9,16
15695: sth r9,0(r3)
157 addi r3,r3,2
1582: bf cr7*4+3,3f
159 rotldi r9,r9,8
16096: stb r9,0(r3)
1613: li r3,0
162 blr
149 163
150.Ldst_unaligned: 164.Ldst_unaligned:
151 PPC_MTOCRF 0x01,r6 /* put #bytes to 8B bdry into cr7 */ 165 PPC_MTOCRF 0x01,r6 /* put #bytes to 8B bdry into cr7 */
@@ -218,7 +232,6 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
218121: 232121:
219132: 233132:
220 addi r3,r3,8 234 addi r3,r3,8
221123:
222134: 235134:
223135: 236135:
224138: 237138:
@@ -226,6 +239,9 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
226140: 239140:
227141: 240141:
228142: 241142:
242123:
243144:
244145:
229 245
230/* 246/*
231 * here we have had a fault on a load and r3 points to the first 247 * here we have had a fault on a load and r3 points to the first
@@ -309,6 +325,9 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
309187: 325187:
310188: 326188:
311189: 327189:
328194:
329195:
330196:
3121: 3311:
313 ld r6,-24(r1) 332 ld r6,-24(r1)
314 ld r5,-8(r1) 333 ld r5,-8(r1)
@@ -329,7 +348,9 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
329 .llong 72b,172b 348 .llong 72b,172b
330 .llong 23b,123b 349 .llong 23b,123b
331 .llong 73b,173b 350 .llong 73b,173b
351 .llong 44b,144b
332 .llong 74b,174b 352 .llong 74b,174b
353 .llong 45b,145b
333 .llong 75b,175b 354 .llong 75b,175b
334 .llong 24b,124b 355 .llong 24b,124b
335 .llong 25b,125b 356 .llong 25b,125b
@@ -347,6 +368,9 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
347 .llong 79b,179b 368 .llong 79b,179b
348 .llong 80b,180b 369 .llong 80b,180b
349 .llong 34b,134b 370 .llong 34b,134b
371 .llong 94b,194b
372 .llong 95b,195b
373 .llong 96b,196b
350 .llong 35b,135b 374 .llong 35b,135b
351 .llong 81b,181b 375 .llong 81b,181b
352 .llong 36b,136b 376 .llong 36b,136b
diff --git a/arch/powerpc/lib/memcpy_64.S b/arch/powerpc/lib/memcpy_64.S
index fe2d34e5332d..e178922b2c21 100644
--- a/arch/powerpc/lib/memcpy_64.S
+++ b/arch/powerpc/lib/memcpy_64.S
@@ -53,18 +53,19 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
533: std r8,8(r3) 533: std r8,8(r3)
54 beq 3f 54 beq 3f
55 addi r3,r3,16 55 addi r3,r3,16
56 ld r9,8(r4)
57.Ldo_tail: 56.Ldo_tail:
58 bf cr7*4+1,1f 57 bf cr7*4+1,1f
59 rotldi r9,r9,32 58 lwz r9,8(r4)
59 addi r4,r4,4
60 stw r9,0(r3) 60 stw r9,0(r3)
61 addi r3,r3,4 61 addi r3,r3,4
621: bf cr7*4+2,2f 621: bf cr7*4+2,2f
63 rotldi r9,r9,16 63 lhz r9,8(r4)
64 addi r4,r4,2
64 sth r9,0(r3) 65 sth r9,0(r3)
65 addi r3,r3,2 66 addi r3,r3,2
662: bf cr7*4+3,3f 672: bf cr7*4+3,3f
67 rotldi r9,r9,8 68 lbz r9,8(r4)
68 stb r9,0(r3) 69 stb r9,0(r3)
693: ld r3,48(r1) /* return dest pointer */ 703: ld r3,48(r1) /* return dest pointer */
70 blr 71 blr
@@ -133,11 +134,24 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
133 cmpwi cr1,r5,8 134 cmpwi cr1,r5,8
134 addi r3,r3,32 135 addi r3,r3,32
135 sld r9,r9,r10 136 sld r9,r9,r10
136 ble cr1,.Ldo_tail 137 ble cr1,6f
137 ld r0,8(r4) 138 ld r0,8(r4)
138 srd r7,r0,r11 139 srd r7,r0,r11
139 or r9,r7,r9 140 or r9,r7,r9
140 b .Ldo_tail 1416:
142 bf cr7*4+1,1f
143 rotldi r9,r9,32
144 stw r9,0(r3)
145 addi r3,r3,4
1461: bf cr7*4+2,2f
147 rotldi r9,r9,16
148 sth r9,0(r3)
149 addi r3,r3,2
1502: bf cr7*4+3,3f
151 rotldi r9,r9,8
152 stb r9,0(r3)
1533: ld r3,48(r1) /* return dest pointer */
154 blr
141 155
142.Ldst_unaligned: 156.Ldst_unaligned:
143 PPC_MTOCRF 0x01,r6 # put #bytes to 8B bdry into cr7 157 PPC_MTOCRF 0x01,r6 # put #bytes to 8B bdry into cr7
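
Both the __copy_tofrom_user and memcpy tails above are reworked so the copy no longer pre-loads a full doubleword when fewer than eight bytes remain; the trailing bytes are now fetched with exactly the narrow loads they need (lwz/lhz/lbz selected by the remainder bits in cr7), which appears to be about never touching source bytes past the end of the buffer. A rough C model of the new tail, with the register and label bookkeeping elided:

#include <string.h>

/* copy the final (n & 7) bytes without reading past src + n */
static void copy_tail(unsigned char *dst, const unsigned char *src,
		      unsigned long n)
{
	if (n & 4) { memcpy(dst, src, 4); dst += 4; src += 4; }
	if (n & 2) { memcpy(dst, src, 2); dst += 2; src += 2; }
	if (n & 1) { *dst = *src; }
}
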
diff --git a/arch/powerpc/sysdev/ppc4xx_pci.c b/arch/powerpc/sysdev/ppc4xx_pci.c
index 77fae5f64f2e..5558d932b4d5 100644
--- a/arch/powerpc/sysdev/ppc4xx_pci.c
+++ b/arch/powerpc/sysdev/ppc4xx_pci.c
@@ -204,6 +204,23 @@ static int __init ppc4xx_setup_one_pci_PMM(struct pci_controller *hose,
204{ 204{
205 u32 ma, pcila, pciha; 205 u32 ma, pcila, pciha;
206 206
207 /* Hack warning ! The "old" PCI 2.x cell only let us configure the low
208 * 32-bit of incoming PLB addresses. The top 4 bits of the 36-bit
209 * address are actually hard wired to a value that appears to depend
210 * on the specific SoC. For example, it's 0 on 440EP and 1 on 440EPx.
211 *
212 * The trick here is we just crop those top bits and ignore them when
213 * programming the chip. That means the device-tree has to be right
214 * for the specific part used (we don't print a warning if it's wrong
215 * but on the other hand, you'll crash quickly enough), but at least
216 * this code should work whatever the hard coded value is
217 */
218 plb_addr &= 0xffffffffull;
219
220 /* Note: Due to the above hack, the test below doesn't actually test
221 * if your address is above 4G, but it tests that address and
222 * (address + size) are both contained in the same 4G
223 */
207 if ((plb_addr + size) > 0xffffffffull || !is_power_of_2(size) || 224 if ((plb_addr + size) > 0xffffffffull || !is_power_of_2(size) ||
208 size < 0x1000 || (plb_addr & (size - 1)) != 0) { 225 size < 0x1000 || (plb_addr & (size - 1)) != 0) {
209 printk(KERN_WARNING "%s: Resource out of range\n", 226 printk(KERN_WARNING "%s: Resource out of range\n",
diff --git a/arch/sh/boards/board-ap325rxa.c b/arch/sh/boards/board-ap325rxa.c
index 7c35787d29b4..72da416f6162 100644
--- a/arch/sh/boards/board-ap325rxa.c
+++ b/arch/sh/boards/board-ap325rxa.c
@@ -22,7 +22,6 @@
22#include <linux/gpio.h> 22#include <linux/gpio.h>
23#include <linux/spi/spi.h> 23#include <linux/spi/spi.h>
24#include <linux/spi/spi_gpio.h> 24#include <linux/spi/spi_gpio.h>
25#include <media/ov772x.h>
26#include <media/soc_camera_platform.h> 25#include <media/soc_camera_platform.h>
27#include <media/sh_mobile_ceu.h> 26#include <media/sh_mobile_ceu.h>
28#include <video/sh_mobile_lcdc.h> 27#include <video/sh_mobile_lcdc.h>
@@ -224,7 +223,6 @@ static void camera_power(int val)
224} 223}
225 224
226#ifdef CONFIG_I2C 225#ifdef CONFIG_I2C
227/* support for the old ncm03j camera */
228static unsigned char camera_ncm03j_magic[] = 226static unsigned char camera_ncm03j_magic[] =
229{ 227{
230 0x87, 0x00, 0x88, 0x08, 0x89, 0x01, 0x8A, 0xE8, 228 0x87, 0x00, 0x88, 0x08, 0x89, 0x01, 0x8A, 0xE8,
@@ -245,23 +243,6 @@ static unsigned char camera_ncm03j_magic[] =
245 0x63, 0xD4, 0x64, 0xEA, 0xD6, 0x0F, 243 0x63, 0xD4, 0x64, 0xEA, 0xD6, 0x0F,
246}; 244};
247 245
248static int camera_probe(void)
249{
250 struct i2c_adapter *a = i2c_get_adapter(0);
251 struct i2c_msg msg;
252 int ret;
253
254 camera_power(1);
255 msg.addr = 0x6e;
256 msg.buf = camera_ncm03j_magic;
257 msg.len = 2;
258 msg.flags = 0;
259 ret = i2c_transfer(a, &msg, 1);
260 camera_power(0);
261
262 return ret;
263}
264
265static int camera_set_capture(struct soc_camera_platform_info *info, 246static int camera_set_capture(struct soc_camera_platform_info *info,
266 int enable) 247 int enable)
267{ 248{
@@ -313,35 +294,8 @@ static struct platform_device camera_device = {
313 .platform_data = &camera_info, 294 .platform_data = &camera_info,
314 }, 295 },
315}; 296};
316
317static int __init camera_setup(void)
318{
319 if (camera_probe() > 0)
320 platform_device_register(&camera_device);
321
322 return 0;
323}
324late_initcall(camera_setup);
325
326#endif /* CONFIG_I2C */ 297#endif /* CONFIG_I2C */
327 298
328static int ov7725_power(struct device *dev, int mode)
329{
330 camera_power(0);
331 if (mode)
332 camera_power(1);
333
334 return 0;
335}
336
337static struct ov772x_camera_info ov7725_info = {
338 .buswidth = SOCAM_DATAWIDTH_8,
339 .flags = OV772X_FLAG_VFLIP | OV772X_FLAG_HFLIP,
340 .link = {
341 .power = ov7725_power,
342 },
343};
344
345static struct sh_mobile_ceu_info sh_mobile_ceu_info = { 299static struct sh_mobile_ceu_info sh_mobile_ceu_info = {
346 .flags = SOCAM_PCLK_SAMPLE_RISING | SOCAM_HSYNC_ACTIVE_HIGH | 300 .flags = SOCAM_PCLK_SAMPLE_RISING | SOCAM_HSYNC_ACTIVE_HIGH |
347 SOCAM_VSYNC_ACTIVE_HIGH | SOCAM_MASTER | SOCAM_DATAWIDTH_8, 301 SOCAM_VSYNC_ACTIVE_HIGH | SOCAM_MASTER | SOCAM_DATAWIDTH_8,
@@ -392,6 +346,9 @@ static struct platform_device *ap325rxa_devices[] __initdata = {
392 &ap325rxa_nor_flash_device, 346 &ap325rxa_nor_flash_device,
393 &lcdc_device, 347 &lcdc_device,
394 &ceu_device, 348 &ceu_device,
349#ifdef CONFIG_I2C
350 &camera_device,
351#endif
395 &nand_flash_device, 352 &nand_flash_device,
396 &sdcard_cn3_device, 353 &sdcard_cn3_device,
397}; 354};
@@ -400,10 +357,6 @@ static struct i2c_board_info __initdata ap325rxa_i2c_devices[] = {
400 { 357 {
401 I2C_BOARD_INFO("pcf8563", 0x51), 358 I2C_BOARD_INFO("pcf8563", 0x51),
402 }, 359 },
403 {
404 I2C_BOARD_INFO("ov772x", 0x21),
405 .platform_data = &ov7725_info,
406 },
407}; 360};
408 361
409static struct spi_board_info ap325rxa_spi_devices[] = { 362static struct spi_board_info ap325rxa_spi_devices[] = {
diff --git a/arch/sh/kernel/cpu/sh2a/clock-sh7201.c b/arch/sh/kernel/cpu/sh2a/clock-sh7201.c
index 020a96fe961a..4a5e59732334 100644
--- a/arch/sh/kernel/cpu/sh2a/clock-sh7201.c
+++ b/arch/sh/kernel/cpu/sh2a/clock-sh7201.c
@@ -18,8 +18,8 @@
18#include <asm/freq.h> 18#include <asm/freq.h>
19#include <asm/io.h> 19#include <asm/io.h>
20 20
21const static int pll1rate[]={1,2,3,4,6,8}; 21static const int pll1rate[]={1,2,3,4,6,8};
22const static int pfc_divisors[]={1,2,3,4,6,8,12}; 22static const int pfc_divisors[]={1,2,3,4,6,8,12};
23#define ifc_divisors pfc_divisors 23#define ifc_divisors pfc_divisors
24 24
25#if (CONFIG_SH_CLK_MD == 0) 25#if (CONFIG_SH_CLK_MD == 0)
diff --git a/arch/sparc/include/asm/compat.h b/arch/sparc/include/asm/compat.h
index f260b58f5ce9..0e706257918f 100644
--- a/arch/sparc/include/asm/compat.h
+++ b/arch/sparc/include/asm/compat.h
@@ -240,4 +240,9 @@ struct compat_shmid64_ds {
240 unsigned int __unused2; 240 unsigned int __unused2;
241}; 241};
242 242
243static inline int is_compat_task(void)
244{
245 return test_thread_flag(TIF_32BIT);
246}
247
243#endif /* _ASM_SPARC64_COMPAT_H */ 248#endif /* _ASM_SPARC64_COMPAT_H */
diff --git a/arch/sparc/include/asm/seccomp.h b/arch/sparc/include/asm/seccomp.h
index 7fcd9968192b..adca1bce41d4 100644
--- a/arch/sparc/include/asm/seccomp.h
+++ b/arch/sparc/include/asm/seccomp.h
@@ -1,11 +1,5 @@
1#ifndef _ASM_SECCOMP_H 1#ifndef _ASM_SECCOMP_H
2 2
3#include <linux/thread_info.h> /* already defines TIF_32BIT */
4
5#ifndef TIF_32BIT
6#error "unexpected TIF_32BIT on sparc64"
7#endif
8
9#include <linux/unistd.h> 3#include <linux/unistd.h>
10 4
11#define __NR_seccomp_read __NR_read 5#define __NR_seccomp_read __NR_read
diff --git a/arch/sparc/kernel/chmc.c b/arch/sparc/kernel/chmc.c
index 3b9f4d6e14a9..e1a9598e2a4d 100644
--- a/arch/sparc/kernel/chmc.c
+++ b/arch/sparc/kernel/chmc.c
@@ -306,6 +306,7 @@ static int jbusmc_print_dimm(int syndrome_code,
306 buf[1] = '?'; 306 buf[1] = '?';
307 buf[2] = '?'; 307 buf[2] = '?';
308 buf[3] = '\0'; 308 buf[3] = '\0';
309 return 0;
309 } 310 }
310 p = dp->controller; 311 p = dp->controller;
311 prop = &p->layout; 312 prop = &p->layout;
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 8015641478bd..f5cef3fbf9a5 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -40,6 +40,9 @@ config X86
40 select HAVE_GENERIC_DMA_COHERENT if X86_32 40 select HAVE_GENERIC_DMA_COHERENT if X86_32
41 select HAVE_EFFICIENT_UNALIGNED_ACCESS 41 select HAVE_EFFICIENT_UNALIGNED_ACCESS
42 select USER_STACKTRACE_SUPPORT 42 select USER_STACKTRACE_SUPPORT
43 select HAVE_KERNEL_GZIP
44 select HAVE_KERNEL_BZIP2
45 select HAVE_KERNEL_LZMA
43 46
44config ARCH_DEFCONFIG 47config ARCH_DEFCONFIG
45 string 48 string
@@ -1825,7 +1828,7 @@ config DMAR
1825 remapping devices. 1828 remapping devices.
1826 1829
1827config DMAR_DEFAULT_ON 1830config DMAR_DEFAULT_ON
1828 def_bool n 1831 def_bool y
1829 prompt "Enable DMA Remapping Devices by default" 1832 prompt "Enable DMA Remapping Devices by default"
1830 depends on DMAR 1833 depends on DMAR
1831 help 1834 help
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
index 1771c804e02f..3ca4c194b8e5 100644
--- a/arch/x86/boot/compressed/Makefile
+++ b/arch/x86/boot/compressed/Makefile
@@ -4,7 +4,7 @@
4# create a compressed vmlinux image from the original vmlinux 4# create a compressed vmlinux image from the original vmlinux
5# 5#
6 6
7targets := vmlinux vmlinux.bin vmlinux.bin.gz head_$(BITS).o misc.o piggy.o 7targets := vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma head_$(BITS).o misc.o piggy.o
8 8
9KBUILD_CFLAGS := -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2 9KBUILD_CFLAGS := -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2
10KBUILD_CFLAGS += -fno-strict-aliasing -fPIC 10KBUILD_CFLAGS += -fno-strict-aliasing -fPIC
@@ -47,18 +47,35 @@ ifeq ($(CONFIG_X86_32),y)
47ifdef CONFIG_RELOCATABLE 47ifdef CONFIG_RELOCATABLE
48$(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin.all FORCE 48$(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin.all FORCE
49 $(call if_changed,gzip) 49 $(call if_changed,gzip)
50$(obj)/vmlinux.bin.bz2: $(obj)/vmlinux.bin.all FORCE
51 $(call if_changed,bzip2)
52$(obj)/vmlinux.bin.lzma: $(obj)/vmlinux.bin.all FORCE
53 $(call if_changed,lzma)
50else 54else
51$(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin FORCE 55$(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin FORCE
52 $(call if_changed,gzip) 56 $(call if_changed,gzip)
57$(obj)/vmlinux.bin.bz2: $(obj)/vmlinux.bin FORCE
58 $(call if_changed,bzip2)
59$(obj)/vmlinux.bin.lzma: $(obj)/vmlinux.bin FORCE
60 $(call if_changed,lzma)
53endif 61endif
54LDFLAGS_piggy.o := -r --format binary --oformat elf32-i386 -T 62LDFLAGS_piggy.o := -r --format binary --oformat elf32-i386 -T
55 63
56else 64else
65
57$(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin FORCE 66$(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin FORCE
58 $(call if_changed,gzip) 67 $(call if_changed,gzip)
68$(obj)/vmlinux.bin.bz2: $(obj)/vmlinux.bin FORCE
69 $(call if_changed,bzip2)
70$(obj)/vmlinux.bin.lzma: $(obj)/vmlinux.bin FORCE
71 $(call if_changed,lzma)
59 72
60LDFLAGS_piggy.o := -r --format binary --oformat elf64-x86-64 -T 73LDFLAGS_piggy.o := -r --format binary --oformat elf64-x86-64 -T
61endif 74endif
62 75
63$(obj)/piggy.o: $(obj)/vmlinux.scr $(obj)/vmlinux.bin.gz FORCE 76suffix_$(CONFIG_KERNEL_GZIP) = gz
77suffix_$(CONFIG_KERNEL_BZIP2) = bz2
78suffix_$(CONFIG_KERNEL_LZMA) = lzma
79
80$(obj)/piggy.o: $(obj)/vmlinux.scr $(obj)/vmlinux.bin.$(suffix_y) FORCE
64 $(call if_changed,ld) 81 $(call if_changed,ld)
diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
index da062216948a..e45be73684ff 100644
--- a/arch/x86/boot/compressed/misc.c
+++ b/arch/x86/boot/compressed/misc.c
@@ -116,71 +116,13 @@
116/* 116/*
117 * gzip declarations 117 * gzip declarations
118 */ 118 */
119
120#define OF(args) args
121#define STATIC static 119#define STATIC static
122 120
123#undef memset 121#undef memset
124#undef memcpy 122#undef memcpy
125#define memzero(s, n) memset((s), 0, (n)) 123#define memzero(s, n) memset((s), 0, (n))
126 124
127typedef unsigned char uch;
128typedef unsigned short ush;
129typedef unsigned long ulg;
130
131/*
132 * Window size must be at least 32k, and a power of two.
133 * We don't actually have a window just a huge output buffer,
134 * so we report a 2G window size, as that should always be
135 * larger than our output buffer:
136 */
137#define WSIZE 0x80000000
138
139/* Input buffer: */
140static unsigned char *inbuf;
141
142/* Sliding window buffer (and final output buffer): */
143static unsigned char *window;
144
145/* Valid bytes in inbuf: */
146static unsigned insize;
147
148/* Index of next byte to be processed in inbuf: */
149static unsigned inptr;
150
151/* Bytes in output buffer: */
152static unsigned outcnt;
153
154/* gzip flag byte */
155#define ASCII_FLAG 0x01 /* bit 0 set: file probably ASCII text */
156#define CONTINUATION 0x02 /* bit 1 set: continuation of multi-part gz file */
157#define EXTRA_FIELD 0x04 /* bit 2 set: extra field present */
158#define ORIG_NAME 0x08 /* bit 3 set: original file name present */
159#define COMMENT 0x10 /* bit 4 set: file comment present */
160#define ENCRYPTED 0x20 /* bit 5 set: file is encrypted */
161#define RESERVED 0xC0 /* bit 6, 7: reserved */
162
163#define get_byte() (inptr < insize ? inbuf[inptr++] : fill_inbuf())
164
165/* Diagnostic functions */
166#ifdef DEBUG
167# define Assert(cond, msg) do { if (!(cond)) error(msg); } while (0)
168# define Trace(x) do { fprintf x; } while (0)
169# define Tracev(x) do { if (verbose) fprintf x ; } while (0)
170# define Tracevv(x) do { if (verbose > 1) fprintf x ; } while (0)
171# define Tracec(c, x) do { if (verbose && (c)) fprintf x ; } while (0)
172# define Tracecv(c, x) do { if (verbose > 1 && (c)) fprintf x ; } while (0)
173#else
174# define Assert(cond, msg)
175# define Trace(x)
176# define Tracev(x)
177# define Tracevv(x)
178# define Tracec(c, x)
179# define Tracecv(c, x)
180#endif
181 125
182static int fill_inbuf(void);
183static void flush_window(void);
184static void error(char *m); 126static void error(char *m);
185 127
186/* 128/*
@@ -189,13 +131,8 @@ static void error(char *m);
189static struct boot_params *real_mode; /* Pointer to real-mode data */ 131static struct boot_params *real_mode; /* Pointer to real-mode data */
190static int quiet; 132static int quiet;
191 133
192extern unsigned char input_data[];
193extern int input_len;
194
195static long bytes_out;
196
197static void *memset(void *s, int c, unsigned n); 134static void *memset(void *s, int c, unsigned n);
198static void *memcpy(void *dest, const void *src, unsigned n); 135void *memcpy(void *dest, const void *src, unsigned n);
199 136
200static void __putstr(int, const char *); 137static void __putstr(int, const char *);
201#define putstr(__x) __putstr(0, __x) 138#define putstr(__x) __putstr(0, __x)
@@ -213,7 +150,17 @@ static char *vidmem;
213static int vidport; 150static int vidport;
214static int lines, cols; 151static int lines, cols;
215 152
216#include "../../../../lib/inflate.c" 153#ifdef CONFIG_KERNEL_GZIP
154#include "../../../../lib/decompress_inflate.c"
155#endif
156
157#ifdef CONFIG_KERNEL_BZIP2
158#include "../../../../lib/decompress_bunzip2.c"
159#endif
160
161#ifdef CONFIG_KERNEL_LZMA
162#include "../../../../lib/decompress_unlzma.c"
163#endif
217 164
218static void scroll(void) 165static void scroll(void)
219{ 166{
@@ -282,7 +229,7 @@ static void *memset(void *s, int c, unsigned n)
282 return s; 229 return s;
283} 230}
284 231
285static void *memcpy(void *dest, const void *src, unsigned n) 232void *memcpy(void *dest, const void *src, unsigned n)
286{ 233{
287 int i; 234 int i;
288 const char *s = src; 235 const char *s = src;
@@ -293,38 +240,6 @@ static void *memcpy(void *dest, const void *src, unsigned n)
293 return dest; 240 return dest;
294} 241}
295 242
296/* ===========================================================================
297 * Fill the input buffer. This is called only when the buffer is empty
298 * and at least one byte is really needed.
299 */
300static int fill_inbuf(void)
301{
302 error("ran out of input data");
303 return 0;
304}
305
306/* ===========================================================================
307 * Write the output window window[0..outcnt-1] and update crc and bytes_out.
308 * (Used for the decompressed data only.)
309 */
310static void flush_window(void)
311{
312 /* With my window equal to my output buffer
313 * I only need to compute the crc here.
314 */
315 unsigned long c = crc; /* temporary variable */
316 unsigned n;
317 unsigned char *in, ch;
318
319 in = window;
320 for (n = 0; n < outcnt; n++) {
321 ch = *in++;
322 c = crc_32_tab[((int)c ^ ch) & 0xff] ^ (c >> 8);
323 }
324 crc = c;
325 bytes_out += (unsigned long)outcnt;
326 outcnt = 0;
327}
328 243
329static void error(char *x) 244static void error(char *x)
330{ 245{
@@ -407,12 +322,8 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap,
407 lines = real_mode->screen_info.orig_video_lines; 322 lines = real_mode->screen_info.orig_video_lines;
408 cols = real_mode->screen_info.orig_video_cols; 323 cols = real_mode->screen_info.orig_video_cols;
409 324
410 window = output; /* Output buffer (Normally at 1M) */
411 free_mem_ptr = heap; /* Heap */ 325 free_mem_ptr = heap; /* Heap */
412 free_mem_end_ptr = heap + BOOT_HEAP_SIZE; 326 free_mem_end_ptr = heap + BOOT_HEAP_SIZE;
413 inbuf = input_data; /* Input buffer */
414 insize = input_len;
415 inptr = 0;
416 327
417#ifdef CONFIG_X86_64 328#ifdef CONFIG_X86_64
418 if ((unsigned long)output & (__KERNEL_ALIGN - 1)) 329 if ((unsigned long)output & (__KERNEL_ALIGN - 1))
@@ -430,10 +341,9 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap,
430#endif 341#endif
431#endif 342#endif
432 343
433 makecrc();
434 if (!quiet) 344 if (!quiet)
435 putstr("\nDecompressing Linux... "); 345 putstr("\nDecompressing Linux... ");
436 gunzip(); 346 decompress(input_data, input_len, NULL, NULL, output, NULL, error);
437 parse_elf(output); 347 parse_elf(output);
438 if (!quiet) 348 if (!quiet)
439 putstr("done.\nBooting the kernel.\n"); 349 putstr("done.\nBooting the kernel.\n");
diff --git a/arch/x86/configs/i386_defconfig b/arch/x86/configs/i386_defconfig
index 5c023f6f652c..235b81d0f6f2 100644
--- a/arch/x86/configs/i386_defconfig
+++ b/arch/x86/configs/i386_defconfig
@@ -1,7 +1,7 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.29-rc4 3# Linux kernel version: 2.6.29-rc4
4# Thu Feb 12 12:57:57 2009 4# Tue Feb 24 15:50:58 2009
5# 5#
6# CONFIG_64BIT is not set 6# CONFIG_64BIT is not set
7CONFIG_X86_32=y 7CONFIG_X86_32=y
@@ -266,7 +266,9 @@ CONFIG_PREEMPT_VOLUNTARY=y
266CONFIG_X86_LOCAL_APIC=y 266CONFIG_X86_LOCAL_APIC=y
267CONFIG_X86_IO_APIC=y 267CONFIG_X86_IO_APIC=y
268CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS=y 268CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS=y
269# CONFIG_X86_MCE is not set 269CONFIG_X86_MCE=y
270CONFIG_X86_MCE_NONFATAL=y
271CONFIG_X86_MCE_P4THERMAL=y
270CONFIG_VM86=y 272CONFIG_VM86=y
271# CONFIG_TOSHIBA is not set 273# CONFIG_TOSHIBA is not set
272# CONFIG_I8K is not set 274# CONFIG_I8K is not set
diff --git a/arch/x86/configs/x86_64_defconfig b/arch/x86/configs/x86_64_defconfig
index 4157cc4a2bde..9fe5d212ab4c 100644
--- a/arch/x86/configs/x86_64_defconfig
+++ b/arch/x86/configs/x86_64_defconfig
@@ -1,7 +1,7 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.29-rc4 3# Linux kernel version: 2.6.29-rc4
4# Thu Feb 12 12:57:29 2009 4# Tue Feb 24 15:44:16 2009
5# 5#
6CONFIG_64BIT=y 6CONFIG_64BIT=y
7# CONFIG_X86_32 is not set 7# CONFIG_X86_32 is not set
@@ -266,7 +266,9 @@ CONFIG_PREEMPT_VOLUNTARY=y
266CONFIG_X86_LOCAL_APIC=y 266CONFIG_X86_LOCAL_APIC=y
267CONFIG_X86_IO_APIC=y 267CONFIG_X86_IO_APIC=y
268CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS=y 268CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS=y
269# CONFIG_X86_MCE is not set 269CONFIG_X86_MCE=y
270CONFIG_X86_MCE_INTEL=y
271CONFIG_X86_MCE_AMD=y
270# CONFIG_I8K is not set 272# CONFIG_I8K is not set
271CONFIG_MICROCODE=y 273CONFIG_MICROCODE=y
272CONFIG_MICROCODE_INTEL=y 274CONFIG_MICROCODE_INTEL=y
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index a6208dc74633..4ef949c1972e 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -75,7 +75,14 @@ static inline void default_inquire_remote_apic(int apicid)
75#define setup_secondary_clock setup_secondary_APIC_clock 75#define setup_secondary_clock setup_secondary_APIC_clock
76#endif 76#endif
77 77
78#ifdef CONFIG_X86_VSMP
78extern int is_vsmp_box(void); 79extern int is_vsmp_box(void);
80#else
81static inline int is_vsmp_box(void)
82{
83 return 0;
84}
85#endif
79extern void xapic_wait_icr_idle(void); 86extern void xapic_wait_icr_idle(void);
80extern u32 safe_xapic_wait_icr_idle(void); 87extern u32 safe_xapic_wait_icr_idle(void);
81extern void xapic_icr_write(u32, u32); 88extern void xapic_icr_write(u32, u32);
@@ -306,7 +313,7 @@ struct apic {
306 void (*send_IPI_self)(int vector); 313 void (*send_IPI_self)(int vector);
307 314
308 /* wakeup_secondary_cpu */ 315 /* wakeup_secondary_cpu */
309 int (*wakeup_cpu)(int apicid, unsigned long start_eip); 316 int (*wakeup_secondary_cpu)(int apicid, unsigned long start_eip);
310 317
311 int trampoline_phys_low; 318 int trampoline_phys_low;
312 int trampoline_phys_high; 319 int trampoline_phys_high;
@@ -324,8 +331,21 @@ struct apic {
324 u32 (*safe_wait_icr_idle)(void); 331 u32 (*safe_wait_icr_idle)(void);
325}; 332};
326 333
334/*
335 * Pointer to the local APIC driver in use on this system (there's
336 * always just one such driver in use - the kernel decides via an
337 * early probing process which one it picks - and then sticks to it):
338 */
327extern struct apic *apic; 339extern struct apic *apic;
328 340
341/*
342 * APIC functionality to boot other CPUs - only used on SMP:
343 */
344#ifdef CONFIG_SMP
345extern atomic_t init_deasserted;
346extern int wakeup_secondary_cpu_via_nmi(int apicid, unsigned long start_eip);
347#endif
348
329static inline u32 apic_read(u32 reg) 349static inline u32 apic_read(u32 reg)
330{ 350{
331 return apic->read(reg); 351 return apic->read(reg);
@@ -384,9 +404,7 @@ static inline unsigned default_get_apic_id(unsigned long x)
384#define DEFAULT_TRAMPOLINE_PHYS_LOW 0x467 404#define DEFAULT_TRAMPOLINE_PHYS_LOW 0x467
385#define DEFAULT_TRAMPOLINE_PHYS_HIGH 0x469 405#define DEFAULT_TRAMPOLINE_PHYS_HIGH 0x469
386 406
387#ifdef CONFIG_X86_32 407#ifdef CONFIG_X86_64
388extern void es7000_update_apic_to_cluster(void);
389#else
390extern struct apic apic_flat; 408extern struct apic apic_flat;
391extern struct apic apic_physflat; 409extern struct apic apic_physflat;
392extern struct apic apic_x2apic_cluster; 410extern struct apic apic_x2apic_cluster;
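
A sketch of how the boot path can use the renamed wakeup_secondary_cpu hook through the single apic driver pointer declared above. The wakeup_secondary_cpu_via_init() fallback named here is assumed for illustration; it is not part of this diff:

extern int wakeup_secondary_cpu_via_init(int apicid, unsigned long start_eip);

static int sketch_wakeup_cpu(int apicid, unsigned long start_eip)
{
	if (apic->wakeup_secondary_cpu)			/* per-driver override */
		return apic->wakeup_secondary_cpu(apicid, start_eip);
	return wakeup_secondary_cpu_via_init(apicid, start_eip);
}
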
diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
index dd61616cb73d..6526cf08b0e4 100644
--- a/arch/x86/include/asm/boot.h
+++ b/arch/x86/include/asm/boot.h
@@ -10,17 +10,31 @@
10#define EXTENDED_VGA 0xfffe /* 80x50 mode */ 10#define EXTENDED_VGA 0xfffe /* 80x50 mode */
11#define ASK_VGA 0xfffd /* ask for it at bootup */ 11#define ASK_VGA 0xfffd /* ask for it at bootup */
12 12
13#ifdef __KERNEL__
14
13/* Physical address where kernel should be loaded. */ 15/* Physical address where kernel should be loaded. */
14#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \ 16#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
15 + (CONFIG_PHYSICAL_ALIGN - 1)) \ 17 + (CONFIG_PHYSICAL_ALIGN - 1)) \
16 & ~(CONFIG_PHYSICAL_ALIGN - 1)) 18 & ~(CONFIG_PHYSICAL_ALIGN - 1))
17 19
20#ifdef CONFIG_KERNEL_BZIP2
21#define BOOT_HEAP_SIZE 0x400000
22#else /* !CONFIG_KERNEL_BZIP2 */
23
18#ifdef CONFIG_X86_64 24#ifdef CONFIG_X86_64
19#define BOOT_HEAP_SIZE 0x7000 25#define BOOT_HEAP_SIZE 0x7000
20#define BOOT_STACK_SIZE 0x4000
21#else 26#else
22#define BOOT_HEAP_SIZE 0x4000 27#define BOOT_HEAP_SIZE 0x4000
28#endif
29
30#endif /* !CONFIG_KERNEL_BZIP2 */
31
32#ifdef CONFIG_X86_64
33#define BOOT_STACK_SIZE 0x4000
34#else
23#define BOOT_STACK_SIZE 0x1000 35#define BOOT_STACK_SIZE 0x1000
24#endif 36#endif
25 37
38#endif /* __KERNEL__ */
39
26#endif /* _ASM_X86_BOOT_H */ 40#endif /* _ASM_X86_BOOT_H */
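
BOOT_HEAP_SIZE feeds the free_mem_ptr/free_mem_end_ptr window set up in misc.c above; the decompressors carve allocations out of it with a trivial bump allocator, which is why bzip2, with its large block-sorting state, gets 4 MB where gzip needs only a few tens of kilobytes. A sketch of the allocator idea, not the kernel's exact code; boot_malloc is an invented name:

static unsigned long free_mem_ptr;	/* = heap                  */
static unsigned long free_mem_end_ptr;	/* = heap + BOOT_HEAP_SIZE */

static void *boot_malloc(unsigned long size)
{
	void *p;

	free_mem_ptr = (free_mem_ptr + 3) & ~3UL;	/* word-align */
	p = (void *)free_mem_ptr;
	free_mem_ptr += size;
	if (free_mem_ptr >= free_mem_end_ptr)
		return (void *)0;			/* heap exhausted */
	return p;
}
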
diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h
index 23696d44a0af..dca8f03da5b2 100644
--- a/arch/x86/include/asm/fixmap.h
+++ b/arch/x86/include/asm/fixmap.h
@@ -1,11 +1,155 @@
1/*
2 * fixmap.h: compile-time virtual memory allocation
3 *
4 * This file is subject to the terms and conditions of the GNU General Public
5 * License. See the file "COPYING" in the main directory of this archive
6 * for more details.
7 *
8 * Copyright (C) 1998 Ingo Molnar
9 *
10 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
11 * x86_32 and x86_64 integration by Gustavo F. Padovan, February 2009
12 */
13
1#ifndef _ASM_X86_FIXMAP_H 14#ifndef _ASM_X86_FIXMAP_H
2#define _ASM_X86_FIXMAP_H 15#define _ASM_X86_FIXMAP_H
3 16
17#ifndef __ASSEMBLY__
18#include <linux/kernel.h>
19#include <asm/acpi.h>
20#include <asm/apicdef.h>
21#include <asm/page.h>
22#ifdef CONFIG_X86_32
23#include <linux/threads.h>
24#include <asm/kmap_types.h>
25#else
26#include <asm/vsyscall.h>
27#ifdef CONFIG_EFI
28#include <asm/efi.h>
29#endif
30#endif
31
32/*
33 * We can't declare FIXADDR_TOP as variable for x86_64 because vsyscall
34 * uses fixmaps that rely on FIXADDR_TOP for proper address calculation.
35 * Because of this, FIXADDR_TOP x86 integration was left as later work.
36 */
37#ifdef CONFIG_X86_32
38/* used by vmalloc.c, vsyscall.lds.S.
39 *
40 * Leave one empty page between vmalloc'ed areas and
41 * the start of the fixmap.
42 */
43extern unsigned long __FIXADDR_TOP;
44#define FIXADDR_TOP ((unsigned long)__FIXADDR_TOP)
45
46#define FIXADDR_USER_START __fix_to_virt(FIX_VDSO)
47#define FIXADDR_USER_END __fix_to_virt(FIX_VDSO - 1)
48#else
49#define FIXADDR_TOP (VSYSCALL_END-PAGE_SIZE)
50
51/* Only covers 32bit vsyscalls currently. Need another set for 64bit. */
52#define FIXADDR_USER_START ((unsigned long)VSYSCALL32_VSYSCALL)
53#define FIXADDR_USER_END (FIXADDR_USER_START + PAGE_SIZE)
54#endif
55
56
57/*
58 * Here we define all the compile-time 'special' virtual
59 * addresses. The point is to have a constant address at
60 * compile time, but to set the physical address only
61 * in the boot process.
62 * for x86_32: We allocate these special addresses
63 * from the end of virtual memory (0xfffff000) backwards.
64 * Also this lets us do fail-safe vmalloc(), we
65 * can guarantee that these special addresses and
66 * vmalloc()-ed addresses never overlap.
67 *
68 * These 'compile-time allocated' memory buffers are
69 * fixed-size 4k pages (or larger if used with an increment
70 * higher than 1). Use set_fixmap(idx,phys) to associate
71 * physical memory with fixmap indices.
72 *
73 * TLB entries of such buffers will not be flushed across
74 * task switches.
75 */
76enum fixed_addresses {
4#ifdef CONFIG_X86_32 77#ifdef CONFIG_X86_32
5# include "fixmap_32.h" 78 FIX_HOLE,
79 FIX_VDSO,
6#else 80#else
7# include "fixmap_64.h" 81 VSYSCALL_LAST_PAGE,
82 VSYSCALL_FIRST_PAGE = VSYSCALL_LAST_PAGE
83 + ((VSYSCALL_END-VSYSCALL_START) >> PAGE_SHIFT) - 1,
84 VSYSCALL_HPET,
8#endif 85#endif
86 FIX_DBGP_BASE,
87 FIX_EARLYCON_MEM_BASE,
88#ifdef CONFIG_X86_LOCAL_APIC
89 FIX_APIC_BASE, /* local (CPU) APIC) -- required for SMP or not */
90#endif
91#ifdef CONFIG_X86_IO_APIC
92 FIX_IO_APIC_BASE_0,
93 FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS - 1,
94#endif
95#ifdef CONFIG_X86_64
96#ifdef CONFIG_EFI
97 FIX_EFI_IO_MAP_LAST_PAGE,
98 FIX_EFI_IO_MAP_FIRST_PAGE = FIX_EFI_IO_MAP_LAST_PAGE
99 + MAX_EFI_IO_PAGES - 1,
100#endif
101#endif
102#ifdef CONFIG_X86_VISWS_APIC
103 FIX_CO_CPU, /* Cobalt timer */
104 FIX_CO_APIC, /* Cobalt APIC Redirection Table */
105 FIX_LI_PCIA, /* Lithium PCI Bridge A */
106 FIX_LI_PCIB, /* Lithium PCI Bridge B */
107#endif
108#ifdef CONFIG_X86_F00F_BUG
109 FIX_F00F_IDT, /* Virtual mapping for IDT */
110#endif
111#ifdef CONFIG_X86_CYCLONE_TIMER
112 FIX_CYCLONE_TIMER, /*cyclone timer register*/
113#endif
114#ifdef CONFIG_X86_32
115 FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */
116 FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
117#ifdef CONFIG_PCI_MMCONFIG
118 FIX_PCIE_MCFG,
119#endif
120#endif
121#ifdef CONFIG_PARAVIRT
122 FIX_PARAVIRT_BOOTMAP,
123#endif
124 __end_of_permanent_fixed_addresses,
125#ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT
126 FIX_OHCI1394_BASE,
127#endif
128 /*
129 * 256 temporary boot-time mappings, used by early_ioremap(),
130 * before ioremap() is functional.
131 *
132 * We round it up to the next 256 pages boundary so that we
133 * can have a single pgd entry and a single pte table:
134 */
135#define NR_FIX_BTMAPS 64
136#define FIX_BTMAPS_SLOTS 4
137 FIX_BTMAP_END = __end_of_permanent_fixed_addresses + 256 -
138 (__end_of_permanent_fixed_addresses & 255),
139 FIX_BTMAP_BEGIN = FIX_BTMAP_END + NR_FIX_BTMAPS*FIX_BTMAPS_SLOTS - 1,
140#ifdef CONFIG_X86_32
141 FIX_WP_TEST,
142#endif
143 __end_of_fixed_addresses
144};
145
146
147extern void reserve_top_address(unsigned long reserve);
148
149#define FIXADDR_SIZE (__end_of_permanent_fixed_addresses << PAGE_SHIFT)
150#define FIXADDR_BOOT_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
151#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)
152#define FIXADDR_BOOT_START (FIXADDR_TOP - FIXADDR_BOOT_SIZE)
9 153
10extern int fixmaps_set; 154extern int fixmaps_set;
11 155
@@ -69,4 +213,5 @@ static inline unsigned long virt_to_fix(const unsigned long vaddr)
69 BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START); 213 BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START);
70 return __virt_to_fix(vaddr); 214 return __virt_to_fix(vaddr);
71} 215}
216#endif /* !__ASSEMBLY__ */
72#endif /* _ASM_X86_FIXMAP_H */ 217#endif /* _ASM_X86_FIXMAP_H */
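
The merged enum keeps the long-standing fixmap rule: index 0 maps just below FIXADDR_TOP and each further index sits one page lower, so every slot has a compile-time constant virtual address (the fix_to_virt()/virt_to_fix() helpers at the end of the header encode this). A small stand-alone illustration using the 0xfffff000 top mentioned in the comment above; the macro below is a local re-definition for the example, not the header's:

#include <stdio.h>

#define PAGE_SHIFT	12
#define FIXADDR_TOP	0xfffff000UL	/* illustrative x86_32 value */
#define fix_to_virt(idx) (FIXADDR_TOP - ((unsigned long)(idx) << PAGE_SHIFT))

int main(void)
{
	printf("slot 0 -> %#lx\n", fix_to_virt(0));	/* 0xfffff000 */
	printf("slot 3 -> %#lx\n", fix_to_virt(3));	/* 0xffffc000 */
	return 0;
}
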
diff --git a/arch/x86/include/asm/fixmap_32.h b/arch/x86/include/asm/fixmap_32.h
deleted file mode 100644
index 047d9bab2b31..000000000000
--- a/arch/x86/include/asm/fixmap_32.h
+++ /dev/null
@@ -1,115 +0,0 @@
1/*
2 * fixmap.h: compile-time virtual memory allocation
3 *
4 * This file is subject to the terms and conditions of the GNU General Public
5 * License. See the file "COPYING" in the main directory of this archive
6 * for more details.
7 *
8 * Copyright (C) 1998 Ingo Molnar
9 *
10 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
11 */
12
13#ifndef _ASM_X86_FIXMAP_32_H
14#define _ASM_X86_FIXMAP_32_H
15
16
17/* used by vmalloc.c, vsyscall.lds.S.
18 *
19 * Leave one empty page between vmalloc'ed areas and
20 * the start of the fixmap.
21 */
22extern unsigned long __FIXADDR_TOP;
23#define FIXADDR_USER_START __fix_to_virt(FIX_VDSO)
24#define FIXADDR_USER_END __fix_to_virt(FIX_VDSO - 1)
25
26#ifndef __ASSEMBLY__
27#include <linux/kernel.h>
28#include <asm/acpi.h>
29#include <asm/apicdef.h>
30#include <asm/page.h>
31#include <linux/threads.h>
32#include <asm/kmap_types.h>
33
34/*
35 * Here we define all the compile-time 'special' virtual
36 * addresses. The point is to have a constant address at
37 * compile time, but to set the physical address only
38 * in the boot process. We allocate these special addresses
39 * from the end of virtual memory (0xfffff000) backwards.
40 * Also this lets us do fail-safe vmalloc(), we
41 * can guarantee that these special addresses and
42 * vmalloc()-ed addresses never overlap.
43 *
44 * these 'compile-time allocated' memory buffers are
45 * fixed-size 4k pages. (or larger if used with an increment
46 * highger than 1) use fixmap_set(idx,phys) to associate
47 * physical memory with fixmap indices.
48 *
49 * TLB entries of such buffers will not be flushed across
50 * task switches.
51 */
52enum fixed_addresses {
53 FIX_HOLE,
54 FIX_VDSO,
55 FIX_DBGP_BASE,
56 FIX_EARLYCON_MEM_BASE,
57#ifdef CONFIG_X86_LOCAL_APIC
58 FIX_APIC_BASE, /* local (CPU) APIC) -- required for SMP or not */
59#endif
60#ifdef CONFIG_X86_IO_APIC
61 FIX_IO_APIC_BASE_0,
62 FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS-1,
63#endif
64#ifdef CONFIG_X86_VISWS_APIC
65 FIX_CO_CPU, /* Cobalt timer */
66 FIX_CO_APIC, /* Cobalt APIC Redirection Table */
67 FIX_LI_PCIA, /* Lithium PCI Bridge A */
68 FIX_LI_PCIB, /* Lithium PCI Bridge B */
69#endif
70#ifdef CONFIG_X86_F00F_BUG
71 FIX_F00F_IDT, /* Virtual mapping for IDT */
72#endif
73#ifdef CONFIG_X86_CYCLONE_TIMER
74 FIX_CYCLONE_TIMER, /*cyclone timer register*/
75#endif
76 FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */
77 FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
78#ifdef CONFIG_PCI_MMCONFIG
79 FIX_PCIE_MCFG,
80#endif
81#ifdef CONFIG_PARAVIRT
82 FIX_PARAVIRT_BOOTMAP,
83#endif
84 __end_of_permanent_fixed_addresses,
85 /*
86 * 256 temporary boot-time mappings, used by early_ioremap(),
87 * before ioremap() is functional.
88 *
89 * We round it up to the next 256 pages boundary so that we
90 * can have a single pgd entry and a single pte table:
91 */
92#define NR_FIX_BTMAPS 64
93#define FIX_BTMAPS_SLOTS 4
94 FIX_BTMAP_END = __end_of_permanent_fixed_addresses + 256 -
95 (__end_of_permanent_fixed_addresses & 255),
96 FIX_BTMAP_BEGIN = FIX_BTMAP_END + NR_FIX_BTMAPS*FIX_BTMAPS_SLOTS - 1,
97 FIX_WP_TEST,
98#ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT
99 FIX_OHCI1394_BASE,
100#endif
101 __end_of_fixed_addresses
102};
103
104extern void reserve_top_address(unsigned long reserve);
105
106
107#define FIXADDR_TOP ((unsigned long)__FIXADDR_TOP)
108
109#define __FIXADDR_SIZE (__end_of_permanent_fixed_addresses << PAGE_SHIFT)
110#define __FIXADDR_BOOT_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
111#define FIXADDR_START (FIXADDR_TOP - __FIXADDR_SIZE)
112#define FIXADDR_BOOT_START (FIXADDR_TOP - __FIXADDR_BOOT_SIZE)
113
114#endif /* !__ASSEMBLY__ */
115#endif /* _ASM_X86_FIXMAP_32_H */
diff --git a/arch/x86/include/asm/fixmap_64.h b/arch/x86/include/asm/fixmap_64.h
deleted file mode 100644
index 298d9ba3faeb..000000000000
--- a/arch/x86/include/asm/fixmap_64.h
+++ /dev/null
@@ -1,79 +0,0 @@
1/*
2 * fixmap.h: compile-time virtual memory allocation
3 *
4 * This file is subject to the terms and conditions of the GNU General Public
5 * License. See the file "COPYING" in the main directory of this archive
6 * for more details.
7 *
8 * Copyright (C) 1998 Ingo Molnar
9 */
10
11#ifndef _ASM_X86_FIXMAP_64_H
12#define _ASM_X86_FIXMAP_64_H
13
14#include <linux/kernel.h>
15#include <asm/acpi.h>
16#include <asm/apicdef.h>
17#include <asm/page.h>
18#include <asm/vsyscall.h>
19#include <asm/efi.h>
20
21/*
22 * Here we define all the compile-time 'special' virtual
23 * addresses. The point is to have a constant address at
24 * compile time, but to set the physical address only
25 * in the boot process.
26 *
27 * These 'compile-time allocated' memory buffers are
28 * fixed-size 4k pages (or larger if used with an increment
29 * higher than 1). Use set_fixmap(idx,phys) to associate
30 * physical memory with fixmap indices.
31 *
32 * TLB entries of such buffers will not be flushed across
33 * task switches.
34 */
35
36enum fixed_addresses {
37 VSYSCALL_LAST_PAGE,
38 VSYSCALL_FIRST_PAGE = VSYSCALL_LAST_PAGE
39 + ((VSYSCALL_END-VSYSCALL_START) >> PAGE_SHIFT) - 1,
40 VSYSCALL_HPET,
41 FIX_DBGP_BASE,
42 FIX_EARLYCON_MEM_BASE,
43 FIX_APIC_BASE, /* local (CPU) APIC) -- required for SMP or not */
44 FIX_IO_APIC_BASE_0,
45 FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS - 1,
46 FIX_EFI_IO_MAP_LAST_PAGE,
47 FIX_EFI_IO_MAP_FIRST_PAGE = FIX_EFI_IO_MAP_LAST_PAGE
48 + MAX_EFI_IO_PAGES - 1,
49#ifdef CONFIG_PARAVIRT
50 FIX_PARAVIRT_BOOTMAP,
51#endif
52 __end_of_permanent_fixed_addresses,
53#ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT
54 FIX_OHCI1394_BASE,
55#endif
56 /*
57 * 256 temporary boot-time mappings, used by early_ioremap(),
58 * before ioremap() is functional.
59 *
60 * We round it up to the next 256 pages boundary so that we
61 * can have a single pgd entry and a single pte table:
62 */
63#define NR_FIX_BTMAPS 64
64#define FIX_BTMAPS_SLOTS 4
65 FIX_BTMAP_END = __end_of_permanent_fixed_addresses + 256 -
66 (__end_of_permanent_fixed_addresses & 255),
67 FIX_BTMAP_BEGIN = FIX_BTMAP_END + NR_FIX_BTMAPS*FIX_BTMAPS_SLOTS - 1,
68 __end_of_fixed_addresses
69};
70
71#define FIXADDR_TOP (VSYSCALL_END-PAGE_SIZE)
72#define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
73#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)
74
75/* Only covers 32bit vsyscalls currently. Need another set for 64bit. */
76#define FIXADDR_USER_START ((unsigned long)VSYSCALL32_VSYSCALL)
77#define FIXADDR_USER_END (FIXADDR_USER_START + PAGE_SIZE)
78
79#endif /* _ASM_X86_FIXMAP_64_H */
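Both deleted variants and the unified header above share one idea: fixmap indices count 4k pages downwards from FIXADDR_TOP, and virt_to_fix() inverts that. A stand-alone sketch of the arithmetic; the constants below are illustrative only, the real values come from the per-architecture definitions.

#include <assert.h>
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_MASK	(~((1UL << PAGE_SHIFT) - 1))
#define FIXADDR_TOP	0xfffff000UL	/* illustrative, not the real value */

static unsigned long fix_to_virt_sketch(unsigned int idx)
{
	return FIXADDR_TOP - ((unsigned long)idx << PAGE_SHIFT);
}

static unsigned int virt_to_fix_sketch(unsigned long vaddr)
{
	return (FIXADDR_TOP - (vaddr & PAGE_MASK)) >> PAGE_SHIFT;
}

int main(void)
{
	unsigned int idx;

	for (idx = 0; idx < 64; idx++) {
		unsigned long va = fix_to_virt_sketch(idx);

		/* the mapping is its own inverse for page-aligned addresses */
		assert(virt_to_fix_sketch(va) == idx);
		/* higher indices sit at lower virtual addresses */
		if (idx)
			assert(va < fix_to_virt_sketch(idx - 1));
	}
	printf("fixmap index/address round trip holds\n");
	return 0;
}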
diff --git a/arch/x86/include/asm/iomap.h b/arch/x86/include/asm/iomap.h
index c1f06289b14b..86af26091d6c 100644
--- a/arch/x86/include/asm/iomap.h
+++ b/arch/x86/include/asm/iomap.h
@@ -23,6 +23,9 @@
23#include <asm/pgtable.h> 23#include <asm/pgtable.h>
24#include <asm/tlbflush.h> 24#include <asm/tlbflush.h>
25 25
26int
27is_io_mapping_possible(resource_size_t base, unsigned long size);
28
26void * 29void *
27iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot); 30iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot);
28 31
diff --git a/arch/x86/include/asm/numa_32.h b/arch/x86/include/asm/numa_32.h
index e9f5db796244..a37229011b56 100644
--- a/arch/x86/include/asm/numa_32.h
+++ b/arch/x86/include/asm/numa_32.h
@@ -4,8 +4,12 @@
4extern int pxm_to_nid(int pxm); 4extern int pxm_to_nid(int pxm);
5extern void numa_remove_cpu(int cpu); 5extern void numa_remove_cpu(int cpu);
6 6
7#ifdef CONFIG_NUMA 7#ifdef CONFIG_HIGHMEM
8extern void set_highmem_pages_init(void); 8extern void set_highmem_pages_init(void);
9#else
10static inline void set_highmem_pages_init(void)
11{
12}
9#endif 13#endif
10 14
11#endif /* _ASM_X86_NUMA_32_H */ 15#endif /* _ASM_X86_NUMA_32_H */
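The hunk above follows a common header pattern: provide the real declaration when the config option is on and an empty static inline when it is off, so call sites never need their own #ifdef. The vsmp_init() change to setup.h further down uses the same trick. A stand-alone sketch of the shape (FEATURE_FOO and the function name are placeholders, not kernel symbols):

#include <stdio.h>

#ifdef FEATURE_FOO			/* stands in for CONFIG_HIGHMEM etc. */
static void feature_foo_init(void)
{
	printf("feature enabled: doing the real setup\n");
}
#else
static inline void feature_foo_init(void) { }	/* no-op stub */
#endif

int main(void)
{
	feature_foo_init();	/* call site is identical either way */
	return 0;
}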
diff --git a/arch/x86/include/asm/pat.h b/arch/x86/include/asm/pat.h
index 9709fdff6615..b0e70056838e 100644
--- a/arch/x86/include/asm/pat.h
+++ b/arch/x86/include/asm/pat.h
@@ -15,4 +15,7 @@ extern int reserve_memtype(u64 start, u64 end,
15 unsigned long req_type, unsigned long *ret_type); 15 unsigned long req_type, unsigned long *ret_type);
16extern int free_memtype(u64 start, u64 end); 16extern int free_memtype(u64 start, u64 end);
17 17
18extern int kernel_map_sync_memtype(u64 base, unsigned long size,
19 unsigned long flag);
20
18#endif /* _ASM_X86_PAT_H */ 21#endif /* _ASM_X86_PAT_H */
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index c7a98f738210..76139506c3e4 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -248,7 +248,6 @@ struct x86_hw_tss {
248#define IO_BITMAP_LONGS (IO_BITMAP_BYTES/sizeof(long)) 248#define IO_BITMAP_LONGS (IO_BITMAP_BYTES/sizeof(long))
249#define IO_BITMAP_OFFSET offsetof(struct tss_struct, io_bitmap) 249#define IO_BITMAP_OFFSET offsetof(struct tss_struct, io_bitmap)
250#define INVALID_IO_BITMAP_OFFSET 0x8000 250#define INVALID_IO_BITMAP_OFFSET 0x8000
251#define INVALID_IO_BITMAP_OFFSET_LAZY 0x9000
252 251
253struct tss_struct { 252struct tss_struct {
254 /* 253 /*
@@ -263,11 +262,6 @@ struct tss_struct {
263 * be within the limit. 262 * be within the limit.
264 */ 263 */
265 unsigned long io_bitmap[IO_BITMAP_LONGS + 1]; 264 unsigned long io_bitmap[IO_BITMAP_LONGS + 1];
266 /*
267 * Cache the current maximum and the last task that used the bitmap:
268 */
269 unsigned long io_bitmap_max;
270 struct thread_struct *io_bitmap_owner;
271 265
272 /* 266 /*
273 * .. and then another 0x100 bytes for the emergency kernel stack: 267 * .. and then another 0x100 bytes for the emergency kernel stack:
diff --git a/arch/x86/include/asm/seccomp_32.h b/arch/x86/include/asm/seccomp_32.h
index a6ad87b352c4..b811d6f5780c 100644
--- a/arch/x86/include/asm/seccomp_32.h
+++ b/arch/x86/include/asm/seccomp_32.h
@@ -1,12 +1,6 @@
1#ifndef _ASM_X86_SECCOMP_32_H 1#ifndef _ASM_X86_SECCOMP_32_H
2#define _ASM_X86_SECCOMP_32_H 2#define _ASM_X86_SECCOMP_32_H
3 3
4#include <linux/thread_info.h>
5
6#ifdef TIF_32BIT
7#error "unexpected TIF_32BIT on i386"
8#endif
9
10#include <linux/unistd.h> 4#include <linux/unistd.h>
11 5
12#define __NR_seccomp_read __NR_read 6#define __NR_seccomp_read __NR_read
diff --git a/arch/x86/include/asm/seccomp_64.h b/arch/x86/include/asm/seccomp_64.h
index 4171bb794e9e..84ec1bd161a5 100644
--- a/arch/x86/include/asm/seccomp_64.h
+++ b/arch/x86/include/asm/seccomp_64.h
@@ -1,14 +1,6 @@
1#ifndef _ASM_X86_SECCOMP_64_H 1#ifndef _ASM_X86_SECCOMP_64_H
2#define _ASM_X86_SECCOMP_64_H 2#define _ASM_X86_SECCOMP_64_H
3 3
4#include <linux/thread_info.h>
5
6#ifdef TIF_32BIT
7#error "unexpected TIF_32BIT on x86_64"
8#else
9#define TIF_32BIT TIF_IA32
10#endif
11
12#include <linux/unistd.h> 4#include <linux/unistd.h>
13#include <asm/ia32_unistd.h> 5#include <asm/ia32_unistd.h>
14 6
diff --git a/arch/x86/include/asm/setup.h b/arch/x86/include/asm/setup.h
index 66801cb72f69..05c6f6b11fd5 100644
--- a/arch/x86/include/asm/setup.h
+++ b/arch/x86/include/asm/setup.h
@@ -31,7 +31,6 @@ struct x86_quirks {
31 void (*smp_read_mpc_oem)(struct mpc_oemtable *oemtable, 31 void (*smp_read_mpc_oem)(struct mpc_oemtable *oemtable,
32 unsigned short oemsize); 32 unsigned short oemsize);
33 int (*setup_ioapic_ids)(void); 33 int (*setup_ioapic_ids)(void);
34 int (*update_apic)(void);
35}; 34};
36 35
37extern void x86_quirk_pre_intr_init(void); 36extern void x86_quirk_pre_intr_init(void);
@@ -65,7 +64,11 @@ extern void x86_quirk_time_init(void);
65#include <asm/bootparam.h> 64#include <asm/bootparam.h>
66 65
67/* Interrupt control for vSMPowered x86_64 systems */ 66/* Interrupt control for vSMPowered x86_64 systems */
67#ifdef CONFIG_X86_VSMP
68void vsmp_init(void); 68void vsmp_init(void);
69#else
70static inline void vsmp_init(void) { }
71#endif
69 72
70void setup_bios_corruption_check(void); 73void setup_bios_corruption_check(void);
71 74
@@ -77,8 +80,6 @@ static inline void visws_early_detect(void) { }
77static inline int is_visws_box(void) { return 0; } 80static inline int is_visws_box(void) { return 0; }
78#endif 81#endif
79 82
80extern int wakeup_secondary_cpu_via_nmi(int apicid, unsigned long start_eip);
81extern int wakeup_secondary_cpu_via_init(int apicid, unsigned long start_eip);
82extern struct x86_quirks *x86_quirks; 83extern struct x86_quirks *x86_quirks;
83extern unsigned long saved_video_mode; 84extern unsigned long saved_video_mode;
84 85
diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h
index c00bfdbdd456..643c59b4bc6e 100644
--- a/arch/x86/include/asm/system.h
+++ b/arch/x86/include/asm/system.h
@@ -20,6 +20,9 @@
20struct task_struct; /* one of the stranger aspects of C forward declarations */ 20struct task_struct; /* one of the stranger aspects of C forward declarations */
21struct task_struct *__switch_to(struct task_struct *prev, 21struct task_struct *__switch_to(struct task_struct *prev,
22 struct task_struct *next); 22 struct task_struct *next);
23struct tss_struct;
24void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
25 struct tss_struct *tss);
23 26
24#ifdef CONFIG_X86_32 27#ifdef CONFIG_X86_32
25 28
diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
index 987a2c10fe20..8cc687326eb8 100644
--- a/arch/x86/include/asm/uaccess_64.h
+++ b/arch/x86/include/asm/uaccess_64.h
@@ -188,30 +188,18 @@ __copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
188extern long __copy_user_nocache(void *dst, const void __user *src, 188extern long __copy_user_nocache(void *dst, const void __user *src,
189 unsigned size, int zerorest); 189 unsigned size, int zerorest);
190 190
191static inline int __copy_from_user_nocache(void *dst, const void __user *src, 191static inline int
192 unsigned size) 192__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
193{ 193{
194 might_sleep(); 194 might_sleep();
195 /* 195 return __copy_user_nocache(dst, src, size, 1);
196 * In practice this limit means that large file write()s
197 * which get chunked to 4K copies get handled via
198 * non-temporal stores here. Smaller writes get handled
199 * via regular __copy_from_user():
200 */
201 if (likely(size >= PAGE_SIZE))
202 return __copy_user_nocache(dst, src, size, 1);
203 else
204 return __copy_from_user(dst, src, size);
205} 196}
206 197
207static inline int __copy_from_user_inatomic_nocache(void *dst, 198static inline int
208 const void __user *src, 199__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
209 unsigned size) 200 unsigned size)
210{ 201{
211 if (likely(size >= PAGE_SIZE)) 202 return __copy_user_nocache(dst, src, size, 0);
212 return __copy_user_nocache(dst, src, size, 0);
213 else
214 return __copy_from_user_inatomic(dst, src, size);
215} 203}
216 204
217unsigned long 205unsigned long
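The deleted branch used to send small copies through the ordinary cache-filling path and only large ones through the non-temporal path; after this change the inline always takes the non-temporal path, leaving any such size policy to callers if they want one. A stand-alone sketch of that old dispatch, restated in user space with plain memcpy() stand-ins rather than the kernel routines:

#include <stddef.h>
#include <stdio.h>
#include <string.h>

#define SKETCH_PAGE_SIZE 4096

static size_t cached_copies, nocache_copies;

static void copy_cached(void *dst, const void *src, size_t size)
{
	memcpy(dst, src, size);		/* stands in for __copy_from_user() */
	cached_copies++;
}

static void copy_nocache(void *dst, const void *src, size_t size)
{
	memcpy(dst, src, size);		/* stands in for __copy_user_nocache() */
	nocache_copies++;
}

/* the size policy the old inline embedded, now expressed at the call site */
static void copy_with_size_policy(void *dst, const void *src, size_t size)
{
	if (size >= SKETCH_PAGE_SIZE)
		copy_nocache(dst, src, size);
	else
		copy_cached(dst, src, size);
}

int main(void)
{
	static char src[2 * SKETCH_PAGE_SIZE], dst[2 * SKETCH_PAGE_SIZE];

	copy_with_size_policy(dst, src, 64);		/* small: cached path */
	copy_with_size_policy(dst, src, sizeof(src));	/* large: non-temporal path */
	printf("cached=%zu nocache=%zu\n", cached_copies, nocache_copies);
	return 0;
}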
diff --git a/arch/x86/include/asm/uv/uv.h b/arch/x86/include/asm/uv/uv.h
index 8242bf965812..c0a01b5d985b 100644
--- a/arch/x86/include/asm/uv/uv.h
+++ b/arch/x86/include/asm/uv/uv.h
@@ -12,7 +12,6 @@ extern enum uv_system_type get_uv_system_type(void);
12extern int is_uv_system(void); 12extern int is_uv_system(void);
13extern void uv_cpu_init(void); 13extern void uv_cpu_init(void);
14extern void uv_system_init(void); 14extern void uv_system_init(void);
15extern int uv_wakeup_secondary(int phys_apicid, unsigned int start_rip);
16extern const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask, 15extern const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
17 struct mm_struct *mm, 16 struct mm_struct *mm,
18 unsigned long va, 17 unsigned long va,
@@ -24,8 +23,6 @@ static inline enum uv_system_type get_uv_system_type(void) { return UV_NONE; }
24static inline int is_uv_system(void) { return 0; } 23static inline int is_uv_system(void) { return 0; }
25static inline void uv_cpu_init(void) { } 24static inline void uv_cpu_init(void) { }
26static inline void uv_system_init(void) { } 25static inline void uv_system_init(void) { }
27static inline int uv_wakeup_secondary(int phys_apicid, unsigned int start_rip)
28{ return 1; }
29static inline const struct cpumask * 26static inline const struct cpumask *
30uv_flush_tlb_others(const struct cpumask *cpumask, struct mm_struct *mm, 27uv_flush_tlb_others(const struct cpumask *cpumask, struct mm_struct *mm,
31 unsigned long va, unsigned int cpu) 28 unsigned long va, unsigned int cpu)
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index de5657c039e9..95f216bbfaf1 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -70,7 +70,7 @@ obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
70obj-$(CONFIG_KEXEC) += machine_kexec_$(BITS).o 70obj-$(CONFIG_KEXEC) += machine_kexec_$(BITS).o
71obj-$(CONFIG_KEXEC) += relocate_kernel_$(BITS).o crash.o 71obj-$(CONFIG_KEXEC) += relocate_kernel_$(BITS).o crash.o
72obj-$(CONFIG_CRASH_DUMP) += crash_dump_$(BITS).o 72obj-$(CONFIG_CRASH_DUMP) += crash_dump_$(BITS).o
73obj-y += vsmp_64.o 73obj-$(CONFIG_X86_VSMP) += vsmp_64.o
74obj-$(CONFIG_KPROBES) += kprobes.o 74obj-$(CONFIG_KPROBES) += kprobes.o
75obj-$(CONFIG_MODULES) += module_$(BITS).o 75obj-$(CONFIG_MODULES) += module_$(BITS).o
76obj-$(CONFIG_EFI) += efi.o efi_$(BITS).o efi_stub_$(BITS).o 76obj-$(CONFIG_EFI) += efi.o efi_$(BITS).o efi_stub_$(BITS).o
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index a84ac7b570e6..6907b8e85d52 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -498,12 +498,12 @@ void *text_poke_early(void *addr, const void *opcode, size_t len)
498 */ 498 */
499void *__kprobes text_poke(void *addr, const void *opcode, size_t len) 499void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
500{ 500{
501 unsigned long flags;
502 char *vaddr; 501 char *vaddr;
503 int nr_pages = 2; 502 int nr_pages = 2;
504 struct page *pages[2]; 503 struct page *pages[2];
505 int i; 504 int i;
506 505
506 might_sleep();
507 if (!core_kernel_text((unsigned long)addr)) { 507 if (!core_kernel_text((unsigned long)addr)) {
508 pages[0] = vmalloc_to_page(addr); 508 pages[0] = vmalloc_to_page(addr);
509 pages[1] = vmalloc_to_page(addr + PAGE_SIZE); 509 pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
@@ -517,9 +517,9 @@ void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
517 nr_pages = 1; 517 nr_pages = 1;
518 vaddr = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL); 518 vaddr = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);
519 BUG_ON(!vaddr); 519 BUG_ON(!vaddr);
520 local_irq_save(flags); 520 local_irq_disable();
521 memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len); 521 memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
522 local_irq_restore(flags); 522 local_irq_enable();
523 vunmap(vaddr); 523 vunmap(vaddr);
524 sync_core(); 524 sync_core();
525 /* Could also do a CLFLUSH here to speed up CPU recovery; but 525 /* Could also do a CLFLUSH here to speed up CPU recovery; but
diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c
index 3b002995e145..f933822dba18 100644
--- a/arch/x86/kernel/apic/apic_flat_64.c
+++ b/arch/x86/kernel/apic/apic_flat_64.c
@@ -222,7 +222,6 @@ struct apic apic_flat = {
222 .send_IPI_all = flat_send_IPI_all, 222 .send_IPI_all = flat_send_IPI_all,
223 .send_IPI_self = apic_send_IPI_self, 223 .send_IPI_self = apic_send_IPI_self,
224 224
225 .wakeup_cpu = NULL,
226 .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW, 225 .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW,
227 .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH, 226 .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH,
228 .wait_for_init_deassert = NULL, 227 .wait_for_init_deassert = NULL,
@@ -373,7 +372,6 @@ struct apic apic_physflat = {
373 .send_IPI_all = physflat_send_IPI_all, 372 .send_IPI_all = physflat_send_IPI_all,
374 .send_IPI_self = apic_send_IPI_self, 373 .send_IPI_self = apic_send_IPI_self,
375 374
376 .wakeup_cpu = NULL,
377 .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW, 375 .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW,
378 .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH, 376 .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH,
379 .wait_for_init_deassert = NULL, 377 .wait_for_init_deassert = NULL,
diff --git a/arch/x86/kernel/apic/bigsmp_32.c b/arch/x86/kernel/apic/bigsmp_32.c
index 0b1093394fdf..d806ecaa948f 100644
--- a/arch/x86/kernel/apic/bigsmp_32.c
+++ b/arch/x86/kernel/apic/bigsmp_32.c
@@ -16,17 +16,17 @@
16#include <asm/apic.h> 16#include <asm/apic.h>
17#include <asm/ipi.h> 17#include <asm/ipi.h>
18 18
19static inline unsigned bigsmp_get_apic_id(unsigned long x) 19static unsigned bigsmp_get_apic_id(unsigned long x)
20{ 20{
21 return (x >> 24) & 0xFF; 21 return (x >> 24) & 0xFF;
22} 22}
23 23
24static inline int bigsmp_apic_id_registered(void) 24static int bigsmp_apic_id_registered(void)
25{ 25{
26 return 1; 26 return 1;
27} 27}
28 28
29static inline const cpumask_t *bigsmp_target_cpus(void) 29static const cpumask_t *bigsmp_target_cpus(void)
30{ 30{
31#ifdef CONFIG_SMP 31#ifdef CONFIG_SMP
32 return &cpu_online_map; 32 return &cpu_online_map;
@@ -35,13 +35,12 @@ static inline const cpumask_t *bigsmp_target_cpus(void)
35#endif 35#endif
36} 36}
37 37
38static inline unsigned long 38static unsigned long bigsmp_check_apicid_used(physid_mask_t bitmap, int apicid)
39bigsmp_check_apicid_used(physid_mask_t bitmap, int apicid)
40{ 39{
41 return 0; 40 return 0;
42} 41}
43 42
44static inline unsigned long bigsmp_check_apicid_present(int bit) 43static unsigned long bigsmp_check_apicid_present(int bit)
45{ 44{
46 return 1; 45 return 1;
47} 46}
@@ -64,7 +63,7 @@ static inline unsigned long calculate_ldr(int cpu)
64 * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel 63 * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel
65 * document number 292116). So here it goes... 64 * document number 292116). So here it goes...
66 */ 65 */
67static inline void bigsmp_init_apic_ldr(void) 66static void bigsmp_init_apic_ldr(void)
68{ 67{
69 unsigned long val; 68 unsigned long val;
70 int cpu = smp_processor_id(); 69 int cpu = smp_processor_id();
@@ -74,19 +73,19 @@ static inline void bigsmp_init_apic_ldr(void)
74 apic_write(APIC_LDR, val); 73 apic_write(APIC_LDR, val);
75} 74}
76 75
77static inline void bigsmp_setup_apic_routing(void) 76static void bigsmp_setup_apic_routing(void)
78{ 77{
79 printk(KERN_INFO 78 printk(KERN_INFO
80 "Enabling APIC mode: Physflat. Using %d I/O APICs\n", 79 "Enabling APIC mode: Physflat. Using %d I/O APICs\n",
81 nr_ioapics); 80 nr_ioapics);
82} 81}
83 82
84static inline int bigsmp_apicid_to_node(int logical_apicid) 83static int bigsmp_apicid_to_node(int logical_apicid)
85{ 84{
86 return apicid_2_node[hard_smp_processor_id()]; 85 return apicid_2_node[hard_smp_processor_id()];
87} 86}
88 87
89static inline int bigsmp_cpu_present_to_apicid(int mps_cpu) 88static int bigsmp_cpu_present_to_apicid(int mps_cpu)
90{ 89{
91 if (mps_cpu < nr_cpu_ids) 90 if (mps_cpu < nr_cpu_ids)
92 return (int) per_cpu(x86_bios_cpu_apicid, mps_cpu); 91 return (int) per_cpu(x86_bios_cpu_apicid, mps_cpu);
@@ -94,7 +93,7 @@ static inline int bigsmp_cpu_present_to_apicid(int mps_cpu)
94 return BAD_APICID; 93 return BAD_APICID;
95} 94}
96 95
97static inline physid_mask_t bigsmp_apicid_to_cpu_present(int phys_apicid) 96static physid_mask_t bigsmp_apicid_to_cpu_present(int phys_apicid)
98{ 97{
99 return physid_mask_of_physid(phys_apicid); 98 return physid_mask_of_physid(phys_apicid);
100} 99}
@@ -107,29 +106,24 @@ static inline int bigsmp_cpu_to_logical_apicid(int cpu)
107 return cpu_physical_id(cpu); 106 return cpu_physical_id(cpu);
108} 107}
109 108
110static inline physid_mask_t bigsmp_ioapic_phys_id_map(physid_mask_t phys_map) 109static physid_mask_t bigsmp_ioapic_phys_id_map(physid_mask_t phys_map)
111{ 110{
112 /* For clustered we don't have a good way to do this yet - hack */ 111 /* For clustered we don't have a good way to do this yet - hack */
113 return physids_promote(0xFFL); 112 return physids_promote(0xFFL);
114} 113}
115 114
116static inline void bigsmp_setup_portio_remap(void) 115static int bigsmp_check_phys_apicid_present(int boot_cpu_physical_apicid)
117{
118}
119
120static inline int bigsmp_check_phys_apicid_present(int boot_cpu_physical_apicid)
121{ 116{
122 return 1; 117 return 1;
123} 118}
124 119
125/* As we are using single CPU as destination, pick only one CPU here */ 120/* As we are using single CPU as destination, pick only one CPU here */
126static inline unsigned int bigsmp_cpu_mask_to_apicid(const cpumask_t *cpumask) 121static unsigned int bigsmp_cpu_mask_to_apicid(const cpumask_t *cpumask)
127{ 122{
128 return bigsmp_cpu_to_logical_apicid(first_cpu(*cpumask)); 123 return bigsmp_cpu_to_logical_apicid(first_cpu(*cpumask));
129} 124}
130 125
131static inline unsigned int 126static unsigned int bigsmp_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
132bigsmp_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
133 const struct cpumask *andmask) 127 const struct cpumask *andmask)
134{ 128{
135 int cpu; 129 int cpu;
@@ -148,7 +142,7 @@ bigsmp_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
148 return BAD_APICID; 142 return BAD_APICID;
149} 143}
150 144
151static inline int bigsmp_phys_pkg_id(int cpuid_apic, int index_msb) 145static int bigsmp_phys_pkg_id(int cpuid_apic, int index_msb)
152{ 146{
153 return cpuid_apic >> index_msb; 147 return cpuid_apic >> index_msb;
154} 148}
@@ -158,12 +152,12 @@ static inline void bigsmp_send_IPI_mask(const struct cpumask *mask, int vector)
158 default_send_IPI_mask_sequence_phys(mask, vector); 152 default_send_IPI_mask_sequence_phys(mask, vector);
159} 153}
160 154
161static inline void bigsmp_send_IPI_allbutself(int vector) 155static void bigsmp_send_IPI_allbutself(int vector)
162{ 156{
163 default_send_IPI_mask_allbutself_phys(cpu_online_mask, vector); 157 default_send_IPI_mask_allbutself_phys(cpu_online_mask, vector);
164} 158}
165 159
166static inline void bigsmp_send_IPI_all(int vector) 160static void bigsmp_send_IPI_all(int vector)
167{ 161{
168 bigsmp_send_IPI_mask(cpu_online_mask, vector); 162 bigsmp_send_IPI_mask(cpu_online_mask, vector);
169} 163}
@@ -256,7 +250,6 @@ struct apic apic_bigsmp = {
256 .send_IPI_all = bigsmp_send_IPI_all, 250 .send_IPI_all = bigsmp_send_IPI_all,
257 .send_IPI_self = default_send_IPI_self, 251 .send_IPI_self = default_send_IPI_self,
258 252
259 .wakeup_cpu = NULL,
260 .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW, 253 .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW,
261 .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH, 254 .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH,
262 255
diff --git a/arch/x86/kernel/apic/es7000_32.c b/arch/x86/kernel/apic/es7000_32.c
index 320f2d2e4e54..19588f2770ee 100644
--- a/arch/x86/kernel/apic/es7000_32.c
+++ b/arch/x86/kernel/apic/es7000_32.c
@@ -163,22 +163,17 @@ static int wakeup_secondary_cpu_via_mip(int cpu, unsigned long eip)
163 return 0; 163 return 0;
164} 164}
165 165
166static int __init es7000_update_apic(void) 166static int es7000_apic_is_cluster(void)
167{ 167{
168 apic->wakeup_cpu = wakeup_secondary_cpu_via_mip;
169
170 /* MPENTIUMIII */ 168 /* MPENTIUMIII */
171 if (boot_cpu_data.x86 == 6 && 169 if (boot_cpu_data.x86 == 6 &&
172 (boot_cpu_data.x86_model >= 7 || boot_cpu_data.x86_model <= 11)) { 170 (boot_cpu_data.x86_model >= 7 || boot_cpu_data.x86_model <= 11))
173 es7000_update_apic_to_cluster(); 171 return 1;
174 apic->wait_for_init_deassert = NULL;
175 apic->wakeup_cpu = wakeup_secondary_cpu_via_mip;
176 }
177 172
178 return 0; 173 return 0;
179} 174}
180 175
181static void __init setup_unisys(void) 176static void setup_unisys(void)
182{ 177{
183 /* 178 /*
184 * Determine the generation of the ES7000 currently running. 179 * Determine the generation of the ES7000 currently running.
@@ -192,14 +187,12 @@ static void __init setup_unisys(void)
192 else 187 else
193 es7000_plat = ES7000_CLASSIC; 188 es7000_plat = ES7000_CLASSIC;
194 ioapic_renumber_irq = es7000_rename_gsi; 189 ioapic_renumber_irq = es7000_rename_gsi;
195
196 x86_quirks->update_apic = es7000_update_apic;
197} 190}
198 191
199/* 192/*
200 * Parse the OEM Table: 193 * Parse the OEM Table:
201 */ 194 */
202static int __init parse_unisys_oem(char *oemptr) 195static int parse_unisys_oem(char *oemptr)
203{ 196{
204 int i; 197 int i;
205 int success = 0; 198 int success = 0;
@@ -261,7 +254,7 @@ static int __init parse_unisys_oem(char *oemptr)
261} 254}
262 255
263#ifdef CONFIG_ACPI 256#ifdef CONFIG_ACPI
264static int __init find_unisys_acpi_oem_table(unsigned long *oem_addr) 257static int find_unisys_acpi_oem_table(unsigned long *oem_addr)
265{ 258{
266 struct acpi_table_header *header = NULL; 259 struct acpi_table_header *header = NULL;
267 struct es7000_oem_table *table; 260 struct es7000_oem_table *table;
@@ -292,7 +285,7 @@ static int __init find_unisys_acpi_oem_table(unsigned long *oem_addr)
292 return 0; 285 return 0;
293} 286}
294 287
295static void __init unmap_unisys_acpi_oem_table(unsigned long oem_addr) 288static void unmap_unisys_acpi_oem_table(unsigned long oem_addr)
296{ 289{
297 if (!oem_addr) 290 if (!oem_addr)
298 return; 291 return;
@@ -310,8 +303,10 @@ static int es7000_check_dsdt(void)
310 return 0; 303 return 0;
311} 304}
312 305
306static int es7000_acpi_ret;
307
313/* Hook from generic ACPI tables.c */ 308/* Hook from generic ACPI tables.c */
314static int __init es7000_acpi_madt_oem_check(char *oem_id, char *oem_table_id) 309static int es7000_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
315{ 310{
316 unsigned long oem_addr = 0; 311 unsigned long oem_addr = 0;
317 int check_dsdt; 312 int check_dsdt;
@@ -332,10 +327,26 @@ static int __init es7000_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
332 */ 327 */
333 unmap_unisys_acpi_oem_table(oem_addr); 328 unmap_unisys_acpi_oem_table(oem_addr);
334 } 329 }
335 return ret; 330
331 es7000_acpi_ret = ret;
332
333 return ret && !es7000_apic_is_cluster();
336} 334}
335
336static int es7000_acpi_madt_oem_check_cluster(char *oem_id, char *oem_table_id)
337{
338 int ret = es7000_acpi_ret;
339
340 return ret && es7000_apic_is_cluster();
341}
342
337#else /* !CONFIG_ACPI: */ 343#else /* !CONFIG_ACPI: */
338static int __init es7000_acpi_madt_oem_check(char *oem_id, char *oem_table_id) 344static int es7000_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
345{
346 return 0;
347}
348
349static int es7000_acpi_madt_oem_check_cluster(char *oem_id, char *oem_table_id)
339{ 350{
340 return 0; 351 return 0;
341} 352}
@@ -349,8 +360,7 @@ static void es7000_spin(int n)
349 rep_nop(); 360 rep_nop();
350} 361}
351 362
352static int __init 363static int es7000_mip_write(struct mip_reg *mip_reg)
353es7000_mip_write(struct mip_reg *mip_reg)
354{ 364{
355 int status = 0; 365 int status = 0;
356 int spin; 366 int spin;
@@ -383,7 +393,7 @@ es7000_mip_write(struct mip_reg *mip_reg)
383 return status; 393 return status;
384} 394}
385 395
386static void __init es7000_enable_apic_mode(void) 396static void es7000_enable_apic_mode(void)
387{ 397{
388 struct mip_reg es7000_mip_reg; 398 struct mip_reg es7000_mip_reg;
389 int mip_status; 399 int mip_status;
@@ -416,11 +426,8 @@ static void es7000_vector_allocation_domain(int cpu, cpumask_t *retmask)
416 426
417static void es7000_wait_for_init_deassert(atomic_t *deassert) 427static void es7000_wait_for_init_deassert(atomic_t *deassert)
418{ 428{
419#ifndef CONFIG_ES7000_CLUSTERED_APIC
420 while (!atomic_read(deassert)) 429 while (!atomic_read(deassert))
421 cpu_relax(); 430 cpu_relax();
422#endif
423 return;
424} 431}
425 432
426static unsigned int es7000_get_apic_id(unsigned long x) 433static unsigned int es7000_get_apic_id(unsigned long x)
@@ -565,72 +572,24 @@ static int es7000_check_phys_apicid_present(int cpu_physical_apicid)
565 return 1; 572 return 1;
566} 573}
567 574
568static unsigned int
569es7000_cpu_mask_to_apicid_cluster(const struct cpumask *cpumask)
570{
571 int cpus_found = 0;
572 int num_bits_set;
573 int apicid;
574 int cpu;
575
576 num_bits_set = cpumask_weight(cpumask);
577 /* Return id to all */
578 if (num_bits_set == nr_cpu_ids)
579 return 0xFF;
580 /*
581 * The cpus in the mask must all be on the apic cluster. If are not
582 * on the same apicid cluster return default value of target_cpus():
583 */
584 cpu = cpumask_first(cpumask);
585 apicid = es7000_cpu_to_logical_apicid(cpu);
586
587 while (cpus_found < num_bits_set) {
588 if (cpumask_test_cpu(cpu, cpumask)) {
589 int new_apicid = es7000_cpu_to_logical_apicid(cpu);
590
591 if (APIC_CLUSTER(apicid) != APIC_CLUSTER(new_apicid)) {
592 WARN(1, "Not a valid mask!");
593
594 return 0xFF;
595 }
596 apicid = new_apicid;
597 cpus_found++;
598 }
599 cpu++;
600 }
601 return apicid;
602}
603
604static unsigned int es7000_cpu_mask_to_apicid(const cpumask_t *cpumask) 575static unsigned int es7000_cpu_mask_to_apicid(const cpumask_t *cpumask)
605{ 576{
606 int cpus_found = 0; 577 unsigned int round = 0;
607 int num_bits_set; 578 int cpu, uninitialized_var(apicid);
608 int apicid;
609 int cpu;
610 579
611 num_bits_set = cpus_weight(*cpumask);
612 /* Return id to all */
613 if (num_bits_set == nr_cpu_ids)
614 return es7000_cpu_to_logical_apicid(0);
615 /* 580 /*
616 * The cpus in the mask must all be on the apic cluster. If are not 581 * The cpus in the mask must all be on the apic cluster.
617 * on the same apicid cluster return default value of target_cpus():
618 */ 582 */
619 cpu = first_cpu(*cpumask); 583 for_each_cpu(cpu, cpumask) {
620 apicid = es7000_cpu_to_logical_apicid(cpu); 584 int new_apicid = es7000_cpu_to_logical_apicid(cpu);
621 while (cpus_found < num_bits_set) {
622 if (cpu_isset(cpu, *cpumask)) {
623 int new_apicid = es7000_cpu_to_logical_apicid(cpu);
624 585
625 if (APIC_CLUSTER(apicid) != APIC_CLUSTER(new_apicid)) { 586 if (round && APIC_CLUSTER(apicid) != APIC_CLUSTER(new_apicid)) {
626 printk("%s: Not a valid mask!\n", __func__); 587 WARN(1, "Not a valid mask!");
627 588
628 return es7000_cpu_to_logical_apicid(0); 589 return BAD_APICID;
629 }
630 apicid = new_apicid;
631 cpus_found++;
632 } 590 }
633 cpu++; 591 apicid = new_apicid;
592 round++;
634 } 593 }
635 return apicid; 594 return apicid;
636} 595}
@@ -659,37 +618,103 @@ static int es7000_phys_pkg_id(int cpuid_apic, int index_msb)
659 return cpuid_apic >> index_msb; 618 return cpuid_apic >> index_msb;
660} 619}
661 620
662void __init es7000_update_apic_to_cluster(void)
663{
664 apic->target_cpus = target_cpus_cluster;
665 apic->irq_delivery_mode = dest_LowestPrio;
666 /* logical delivery broadcast to all procs: */
667 apic->irq_dest_mode = 1;
668
669 apic->init_apic_ldr = es7000_init_apic_ldr_cluster;
670
671 apic->cpu_mask_to_apicid = es7000_cpu_mask_to_apicid_cluster;
672}
673
674static int probe_es7000(void) 621static int probe_es7000(void)
675{ 622{
676 /* probed later in mptable/ACPI hooks */ 623 /* probed later in mptable/ACPI hooks */
677 return 0; 624 return 0;
678} 625}
679 626
680static __init int 627static int es7000_mps_ret;
681es7000_mps_oem_check(struct mpc_table *mpc, char *oem, char *productid) 628static int es7000_mps_oem_check(struct mpc_table *mpc, char *oem,
629 char *productid)
682{ 630{
631 int ret = 0;
632
683 if (mpc->oemptr) { 633 if (mpc->oemptr) {
684 struct mpc_oemtable *oem_table = 634 struct mpc_oemtable *oem_table =
685 (struct mpc_oemtable *)mpc->oemptr; 635 (struct mpc_oemtable *)mpc->oemptr;
686 636
687 if (!strncmp(oem, "UNISYS", 6)) 637 if (!strncmp(oem, "UNISYS", 6))
688 return parse_unisys_oem((char *)oem_table); 638 ret = parse_unisys_oem((char *)oem_table);
689 } 639 }
690 return 0; 640
641 es7000_mps_ret = ret;
642
643 return ret && !es7000_apic_is_cluster();
691} 644}
692 645
646static int es7000_mps_oem_check_cluster(struct mpc_table *mpc, char *oem,
647 char *productid)
648{
649 int ret = es7000_mps_ret;
650
651 return ret && es7000_apic_is_cluster();
652}
653
654struct apic apic_es7000_cluster = {
655
656 .name = "es7000",
657 .probe = probe_es7000,
658 .acpi_madt_oem_check = es7000_acpi_madt_oem_check_cluster,
659 .apic_id_registered = es7000_apic_id_registered,
660
661 .irq_delivery_mode = dest_LowestPrio,
662 /* logical delivery broadcast to all procs: */
663 .irq_dest_mode = 1,
664
665 .target_cpus = target_cpus_cluster,
666 .disable_esr = 1,
667 .dest_logical = 0,
668 .check_apicid_used = es7000_check_apicid_used,
669 .check_apicid_present = es7000_check_apicid_present,
670
671 .vector_allocation_domain = es7000_vector_allocation_domain,
672 .init_apic_ldr = es7000_init_apic_ldr_cluster,
673
674 .ioapic_phys_id_map = es7000_ioapic_phys_id_map,
675 .setup_apic_routing = es7000_setup_apic_routing,
676 .multi_timer_check = NULL,
677 .apicid_to_node = es7000_apicid_to_node,
678 .cpu_to_logical_apicid = es7000_cpu_to_logical_apicid,
679 .cpu_present_to_apicid = es7000_cpu_present_to_apicid,
680 .apicid_to_cpu_present = es7000_apicid_to_cpu_present,
681 .setup_portio_remap = NULL,
682 .check_phys_apicid_present = es7000_check_phys_apicid_present,
683 .enable_apic_mode = es7000_enable_apic_mode,
684 .phys_pkg_id = es7000_phys_pkg_id,
685 .mps_oem_check = es7000_mps_oem_check_cluster,
686
687 .get_apic_id = es7000_get_apic_id,
688 .set_apic_id = NULL,
689 .apic_id_mask = 0xFF << 24,
690
691 .cpu_mask_to_apicid = es7000_cpu_mask_to_apicid,
692 .cpu_mask_to_apicid_and = es7000_cpu_mask_to_apicid_and,
693
694 .send_IPI_mask = es7000_send_IPI_mask,
695 .send_IPI_mask_allbutself = NULL,
696 .send_IPI_allbutself = es7000_send_IPI_allbutself,
697 .send_IPI_all = es7000_send_IPI_all,
698 .send_IPI_self = default_send_IPI_self,
699
700 .wakeup_secondary_cpu = wakeup_secondary_cpu_via_mip,
701
702 .trampoline_phys_low = 0x467,
703 .trampoline_phys_high = 0x469,
704
705 .wait_for_init_deassert = NULL,
706
707 /* Nothing to do for most platforms, since cleared by the INIT cycle: */
708 .smp_callin_clear_local_apic = NULL,
709 .inquire_remote_apic = default_inquire_remote_apic,
710
711 .read = native_apic_mem_read,
712 .write = native_apic_mem_write,
713 .icr_read = native_apic_icr_read,
714 .icr_write = native_apic_icr_write,
715 .wait_icr_idle = native_apic_wait_icr_idle,
716 .safe_wait_icr_idle = native_safe_apic_wait_icr_idle,
717};
693 718
694struct apic apic_es7000 = { 719struct apic apic_es7000 = {
695 720
@@ -737,8 +762,6 @@ struct apic apic_es7000 = {
737 .send_IPI_all = es7000_send_IPI_all, 762 .send_IPI_all = es7000_send_IPI_all,
738 .send_IPI_self = default_send_IPI_self, 763 .send_IPI_self = default_send_IPI_self,
739 764
740 .wakeup_cpu = NULL,
741
742 .trampoline_phys_low = 0x467, 765 .trampoline_phys_low = 0x467,
743 .trampoline_phys_high = 0x469, 766 .trampoline_phys_high = 0x469,
744 767
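The rewritten es7000_cpu_mask_to_apicid() above replaces the manual cpus_weight()/cpu_isset() walk with a single for_each_cpu() loop that bails out as soon as two CPUs land in different APIC clusters. A stand-alone sketch of that loop shape, with a plain array in place of a cpumask and the high nibble standing in for APIC_CLUSTER():

#include <stdio.h>

#define BAD_APICID	0xFFFFu

static unsigned int cluster_of(unsigned int apicid)
{
	return apicid & 0xF0;		/* illustrative cluster mask */
}

static unsigned int mask_to_apicid(const unsigned int *apicids, int ncpus)
{
	unsigned int round = 0, apicid = 0;
	int cpu;

	for (cpu = 0; cpu < ncpus; cpu++) {
		unsigned int new_apicid = apicids[cpu];

		/* every CPU in the mask must be in the same cluster */
		if (round && cluster_of(apicid) != cluster_of(new_apicid))
			return BAD_APICID;

		apicid = new_apicid;
		round++;
	}
	return apicid;
}

int main(void)
{
	unsigned int same_cluster[] = { 0x11, 0x12, 0x13 };
	unsigned int mixed[]        = { 0x11, 0x22 };

	printf("same cluster -> %#x\n", mask_to_apicid(same_cluster, 3));
	printf("mixed        -> %#x\n", mask_to_apicid(mixed, 2));
	return 0;
}

The summit_32.c rewrite further down uses the same loop, but ORs the logical IDs together (apicid |= new_apicid) instead of keeping the last one.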
diff --git a/arch/x86/kernel/apic/numaq_32.c b/arch/x86/kernel/apic/numaq_32.c
index d9d6d61eed82..ba2fc6465534 100644
--- a/arch/x86/kernel/apic/numaq_32.c
+++ b/arch/x86/kernel/apic/numaq_32.c
@@ -69,7 +69,7 @@ struct mpc_trans {
69/* x86_quirks member */ 69/* x86_quirks member */
70static int mpc_record; 70static int mpc_record;
71 71
72static __cpuinitdata struct mpc_trans *translation_table[MAX_MPC_ENTRY]; 72static struct mpc_trans *translation_table[MAX_MPC_ENTRY];
73 73
74int mp_bus_id_to_node[MAX_MP_BUSSES]; 74int mp_bus_id_to_node[MAX_MP_BUSSES];
75int mp_bus_id_to_local[MAX_MP_BUSSES]; 75int mp_bus_id_to_local[MAX_MP_BUSSES];
@@ -256,13 +256,6 @@ static int __init numaq_setup_ioapic_ids(void)
256 return 1; 256 return 1;
257} 257}
258 258
259static int __init numaq_update_apic(void)
260{
261 apic->wakeup_cpu = wakeup_secondary_cpu_via_nmi;
262
263 return 0;
264}
265
266static struct x86_quirks numaq_x86_quirks __initdata = { 259static struct x86_quirks numaq_x86_quirks __initdata = {
267 .arch_pre_time_init = numaq_pre_time_init, 260 .arch_pre_time_init = numaq_pre_time_init,
268 .arch_time_init = NULL, 261 .arch_time_init = NULL,
@@ -278,7 +271,6 @@ static struct x86_quirks numaq_x86_quirks __initdata = {
278 .mpc_oem_pci_bus = mpc_oem_pci_bus, 271 .mpc_oem_pci_bus = mpc_oem_pci_bus,
279 .smp_read_mpc_oem = smp_read_mpc_oem, 272 .smp_read_mpc_oem = smp_read_mpc_oem,
280 .setup_ioapic_ids = numaq_setup_ioapic_ids, 273 .setup_ioapic_ids = numaq_setup_ioapic_ids,
281 .update_apic = numaq_update_apic,
282}; 274};
283 275
284static __init void early_check_numaq(void) 276static __init void early_check_numaq(void)
@@ -546,7 +538,7 @@ struct apic apic_numaq = {
546 .send_IPI_all = numaq_send_IPI_all, 538 .send_IPI_all = numaq_send_IPI_all,
547 .send_IPI_self = default_send_IPI_self, 539 .send_IPI_self = default_send_IPI_self,
548 540
549 .wakeup_cpu = NULL, 541 .wakeup_secondary_cpu = wakeup_secondary_cpu_via_nmi,
550 .trampoline_phys_low = NUMAQ_TRAMPOLINE_PHYS_LOW, 542 .trampoline_phys_low = NUMAQ_TRAMPOLINE_PHYS_LOW,
551 .trampoline_phys_high = NUMAQ_TRAMPOLINE_PHYS_HIGH, 543 .trampoline_phys_high = NUMAQ_TRAMPOLINE_PHYS_HIGH,
552 544
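The numaq hunk above is representative of the whole series: instead of an update_apic() quirk patching apic->wakeup_cpu at runtime, each APIC driver now names its wakeup method directly in its struct apic, with the generic boot path presumably falling back to the usual INIT/SIPI sequence when the field is left NULL. A stand-alone sketch of that dispatch shape; the names and the fallback are placeholders, not the kernel's actual boot code:

#include <stdio.h>

struct apic_sketch {
	const char *name;
	int (*wakeup_secondary_cpu)(int apicid, unsigned long start_eip);
};

static int wakeup_via_nmi(int apicid, unsigned long start_eip)
{
	(void)start_eip;
	printf("NMI wakeup for apicid %d\n", apicid);
	return 0;
}

static int wakeup_via_init_default(int apicid, unsigned long start_eip)
{
	(void)start_eip;
	printf("default INIT/SIPI wakeup for apicid %d\n", apicid);
	return 0;
}

static int wakeup_cpu(const struct apic_sketch *apic, int apicid,
		      unsigned long start_eip)
{
	/* per-driver method if set, otherwise the common default */
	if (apic->wakeup_secondary_cpu)
		return apic->wakeup_secondary_cpu(apicid, start_eip);
	return wakeup_via_init_default(apicid, start_eip);
}

int main(void)
{
	struct apic_sketch numaq_like = { "numaq", wakeup_via_nmi };
	struct apic_sketch flat_like  = { "flat",  NULL };

	wakeup_cpu(&numaq_like, 1, 0x467);
	wakeup_cpu(&flat_like, 2, 0x467);
	return 0;
}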
diff --git a/arch/x86/kernel/apic/probe_32.c b/arch/x86/kernel/apic/probe_32.c
index 3a730fa574bb..141c99a1c264 100644
--- a/arch/x86/kernel/apic/probe_32.c
+++ b/arch/x86/kernel/apic/probe_32.c
@@ -138,7 +138,6 @@ struct apic apic_default = {
138 .send_IPI_all = default_send_IPI_all, 138 .send_IPI_all = default_send_IPI_all,
139 .send_IPI_self = default_send_IPI_self, 139 .send_IPI_self = default_send_IPI_self,
140 140
141 .wakeup_cpu = NULL,
142 .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW, 141 .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW,
143 .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH, 142 .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH,
144 143
@@ -159,6 +158,7 @@ extern struct apic apic_numaq;
159extern struct apic apic_summit; 158extern struct apic apic_summit;
160extern struct apic apic_bigsmp; 159extern struct apic apic_bigsmp;
161extern struct apic apic_es7000; 160extern struct apic apic_es7000;
161extern struct apic apic_es7000_cluster;
162extern struct apic apic_default; 162extern struct apic apic_default;
163 163
164struct apic *apic = &apic_default; 164struct apic *apic = &apic_default;
@@ -176,6 +176,7 @@ static struct apic *apic_probe[] __initdata = {
176#endif 176#endif
177#ifdef CONFIG_X86_ES7000 177#ifdef CONFIG_X86_ES7000
178 &apic_es7000, 178 &apic_es7000,
179 &apic_es7000_cluster,
179#endif 180#endif
180 &apic_default, /* must be last */ 181 &apic_default, /* must be last */
181 NULL, 182 NULL,
@@ -197,9 +198,6 @@ static int __init parse_apic(char *arg)
197 } 198 }
198 } 199 }
199 200
200 if (x86_quirks->update_apic)
201 x86_quirks->update_apic();
202
203 /* Parsed again by __setup for debug/verbose */ 201 /* Parsed again by __setup for debug/verbose */
204 return 0; 202 return 0;
205} 203}
@@ -218,8 +216,6 @@ void __init generic_bigsmp_probe(void)
218 if (!cmdline_apic && apic == &apic_default) { 216 if (!cmdline_apic && apic == &apic_default) {
219 if (apic_bigsmp.probe()) { 217 if (apic_bigsmp.probe()) {
220 apic = &apic_bigsmp; 218 apic = &apic_bigsmp;
221 if (x86_quirks->update_apic)
222 x86_quirks->update_apic();
223 printk(KERN_INFO "Overriding APIC driver with %s\n", 219 printk(KERN_INFO "Overriding APIC driver with %s\n",
224 apic->name); 220 apic->name);
225 } 221 }
@@ -240,9 +236,6 @@ void __init generic_apic_probe(void)
240 /* Not visible without early console */ 236 /* Not visible without early console */
241 if (!apic_probe[i]) 237 if (!apic_probe[i])
242 panic("Didn't find an APIC driver"); 238 panic("Didn't find an APIC driver");
243
244 if (x86_quirks->update_apic)
245 x86_quirks->update_apic();
246 } 239 }
247 printk(KERN_INFO "Using APIC driver %s\n", apic->name); 240 printk(KERN_INFO "Using APIC driver %s\n", apic->name);
248} 241}
@@ -262,8 +255,6 @@ generic_mps_oem_check(struct mpc_table *mpc, char *oem, char *productid)
262 255
263 if (!cmdline_apic) { 256 if (!cmdline_apic) {
264 apic = apic_probe[i]; 257 apic = apic_probe[i];
265 if (x86_quirks->update_apic)
266 x86_quirks->update_apic();
267 printk(KERN_INFO "Switched to APIC driver `%s'.\n", 258 printk(KERN_INFO "Switched to APIC driver `%s'.\n",
268 apic->name); 259 apic->name);
269 } 260 }
@@ -284,8 +275,6 @@ int __init default_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
284 275
285 if (!cmdline_apic) { 276 if (!cmdline_apic) {
286 apic = apic_probe[i]; 277 apic = apic_probe[i];
287 if (x86_quirks->update_apic)
288 x86_quirks->update_apic();
289 printk(KERN_INFO "Switched to APIC driver `%s'.\n", 278 printk(KERN_INFO "Switched to APIC driver `%s'.\n",
290 apic->name); 279 apic->name);
291 } 280 }
diff --git a/arch/x86/kernel/apic/probe_64.c b/arch/x86/kernel/apic/probe_64.c
index e7c163661c77..8d7748efe6a8 100644
--- a/arch/x86/kernel/apic/probe_64.c
+++ b/arch/x86/kernel/apic/probe_64.c
@@ -68,9 +68,6 @@ void __init default_setup_apic_routing(void)
68 apic = &apic_physflat; 68 apic = &apic_physflat;
69 printk(KERN_INFO "Setting APIC routing to %s\n", apic->name); 69 printk(KERN_INFO "Setting APIC routing to %s\n", apic->name);
70 } 70 }
71
72 if (x86_quirks->update_apic)
73 x86_quirks->update_apic();
74} 71}
75 72
76/* Same for both flat and physical. */ 73/* Same for both flat and physical. */
diff --git a/arch/x86/kernel/apic/summit_32.c b/arch/x86/kernel/apic/summit_32.c
index cfe7b09015d8..aac52fa873ff 100644
--- a/arch/x86/kernel/apic/summit_32.c
+++ b/arch/x86/kernel/apic/summit_32.c
@@ -48,7 +48,7 @@
48#include <linux/gfp.h> 48#include <linux/gfp.h>
49#include <linux/smp.h> 49#include <linux/smp.h>
50 50
51static inline unsigned summit_get_apic_id(unsigned long x) 51static unsigned summit_get_apic_id(unsigned long x)
52{ 52{
53 return (x >> 24) & 0xFF; 53 return (x >> 24) & 0xFF;
54} 54}
@@ -58,7 +58,7 @@ static inline void summit_send_IPI_mask(const cpumask_t *mask, int vector)
58 default_send_IPI_mask_sequence_logical(mask, vector); 58 default_send_IPI_mask_sequence_logical(mask, vector);
59} 59}
60 60
61static inline void summit_send_IPI_allbutself(int vector) 61static void summit_send_IPI_allbutself(int vector)
62{ 62{
63 cpumask_t mask = cpu_online_map; 63 cpumask_t mask = cpu_online_map;
64 cpu_clear(smp_processor_id(), mask); 64 cpu_clear(smp_processor_id(), mask);
@@ -67,7 +67,7 @@ static inline void summit_send_IPI_allbutself(int vector)
67 summit_send_IPI_mask(&mask, vector); 67 summit_send_IPI_mask(&mask, vector);
68} 68}
69 69
70static inline void summit_send_IPI_all(int vector) 70static void summit_send_IPI_all(int vector)
71{ 71{
72 summit_send_IPI_mask(&cpu_online_map, vector); 72 summit_send_IPI_mask(&cpu_online_map, vector);
73} 73}
@@ -77,13 +77,13 @@ static inline void summit_send_IPI_all(int vector)
77extern int use_cyclone; 77extern int use_cyclone;
78 78
79#ifdef CONFIG_X86_SUMMIT_NUMA 79#ifdef CONFIG_X86_SUMMIT_NUMA
80extern void setup_summit(void); 80static void setup_summit(void);
81#else 81#else
82#define setup_summit() {} 82static inline void setup_summit(void) {}
83#endif 83#endif
84 84
85static inline int 85static int summit_mps_oem_check(struct mpc_table *mpc, char *oem,
86summit_mps_oem_check(struct mpc_table *mpc, char *oem, char *productid) 86 char *productid)
87{ 87{
88 if (!strncmp(oem, "IBM ENSW", 8) && 88 if (!strncmp(oem, "IBM ENSW", 8) &&
89 (!strncmp(productid, "VIGIL SMP", 9) 89 (!strncmp(productid, "VIGIL SMP", 9)
@@ -98,7 +98,7 @@ summit_mps_oem_check(struct mpc_table *mpc, char *oem, char *productid)
98} 98}
99 99
100/* Hook from generic ACPI tables.c */ 100/* Hook from generic ACPI tables.c */
101static inline int summit_acpi_madt_oem_check(char *oem_id, char *oem_table_id) 101static int summit_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
102{ 102{
103 if (!strncmp(oem_id, "IBM", 3) && 103 if (!strncmp(oem_id, "IBM", 3) &&
104 (!strncmp(oem_table_id, "SERVIGIL", 8) 104 (!strncmp(oem_table_id, "SERVIGIL", 8)
@@ -186,7 +186,7 @@ static inline int is_WPEG(struct rio_detail *rio){
186 186
187#define SUMMIT_APIC_DFR_VALUE (APIC_DFR_CLUSTER) 187#define SUMMIT_APIC_DFR_VALUE (APIC_DFR_CLUSTER)
188 188
189static inline const cpumask_t *summit_target_cpus(void) 189static const cpumask_t *summit_target_cpus(void)
190{ 190{
191 /* CPU_MASK_ALL (0xff) has undefined behaviour with 191 /* CPU_MASK_ALL (0xff) has undefined behaviour with
192 * dest_LowestPrio mode logical clustered apic interrupt routing 192 * dest_LowestPrio mode logical clustered apic interrupt routing
@@ -195,19 +195,18 @@ static inline const cpumask_t *summit_target_cpus(void)
195 return &cpumask_of_cpu(0); 195 return &cpumask_of_cpu(0);
196} 196}
197 197
198static inline unsigned long 198static unsigned long summit_check_apicid_used(physid_mask_t bitmap, int apicid)
199summit_check_apicid_used(physid_mask_t bitmap, int apicid)
200{ 199{
201 return 0; 200 return 0;
202} 201}
203 202
204/* we don't use the phys_cpu_present_map to indicate apicid presence */ 203/* we don't use the phys_cpu_present_map to indicate apicid presence */
205static inline unsigned long summit_check_apicid_present(int bit) 204static unsigned long summit_check_apicid_present(int bit)
206{ 205{
207 return 1; 206 return 1;
208} 207}
209 208
210static inline void summit_init_apic_ldr(void) 209static void summit_init_apic_ldr(void)
211{ 210{
212 unsigned long val, id; 211 unsigned long val, id;
213 int count = 0; 212 int count = 0;
@@ -234,18 +233,18 @@ static inline void summit_init_apic_ldr(void)
234 apic_write(APIC_LDR, val); 233 apic_write(APIC_LDR, val);
235} 234}
236 235
237static inline int summit_apic_id_registered(void) 236static int summit_apic_id_registered(void)
238{ 237{
239 return 1; 238 return 1;
240} 239}
241 240
242static inline void summit_setup_apic_routing(void) 241static void summit_setup_apic_routing(void)
243{ 242{
244 printk("Enabling APIC mode: Summit. Using %d I/O APICs\n", 243 printk("Enabling APIC mode: Summit. Using %d I/O APICs\n",
245 nr_ioapics); 244 nr_ioapics);
246} 245}
247 246
248static inline int summit_apicid_to_node(int logical_apicid) 247static int summit_apicid_to_node(int logical_apicid)
249{ 248{
250#ifdef CONFIG_SMP 249#ifdef CONFIG_SMP
251 return apicid_2_node[hard_smp_processor_id()]; 250 return apicid_2_node[hard_smp_processor_id()];
@@ -266,7 +265,7 @@ static inline int summit_cpu_to_logical_apicid(int cpu)
266#endif 265#endif
267} 266}
268 267
269static inline int summit_cpu_present_to_apicid(int mps_cpu) 268static int summit_cpu_present_to_apicid(int mps_cpu)
270{ 269{
271 if (mps_cpu < nr_cpu_ids) 270 if (mps_cpu < nr_cpu_ids)
272 return (int)per_cpu(x86_bios_cpu_apicid, mps_cpu); 271 return (int)per_cpu(x86_bios_cpu_apicid, mps_cpu);
@@ -274,64 +273,44 @@ static inline int summit_cpu_present_to_apicid(int mps_cpu)
274 return BAD_APICID; 273 return BAD_APICID;
275} 274}
276 275
277static inline physid_mask_t 276static physid_mask_t summit_ioapic_phys_id_map(physid_mask_t phys_id_map)
278summit_ioapic_phys_id_map(physid_mask_t phys_id_map)
279{ 277{
280 /* For clustered we don't have a good way to do this yet - hack */ 278 /* For clustered we don't have a good way to do this yet - hack */
281 return physids_promote(0x0F); 279 return physids_promote(0x0F);
282} 280}
283 281
284static inline physid_mask_t summit_apicid_to_cpu_present(int apicid) 282static physid_mask_t summit_apicid_to_cpu_present(int apicid)
285{ 283{
286 return physid_mask_of_physid(0); 284 return physid_mask_of_physid(0);
287} 285}
288 286
289static inline void summit_setup_portio_remap(void) 287static int summit_check_phys_apicid_present(int boot_cpu_physical_apicid)
290{
291}
292
293static inline int summit_check_phys_apicid_present(int boot_cpu_physical_apicid)
294{ 288{
295 return 1; 289 return 1;
296} 290}
297 291
298static inline unsigned int summit_cpu_mask_to_apicid(const cpumask_t *cpumask) 292static unsigned int summit_cpu_mask_to_apicid(const cpumask_t *cpumask)
299{ 293{
300 int cpus_found = 0; 294 unsigned int round = 0;
301 int num_bits_set; 295 int cpu, apicid = 0;
302 int apicid; 296
303 int cpu;
304
305 num_bits_set = cpus_weight(*cpumask);
306 /* Return id to all */
307 if (num_bits_set >= nr_cpu_ids)
308 return 0xFF;
309 /* 297 /*
310 * The cpus in the mask must all be on the apic cluster. If are not 298 * The cpus in the mask must all be on the apic cluster.
311 * on the same apicid cluster return default value of target_cpus():
312 */ 299 */
313 cpu = first_cpu(*cpumask); 300 for_each_cpu(cpu, cpumask) {
314 apicid = summit_cpu_to_logical_apicid(cpu); 301 int new_apicid = summit_cpu_to_logical_apicid(cpu);
315 302
316 while (cpus_found < num_bits_set) { 303 if (round && APIC_CLUSTER(apicid) != APIC_CLUSTER(new_apicid)) {
317 if (cpu_isset(cpu, *cpumask)) { 304 printk("%s: Not a valid mask!\n", __func__);
318 int new_apicid = summit_cpu_to_logical_apicid(cpu); 305 return BAD_APICID;
319
320 if (APIC_CLUSTER(apicid) != APIC_CLUSTER(new_apicid)) {
321 printk ("%s: Not a valid mask!\n", __func__);
322
323 return 0xFF;
324 }
325 apicid = apicid | new_apicid;
326 cpus_found++;
327 } 306 }
328 cpu++; 307 apicid |= new_apicid;
308 round++;
329 } 309 }
330 return apicid; 310 return apicid;
331} 311}
332 312
333static inline unsigned int 313static unsigned int summit_cpu_mask_to_apicid_and(const struct cpumask *inmask,
334summit_cpu_mask_to_apicid_and(const struct cpumask *inmask,
335 const struct cpumask *andmask) 314 const struct cpumask *andmask)
336{ 315{
337 int apicid = summit_cpu_to_logical_apicid(0); 316 int apicid = summit_cpu_to_logical_apicid(0);
@@ -356,7 +335,7 @@ summit_cpu_mask_to_apicid_and(const struct cpumask *inmask,
356 * 335 *
357 * See Intel's IA-32 SW Dev's Manual Vol2 under CPUID. 336 * See Intel's IA-32 SW Dev's Manual Vol2 under CPUID.
358 */ 337 */
359static inline int summit_phys_pkg_id(int cpuid_apic, int index_msb) 338static int summit_phys_pkg_id(int cpuid_apic, int index_msb)
360{ 339{
361 return hard_smp_processor_id() >> index_msb; 340 return hard_smp_processor_id() >> index_msb;
362} 341}
@@ -381,15 +360,15 @@ static void summit_vector_allocation_domain(int cpu, cpumask_t *retmask)
381} 360}
382 361
383#ifdef CONFIG_X86_SUMMIT_NUMA 362#ifdef CONFIG_X86_SUMMIT_NUMA
384static struct rio_table_hdr *rio_table_hdr __initdata; 363static struct rio_table_hdr *rio_table_hdr;
385static struct scal_detail *scal_devs[MAX_NUMNODES] __initdata; 364static struct scal_detail *scal_devs[MAX_NUMNODES];
386static struct rio_detail *rio_devs[MAX_NUMNODES*4] __initdata; 365static struct rio_detail *rio_devs[MAX_NUMNODES*4];
387 366
388#ifndef CONFIG_X86_NUMAQ 367#ifndef CONFIG_X86_NUMAQ
389static int mp_bus_id_to_node[MAX_MP_BUSSES] __initdata; 368static int mp_bus_id_to_node[MAX_MP_BUSSES];
390#endif 369#endif
391 370
392static int __init setup_pci_node_map_for_wpeg(int wpeg_num, int last_bus) 371static int setup_pci_node_map_for_wpeg(int wpeg_num, int last_bus)
393{ 372{
394 int twister = 0, node = 0; 373 int twister = 0, node = 0;
395 int i, bus, num_buses; 374 int i, bus, num_buses;
@@ -451,7 +430,7 @@ static int __init setup_pci_node_map_for_wpeg(int wpeg_num, int last_bus)
451 return bus; 430 return bus;
452} 431}
453 432
454static int __init build_detail_arrays(void) 433static int build_detail_arrays(void)
455{ 434{
456 unsigned long ptr; 435 unsigned long ptr;
457 int i, scal_detail_size, rio_detail_size; 436 int i, scal_detail_size, rio_detail_size;
@@ -485,7 +464,7 @@ static int __init build_detail_arrays(void)
485 return 1; 464 return 1;
486} 465}
487 466
488void __init setup_summit(void) 467void setup_summit(void)
489{ 468{
490 unsigned long ptr; 469 unsigned long ptr;
491 unsigned short offset; 470 unsigned short offset;
@@ -583,7 +562,6 @@ struct apic apic_summit = {
583 .send_IPI_all = summit_send_IPI_all, 562 .send_IPI_all = summit_send_IPI_all,
584 .send_IPI_self = default_send_IPI_self, 563 .send_IPI_self = default_send_IPI_self,
585 564
586 .wakeup_cpu = NULL,
587 .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW, 565 .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW,
588 .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH, 566 .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH,
589 567
diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
index 354b9c45601d..8fb87b6dd633 100644
--- a/arch/x86/kernel/apic/x2apic_cluster.c
+++ b/arch/x86/kernel/apic/x2apic_cluster.c
@@ -224,7 +224,6 @@ struct apic apic_x2apic_cluster = {
224 .send_IPI_all = x2apic_send_IPI_all, 224 .send_IPI_all = x2apic_send_IPI_all,
225 .send_IPI_self = x2apic_send_IPI_self, 225 .send_IPI_self = x2apic_send_IPI_self,
226 226
227 .wakeup_cpu = NULL,
228 .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW, 227 .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW,
229 .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH, 228 .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH,
230 .wait_for_init_deassert = NULL, 229 .wait_for_init_deassert = NULL,
diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c
index 5bcb174409bc..23625b9f98b2 100644
--- a/arch/x86/kernel/apic/x2apic_phys.c
+++ b/arch/x86/kernel/apic/x2apic_phys.c
@@ -213,7 +213,6 @@ struct apic apic_x2apic_phys = {
213 .send_IPI_all = x2apic_send_IPI_all, 213 .send_IPI_all = x2apic_send_IPI_all,
214 .send_IPI_self = x2apic_send_IPI_self, 214 .send_IPI_self = x2apic_send_IPI_self,
215 215
216 .wakeup_cpu = NULL,
217 .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW, 216 .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW,
218 .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH, 217 .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH,
219 .wait_for_init_deassert = NULL, 218 .wait_for_init_deassert = NULL,
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index 20b4ad07c3a1..1bd6da1f8fad 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -7,28 +7,28 @@
7 * 7 *
8 * Copyright (C) 2007-2008 Silicon Graphics, Inc. All rights reserved. 8 * Copyright (C) 2007-2008 Silicon Graphics, Inc. All rights reserved.
9 */ 9 */
10
11#include <linux/kernel.h>
12#include <linux/threads.h>
13#include <linux/cpu.h>
14#include <linux/cpumask.h> 10#include <linux/cpumask.h>
11#include <linux/hardirq.h>
12#include <linux/proc_fs.h>
13#include <linux/threads.h>
14#include <linux/kernel.h>
15#include <linux/module.h>
15#include <linux/string.h> 16#include <linux/string.h>
16#include <linux/ctype.h> 17#include <linux/ctype.h>
17#include <linux/init.h>
18#include <linux/sched.h> 18#include <linux/sched.h>
19#include <linux/module.h>
20#include <linux/hardirq.h>
21#include <linux/timer.h> 19#include <linux/timer.h>
22#include <linux/proc_fs.h> 20#include <linux/cpu.h>
23#include <asm/current.h> 21#include <linux/init.h>
24#include <asm/smp.h> 22
25#include <asm/apic.h>
26#include <asm/ipi.h>
27#include <asm/pgtable.h>
28#include <asm/uv/uv.h>
29#include <asm/uv/uv_mmrs.h> 23#include <asm/uv/uv_mmrs.h>
30#include <asm/uv/uv_hub.h> 24#include <asm/uv/uv_hub.h>
25#include <asm/current.h>
26#include <asm/pgtable.h>
31#include <asm/uv/bios.h> 27#include <asm/uv/bios.h>
28#include <asm/uv/uv.h>
29#include <asm/apic.h>
30#include <asm/ipi.h>
31#include <asm/smp.h>
32 32
33DEFINE_PER_CPU(int, x2apic_extra_bits); 33DEFINE_PER_CPU(int, x2apic_extra_bits);
34 34
@@ -91,24 +91,28 @@ static void uv_vector_allocation_domain(int cpu, struct cpumask *retmask)
91 cpumask_set_cpu(cpu, retmask); 91 cpumask_set_cpu(cpu, retmask);
92} 92}
93 93
94int uv_wakeup_secondary(int phys_apicid, unsigned int start_rip) 94static int uv_wakeup_secondary(int phys_apicid, unsigned long start_rip)
95{ 95{
96#ifdef CONFIG_SMP
96 unsigned long val; 97 unsigned long val;
97 int pnode; 98 int pnode;
98 99
99 pnode = uv_apicid_to_pnode(phys_apicid); 100 pnode = uv_apicid_to_pnode(phys_apicid);
100 val = (1UL << UVH_IPI_INT_SEND_SHFT) | 101 val = (1UL << UVH_IPI_INT_SEND_SHFT) |
101 (phys_apicid << UVH_IPI_INT_APIC_ID_SHFT) | 102 (phys_apicid << UVH_IPI_INT_APIC_ID_SHFT) |
102 (((long)start_rip << UVH_IPI_INT_VECTOR_SHFT) >> 12) | 103 ((start_rip << UVH_IPI_INT_VECTOR_SHFT) >> 12) |
103 APIC_DM_INIT; 104 APIC_DM_INIT;
104 uv_write_global_mmr64(pnode, UVH_IPI_INT, val); 105 uv_write_global_mmr64(pnode, UVH_IPI_INT, val);
105 mdelay(10); 106 mdelay(10);
106 107
107 val = (1UL << UVH_IPI_INT_SEND_SHFT) | 108 val = (1UL << UVH_IPI_INT_SEND_SHFT) |
108 (phys_apicid << UVH_IPI_INT_APIC_ID_SHFT) | 109 (phys_apicid << UVH_IPI_INT_APIC_ID_SHFT) |
109 (((long)start_rip << UVH_IPI_INT_VECTOR_SHFT) >> 12) | 110 ((start_rip << UVH_IPI_INT_VECTOR_SHFT) >> 12) |
110 APIC_DM_STARTUP; 111 APIC_DM_STARTUP;
111 uv_write_global_mmr64(pnode, UVH_IPI_INT, val); 112 uv_write_global_mmr64(pnode, UVH_IPI_INT, val);
113
114 atomic_set(&init_deasserted, 1);
115#endif
112 return 0; 116 return 0;
113} 117}
114 118
@@ -285,7 +289,7 @@ struct apic apic_x2apic_uv_x = {
285 .send_IPI_all = uv_send_IPI_all, 289 .send_IPI_all = uv_send_IPI_all,
286 .send_IPI_self = uv_send_IPI_self, 290 .send_IPI_self = uv_send_IPI_self,
287 291
288 .wakeup_cpu = NULL, 292 .wakeup_secondary_cpu = uv_wakeup_secondary,
289 .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW, 293 .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW,
290 .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH, 294 .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH,
291 .wait_for_init_deassert = NULL, 295 .wait_for_init_deassert = NULL,
@@ -365,7 +369,7 @@ static __init void map_high(char *id, unsigned long base, int shift,
365 paddr = base << shift; 369 paddr = base << shift;
366 bytes = (1UL << shift) * (max_pnode + 1); 370 bytes = (1UL << shift) * (max_pnode + 1);
367 printk(KERN_INFO "UV: Map %s_HI 0x%lx - 0x%lx\n", id, paddr, 371 printk(KERN_INFO "UV: Map %s_HI 0x%lx - 0x%lx\n", id, paddr,
368 paddr + bytes); 372 paddr + bytes);
369 if (map_type == map_uc) 373 if (map_type == map_uc)
370 init_extra_mapping_uc(paddr, bytes); 374 init_extra_mapping_uc(paddr, bytes);
371 else 375 else
@@ -528,7 +532,7 @@ late_initcall(uv_init_heartbeat);
528 532
529/* 533/*
530 * Called on each cpu to initialize the per_cpu UV data area. 534 * Called on each cpu to initialize the per_cpu UV data area.
531 * ZZZ hotplug not supported yet 535 * FIXME: hotplug not supported yet
532 */ 536 */
533void __cpuinit uv_cpu_init(void) 537void __cpuinit uv_cpu_init(void)
534{ 538{
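
Note: the uv_wakeup_secondary() hunk above packs the target APIC ID, a startup vector derived from start_rip, and a delivery mode into one 64-bit MMR word, written once with APIC_DM_INIT and once with APIC_DM_STARTUP. The stand-alone sketch below only illustrates that bit packing; the shift positions and delivery-mode values (SEND_SHFT, APIC_ID_SHFT, VECTOR_SHFT, DM_INIT, DM_STARTUP) are placeholders, the real ones live in asm/uv/uv_mmrs.h and apicdef.h.

#include <stdio.h>
#include <stdint.h>

/* Placeholder field positions; the real ones live in asm/uv/uv_mmrs.h. */
#define SEND_SHFT    8
#define APIC_ID_SHFT 16
#define VECTOR_SHFT  32
#define DM_INIT      0x0500ULL   /* assumed delivery-mode encodings */
#define DM_STARTUP   0x0600ULL

static uint64_t make_ipi_val(unsigned int apicid, uint64_t start_rip, uint64_t dm)
{
	/* start_rip is page-aligned; the >> 12 turns it into a startup vector. */
	return (1ULL << SEND_SHFT) |
	       ((uint64_t)apicid << APIC_ID_SHFT) |
	       ((start_rip << VECTOR_SHFT) >> 12) |
	       dm;
}

int main(void)
{
	uint64_t rip = 0x9f000;   /* hypothetical trampoline address */

	printf("INIT    word: %#018llx\n",
	       (unsigned long long)make_ipi_val(5, rip, DM_INIT));
	printf("STARTUP word: %#018llx\n",
	       (unsigned long long)make_ipi_val(5, rip, DM_STARTUP));
	return 0;
}
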
diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c
index 01b1244ef1c0..d67e0e48bc2d 100644
--- a/arch/x86/kernel/cpu/proc.c
+++ b/arch/x86/kernel/cpu/proc.c
@@ -7,11 +7,10 @@
7/* 7/*
8 * Get CPU information for use by the procfs. 8 * Get CPU information for use by the procfs.
9 */ 9 */
10#ifdef CONFIG_X86_32
11static void show_cpuinfo_core(struct seq_file *m, struct cpuinfo_x86 *c, 10static void show_cpuinfo_core(struct seq_file *m, struct cpuinfo_x86 *c,
12 unsigned int cpu) 11 unsigned int cpu)
13{ 12{
14#ifdef CONFIG_X86_HT 13#ifdef CONFIG_SMP
15 if (c->x86_max_cores * smp_num_siblings > 1) { 14 if (c->x86_max_cores * smp_num_siblings > 1) {
16 seq_printf(m, "physical id\t: %d\n", c->phys_proc_id); 15 seq_printf(m, "physical id\t: %d\n", c->phys_proc_id);
17 seq_printf(m, "siblings\t: %d\n", 16 seq_printf(m, "siblings\t: %d\n",
@@ -24,6 +23,7 @@ static void show_cpuinfo_core(struct seq_file *m, struct cpuinfo_x86 *c,
24#endif 23#endif
25} 24}
26 25
26#ifdef CONFIG_X86_32
27static void show_cpuinfo_misc(struct seq_file *m, struct cpuinfo_x86 *c) 27static void show_cpuinfo_misc(struct seq_file *m, struct cpuinfo_x86 *c)
28{ 28{
29 /* 29 /*
@@ -50,22 +50,6 @@ static void show_cpuinfo_misc(struct seq_file *m, struct cpuinfo_x86 *c)
50 c->wp_works_ok ? "yes" : "no"); 50 c->wp_works_ok ? "yes" : "no");
51} 51}
52#else 52#else
53static void show_cpuinfo_core(struct seq_file *m, struct cpuinfo_x86 *c,
54 unsigned int cpu)
55{
56#ifdef CONFIG_SMP
57 if (c->x86_max_cores * smp_num_siblings > 1) {
58 seq_printf(m, "physical id\t: %d\n", c->phys_proc_id);
59 seq_printf(m, "siblings\t: %d\n",
60 cpus_weight(per_cpu(cpu_core_map, cpu)));
61 seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id);
62 seq_printf(m, "cpu cores\t: %d\n", c->booted_cores);
63 seq_printf(m, "apicid\t\t: %d\n", c->apicid);
64 seq_printf(m, "initial apicid\t: %d\n", c->initial_apicid);
65 }
66#endif
67}
68
69static void show_cpuinfo_misc(struct seq_file *m, struct cpuinfo_x86 *c) 53static void show_cpuinfo_misc(struct seq_file *m, struct cpuinfo_x86 *c)
70{ 54{
71 seq_printf(m, 55 seq_printf(m,
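
Note: the proc.c hunk above folds the 32-bit and 64-bit copies of show_cpuinfo_core() into one version guarded by CONFIG_SMP, so "physical id", "siblings", "core id" and friends are emitted identically on both. A small user-space sketch that consumes those fields from /proc/cpuinfo (assumes a Linux host; the field names are the ones this code prints):

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/cpuinfo", "r");
	char line[256];
	int cpu = -1;

	if (!f) {
		perror("/proc/cpuinfo");
		return 1;
	}
	while (fgets(line, sizeof(line), f)) {
		int val;

		if (sscanf(line, "processor : %d", &val) == 1)
			cpu = val;                      /* start of a new CPU block */
		else if (sscanf(line, "physical id : %d", &val) == 1)
			printf("cpu %d: package %d\n", cpu, val);
		else if (sscanf(line, "core id : %d", &val) == 1)
			printf("cpu %d: core %d\n", cpu, val);
	}
	fclose(f);
	return 0;
}
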
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index e85826829cf2..508bec1cee27 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -858,6 +858,9 @@ void __init reserve_early_overlap_ok(u64 start, u64 end, char *name)
858 */ 858 */
859void __init reserve_early(u64 start, u64 end, char *name) 859void __init reserve_early(u64 start, u64 end, char *name)
860{ 860{
861 if (start >= end)
862 return;
863
861 drop_overlaps_that_are_ok(start, end); 864 drop_overlaps_that_are_ok(start, end);
862 __reserve_early(start, end, name, 0); 865 __reserve_early(start, end, name, 0);
863} 866}
diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
index e41980a373ab..99c4d308f16b 100644
--- a/arch/x86/kernel/ioport.c
+++ b/arch/x86/kernel/ioport.c
@@ -85,19 +85,8 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
85 85
86 t->io_bitmap_max = bytes; 86 t->io_bitmap_max = bytes;
87 87
88#ifdef CONFIG_X86_32
89 /*
90 * Sets the lazy trigger so that the next I/O operation will
91 * reload the correct bitmap.
92 * Reset the owner so that a process switch will not set
93 * tss->io_bitmap_base to IO_BITMAP_OFFSET.
94 */
95 tss->x86_tss.io_bitmap_base = INVALID_IO_BITMAP_OFFSET_LAZY;
96 tss->io_bitmap_owner = NULL;
97#else
98 /* Update the TSS: */ 88 /* Update the TSS: */
99 memcpy(tss->io_bitmap, t->io_bitmap_ptr, bytes_updated); 89 memcpy(tss->io_bitmap, t->io_bitmap_ptr, bytes_updated);
100#endif
101 90
102 put_cpu(); 91 put_cpu();
103 92
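
Note: with the lazy 32-bit path removed, sys_ioperm() now always copies the updated bitmap straight into the TSS. The user-visible interface is unchanged; a minimal (x86 Linux, root-only) sketch of requesting a port range via ioperm(2):

#include <stdio.h>
#include <sys/io.h>     /* ioperm(), glibc on x86 */

int main(void)
{
	/* Ask for the three legacy parallel-port registers at 0x378. */
	if (ioperm(0x378, 3, 1) < 0) {
		perror("ioperm");   /* needs CAP_SYS_RAWIO / root */
		return 1;
	}
	printf("I/O access to 0x378-0x37a granted\n");
	ioperm(0x378, 3, 0);    /* drop it again */
	return 0;
}
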
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 87b69d4fac16..6afa5232dbb7 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -1,8 +1,8 @@
1#include <linux/errno.h> 1#include <linux/errno.h>
2#include <linux/kernel.h> 2#include <linux/kernel.h>
3#include <linux/mm.h> 3#include <linux/mm.h>
4#include <asm/idle.h>
5#include <linux/smp.h> 4#include <linux/smp.h>
5#include <linux/prctl.h>
6#include <linux/slab.h> 6#include <linux/slab.h>
7#include <linux/sched.h> 7#include <linux/sched.h>
8#include <linux/module.h> 8#include <linux/module.h>
@@ -11,6 +11,9 @@
11#include <linux/ftrace.h> 11#include <linux/ftrace.h>
12#include <asm/system.h> 12#include <asm/system.h>
13#include <asm/apic.h> 13#include <asm/apic.h>
14#include <asm/idle.h>
15#include <asm/uaccess.h>
16#include <asm/i387.h>
14 17
15unsigned long idle_halt; 18unsigned long idle_halt;
16EXPORT_SYMBOL(idle_halt); 19EXPORT_SYMBOL(idle_halt);
@@ -56,6 +59,192 @@ void arch_task_cache_init(void)
56} 59}
57 60
58/* 61/*
62 * Free current thread data structures etc..
63 */
64void exit_thread(void)
65{
66 struct task_struct *me = current;
67 struct thread_struct *t = &me->thread;
68
69 if (me->thread.io_bitmap_ptr) {
70 struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
71
72 kfree(t->io_bitmap_ptr);
73 t->io_bitmap_ptr = NULL;
74 clear_thread_flag(TIF_IO_BITMAP);
75 /*
76 * Careful, clear this in the TSS too:
77 */
78 memset(tss->io_bitmap, 0xff, t->io_bitmap_max);
79 t->io_bitmap_max = 0;
80 put_cpu();
81 }
82
83 ds_exit_thread(current);
84}
85
86void flush_thread(void)
87{
88 struct task_struct *tsk = current;
89
90#ifdef CONFIG_X86_64
91 if (test_tsk_thread_flag(tsk, TIF_ABI_PENDING)) {
92 clear_tsk_thread_flag(tsk, TIF_ABI_PENDING);
93 if (test_tsk_thread_flag(tsk, TIF_IA32)) {
94 clear_tsk_thread_flag(tsk, TIF_IA32);
95 } else {
96 set_tsk_thread_flag(tsk, TIF_IA32);
97 current_thread_info()->status |= TS_COMPAT;
98 }
99 }
100#endif
101
102 clear_tsk_thread_flag(tsk, TIF_DEBUG);
103
104 tsk->thread.debugreg0 = 0;
105 tsk->thread.debugreg1 = 0;
106 tsk->thread.debugreg2 = 0;
107 tsk->thread.debugreg3 = 0;
108 tsk->thread.debugreg6 = 0;
109 tsk->thread.debugreg7 = 0;
110 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
111 /*
112 * Forget coprocessor state..
113 */
114 tsk->fpu_counter = 0;
115 clear_fpu(tsk);
116 clear_used_math();
117}
118
119static void hard_disable_TSC(void)
120{
121 write_cr4(read_cr4() | X86_CR4_TSD);
122}
123
124void disable_TSC(void)
125{
126 preempt_disable();
127 if (!test_and_set_thread_flag(TIF_NOTSC))
128 /*
129 * Must flip the CPU state synchronously with
130 * TIF_NOTSC in the current running context.
131 */
132 hard_disable_TSC();
133 preempt_enable();
134}
135
136static void hard_enable_TSC(void)
137{
138 write_cr4(read_cr4() & ~X86_CR4_TSD);
139}
140
141static void enable_TSC(void)
142{
143 preempt_disable();
144 if (test_and_clear_thread_flag(TIF_NOTSC))
145 /*
146 * Must flip the CPU state synchronously with
147 * TIF_NOTSC in the current running context.
148 */
149 hard_enable_TSC();
150 preempt_enable();
151}
152
153int get_tsc_mode(unsigned long adr)
154{
155 unsigned int val;
156
157 if (test_thread_flag(TIF_NOTSC))
158 val = PR_TSC_SIGSEGV;
159 else
160 val = PR_TSC_ENABLE;
161
162 return put_user(val, (unsigned int __user *)adr);
163}
164
165int set_tsc_mode(unsigned int val)
166{
167 if (val == PR_TSC_SIGSEGV)
168 disable_TSC();
169 else if (val == PR_TSC_ENABLE)
170 enable_TSC();
171 else
172 return -EINVAL;
173
174 return 0;
175}
176
177void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
178 struct tss_struct *tss)
179{
180 struct thread_struct *prev, *next;
181
182 prev = &prev_p->thread;
183 next = &next_p->thread;
184
185 if (test_tsk_thread_flag(next_p, TIF_DS_AREA_MSR) ||
186 test_tsk_thread_flag(prev_p, TIF_DS_AREA_MSR))
187 ds_switch_to(prev_p, next_p);
188 else if (next->debugctlmsr != prev->debugctlmsr)
189 update_debugctlmsr(next->debugctlmsr);
190
191 if (test_tsk_thread_flag(next_p, TIF_DEBUG)) {
192 set_debugreg(next->debugreg0, 0);
193 set_debugreg(next->debugreg1, 1);
194 set_debugreg(next->debugreg2, 2);
195 set_debugreg(next->debugreg3, 3);
196 /* no 4 and 5 */
197 set_debugreg(next->debugreg6, 6);
198 set_debugreg(next->debugreg7, 7);
199 }
200
201 if (test_tsk_thread_flag(prev_p, TIF_NOTSC) ^
202 test_tsk_thread_flag(next_p, TIF_NOTSC)) {
203 /* prev and next are different */
204 if (test_tsk_thread_flag(next_p, TIF_NOTSC))
205 hard_disable_TSC();
206 else
207 hard_enable_TSC();
208 }
209
210 if (test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {
211 /*
212 * Copy the relevant range of the IO bitmap.
213 * Normally this is 128 bytes or less:
214 */
215 memcpy(tss->io_bitmap, next->io_bitmap_ptr,
216 max(prev->io_bitmap_max, next->io_bitmap_max));
217 } else if (test_tsk_thread_flag(prev_p, TIF_IO_BITMAP)) {
218 /*
219 * Clear any possible leftover bits:
220 */
221 memset(tss->io_bitmap, 0xff, prev->io_bitmap_max);
222 }
223}
224
225int sys_fork(struct pt_regs *regs)
226{
227 return do_fork(SIGCHLD, regs->sp, regs, 0, NULL, NULL);
228}
229
230/*
231 * This is trivial, and on the face of it looks like it
232 * could equally well be done in user mode.
233 *
234 * Not so, for quite unobvious reasons - register pressure.
235 * In user mode vfork() cannot have a stack frame, and if
236 * done by calling the "clone()" system call directly, you
237 * do not have enough call-clobbered registers to hold all
238 * the information you need.
239 */
240int sys_vfork(struct pt_regs *regs)
241{
242 return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->sp, regs, 0,
243 NULL, NULL);
244}
245
246
247/*
59 * Idle related variables and functions 248 * Idle related variables and functions
60 */ 249 */
61unsigned long boot_option_idle_override = 0; 250unsigned long boot_option_idle_override = 0;
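
Note: exit_thread(), flush_thread(), the TSC enable/disable helpers and __switch_to_xtra() above are now shared between the 32-bit and 64-bit builds. get_tsc_mode()/set_tsc_mode() in particular back the existing PR_GET_TSC/PR_SET_TSC prctl(2) interface, which this patch does not change; a short user-space example of toggling it:

#include <stdio.h>
#include <sys/prctl.h>   /* prctl(); PR_* constants come from linux/prctl.h */

int main(void)
{
	int mode = 0;

	if (prctl(PR_GET_TSC, &mode) == 0)
		printf("current TSC mode: %s\n",
		       mode == PR_TSC_ENABLE ? "enabled" : "traps with SIGSEGV");

	/* Make rdtsc fault in this task; handy for finding direct TSC users. */
	if (prctl(PR_SET_TSC, PR_TSC_SIGSEGV) != 0)
		perror("PR_SET_TSC");

	/* ... any rdtsc executed here would now raise SIGSEGV ... */

	prctl(PR_SET_TSC, PR_TSC_ENABLE);   /* restore the default */
	return 0;
}
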
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 646da41a620a..14014d766cad 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -230,55 +230,6 @@ int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
230} 230}
231EXPORT_SYMBOL(kernel_thread); 231EXPORT_SYMBOL(kernel_thread);
232 232
233/*
234 * Free current thread data structures etc..
235 */
236void exit_thread(void)
237{
238 /* The process may have allocated an io port bitmap... nuke it. */
239 if (unlikely(test_thread_flag(TIF_IO_BITMAP))) {
240 struct task_struct *tsk = current;
241 struct thread_struct *t = &tsk->thread;
242 int cpu = get_cpu();
243 struct tss_struct *tss = &per_cpu(init_tss, cpu);
244
245 kfree(t->io_bitmap_ptr);
246 t->io_bitmap_ptr = NULL;
247 clear_thread_flag(TIF_IO_BITMAP);
248 /*
249 * Careful, clear this in the TSS too:
250 */
251 memset(tss->io_bitmap, 0xff, tss->io_bitmap_max);
252 t->io_bitmap_max = 0;
253 tss->io_bitmap_owner = NULL;
254 tss->io_bitmap_max = 0;
255 tss->x86_tss.io_bitmap_base = INVALID_IO_BITMAP_OFFSET;
256 put_cpu();
257 }
258
259 ds_exit_thread(current);
260}
261
262void flush_thread(void)
263{
264 struct task_struct *tsk = current;
265
266 tsk->thread.debugreg0 = 0;
267 tsk->thread.debugreg1 = 0;
268 tsk->thread.debugreg2 = 0;
269 tsk->thread.debugreg3 = 0;
270 tsk->thread.debugreg6 = 0;
271 tsk->thread.debugreg7 = 0;
272 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
273 clear_tsk_thread_flag(tsk, TIF_DEBUG);
274 /*
275 * Forget coprocessor state..
276 */
277 tsk->fpu_counter = 0;
278 clear_fpu(tsk);
279 clear_used_math();
280}
281
282void release_thread(struct task_struct *dead_task) 233void release_thread(struct task_struct *dead_task)
283{ 234{
284 BUG_ON(dead_task->mm); 235 BUG_ON(dead_task->mm);
@@ -366,127 +317,6 @@ start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
366} 317}
367EXPORT_SYMBOL_GPL(start_thread); 318EXPORT_SYMBOL_GPL(start_thread);
368 319
369static void hard_disable_TSC(void)
370{
371 write_cr4(read_cr4() | X86_CR4_TSD);
372}
373
374void disable_TSC(void)
375{
376 preempt_disable();
377 if (!test_and_set_thread_flag(TIF_NOTSC))
378 /*
379 * Must flip the CPU state synchronously with
380 * TIF_NOTSC in the current running context.
381 */
382 hard_disable_TSC();
383 preempt_enable();
384}
385
386static void hard_enable_TSC(void)
387{
388 write_cr4(read_cr4() & ~X86_CR4_TSD);
389}
390
391static void enable_TSC(void)
392{
393 preempt_disable();
394 if (test_and_clear_thread_flag(TIF_NOTSC))
395 /*
396 * Must flip the CPU state synchronously with
397 * TIF_NOTSC in the current running context.
398 */
399 hard_enable_TSC();
400 preempt_enable();
401}
402
403int get_tsc_mode(unsigned long adr)
404{
405 unsigned int val;
406
407 if (test_thread_flag(TIF_NOTSC))
408 val = PR_TSC_SIGSEGV;
409 else
410 val = PR_TSC_ENABLE;
411
412 return put_user(val, (unsigned int __user *)adr);
413}
414
415int set_tsc_mode(unsigned int val)
416{
417 if (val == PR_TSC_SIGSEGV)
418 disable_TSC();
419 else if (val == PR_TSC_ENABLE)
420 enable_TSC();
421 else
422 return -EINVAL;
423
424 return 0;
425}
426
427static noinline void
428__switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
429 struct tss_struct *tss)
430{
431 struct thread_struct *prev, *next;
432
433 prev = &prev_p->thread;
434 next = &next_p->thread;
435
436 if (test_tsk_thread_flag(next_p, TIF_DS_AREA_MSR) ||
437 test_tsk_thread_flag(prev_p, TIF_DS_AREA_MSR))
438 ds_switch_to(prev_p, next_p);
439 else if (next->debugctlmsr != prev->debugctlmsr)
440 update_debugctlmsr(next->debugctlmsr);
441
442 if (test_tsk_thread_flag(next_p, TIF_DEBUG)) {
443 set_debugreg(next->debugreg0, 0);
444 set_debugreg(next->debugreg1, 1);
445 set_debugreg(next->debugreg2, 2);
446 set_debugreg(next->debugreg3, 3);
447 /* no 4 and 5 */
448 set_debugreg(next->debugreg6, 6);
449 set_debugreg(next->debugreg7, 7);
450 }
451
452 if (test_tsk_thread_flag(prev_p, TIF_NOTSC) ^
453 test_tsk_thread_flag(next_p, TIF_NOTSC)) {
454 /* prev and next are different */
455 if (test_tsk_thread_flag(next_p, TIF_NOTSC))
456 hard_disable_TSC();
457 else
458 hard_enable_TSC();
459 }
460
461 if (!test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {
462 /*
463 * Disable the bitmap via an invalid offset. We still cache
464 * the previous bitmap owner and the IO bitmap contents:
465 */
466 tss->x86_tss.io_bitmap_base = INVALID_IO_BITMAP_OFFSET;
467 return;
468 }
469
470 if (likely(next == tss->io_bitmap_owner)) {
471 /*
472 * Previous owner of the bitmap (hence the bitmap content)
473 * matches the next task, we dont have to do anything but
474 * to set a valid offset in the TSS:
475 */
476 tss->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET;
477 return;
478 }
479 /*
480 * Lazy TSS's I/O bitmap copy. We set an invalid offset here
481 * and we let the task to get a GPF in case an I/O instruction
482 * is performed. The handler of the GPF will verify that the
483 * faulting task has a valid I/O bitmap and, it true, does the
484 * real copy and restart the instruction. This will save us
485 * redundant copies when the currently switched task does not
486 * perform any I/O during its timeslice.
487 */
488 tss->x86_tss.io_bitmap_base = INVALID_IO_BITMAP_OFFSET_LAZY;
489}
490 320
491/* 321/*
492 * switch_to(x,yn) should switch tasks from x to y. 322 * switch_to(x,yn) should switch tasks from x to y.
@@ -600,11 +430,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
600 return prev_p; 430 return prev_p;
601} 431}
602 432
603int sys_fork(struct pt_regs *regs)
604{
605 return do_fork(SIGCHLD, regs->sp, regs, 0, NULL, NULL);
606}
607
608int sys_clone(struct pt_regs *regs) 433int sys_clone(struct pt_regs *regs)
609{ 434{
610 unsigned long clone_flags; 435 unsigned long clone_flags;
@@ -621,21 +446,6 @@ int sys_clone(struct pt_regs *regs)
621} 446}
622 447
623/* 448/*
624 * This is trivial, and on the face of it looks like it
625 * could equally well be done in user mode.
626 *
627 * Not so, for quite unobvious reasons - register pressure.
628 * In user mode vfork() cannot have a stack frame, and if
629 * done by calling the "clone()" system call directly, you
630 * do not have enough call-clobbered registers to hold all
631 * the information you need.
632 */
633int sys_vfork(struct pt_regs *regs)
634{
635 return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->sp, regs, 0, NULL, NULL);
636}
637
638/*
639 * sys_execve() executes a new program. 449 * sys_execve() executes a new program.
640 */ 450 */
641int sys_execve(struct pt_regs *regs) 451int sys_execve(struct pt_regs *regs)
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 836ef6575f01..abb7e6a7f0c6 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -237,61 +237,6 @@ void show_regs(struct pt_regs *regs)
237 show_trace(NULL, regs, (void *)(regs + 1), regs->bp); 237 show_trace(NULL, regs, (void *)(regs + 1), regs->bp);
238} 238}
239 239
240/*
241 * Free current thread data structures etc..
242 */
243void exit_thread(void)
244{
245 struct task_struct *me = current;
246 struct thread_struct *t = &me->thread;
247
248 if (me->thread.io_bitmap_ptr) {
249 struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
250
251 kfree(t->io_bitmap_ptr);
252 t->io_bitmap_ptr = NULL;
253 clear_thread_flag(TIF_IO_BITMAP);
254 /*
255 * Careful, clear this in the TSS too:
256 */
257 memset(tss->io_bitmap, 0xff, t->io_bitmap_max);
258 t->io_bitmap_max = 0;
259 put_cpu();
260 }
261
262 ds_exit_thread(current);
263}
264
265void flush_thread(void)
266{
267 struct task_struct *tsk = current;
268
269 if (test_tsk_thread_flag(tsk, TIF_ABI_PENDING)) {
270 clear_tsk_thread_flag(tsk, TIF_ABI_PENDING);
271 if (test_tsk_thread_flag(tsk, TIF_IA32)) {
272 clear_tsk_thread_flag(tsk, TIF_IA32);
273 } else {
274 set_tsk_thread_flag(tsk, TIF_IA32);
275 current_thread_info()->status |= TS_COMPAT;
276 }
277 }
278 clear_tsk_thread_flag(tsk, TIF_DEBUG);
279
280 tsk->thread.debugreg0 = 0;
281 tsk->thread.debugreg1 = 0;
282 tsk->thread.debugreg2 = 0;
283 tsk->thread.debugreg3 = 0;
284 tsk->thread.debugreg6 = 0;
285 tsk->thread.debugreg7 = 0;
286 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
287 /*
288 * Forget coprocessor state..
289 */
290 tsk->fpu_counter = 0;
291 clear_fpu(tsk);
292 clear_used_math();
293}
294
295void release_thread(struct task_struct *dead_task) 240void release_thread(struct task_struct *dead_task)
296{ 241{
297 if (dead_task->mm) { 242 if (dead_task->mm) {
@@ -425,118 +370,6 @@ start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
425} 370}
426EXPORT_SYMBOL_GPL(start_thread); 371EXPORT_SYMBOL_GPL(start_thread);
427 372
428static void hard_disable_TSC(void)
429{
430 write_cr4(read_cr4() | X86_CR4_TSD);
431}
432
433void disable_TSC(void)
434{
435 preempt_disable();
436 if (!test_and_set_thread_flag(TIF_NOTSC))
437 /*
438 * Must flip the CPU state synchronously with
439 * TIF_NOTSC in the current running context.
440 */
441 hard_disable_TSC();
442 preempt_enable();
443}
444
445static void hard_enable_TSC(void)
446{
447 write_cr4(read_cr4() & ~X86_CR4_TSD);
448}
449
450static void enable_TSC(void)
451{
452 preempt_disable();
453 if (test_and_clear_thread_flag(TIF_NOTSC))
454 /*
455 * Must flip the CPU state synchronously with
456 * TIF_NOTSC in the current running context.
457 */
458 hard_enable_TSC();
459 preempt_enable();
460}
461
462int get_tsc_mode(unsigned long adr)
463{
464 unsigned int val;
465
466 if (test_thread_flag(TIF_NOTSC))
467 val = PR_TSC_SIGSEGV;
468 else
469 val = PR_TSC_ENABLE;
470
471 return put_user(val, (unsigned int __user *)adr);
472}
473
474int set_tsc_mode(unsigned int val)
475{
476 if (val == PR_TSC_SIGSEGV)
477 disable_TSC();
478 else if (val == PR_TSC_ENABLE)
479 enable_TSC();
480 else
481 return -EINVAL;
482
483 return 0;
484}
485
486/*
487 * This special macro can be used to load a debugging register
488 */
489#define loaddebug(thread, r) set_debugreg(thread->debugreg ## r, r)
490
491static inline void __switch_to_xtra(struct task_struct *prev_p,
492 struct task_struct *next_p,
493 struct tss_struct *tss)
494{
495 struct thread_struct *prev, *next;
496
497 prev = &prev_p->thread,
498 next = &next_p->thread;
499
500 if (test_tsk_thread_flag(next_p, TIF_DS_AREA_MSR) ||
501 test_tsk_thread_flag(prev_p, TIF_DS_AREA_MSR))
502 ds_switch_to(prev_p, next_p);
503 else if (next->debugctlmsr != prev->debugctlmsr)
504 update_debugctlmsr(next->debugctlmsr);
505
506 if (test_tsk_thread_flag(next_p, TIF_DEBUG)) {
507 loaddebug(next, 0);
508 loaddebug(next, 1);
509 loaddebug(next, 2);
510 loaddebug(next, 3);
511 /* no 4 and 5 */
512 loaddebug(next, 6);
513 loaddebug(next, 7);
514 }
515
516 if (test_tsk_thread_flag(prev_p, TIF_NOTSC) ^
517 test_tsk_thread_flag(next_p, TIF_NOTSC)) {
518 /* prev and next are different */
519 if (test_tsk_thread_flag(next_p, TIF_NOTSC))
520 hard_disable_TSC();
521 else
522 hard_enable_TSC();
523 }
524
525 if (test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {
526 /*
527 * Copy the relevant range of the IO bitmap.
528 * Normally this is 128 bytes or less:
529 */
530 memcpy(tss->io_bitmap, next->io_bitmap_ptr,
531 max(prev->io_bitmap_max, next->io_bitmap_max));
532 } else if (test_tsk_thread_flag(prev_p, TIF_IO_BITMAP)) {
533 /*
534 * Clear any possible leftover bits:
535 */
536 memset(tss->io_bitmap, 0xff, prev->io_bitmap_max);
537 }
538}
539
540/* 373/*
541 * switch_to(x,y) should switch tasks from x to y. 374 * switch_to(x,y) should switch tasks from x to y.
542 * 375 *
@@ -694,11 +527,6 @@ void set_personality_64bit(void)
694 current->personality &= ~READ_IMPLIES_EXEC; 527 current->personality &= ~READ_IMPLIES_EXEC;
695} 528}
696 529
697asmlinkage long sys_fork(struct pt_regs *regs)
698{
699 return do_fork(SIGCHLD, regs->sp, regs, 0, NULL, NULL);
700}
701
702asmlinkage long 530asmlinkage long
703sys_clone(unsigned long clone_flags, unsigned long newsp, 531sys_clone(unsigned long clone_flags, unsigned long newsp,
704 void __user *parent_tid, void __user *child_tid, struct pt_regs *regs) 532 void __user *parent_tid, void __user *child_tid, struct pt_regs *regs)
@@ -708,22 +536,6 @@ sys_clone(unsigned long clone_flags, unsigned long newsp,
708 return do_fork(clone_flags, newsp, regs, 0, parent_tid, child_tid); 536 return do_fork(clone_flags, newsp, regs, 0, parent_tid, child_tid);
709} 537}
710 538
711/*
712 * This is trivial, and on the face of it looks like it
713 * could equally well be done in user mode.
714 *
715 * Not so, for quite unobvious reasons - register pressure.
716 * In user mode vfork() cannot have a stack frame, and if
717 * done by calling the "clone()" system call directly, you
718 * do not have enough call-clobbered registers to hold all
719 * the information you need.
720 */
721asmlinkage long sys_vfork(struct pt_regs *regs)
722{
723 return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->sp, regs, 0,
724 NULL, NULL);
725}
726
727unsigned long get_wchan(struct task_struct *p) 539unsigned long get_wchan(struct task_struct *p)
728{ 540{
729 unsigned long stack; 541 unsigned long stack;
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
index fb2159a5c817..3d9672e59c16 100644
--- a/arch/x86/kernel/ptrace.c
+++ b/arch/x86/kernel/ptrace.c
@@ -1383,7 +1383,7 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
1383#ifdef CONFIG_X86_32 1383#ifdef CONFIG_X86_32
1384# define IS_IA32 1 1384# define IS_IA32 1
1385#elif defined CONFIG_IA32_EMULATION 1385#elif defined CONFIG_IA32_EMULATION
1386# define IS_IA32 test_thread_flag(TIF_IA32) 1386# define IS_IA32 is_compat_task()
1387#else 1387#else
1388# define IS_IA32 0 1388# define IS_IA32 0
1389#endif 1389#endif
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 5b85759e7972..4c54bc0d8ff3 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -600,19 +600,7 @@ static int __init setup_elfcorehdr(char *arg)
600early_param("elfcorehdr", setup_elfcorehdr); 600early_param("elfcorehdr", setup_elfcorehdr);
601#endif 601#endif
602 602
603static int __init default_update_apic(void) 603static struct x86_quirks default_x86_quirks __initdata;
604{
605#ifdef CONFIG_SMP
606 if (!apic->wakeup_cpu)
607 apic->wakeup_cpu = wakeup_secondary_cpu_via_init;
608#endif
609
610 return 0;
611}
612
613static struct x86_quirks default_x86_quirks __initdata = {
614 .update_apic = default_update_apic,
615};
616 604
617struct x86_quirks *x86_quirks __initdata = &default_x86_quirks; 605struct x86_quirks *x86_quirks __initdata = &default_x86_quirks;
618 606
@@ -875,9 +863,7 @@ void __init setup_arch(char **cmdline_p)
875 863
876 reserve_initrd(); 864 reserve_initrd();
877 865
878#ifdef CONFIG_X86_64
879 vsmp_init(); 866 vsmp_init();
880#endif
881 867
882 io_delay_init(); 868 io_delay_init();
883 869
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
index 7cdcd16885ed..d2cc6428c587 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -187,40 +187,35 @@ setup_sigcontext(struct sigcontext __user *sc, void __user *fpstate,
187/* 187/*
188 * Set up a signal frame. 188 * Set up a signal frame.
189 */ 189 */
190#ifdef CONFIG_X86_32
191static const struct {
192 u16 poplmovl;
193 u32 val;
194 u16 int80;
195} __attribute__((packed)) retcode = {
196 0xb858, /* popl %eax; movl $..., %eax */
197 __NR_sigreturn,
198 0x80cd, /* int $0x80 */
199};
200
201static const struct {
202 u8 movl;
203 u32 val;
204 u16 int80;
205 u8 pad;
206} __attribute__((packed)) rt_retcode = {
207 0xb8, /* movl $..., %eax */
208 __NR_rt_sigreturn,
209 0x80cd, /* int $0x80 */
210 0
211};
212 190
213/* 191/*
214 * Determine which stack to use.. 192 * Determine which stack to use..
215 */ 193 */
194static unsigned long align_sigframe(unsigned long sp)
195{
196#ifdef CONFIG_X86_32
197 /*
198 * Align the stack pointer according to the i386 ABI,
199 * i.e. so that on function entry ((sp + 4) & 15) == 0.
200 */
201 sp = ((sp + 4) & -16ul) - 4;
202#else /* !CONFIG_X86_32 */
203 sp = round_down(sp, 16) - 8;
204#endif
205 return sp;
206}
207
216static inline void __user * 208static inline void __user *
217get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size, 209get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
218 void **fpstate) 210 void __user **fpstate)
219{ 211{
220 unsigned long sp;
221
222 /* Default to using normal stack */ 212 /* Default to using normal stack */
223 sp = regs->sp; 213 unsigned long sp = regs->sp;
214
215#ifdef CONFIG_X86_64
216 /* redzone */
217 sp -= 128;
218#endif /* CONFIG_X86_64 */
224 219
225 /* 220 /*
226 * If we are on the alternate signal stack and would overflow it, don't. 221 * If we are on the alternate signal stack and would overflow it, don't.
@@ -234,30 +229,52 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
234 if (sas_ss_flags(sp) == 0) 229 if (sas_ss_flags(sp) == 0)
235 sp = current->sas_ss_sp + current->sas_ss_size; 230 sp = current->sas_ss_sp + current->sas_ss_size;
236 } else { 231 } else {
232#ifdef CONFIG_X86_32
237 /* This is the legacy signal stack switching. */ 233 /* This is the legacy signal stack switching. */
238 if ((regs->ss & 0xffff) != __USER_DS && 234 if ((regs->ss & 0xffff) != __USER_DS &&
239 !(ka->sa.sa_flags & SA_RESTORER) && 235 !(ka->sa.sa_flags & SA_RESTORER) &&
240 ka->sa.sa_restorer) 236 ka->sa.sa_restorer)
241 sp = (unsigned long) ka->sa.sa_restorer; 237 sp = (unsigned long) ka->sa.sa_restorer;
238#endif /* CONFIG_X86_32 */
242 } 239 }
243 240
244 if (used_math()) { 241 if (used_math()) {
245 sp = sp - sig_xstate_size; 242 sp -= sig_xstate_size;
246 *fpstate = (struct _fpstate *) sp; 243#ifdef CONFIG_X86_64
244 sp = round_down(sp, 64);
245#endif /* CONFIG_X86_64 */
246 *fpstate = (void __user *)sp;
247
247 if (save_i387_xstate(*fpstate) < 0) 248 if (save_i387_xstate(*fpstate) < 0)
248 return (void __user *)-1L; 249 return (void __user *)-1L;
249 } 250 }
250 251
251 sp -= frame_size; 252 return (void __user *)align_sigframe(sp - frame_size);
252 /*
253 * Align the stack pointer according to the i386 ABI,
254 * i.e. so that on function entry ((sp + 4) & 15) == 0.
255 */
256 sp = ((sp + 4) & -16ul) - 4;
257
258 return (void __user *) sp;
259} 253}
260 254
255#ifdef CONFIG_X86_32
256static const struct {
257 u16 poplmovl;
258 u32 val;
259 u16 int80;
260} __attribute__((packed)) retcode = {
261 0xb858, /* popl %eax; movl $..., %eax */
262 __NR_sigreturn,
263 0x80cd, /* int $0x80 */
264};
265
266static const struct {
267 u8 movl;
268 u32 val;
269 u16 int80;
270 u8 pad;
271} __attribute__((packed)) rt_retcode = {
272 0xb8, /* movl $..., %eax */
273 __NR_rt_sigreturn,
274 0x80cd, /* int $0x80 */
275 0
276};
277
261static int 278static int
262__setup_frame(int sig, struct k_sigaction *ka, sigset_t *set, 279__setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
263 struct pt_regs *regs) 280 struct pt_regs *regs)
@@ -388,24 +405,6 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
388 return 0; 405 return 0;
389} 406}
390#else /* !CONFIG_X86_32 */ 407#else /* !CONFIG_X86_32 */
391/*
392 * Determine which stack to use..
393 */
394static void __user *
395get_stack(struct k_sigaction *ka, unsigned long sp, unsigned long size)
396{
397 /* Default to using normal stack - redzone*/
398 sp -= 128;
399
400 /* This is the X/Open sanctioned signal stack switching. */
401 if (ka->sa.sa_flags & SA_ONSTACK) {
402 if (sas_ss_flags(sp) == 0)
403 sp = current->sas_ss_sp + current->sas_ss_size;
404 }
405
406 return (void __user *)round_down(sp - size, 64);
407}
408
409static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, 408static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
410 sigset_t *set, struct pt_regs *regs) 409 sigset_t *set, struct pt_regs *regs)
411{ 410{
@@ -414,15 +413,7 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
414 int err = 0; 413 int err = 0;
415 struct task_struct *me = current; 414 struct task_struct *me = current;
416 415
417 if (used_math()) { 416 frame = get_sigframe(ka, regs, sizeof(struct rt_sigframe), &fp);
418 fp = get_stack(ka, regs->sp, sig_xstate_size);
419 frame = (void __user *)round_down(
420 (unsigned long)fp - sizeof(struct rt_sigframe), 16) - 8;
421
422 if (save_i387_xstate(fp) < 0)
423 return -EFAULT;
424 } else
425 frame = get_stack(ka, regs->sp, sizeof(struct rt_sigframe)) - 8;
426 417
427 if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) 418 if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
428 return -EFAULT; 419 return -EFAULT;
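
Note: align_sigframe() above carries the alignment rule that used to be duplicated: on i386 the frame must leave ((sp + 4) & 15) == 0 at handler entry, on x86_64 the stack is rounded down to 16 bytes with 8 subtracted for the return-address slot. A stand-alone user-space sketch of the same arithmetic, with both variants side by side for illustration:

#include <assert.h>
#include <stdio.h>

static unsigned long align_sigframe_32(unsigned long sp)
{
	/* i386 ABI: on function entry ((sp + 4) & 15) must be 0 */
	return ((sp + 4) & -16UL) - 4;
}

static unsigned long align_sigframe_64(unsigned long sp)
{
	/* x86_64: 16-byte alignment, minus 8 for the return-address slot */
	return (sp & ~15UL) - 8;
}

int main(void)
{
	unsigned long sp = 0xbfff1234UL;

	printf("32-bit: %#lx -> %#lx\n", sp, align_sigframe_32(sp));
	printf("64-bit: %#lx -> %#lx\n", sp, align_sigframe_64(sp));

	assert(((align_sigframe_32(sp) + 4) & 15) == 0);
	assert((align_sigframe_64(sp) + 8) % 16 == 0);
	return 0;
}
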
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 9ce666387f37..249334f5080a 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -112,7 +112,7 @@ EXPORT_PER_CPU_SYMBOL(cpu_core_map);
112DEFINE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info); 112DEFINE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
113EXPORT_PER_CPU_SYMBOL(cpu_info); 113EXPORT_PER_CPU_SYMBOL(cpu_info);
114 114
115static atomic_t init_deasserted; 115atomic_t init_deasserted;
116 116
117 117
118/* Set if we find a B stepping CPU */ 118/* Set if we find a B stepping CPU */
@@ -614,12 +614,6 @@ wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip)
614 unsigned long send_status, accept_status = 0; 614 unsigned long send_status, accept_status = 0;
615 int maxlvt, num_starts, j; 615 int maxlvt, num_starts, j;
616 616
617 if (get_uv_system_type() == UV_NON_UNIQUE_APIC) {
618 send_status = uv_wakeup_secondary(phys_apicid, start_eip);
619 atomic_set(&init_deasserted, 1);
620 return send_status;
621 }
622
623 maxlvt = lapic_get_maxlvt(); 617 maxlvt = lapic_get_maxlvt();
624 618
625 /* 619 /*
@@ -748,7 +742,8 @@ static void __cpuinit do_fork_idle(struct work_struct *work)
748/* 742/*
749 * NOTE - on most systems this is a PHYSICAL apic ID, but on multiquad 743 * NOTE - on most systems this is a PHYSICAL apic ID, but on multiquad
750 * (ie clustered apic addressing mode), this is a LOGICAL apic ID. 744 * (ie clustered apic addressing mode), this is a LOGICAL apic ID.
751 * Returns zero if CPU booted OK, else error code from ->wakeup_cpu. 745 * Returns zero if CPU booted OK, else error code from
746 * ->wakeup_secondary_cpu.
752 */ 747 */
753static int __cpuinit do_boot_cpu(int apicid, int cpu) 748static int __cpuinit do_boot_cpu(int apicid, int cpu)
754{ 749{
@@ -835,9 +830,13 @@ do_rest:
835 } 830 }
836 831
837 /* 832 /*
838 * Starting actual IPI sequence... 833 * Kick the secondary CPU. Use the method in the APIC driver
834 * if it's defined - or use an INIT boot APIC message otherwise:
839 */ 835 */
840 boot_error = apic->wakeup_cpu(apicid, start_ip); 836 if (apic->wakeup_secondary_cpu)
837 boot_error = apic->wakeup_secondary_cpu(apicid, start_ip);
838 else
839 boot_error = wakeup_secondary_cpu_via_init(apicid, start_ip);
841 840
842 if (!boot_error) { 841 if (!boot_error) {
843 /* 842 /*
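
Note: do_boot_cpu() above replaces the always-present ->wakeup_cpu pointer with an optional ->wakeup_secondary_cpu hook: if the APIC driver supplies one (as UV now does) it is used, otherwise the generic INIT/SIPI sequence runs. A tiny sketch of that "optional override with default fallback" pattern, using hypothetical names:

#include <stdio.h>

struct cpu_ops {
	/* Optional override; NULL means "use the generic method". */
	int (*wakeup_secondary)(int apicid, unsigned long start_ip);
};

static int generic_wakeup(int apicid, unsigned long start_ip)
{
	printf("generic INIT/SIPI wakeup of apicid %d at %#lx\n", apicid, start_ip);
	return 0;
}

static int special_wakeup(int apicid, unsigned long start_ip)
{
	printf("platform-specific wakeup of apicid %d at %#lx\n", apicid, start_ip);
	return 0;
}

static int boot_cpu(const struct cpu_ops *ops, int apicid, unsigned long ip)
{
	if (ops->wakeup_secondary)
		return ops->wakeup_secondary(apicid, ip);
	return generic_wakeup(apicid, ip);
}

int main(void)
{
	struct cpu_ops plain = { .wakeup_secondary = NULL };
	struct cpu_ops uv    = { .wakeup_secondary = special_wakeup };

	boot_cpu(&plain, 1, 0x9f000);
	boot_cpu(&uv, 2, 0x9f000);
	return 0;
}
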
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index c05430ac1b44..a1d288327ff0 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -118,47 +118,6 @@ die_if_kernel(const char *str, struct pt_regs *regs, long err)
118 if (!user_mode_vm(regs)) 118 if (!user_mode_vm(regs))
119 die(str, regs, err); 119 die(str, regs, err);
120} 120}
121
122/*
123 * Perform the lazy TSS's I/O bitmap copy. If the TSS has an
124 * invalid offset set (the LAZY one) and the faulting thread has
125 * a valid I/O bitmap pointer, we copy the I/O bitmap in the TSS,
126 * we set the offset field correctly and return 1.
127 */
128static int lazy_iobitmap_copy(void)
129{
130 struct thread_struct *thread;
131 struct tss_struct *tss;
132 int cpu;
133
134 cpu = get_cpu();
135 tss = &per_cpu(init_tss, cpu);
136 thread = &current->thread;
137
138 if (tss->x86_tss.io_bitmap_base == INVALID_IO_BITMAP_OFFSET_LAZY &&
139 thread->io_bitmap_ptr) {
140 memcpy(tss->io_bitmap, thread->io_bitmap_ptr,
141 thread->io_bitmap_max);
142 /*
143 * If the previously set map was extending to higher ports
144 * than the current one, pad extra space with 0xff (no access).
145 */
146 if (thread->io_bitmap_max < tss->io_bitmap_max) {
147 memset((char *) tss->io_bitmap +
148 thread->io_bitmap_max, 0xff,
149 tss->io_bitmap_max - thread->io_bitmap_max);
150 }
151 tss->io_bitmap_max = thread->io_bitmap_max;
152 tss->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET;
153 tss->io_bitmap_owner = thread;
154 put_cpu();
155
156 return 1;
157 }
158 put_cpu();
159
160 return 0;
161}
162#endif 121#endif
163 122
164static void __kprobes 123static void __kprobes
@@ -309,11 +268,6 @@ do_general_protection(struct pt_regs *regs, long error_code)
309 conditional_sti(regs); 268 conditional_sti(regs);
310 269
311#ifdef CONFIG_X86_32 270#ifdef CONFIG_X86_32
312 if (lazy_iobitmap_copy()) {
313 /* restart the faulting instruction */
314 return;
315 }
316
317 if (regs->flags & X86_VM_MASK) 271 if (regs->flags & X86_VM_MASK)
318 goto gp_in_vm86; 272 goto gp_in_vm86;
319#endif 273#endif
diff --git a/arch/x86/kernel/vsmp_64.c b/arch/x86/kernel/vsmp_64.c
index c609205df594..74de562812cc 100644
--- a/arch/x86/kernel/vsmp_64.c
+++ b/arch/x86/kernel/vsmp_64.c
@@ -22,7 +22,7 @@
22#include <asm/paravirt.h> 22#include <asm/paravirt.h>
23#include <asm/setup.h> 23#include <asm/setup.h>
24 24
25#if defined CONFIG_PCI && defined CONFIG_PARAVIRT 25#ifdef CONFIG_PARAVIRT
26/* 26/*
27 * Interrupt control on vSMPowered systems: 27 * Interrupt control on vSMPowered systems:
28 * ~AC is a shadow of IF. If IF is 'on' AC should be 'off' 28 * ~AC is a shadow of IF. If IF is 'on' AC should be 'off'
@@ -114,7 +114,6 @@ static void __init set_vsmp_pv_ops(void)
114} 114}
115#endif 115#endif
116 116
117#ifdef CONFIG_PCI
118static int is_vsmp = -1; 117static int is_vsmp = -1;
119 118
120static void __init detect_vsmp_box(void) 119static void __init detect_vsmp_box(void)
@@ -139,15 +138,6 @@ int is_vsmp_box(void)
139 return 0; 138 return 0;
140 } 139 }
141} 140}
142#else
143static void __init detect_vsmp_box(void)
144{
145}
146int is_vsmp_box(void)
147{
148 return 0;
149}
150#endif
151 141
152void __init vsmp_init(void) 142void __init vsmp_init(void)
153{ 143{
diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile
index 2b938a384910..08537747cb58 100644
--- a/arch/x86/mm/Makefile
+++ b/arch/x86/mm/Makefile
@@ -1,4 +1,4 @@
1obj-y := init_$(BITS).o fault.o ioremap.o extable.o pageattr.o mmap.o \ 1obj-y := init.o init_$(BITS).o fault.o ioremap.o extable.o pageattr.o mmap.o \
2 pat.o pgtable.o gup.o 2 pat.o pgtable.o gup.o
3 3
4obj-$(CONFIG_SMP) += tlb.o 4obj-$(CONFIG_SMP) += tlb.o
diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
index bcc079c282dd..00f127c80b0e 100644
--- a/arch/x86/mm/highmem_32.c
+++ b/arch/x86/mm/highmem_32.c
@@ -1,5 +1,6 @@
1#include <linux/highmem.h> 1#include <linux/highmem.h>
2#include <linux/module.h> 2#include <linux/module.h>
3#include <linux/swap.h> /* for totalram_pages */
3 4
4void *kmap(struct page *page) 5void *kmap(struct page *page)
5{ 6{
@@ -156,3 +157,36 @@ EXPORT_SYMBOL(kmap);
156EXPORT_SYMBOL(kunmap); 157EXPORT_SYMBOL(kunmap);
157EXPORT_SYMBOL(kmap_atomic); 158EXPORT_SYMBOL(kmap_atomic);
158EXPORT_SYMBOL(kunmap_atomic); 159EXPORT_SYMBOL(kunmap_atomic);
160
161#ifdef CONFIG_NUMA
162void __init set_highmem_pages_init(void)
163{
164 struct zone *zone;
165 int nid;
166
167 for_each_zone(zone) {
168 unsigned long zone_start_pfn, zone_end_pfn;
169
170 if (!is_highmem(zone))
171 continue;
172
173 zone_start_pfn = zone->zone_start_pfn;
174 zone_end_pfn = zone_start_pfn + zone->spanned_pages;
175
176 nid = zone_to_nid(zone);
177 printk(KERN_INFO "Initializing %s for node %d (%08lx:%08lx)\n",
178 zone->name, nid, zone_start_pfn, zone_end_pfn);
179
180 add_highpages_with_active_regions(nid, zone_start_pfn,
181 zone_end_pfn);
182 }
183 totalram_pages += totalhigh_pages;
184}
185#else
186void __init set_highmem_pages_init(void)
187{
188 add_highpages_with_active_regions(0, highstart_pfn, highend_pfn);
189
190 totalram_pages += totalhigh_pages;
191}
192#endif /* CONFIG_NUMA */
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
new file mode 100644
index 000000000000..ce6a722587d8
--- /dev/null
+++ b/arch/x86/mm/init.c
@@ -0,0 +1,49 @@
1#include <linux/swap.h>
2#include <asm/cacheflush.h>
3#include <asm/page.h>
4#include <asm/sections.h>
5#include <asm/system.h>
6
7void free_init_pages(char *what, unsigned long begin, unsigned long end)
8{
9 unsigned long addr = begin;
10
11 if (addr >= end)
12 return;
13
14 /*
15 * If debugging page accesses then do not free this memory but
16 * mark them not present - any buggy init-section access will
17 * create a kernel page fault:
18 */
19#ifdef CONFIG_DEBUG_PAGEALLOC
20 printk(KERN_INFO "debug: unmapping init memory %08lx..%08lx\n",
21 begin, PAGE_ALIGN(end));
22 set_memory_np(begin, (end - begin) >> PAGE_SHIFT);
23#else
24 /*
25 * We just marked the kernel text read only above, now that
26 * we are going to free part of that, we need to make that
27 * writeable first.
28 */
29 set_memory_rw(begin, (end - begin) >> PAGE_SHIFT);
30
31 printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);
32
33 for (; addr < end; addr += PAGE_SIZE) {
34 ClearPageReserved(virt_to_page(addr));
35 init_page_count(virt_to_page(addr));
36 memset((void *)(addr & ~(PAGE_SIZE-1)),
37 POISON_FREE_INITMEM, PAGE_SIZE);
38 free_page(addr);
39 totalram_pages++;
40 }
41#endif
42}
43
44void free_initmem(void)
45{
46 free_init_pages("unused kernel memory",
47 (unsigned long)(&__init_begin),
48 (unsigned long)(&__init_end));
49}
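
Note: the new arch/x86/mm/init.c above hosts the shared free_init_pages(): walk the region page by page, poison each page, then hand it back. A rough user-space analogue of that walk over a heap buffer standing in for the init section; PAGE_SIZE and POISON are stand-ins here and the begin/end addresses are assumed page-aligned:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE 4096UL
#define POISON    0xcc          /* stand-in for POISON_FREE_INITMEM */

static void free_init_pages(const char *what, unsigned long begin, unsigned long end)
{
	unsigned long addr;

	if (begin >= end)
		return;

	for (addr = begin; addr < end; addr += PAGE_SIZE)
		memset((void *)addr, POISON, PAGE_SIZE);   /* poison, then release */

	printf("Freeing %s: %luk freed\n", what, (end - begin) >> 10);
}

int main(void)
{
	unsigned long size = 8 * PAGE_SIZE;
	void *buf = aligned_alloc(PAGE_SIZE, size);    /* pretend init section */

	if (!buf)
		return 1;
	free_init_pages("unused kernel memory",
			(unsigned long)buf, (unsigned long)buf + size);
	free(buf);
	return 0;
}
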
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index ef0bb941cdf5..47df0e1bbeb9 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -50,8 +50,6 @@
50#include <asm/setup.h> 50#include <asm/setup.h>
51#include <asm/cacheflush.h> 51#include <asm/cacheflush.h>
52 52
53unsigned int __VMALLOC_RESERVE = 128 << 20;
54
55unsigned long max_low_pfn_mapped; 53unsigned long max_low_pfn_mapped;
56unsigned long max_pfn_mapped; 54unsigned long max_pfn_mapped;
57 55
@@ -486,22 +484,10 @@ void __init add_highpages_with_active_regions(int nid, unsigned long start_pfn,
486 work_with_active_regions(nid, add_highpages_work_fn, &data); 484 work_with_active_regions(nid, add_highpages_work_fn, &data);
487} 485}
488 486
489#ifndef CONFIG_NUMA
490static void __init set_highmem_pages_init(void)
491{
492 add_highpages_with_active_regions(0, highstart_pfn, highend_pfn);
493
494 totalram_pages += totalhigh_pages;
495}
496#endif /* !CONFIG_NUMA */
497
498#else 487#else
499static inline void permanent_kmaps_init(pgd_t *pgd_base) 488static inline void permanent_kmaps_init(pgd_t *pgd_base)
500{ 489{
501} 490}
502static inline void set_highmem_pages_init(void)
503{
504}
505#endif /* CONFIG_HIGHMEM */ 491#endif /* CONFIG_HIGHMEM */
506 492
507void __init native_pagetable_setup_start(pgd_t *base) 493void __init native_pagetable_setup_start(pgd_t *base)
@@ -864,10 +850,10 @@ static void __init find_early_table_space(unsigned long end, int use_pse)
864 unsigned long puds, pmds, ptes, tables, start; 850 unsigned long puds, pmds, ptes, tables, start;
865 851
866 puds = (end + PUD_SIZE - 1) >> PUD_SHIFT; 852 puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
867 tables = PAGE_ALIGN(puds * sizeof(pud_t)); 853 tables = roundup(puds * sizeof(pud_t), PAGE_SIZE);
868 854
869 pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT; 855 pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
870 tables += PAGE_ALIGN(pmds * sizeof(pmd_t)); 856 tables += roundup(pmds * sizeof(pmd_t), PAGE_SIZE);
871 857
872 if (use_pse) { 858 if (use_pse) {
873 unsigned long extra; 859 unsigned long extra;
@@ -878,10 +864,10 @@ static void __init find_early_table_space(unsigned long end, int use_pse)
878 } else 864 } else
879 ptes = (end + PAGE_SIZE - 1) >> PAGE_SHIFT; 865 ptes = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;
880 866
881 tables += PAGE_ALIGN(ptes * sizeof(pte_t)); 867 tables += roundup(ptes * sizeof(pte_t), PAGE_SIZE);
882 868
883 /* for fixmap */ 869 /* for fixmap */
884 tables += PAGE_ALIGN(__end_of_fixed_addresses * sizeof(pte_t)); 870 tables += roundup(__end_of_fixed_addresses * sizeof(pte_t), PAGE_SIZE);
885 871
886 /* 872 /*
887 * RED-PEN putting page tables only on node 0 could 873 * RED-PEN putting page tables only on node 0 could
@@ -1231,45 +1217,6 @@ void mark_rodata_ro(void)
1231} 1217}
1232#endif 1218#endif
1233 1219
1234void free_init_pages(char *what, unsigned long begin, unsigned long end)
1235{
1236#ifdef CONFIG_DEBUG_PAGEALLOC
1237 /*
1238 * If debugging page accesses then do not free this memory but
1239 * mark them not present - any buggy init-section access will
1240 * create a kernel page fault:
1241 */
1242 printk(KERN_INFO "debug: unmapping init memory %08lx..%08lx\n",
1243 begin, PAGE_ALIGN(end));
1244 set_memory_np(begin, (end - begin) >> PAGE_SHIFT);
1245#else
1246 unsigned long addr;
1247
1248 /*
1249 * We just marked the kernel text read only above, now that
1250 * we are going to free part of that, we need to make that
1251 * writeable first.
1252 */
1253 set_memory_rw(begin, (end - begin) >> PAGE_SHIFT);
1254
1255 for (addr = begin; addr < end; addr += PAGE_SIZE) {
1256 ClearPageReserved(virt_to_page(addr));
1257 init_page_count(virt_to_page(addr));
1258 memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
1259 free_page(addr);
1260 totalram_pages++;
1261 }
1262 printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);
1263#endif
1264}
1265
1266void free_initmem(void)
1267{
1268 free_init_pages("unused kernel memory",
1269 (unsigned long)(&__init_begin),
1270 (unsigned long)(&__init_end));
1271}
1272
1273#ifdef CONFIG_BLK_DEV_INITRD 1220#ifdef CONFIG_BLK_DEV_INITRD
1274void free_initrd_mem(unsigned long start, unsigned long end) 1221void free_initrd_mem(unsigned long start, unsigned long end)
1275{ 1222{
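
Note: find_early_table_space() above switches from PAGE_ALIGN() to roundup(..., PAGE_SIZE) when sizing the early page-table area; for page granularity the two are equivalent, roundup() just states the unit explicitly. A quick check of that equivalence, with simplified versions of the kernel macros:

#include <assert.h>
#include <stdio.h>

#define PAGE_SIZE      4096UL
#define PAGE_MASK      (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x)  (((x) + PAGE_SIZE - 1) & PAGE_MASK)
#define roundup(x, y)  ((((x) + (y) - 1) / (y)) * (y))

int main(void)
{
	unsigned long sizes[] = { 0, 1, 4095, 4096, 4097, 123456 };
	unsigned long i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
		unsigned long v = sizes[i];

		assert(PAGE_ALIGN(v) == roundup(v, PAGE_SIZE));
		printf("%8lu -> %8lu\n", v, roundup(v, PAGE_SIZE));
	}
	return 0;
}
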
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 7d4e76da3368..11981fc8570a 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -748,6 +748,8 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
748 pos = start_pfn << PAGE_SHIFT; 748 pos = start_pfn << PAGE_SHIFT;
749 end_pfn = ((pos + (PMD_SIZE - 1)) >> PMD_SHIFT) 749 end_pfn = ((pos + (PMD_SIZE - 1)) >> PMD_SHIFT)
750 << (PMD_SHIFT - PAGE_SHIFT); 750 << (PMD_SHIFT - PAGE_SHIFT);
751 if (end_pfn > (end >> PAGE_SHIFT))
752 end_pfn = end >> PAGE_SHIFT;
751 if (start_pfn < end_pfn) { 753 if (start_pfn < end_pfn) {
752 nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0); 754 nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);
753 pos = end_pfn << PAGE_SHIFT; 755 pos = end_pfn << PAGE_SHIFT;
@@ -979,43 +981,6 @@ void __init mem_init(void)
979 initsize >> 10); 981 initsize >> 10);
980} 982}
981 983
982void free_init_pages(char *what, unsigned long begin, unsigned long end)
983{
984 unsigned long addr = begin;
985
986 if (addr >= end)
987 return;
988
989 /*
990 * If debugging page accesses then do not free this memory but
991 * mark them not present - any buggy init-section access will
992 * create a kernel page fault:
993 */
994#ifdef CONFIG_DEBUG_PAGEALLOC
995 printk(KERN_INFO "debug: unmapping init memory %08lx..%08lx\n",
996 begin, PAGE_ALIGN(end));
997 set_memory_np(begin, (end - begin) >> PAGE_SHIFT);
998#else
999 printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);
1000
1001 for (; addr < end; addr += PAGE_SIZE) {
1002 ClearPageReserved(virt_to_page(addr));
1003 init_page_count(virt_to_page(addr));
1004 memset((void *)(addr & ~(PAGE_SIZE-1)),
1005 POISON_FREE_INITMEM, PAGE_SIZE);
1006 free_page(addr);
1007 totalram_pages++;
1008 }
1009#endif
1010}
1011
1012void free_initmem(void)
1013{
1014 free_init_pages("unused kernel memory",
1015 (unsigned long)(&__init_begin),
1016 (unsigned long)(&__init_end));
1017}
1018
1019#ifdef CONFIG_DEBUG_RODATA 984#ifdef CONFIG_DEBUG_RODATA
1020const int rodata_test_data = 0xC3; 985const int rodata_test_data = 0xC3;
1021EXPORT_SYMBOL_GPL(rodata_test_data); 986EXPORT_SYMBOL_GPL(rodata_test_data);
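
Note: the init_memory_mapping() hunk above rounds the head of a range up to the next 2 MiB (PMD) boundary and then clamps the result so it never runs past the requested end. A small sketch of that round-up-then-clamp step, with the shift values written out for the usual 4 KiB page / 2 MiB large-page layout:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PMD_SHIFT  21            /* 2 MiB large pages */
#define PMD_SIZE   (1UL << PMD_SHIFT)

int main(void)
{
	unsigned long pos = 0x00123000UL;    /* current position (bytes) */
	unsigned long end = 0x00180000UL;    /* end of the mapping request */
	unsigned long end_pfn;

	/* Round the head up to the next PMD boundary, expressed in pages... */
	end_pfn = ((pos + (PMD_SIZE - 1)) >> PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);

	/* ...but never beyond the caller's end. */
	if (end_pfn > (end >> PAGE_SHIFT))
		end_pfn = end >> PAGE_SHIFT;

	printf("head mapped with 4k pages up to pfn %#lx (byte %#lx)\n",
	       end_pfn, end_pfn << PAGE_SHIFT);
	return 0;
}
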
diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
index ca53224fc56c..04102d42ff42 100644
--- a/arch/x86/mm/iomap_32.c
+++ b/arch/x86/mm/iomap_32.c
@@ -20,6 +20,17 @@
20#include <asm/pat.h> 20#include <asm/pat.h>
21#include <linux/module.h> 21#include <linux/module.h>
22 22
23int is_io_mapping_possible(resource_size_t base, unsigned long size)
24{
25#ifndef CONFIG_X86_PAE
26 /* There is no way to map greater than 1 << 32 address without PAE */
27 if (base + size > 0x100000000ULL)
28 return 0;
29#endif
30 return 1;
31}
32EXPORT_SYMBOL_GPL(is_io_mapping_possible);
33
23/* Map 'pfn' using fixed map 'type' and protections 'prot' 34/* Map 'pfn' using fixed map 'type' and protections 'prot'
24 */ 35 */
25void * 36void *
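
Note: the new is_io_mapping_possible() above refuses ranges that end above 4 GiB when PAE is not available, since a non-PAE 32-bit page table cannot address them. The same check in stand-alone form, done in 64-bit arithmetic to avoid overflow:

#include <stdio.h>
#include <stdint.h>

/* Returns 1 if [base, base+size) is fully addressable without PAE. */
static int io_mapping_possible_nopae(uint64_t base, uint64_t size)
{
	if (base + size > 0x100000000ULL)   /* anything above 4 GiB needs PAE */
		return 0;
	return 1;
}

int main(void)
{
	printf("%d\n", io_mapping_possible_nopae(0xe0000000ULL, 0x10000000ULL)); /* 1 */
	printf("%d\n", io_mapping_possible_nopae(0xf0000000ULL, 0x20000000ULL)); /* 0 */
	return 0;
}
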
diff --git a/arch/x86/mm/memtest.c b/arch/x86/mm/memtest.c
index 9cab18b0b857..0bcd7883d036 100644
--- a/arch/x86/mm/memtest.c
+++ b/arch/x86/mm/memtest.c
@@ -9,44 +9,44 @@
9 9
10#include <asm/e820.h> 10#include <asm/e820.h>
11 11
12static void __init memtest(unsigned long start_phys, unsigned long size, 12static u64 patterns[] __initdata = {
13 unsigned pattern) 13 0,
14 0xffffffffffffffffULL,
15 0x5555555555555555ULL,
16 0xaaaaaaaaaaaaaaaaULL,
17 0x1111111111111111ULL,
18 0x2222222222222222ULL,
19 0x4444444444444444ULL,
20 0x8888888888888888ULL,
21 0x3333333333333333ULL,
22 0x6666666666666666ULL,
23 0x9999999999999999ULL,
24 0xccccccccccccccccULL,
25 0x7777777777777777ULL,
26 0xbbbbbbbbbbbbbbbbULL,
27 0xddddddddddddddddULL,
28 0xeeeeeeeeeeeeeeeeULL,
29 0x7a6c7258554e494cULL, /* yeah ;-) */
30};
31
32static void __init reserve_bad_mem(u64 pattern, u64 start_bad, u64 end_bad)
14{ 33{
15 unsigned long i; 34 printk(KERN_INFO " %016llx bad mem addr %010llx - %010llx reserved\n",
16 unsigned long *start; 35 (unsigned long long) pattern,
17 unsigned long start_bad; 36 (unsigned long long) start_bad,
18 unsigned long last_bad; 37 (unsigned long long) end_bad);
19 unsigned long val; 38 reserve_early(start_bad, end_bad, "BAD RAM");
20 unsigned long start_phys_aligned; 39}
21 unsigned long count;
22 unsigned long incr;
23
24 switch (pattern) {
25 case 0:
26 val = 0UL;
27 break;
28 case 1:
29 val = -1UL;
30 break;
31 case 2:
32#ifdef CONFIG_X86_64
33 val = 0x5555555555555555UL;
34#else
35 val = 0x55555555UL;
36#endif
37 break;
38 case 3:
39#ifdef CONFIG_X86_64
40 val = 0xaaaaaaaaaaaaaaaaUL;
41#else
42 val = 0xaaaaaaaaUL;
43#endif
44 break;
45 default:
46 return;
47 }
48 40
49 incr = sizeof(unsigned long); 41static void __init memtest(u64 pattern, u64 start_phys, u64 size)
42{
43 u64 i, count;
44 u64 *start;
45 u64 start_bad, last_bad;
46 u64 start_phys_aligned;
47 size_t incr;
48
49 incr = sizeof(pattern);
50 start_phys_aligned = ALIGN(start_phys, incr); 50 start_phys_aligned = ALIGN(start_phys, incr);
51 count = (size - (start_phys_aligned - start_phys))/incr; 51 count = (size - (start_phys_aligned - start_phys))/incr;
52 start = __va(start_phys_aligned); 52 start = __va(start_phys_aligned);
@@ -54,25 +54,42 @@ static void __init memtest(unsigned long start_phys, unsigned long size,
54 last_bad = 0; 54 last_bad = 0;
55 55
56 for (i = 0; i < count; i++) 56 for (i = 0; i < count; i++)
57 start[i] = val; 57 start[i] = pattern;
58 for (i = 0; i < count; i++, start++, start_phys_aligned += incr) { 58 for (i = 0; i < count; i++, start++, start_phys_aligned += incr) {
59 if (*start != val) { 59 if (*start == pattern)
60 if (start_phys_aligned == last_bad + incr) { 60 continue;
61 last_bad += incr; 61 if (start_phys_aligned == last_bad + incr) {
62 } else { 62 last_bad += incr;
63 if (start_bad) { 63 continue;
64 printk(KERN_CONT "\n %016lx bad mem addr %010lx - %010lx reserved",
65 val, start_bad, last_bad + incr);
66 reserve_early(start_bad, last_bad + incr, "BAD RAM");
67 }
68 start_bad = last_bad = start_phys_aligned;
69 }
70 } 64 }
65 if (start_bad)
66 reserve_bad_mem(pattern, start_bad, last_bad + incr);
67 start_bad = last_bad = start_phys_aligned;
71 } 68 }
72 if (start_bad) { 69 if (start_bad)
73 printk(KERN_CONT "\n %016lx bad mem addr %010lx - %010lx reserved", 70 reserve_bad_mem(pattern, start_bad, last_bad + incr);
74 val, start_bad, last_bad + incr); 71}
75 reserve_early(start_bad, last_bad + incr, "BAD RAM"); 72
73static void __init do_one_pass(u64 pattern, u64 start, u64 end)
74{
75 u64 size = 0;
76
77 while (start < end) {
78 start = find_e820_area_size(start, &size, 1);
79
80 /* done ? */
81 if (start >= end)
82 break;
83 if (start + size > end)
84 size = end - start;
85
86 printk(KERN_INFO " %010llx - %010llx pattern %016llx\n",
87 (unsigned long long) start,
88 (unsigned long long) start + size,
89 (unsigned long long) cpu_to_be64(pattern));
90 memtest(pattern, start, size);
91
92 start += size;
76 } 93 }
77} 94}
78 95
@@ -90,33 +107,22 @@ early_param("memtest", parse_memtest);
90 107
91void __init early_memtest(unsigned long start, unsigned long end) 108void __init early_memtest(unsigned long start, unsigned long end)
92{ 109{
93 u64 t_start, t_size; 110 unsigned int i;
94 unsigned pattern; 111 unsigned int idx = 0;
95 112
96 if (!memtest_pattern) 113 if (!memtest_pattern)
97 return; 114 return;
98 115
99 printk(KERN_INFO "early_memtest: pattern num %d", memtest_pattern); 116 printk(KERN_INFO "early_memtest: # of tests: %d\n", memtest_pattern);
100 for (pattern = 0; pattern < memtest_pattern; pattern++) { 117 for (i = 0; i < memtest_pattern; i++) {
101 t_start = start; 118 idx = i % ARRAY_SIZE(patterns);
102 t_size = 0; 119 do_one_pass(patterns[idx], start, end);
103 while (t_start < end) { 120 }
104 t_start = find_e820_area_size(t_start, &t_size, 1);
105
106 /* done ? */
107 if (t_start >= end)
108 break;
109 if (t_start + t_size > end)
110 t_size = end - t_start;
111
112 printk(KERN_CONT "\n %010llx - %010llx pattern %d",
113 (unsigned long long)t_start,
114 (unsigned long long)t_start + t_size, pattern);
115
116 memtest(t_start, t_size, pattern);
117 121
118 t_start += t_size; 122 if (idx > 0) {
119 } 123 printk(KERN_INFO "early_memtest: wipe out "
124 "test pattern from memory\n");
125 /* additional test with pattern 0 will do this */
126 do_one_pass(0, start, end);
120 } 127 }
121 printk(KERN_CONT "\n");
122} 128}
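
Note: the rewritten memtest above takes its patterns from a table, fills a region with one 64-bit pattern at a time, and coalesces consecutive failing words into ranges before reserving them. A user-space sketch of that fill/verify/coalesce loop over a heap buffer, with two deliberately corrupted words so the range reporting is visible:

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

static void report_bad(uint64_t pattern, size_t start_bad, size_t end_bad)
{
	printf("  %016llx bad words %zu - %zu\n",
	       (unsigned long long)pattern, start_bad, end_bad);
}

static void memtest_one(uint64_t *buf, size_t count, uint64_t pattern)
{
	size_t i, start_bad = 0, last_bad = 0;
	int in_bad = 0;

	for (i = 0; i < count; i++)
		buf[i] = pattern;

	buf[100] = ~pattern;        /* simulate two adjacent faulty cells */
	buf[101] = ~pattern;

	for (i = 0; i < count; i++) {
		if (buf[i] == pattern)
			continue;
		if (in_bad && i == last_bad + 1) {   /* extend the current range */
			last_bad = i;
			continue;
		}
		if (in_bad)
			report_bad(pattern, start_bad, last_bad);
		start_bad = last_bad = i;
		in_bad = 1;
	}
	if (in_bad)
		report_bad(pattern, start_bad, last_bad);
}

int main(void)
{
	size_t count = 1024;
	uint64_t *buf = malloc(count * sizeof(*buf));
	uint64_t patterns[] = { 0x5555555555555555ULL, 0xaaaaaaaaaaaaaaaaULL };
	size_t p;

	if (!buf)
		return 1;
	for (p = 0; p < sizeof(patterns) / sizeof(patterns[0]); p++)
		memtest_one(buf, count, patterns[p]);
	free(buf);
	return 0;
}
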
diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c
index 3957cd6d6454..451fe95a0352 100644
--- a/arch/x86/mm/numa_32.c
+++ b/arch/x86/mm/numa_32.c
@@ -423,32 +423,6 @@ void __init initmem_init(unsigned long start_pfn,
423 setup_bootmem_allocator(); 423 setup_bootmem_allocator();
424} 424}
425 425
426void __init set_highmem_pages_init(void)
427{
428#ifdef CONFIG_HIGHMEM
429 struct zone *zone;
430 int nid;
431
432 for_each_zone(zone) {
433 unsigned long zone_start_pfn, zone_end_pfn;
434
435 if (!is_highmem(zone))
436 continue;
437
438 zone_start_pfn = zone->zone_start_pfn;
439 zone_end_pfn = zone_start_pfn + zone->spanned_pages;
440
441 nid = zone_to_nid(zone);
442 printk(KERN_INFO "Initializing %s for node %d (%08lx:%08lx)\n",
443 zone->name, nid, zone_start_pfn, zone_end_pfn);
444
445 add_highpages_with_active_regions(nid, zone_start_pfn,
446 zone_end_pfn);
447 }
448 totalram_pages += totalhigh_pages;
449#endif
450}
451
452#ifdef CONFIG_MEMORY_HOTPLUG 426#ifdef CONFIG_MEMORY_HOTPLUG
453static int paddr_to_nid(u64 addr) 427static int paddr_to_nid(u64 addr)
454{ 428{
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index 05f9aef6818a..2ed37158012d 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -11,6 +11,7 @@
11#include <linux/bootmem.h> 11#include <linux/bootmem.h>
12#include <linux/debugfs.h> 12#include <linux/debugfs.h>
13#include <linux/kernel.h> 13#include <linux/kernel.h>
14#include <linux/module.h>
14#include <linux/gfp.h> 15#include <linux/gfp.h>
15#include <linux/mm.h> 16#include <linux/mm.h>
16#include <linux/fs.h> 17#include <linux/fs.h>
@@ -634,6 +635,33 @@ void unmap_devmem(unsigned long pfn, unsigned long size, pgprot_t vma_prot)
634} 635}
635 636
636/* 637/*
 638 * Change the memory type for the physical address range in kernel identity
639 * mapping space if that range is a part of identity map.
640 */
641int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
642{
643 unsigned long id_sz;
644
645 if (!pat_enabled || base >= __pa(high_memory))
646 return 0;
647
648 id_sz = (__pa(high_memory) < base + size) ?
649 __pa(high_memory) - base :
650 size;
651
652 if (ioremap_change_attr((unsigned long)__va(base), id_sz, flags) < 0) {
653 printk(KERN_INFO
654 "%s:%d ioremap_change_attr failed %s "
655 "for %Lx-%Lx\n",
656 current->comm, current->pid,
657 cattr_name(flags),
658 base, (unsigned long long)(base + size));
659 return -EINVAL;
660 }
661 return 0;
662}
663
664/*
637 * Internal interface to reserve a range of physical memory with prot. 665 * Internal interface to reserve a range of physical memory with prot.
638 * Reserved non RAM regions only and after successful reserve_memtype, 666 * Reserved non RAM regions only and after successful reserve_memtype,
639 * this func also keeps identity mapping (if any) in sync with this new prot. 667 * this func also keeps identity mapping (if any) in sync with this new prot.
@@ -642,7 +670,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
642 int strict_prot) 670 int strict_prot)
643{ 671{
644 int is_ram = 0; 672 int is_ram = 0;
645 int id_sz, ret; 673 int ret;
646 unsigned long flags; 674 unsigned long flags;
647 unsigned long want_flags = (pgprot_val(*vma_prot) & _PAGE_CACHE_MASK); 675 unsigned long want_flags = (pgprot_val(*vma_prot) & _PAGE_CACHE_MASK);
648 676
@@ -679,23 +707,8 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
679 flags); 707 flags);
680 } 708 }
681 709
682 /* Need to keep identity mapping in sync */ 710 if (kernel_map_sync_memtype(paddr, size, flags) < 0) {
683 if (paddr >= __pa(high_memory))
684 return 0;
685
686 id_sz = (__pa(high_memory) < paddr + size) ?
687 __pa(high_memory) - paddr :
688 size;
689
690 if (ioremap_change_attr((unsigned long)__va(paddr), id_sz, flags) < 0) {
691 free_memtype(paddr, paddr + size); 711 free_memtype(paddr, paddr + size);
692 printk(KERN_ERR
693 "%s:%d reserve_pfn_range ioremap_change_attr failed %s "
694 "for %Lx-%Lx\n",
695 current->comm, current->pid,
696 cattr_name(flags),
697 (unsigned long long)paddr,
698 (unsigned long long)(paddr + size));
699 return -EINVAL; 712 return -EINVAL;
700 } 713 }
701 return 0; 714 return 0;
@@ -877,6 +890,7 @@ pgprot_t pgprot_writecombine(pgprot_t prot)
877 else 890 else
878 return pgprot_noncached(prot); 891 return pgprot_noncached(prot);
879} 892}
893EXPORT_SYMBOL_GPL(pgprot_writecombine);
880 894
881#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_X86_PAT) 895#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_X86_PAT)
882 896
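
The new kernel_map_sync_memtype() above only adjusts the portion of [base, base+size) that actually lies inside the kernel identity mapping, clamping the length at __pa(high_memory). The clamp is a plain interval intersection; a tiny illustration with made-up numbers (values are in MiB for readability):

    #include <stdint.h>
    #include <stdio.h>

    /* length of [base, base+size) that falls below the 'high' cutoff */
    static uint64_t id_map_len(uint64_t base, uint64_t size, uint64_t high)
    {
        if (base >= high)
            return 0;               /* entirely above the identity map */
        return (high < base + size) ? high - base : size;
    }

    int main(void)
    {
        /* with an 896 MiB direct map, a 16 MiB range starting at 890 MiB
         * only has its first 6 MiB synced */
        printf("%llu MiB\n", (unsigned long long)id_map_len(890, 16, 896));
        return 0;
    }
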
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index 86f2ffc43c3d..5b7c7c8464fe 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -313,6 +313,24 @@ int ptep_clear_flush_young(struct vm_area_struct *vma,
313 return young; 313 return young;
314} 314}
315 315
316/**
317 * reserve_top_address - reserves a hole in the top of kernel address space
318 * @reserve - size of hole to reserve
319 *
320 * Can be used to relocate the fixmap area and poke a hole in the top
321 * of kernel address space to make room for a hypervisor.
322 */
323void __init reserve_top_address(unsigned long reserve)
324{
325#ifdef CONFIG_X86_32
326 BUG_ON(fixmaps_set > 0);
327 printk(KERN_INFO "Reserving virtual address space above 0x%08x\n",
328 (int)-reserve);
329 __FIXADDR_TOP = -reserve - PAGE_SIZE;
330 __VMALLOC_RESERVE += reserve;
331#endif
332}
333
316int fixmaps_set; 334int fixmaps_set;
317 335
318void __native_set_fixmap(enum fixed_addresses idx, pte_t pte) 336void __native_set_fixmap(enum fixed_addresses idx, pte_t pte)
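
reserve_top_address(), now kept in pgtable.c for both 32- and 64-bit builds, is intended to be called very early by a hypervisor guest layer, before any fixmap entry has been set (hence the BUG_ON(fixmaps_set > 0) on 32-bit). A hypothetical early-setup call, shown only for the calling convention; the function name, the 16 MB figure and the assumption that the prototype is reachable via <asm/fixmap.h> are all illustrative:

    #include <linux/init.h>
    #include <asm/fixmap.h>

    /* hypothetical guest bring-up: leave a 16 MB hole below the fixmap
     * for the hypervisor, shrinking the vmalloc area accordingly */
    static void __init example_guest_reserve(void)
    {
        reserve_top_address(16 * 1024 * 1024);
    }
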
diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
index 0951db9ee519..f2e477c91c1b 100644
--- a/arch/x86/mm/pgtable_32.c
+++ b/arch/x86/mm/pgtable_32.c
@@ -20,6 +20,8 @@
20#include <asm/tlb.h> 20#include <asm/tlb.h>
21#include <asm/tlbflush.h> 21#include <asm/tlbflush.h>
22 22
23unsigned int __VMALLOC_RESERVE = 128 << 20;
24
23/* 25/*
24 * Associate a virtual page frame with a given physical page frame 26 * Associate a virtual page frame with a given physical page frame
25 * and protection flags for that frame. 27 * and protection flags for that frame.
@@ -97,22 +99,6 @@ void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
97unsigned long __FIXADDR_TOP = 0xfffff000; 99unsigned long __FIXADDR_TOP = 0xfffff000;
98EXPORT_SYMBOL(__FIXADDR_TOP); 100EXPORT_SYMBOL(__FIXADDR_TOP);
99 101
100/**
101 * reserve_top_address - reserves a hole in the top of kernel address space
102 * @reserve - size of hole to reserve
103 *
104 * Can be used to relocate the fixmap area and poke a hole in the top
105 * of kernel address space to make room for a hypervisor.
106 */
107void __init reserve_top_address(unsigned long reserve)
108{
109 BUG_ON(fixmaps_set > 0);
110 printk(KERN_INFO "Reserving virtual address space above 0x%08x\n",
111 (int)-reserve);
112 __FIXADDR_TOP = -reserve - PAGE_SIZE;
113 __VMALLOC_RESERVE += reserve;
114}
115
116/* 102/*
117 * vmalloc=size forces the vmalloc area to be exactly 'size' 103 * vmalloc=size forces the vmalloc area to be exactly 'size'
118 * bytes. This can be used to increase (or decrease) the 104 * bytes. This can be used to increase (or decrease) the
diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c
index e9f80c744cf3..10131fbdaada 100644
--- a/arch/x86/oprofile/op_model_ppro.c
+++ b/arch/x86/oprofile/op_model_ppro.c
@@ -78,8 +78,18 @@ static void ppro_setup_ctrs(struct op_msrs const * const msrs)
78 if (cpu_has_arch_perfmon) { 78 if (cpu_has_arch_perfmon) {
79 union cpuid10_eax eax; 79 union cpuid10_eax eax;
80 eax.full = cpuid_eax(0xa); 80 eax.full = cpuid_eax(0xa);
81 if (counter_width < eax.split.bit_width) 81
82 counter_width = eax.split.bit_width; 82 /*
83 * For Core2 (family 6, model 15), don't reset the
84 * counter width:
85 */
86 if (!(eax.split.version_id == 0 &&
87 current_cpu_data.x86 == 6 &&
88 current_cpu_data.x86_model == 15)) {
89
90 if (counter_width < eax.split.bit_width)
91 counter_width = eax.split.bit_width;
92 }
83 } 93 }
84 94
85 /* clear all counters */ 95 /* clear all counters */
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 352ea6830659..82cd39a6cbd3 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -942,6 +942,9 @@ asmlinkage void __init xen_start_kernel(void)
942 possible map and a non-dummy shared_info. */ 942 possible map and a non-dummy shared_info. */
943 per_cpu(xen_vcpu, 0) = &HYPERVISOR_shared_info->vcpu_info[0]; 943 per_cpu(xen_vcpu, 0) = &HYPERVISOR_shared_info->vcpu_info[0];
944 944
945 local_irq_disable();
946 early_boot_irqs_off();
947
945 xen_raw_console_write("mapping kernel into physical memory\n"); 948 xen_raw_console_write("mapping kernel into physical memory\n");
946 pgd = xen_setup_kernel_pagetable(pgd, xen_start_info->nr_pages); 949 pgd = xen_setup_kernel_pagetable(pgd, xen_start_info->nr_pages);
947 950
diff --git a/block/blk-merge.c b/block/blk-merge.c
index b92f5b0866b0..a104593e70c3 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -38,72 +38,84 @@ void blk_recalc_rq_sectors(struct request *rq, int nsect)
38 } 38 }
39} 39}
40 40
41void blk_recalc_rq_segments(struct request *rq) 41static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
42 struct bio *bio,
43 unsigned int *seg_size_ptr)
42{ 44{
43 int nr_phys_segs;
44 unsigned int phys_size; 45 unsigned int phys_size;
45 struct bio_vec *bv, *bvprv = NULL; 46 struct bio_vec *bv, *bvprv = NULL;
46 int seg_size; 47 int cluster, i, high, highprv = 1;
47 int cluster; 48 unsigned int seg_size, nr_phys_segs;
48 struct req_iterator iter; 49 struct bio *fbio;
49 int high, highprv = 1;
50 struct request_queue *q = rq->q;
51 50
52 if (!rq->bio) 51 if (!bio)
53 return; 52 return 0;
54 53
54 fbio = bio;
55 cluster = test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags); 55 cluster = test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
56 seg_size = 0; 56 seg_size = 0;
57 phys_size = nr_phys_segs = 0; 57 phys_size = nr_phys_segs = 0;
58 rq_for_each_segment(bv, rq, iter) { 58 for_each_bio(bio) {
59 /* 59 bio_for_each_segment(bv, bio, i) {
60 * the trick here is making sure that a high page is never 60 /*
61 * considered part of another segment, since that might 61 * the trick here is making sure that a high page is
62 * change with the bounce page. 62 * never considered part of another segment, since that
63 */ 63 * might change with the bounce page.
64 high = page_to_pfn(bv->bv_page) > q->bounce_pfn; 64 */
65 if (high || highprv) 65 high = page_to_pfn(bv->bv_page) > q->bounce_pfn;
66 goto new_segment; 66 if (high || highprv)
67 if (cluster) {
68 if (seg_size + bv->bv_len > q->max_segment_size)
69 goto new_segment;
70 if (!BIOVEC_PHYS_MERGEABLE(bvprv, bv))
71 goto new_segment;
72 if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bv))
73 goto new_segment; 67 goto new_segment;
68 if (cluster) {
69 if (seg_size + bv->bv_len > q->max_segment_size)
70 goto new_segment;
71 if (!BIOVEC_PHYS_MERGEABLE(bvprv, bv))
72 goto new_segment;
73 if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bv))
74 goto new_segment;
75
76 seg_size += bv->bv_len;
77 bvprv = bv;
78 continue;
79 }
80new_segment:
81 if (nr_phys_segs == 1 && seg_size >
82 fbio->bi_seg_front_size)
83 fbio->bi_seg_front_size = seg_size;
74 84
75 seg_size += bv->bv_len; 85 nr_phys_segs++;
76 bvprv = bv; 86 bvprv = bv;
77 continue; 87 seg_size = bv->bv_len;
88 highprv = high;
78 } 89 }
79new_segment:
80 if (nr_phys_segs == 1 && seg_size > rq->bio->bi_seg_front_size)
81 rq->bio->bi_seg_front_size = seg_size;
82
83 nr_phys_segs++;
84 bvprv = bv;
85 seg_size = bv->bv_len;
86 highprv = high;
87 } 90 }
88 91
89 if (nr_phys_segs == 1 && seg_size > rq->bio->bi_seg_front_size) 92 if (seg_size_ptr)
93 *seg_size_ptr = seg_size;
94
95 return nr_phys_segs;
96}
97
98void blk_recalc_rq_segments(struct request *rq)
99{
100 unsigned int seg_size = 0, phys_segs;
101
102 phys_segs = __blk_recalc_rq_segments(rq->q, rq->bio, &seg_size);
103
104 if (phys_segs == 1 && seg_size > rq->bio->bi_seg_front_size)
90 rq->bio->bi_seg_front_size = seg_size; 105 rq->bio->bi_seg_front_size = seg_size;
91 if (seg_size > rq->biotail->bi_seg_back_size) 106 if (seg_size > rq->biotail->bi_seg_back_size)
92 rq->biotail->bi_seg_back_size = seg_size; 107 rq->biotail->bi_seg_back_size = seg_size;
93 108
94 rq->nr_phys_segments = nr_phys_segs; 109 rq->nr_phys_segments = phys_segs;
95} 110}
96 111
97void blk_recount_segments(struct request_queue *q, struct bio *bio) 112void blk_recount_segments(struct request_queue *q, struct bio *bio)
98{ 113{
99 struct request rq;
100 struct bio *nxt = bio->bi_next; 114 struct bio *nxt = bio->bi_next;
101 rq.q = q; 115
102 rq.bio = rq.biotail = bio;
103 bio->bi_next = NULL; 116 bio->bi_next = NULL;
104 blk_recalc_rq_segments(&rq); 117 bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio, NULL);
105 bio->bi_next = nxt; 118 bio->bi_next = nxt;
106 bio->bi_phys_segments = rq.nr_phys_segments;
107 bio->bi_flags |= (1 << BIO_SEG_VALID); 119 bio->bi_flags |= (1 << BIO_SEG_VALID);
108} 120}
109EXPORT_SYMBOL(blk_recount_segments); 121EXPORT_SYMBOL(blk_recount_segments);
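
The block above folds the per-bio-vector walk into __blk_recalc_rq_segments(), which counts physical segments by merging adjacent vectors while clustering is enabled and the queue's segment-size limit is respected; blk_recount_segments() no longer needs a dummy struct request on the stack. A simplified, stand-alone model of that counting rule (the struct and limits are invented for illustration; the real code also tracks front/back segment sizes, the segment-boundary mask and the highmem/bounce case):

    #include <stdio.h>

    struct vec {            /* stand-in for a bio_vec: start address and length */
        unsigned long addr;
        unsigned int len;
    };

    static unsigned int count_segments(const struct vec *v, int n,
                                       unsigned int max_seg_size, int cluster)
    {
        unsigned int nsegs = 0, seg_size = 0;
        int i;

        for (i = 0; i < n; i++) {
            if (i > 0 && cluster &&
                seg_size + v[i].len <= max_seg_size &&
                v[i - 1].addr + v[i - 1].len == v[i].addr) {
                seg_size += v[i].len;   /* physically contiguous: merge */
                continue;
            }
            nsegs++;                    /* start a new segment */
            seg_size = v[i].len;
        }
        return nsegs;
    }

    int main(void)
    {
        struct vec v[] = {
            { 0x1000, 0x1000 }, { 0x2000, 0x1000 },     /* contiguous pair */
            { 0x8000, 0x0200 },                         /* gap: new segment */
        };

        printf("%u segments\n", count_segments(v, 3, 0x10000, 1));
        return 0;
    }
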
diff --git a/block/genhd.c b/block/genhd.c
index e1eadcc9546a..a9ec910974c1 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -256,6 +256,22 @@ void blkdev_show(struct seq_file *seqf, off_t offset)
256} 256}
257#endif /* CONFIG_PROC_FS */ 257#endif /* CONFIG_PROC_FS */
258 258
259/**
260 * register_blkdev - register a new block device
261 *
262 * @major: the requested major device number [1..255]. If @major=0, try to
263 * allocate any unused major number.
264 * @name: the name of the new block device as a zero terminated string
265 *
266 * The @name must be unique within the system.
267 *
268 * The return value depends on the @major input parameter.
269 * - if a major device number was requested in range [1..255] then the
270 * function returns zero on success, or a negative error code
271 * - if any unused major number was requested with @major=0 parameter
272 * then the return value is the allocated major number in range
273 * [1..255] or a negative error code otherwise
274 */
259int register_blkdev(unsigned int major, const char *name) 275int register_blkdev(unsigned int major, const char *name)
260{ 276{
261 struct blk_major_name **n, *p; 277 struct blk_major_name **n, *p;
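
The new kernel-doc above spells out the two calling conventions of register_blkdev(): pass a fixed major in [1..255] and expect zero on success, or pass 0 and treat a positive return value as the dynamically allocated major. A minimal module sketch of the dynamic case (the module and device names are made up):

    #include <linux/init.h>
    #include <linux/module.h>
    #include <linux/fs.h>

    static int example_major;

    static int __init example_init(void)
    {
        example_major = register_blkdev(0, "exampleblk");
        if (example_major < 0)
            return example_major;       /* negative error code */
        printk(KERN_INFO "exampleblk: allocated major %d\n", example_major);
        return 0;
    }

    static void __exit example_exit(void)
    {
        unregister_blkdev(example_major, "exampleblk");
    }

    module_init(example_init);
    module_exit(example_exit);
    MODULE_LICENSE("GPL");
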
diff --git a/crypto/ahash.c b/crypto/ahash.c
index ba5292d69ebd..b2d1ee32cfe8 100644
--- a/crypto/ahash.c
+++ b/crypto/ahash.c
@@ -214,7 +214,7 @@ static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
214 seq_printf(m, "async : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ? 214 seq_printf(m, "async : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
215 "yes" : "no"); 215 "yes" : "no");
216 seq_printf(m, "blocksize : %u\n", alg->cra_blocksize); 216 seq_printf(m, "blocksize : %u\n", alg->cra_blocksize);
217 seq_printf(m, "digestsize : %u\n", alg->cra_hash.digestsize); 217 seq_printf(m, "digestsize : %u\n", alg->cra_ahash.digestsize);
218} 218}
219 219
220const struct crypto_type crypto_ahash_type = { 220const struct crypto_type crypto_ahash_type = {
diff --git a/drivers/ata/pata_amd.c b/drivers/ata/pata_amd.c
index 63719ab9ea44..115b1cd6dcf5 100644
--- a/drivers/ata/pata_amd.c
+++ b/drivers/ata/pata_amd.c
@@ -24,7 +24,7 @@
24#include <linux/libata.h> 24#include <linux/libata.h>
25 25
26#define DRV_NAME "pata_amd" 26#define DRV_NAME "pata_amd"
27#define DRV_VERSION "0.3.11" 27#define DRV_VERSION "0.4.1"
28 28
29/** 29/**
30 * timing_setup - shared timing computation and load 30 * timing_setup - shared timing computation and load
@@ -145,6 +145,13 @@ static int amd_pre_reset(struct ata_link *link, unsigned long deadline)
145 return ata_sff_prereset(link, deadline); 145 return ata_sff_prereset(link, deadline);
146} 146}
147 147
148/**
149 * amd_cable_detect - report cable type
150 * @ap: port
151 *
152 * AMD controller/BIOS setups record the cable type in word 0x42
153 */
154
148static int amd_cable_detect(struct ata_port *ap) 155static int amd_cable_detect(struct ata_port *ap)
149{ 156{
150 static const u32 bitmask[2] = {0x03, 0x0C}; 157 static const u32 bitmask[2] = {0x03, 0x0C};
@@ -158,6 +165,40 @@ static int amd_cable_detect(struct ata_port *ap)
158} 165}
159 166
160/** 167/**
168 * amd_fifo_setup - set the PIO FIFO for ATA/ATAPI
169 * @ap: ATA interface
170 * @adev: ATA device
171 *
172 * Set the PCI fifo for this device according to the devices present
173 * on the bus at this point in time. We need to turn the post write buffer
174 * off for ATAPI devices as we may need to issue a word sized write to the
175 * device as the final I/O
176 */
177
178static void amd_fifo_setup(struct ata_port *ap)
179{
180 struct ata_device *adev;
181 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
182 static const u8 fifobit[2] = { 0xC0, 0x30};
183 u8 fifo = fifobit[ap->port_no];
184 u8 r;
185
186
187 ata_for_each_dev(adev, &ap->link, ENABLED) {
188 if (adev->class == ATA_DEV_ATAPI)
189 fifo = 0;
190 }
191 if (pdev->device == PCI_DEVICE_ID_AMD_VIPER_7411) /* FIFO is broken */
192 fifo = 0;
193
194 /* On the later chips the read prefetch bits become no-op bits */
195 pci_read_config_byte(pdev, 0x41, &r);
196 r &= ~fifobit[ap->port_no];
197 r |= fifo;
198 pci_write_config_byte(pdev, 0x41, r);
199}
200
201/**
161 * amd33_set_piomode - set initial PIO mode data 202 * amd33_set_piomode - set initial PIO mode data
162 * @ap: ATA interface 203 * @ap: ATA interface
163 * @adev: ATA device 204 * @adev: ATA device
@@ -167,21 +208,25 @@ static int amd_cable_detect(struct ata_port *ap)
167 208
168static void amd33_set_piomode(struct ata_port *ap, struct ata_device *adev) 209static void amd33_set_piomode(struct ata_port *ap, struct ata_device *adev)
169{ 210{
211 amd_fifo_setup(ap);
170 timing_setup(ap, adev, 0x40, adev->pio_mode, 1); 212 timing_setup(ap, adev, 0x40, adev->pio_mode, 1);
171} 213}
172 214
173static void amd66_set_piomode(struct ata_port *ap, struct ata_device *adev) 215static void amd66_set_piomode(struct ata_port *ap, struct ata_device *adev)
174{ 216{
217 amd_fifo_setup(ap);
175 timing_setup(ap, adev, 0x40, adev->pio_mode, 2); 218 timing_setup(ap, adev, 0x40, adev->pio_mode, 2);
176} 219}
177 220
178static void amd100_set_piomode(struct ata_port *ap, struct ata_device *adev) 221static void amd100_set_piomode(struct ata_port *ap, struct ata_device *adev)
179{ 222{
223 amd_fifo_setup(ap);
180 timing_setup(ap, adev, 0x40, adev->pio_mode, 3); 224 timing_setup(ap, adev, 0x40, adev->pio_mode, 3);
181} 225}
182 226
183static void amd133_set_piomode(struct ata_port *ap, struct ata_device *adev) 227static void amd133_set_piomode(struct ata_port *ap, struct ata_device *adev)
184{ 228{
229 amd_fifo_setup(ap);
185 timing_setup(ap, adev, 0x40, adev->pio_mode, 4); 230 timing_setup(ap, adev, 0x40, adev->pio_mode, 4);
186} 231}
187 232
@@ -397,6 +442,16 @@ static struct ata_port_operations nv133_port_ops = {
397 .set_dmamode = nv133_set_dmamode, 442 .set_dmamode = nv133_set_dmamode,
398}; 443};
399 444
445static void amd_clear_fifo(struct pci_dev *pdev)
446{
447 u8 fifo;
448 /* Disable the FIFO, the FIFO logic will re-enable it as
449 appropriate */
450 pci_read_config_byte(pdev, 0x41, &fifo);
451 fifo &= 0x0F;
452 pci_write_config_byte(pdev, 0x41, fifo);
453}
454
400static int amd_init_one(struct pci_dev *pdev, const struct pci_device_id *id) 455static int amd_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
401{ 456{
402 static const struct ata_port_info info[10] = { 457 static const struct ata_port_info info[10] = {
@@ -503,14 +558,8 @@ static int amd_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
503 558
504 if (type < 3) 559 if (type < 3)
505 ata_pci_bmdma_clear_simplex(pdev); 560 ata_pci_bmdma_clear_simplex(pdev);
506 561 if (pdev->vendor == PCI_VENDOR_ID_AMD)
507 /* Check for AMD7411 */ 562 amd_clear_fifo(pdev);
508 if (type == 3)
509 /* FIFO is broken */
510 pci_write_config_byte(pdev, 0x41, fifo & 0x0F);
511 else
512 pci_write_config_byte(pdev, 0x41, fifo | 0xF0);
513
514 /* Cable detection on Nvidia chips doesn't work too well, 563 /* Cable detection on Nvidia chips doesn't work too well,
515 * cache BIOS programmed UDMA mode. 564 * cache BIOS programmed UDMA mode.
516 */ 565 */
@@ -536,18 +585,11 @@ static int amd_reinit_one(struct pci_dev *pdev)
536 return rc; 585 return rc;
537 586
538 if (pdev->vendor == PCI_VENDOR_ID_AMD) { 587 if (pdev->vendor == PCI_VENDOR_ID_AMD) {
539 u8 fifo; 588 amd_clear_fifo(pdev);
540 pci_read_config_byte(pdev, 0x41, &fifo);
541 if (pdev->device == PCI_DEVICE_ID_AMD_VIPER_7411)
542 /* FIFO is broken */
543 pci_write_config_byte(pdev, 0x41, fifo & 0x0F);
544 else
545 pci_write_config_byte(pdev, 0x41, fifo | 0xF0);
546 if (pdev->device == PCI_DEVICE_ID_AMD_VIPER_7409 || 589 if (pdev->device == PCI_DEVICE_ID_AMD_VIPER_7409 ||
547 pdev->device == PCI_DEVICE_ID_AMD_COBRA_7401) 590 pdev->device == PCI_DEVICE_ID_AMD_COBRA_7401)
548 ata_pci_bmdma_clear_simplex(pdev); 591 ata_pci_bmdma_clear_simplex(pdev);
549 } 592 }
550
551 ata_host_resume(host); 593 ata_host_resume(host);
552 return 0; 594 return 0;
553} 595}
diff --git a/drivers/ata/pata_it821x.c b/drivers/ata/pata_it821x.c
index f1bb2f9fecbf..b05b86a912c5 100644
--- a/drivers/ata/pata_it821x.c
+++ b/drivers/ata/pata_it821x.c
@@ -557,6 +557,9 @@ static unsigned int it821x_read_id(struct ata_device *adev,
557 id[83] |= 0x4400; /* Word 83 is valid and LBA48 */ 557 id[83] |= 0x4400; /* Word 83 is valid and LBA48 */
558 id[86] |= 0x0400; /* LBA48 on */ 558 id[86] |= 0x0400; /* LBA48 on */
559 id[ATA_ID_MAJOR_VER] |= 0x1F; 559 id[ATA_ID_MAJOR_VER] |= 0x1F;
560 /* Clear the serial number because it's different each boot
561 which breaks validation on resume */
562 memset(&id[ATA_ID_SERNO], 0x20, ATA_ID_SERNO_LEN);
560 } 563 }
561 return err_mask; 564 return err_mask;
562} 565}
diff --git a/drivers/ata/pata_legacy.c b/drivers/ata/pata_legacy.c
index 6c1d778b63a9..e3bc1b436284 100644
--- a/drivers/ata/pata_legacy.c
+++ b/drivers/ata/pata_legacy.c
@@ -283,9 +283,10 @@ static void pdc20230_set_piomode(struct ata_port *ap, struct ata_device *adev)
283static unsigned int pdc_data_xfer_vlb(struct ata_device *dev, 283static unsigned int pdc_data_xfer_vlb(struct ata_device *dev,
284 unsigned char *buf, unsigned int buflen, int rw) 284 unsigned char *buf, unsigned int buflen, int rw)
285{ 285{
286 if (ata_id_has_dword_io(dev->id)) { 286 int slop = buflen & 3;
287 /* 32bit I/O capable *and* we need to write a whole number of dwords */
288 if (ata_id_has_dword_io(dev->id) && (slop == 0 || slop == 3)) {
287 struct ata_port *ap = dev->link->ap; 289 struct ata_port *ap = dev->link->ap;
288 int slop = buflen & 3;
289 unsigned long flags; 290 unsigned long flags;
290 291
291 local_irq_save(flags); 292 local_irq_save(flags);
@@ -735,7 +736,7 @@ static unsigned int vlb32_data_xfer(struct ata_device *adev, unsigned char *buf,
735 struct ata_port *ap = adev->link->ap; 736 struct ata_port *ap = adev->link->ap;
736 int slop = buflen & 3; 737 int slop = buflen & 3;
737 738
738 if (ata_id_has_dword_io(adev->id)) { 739 if (ata_id_has_dword_io(adev->id) && (slop == 0 || slop == 3)) {
739 if (rw == WRITE) 740 if (rw == WRITE)
740 iowrite32_rep(ap->ioaddr.data_addr, buf, buflen >> 2); 741 iowrite32_rep(ap->ioaddr.data_addr, buf, buflen >> 2);
741 else 742 else
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index 4ae1a4138b47..7007edd2d451 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -3114,19 +3114,17 @@ static int mv_init_host(struct ata_host *host, unsigned int board_idx)
3114 writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS); 3114 writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
3115 } 3115 }
3116 3116
3117 if (!IS_SOC(hpriv)) { 3117 /* Clear any currently outstanding host interrupt conditions */
3118 /* Clear any currently outstanding host interrupt conditions */ 3118 writelfl(0, mmio + hpriv->irq_cause_ofs);
3119 writelfl(0, mmio + hpriv->irq_cause_ofs);
3120 3119
3121 /* and unmask interrupt generation for host regs */ 3120 /* and unmask interrupt generation for host regs */
3122 writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_ofs); 3121 writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_ofs);
3123 3122
3124 /* 3123 /*
3125 * enable only global host interrupts for now. 3124 * enable only global host interrupts for now.
3126 * The per-port interrupts get done later as ports are set up. 3125 * The per-port interrupts get done later as ports are set up.
3127 */ 3126 */
3128 mv_set_main_irq_mask(host, 0, PCI_ERR); 3127 mv_set_main_irq_mask(host, 0, PCI_ERR);
3129 }
3130done: 3128done:
3131 return rc; 3129 return rc;
3132} 3130}
diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
index 144a49f15220..8733a2ea04c2 100644
--- a/drivers/atm/lanai.c
+++ b/drivers/atm/lanai.c
@@ -901,7 +901,7 @@ static int __devinit eeprom_read(struct lanai_dev *lanai)
901 clock_l(); udelay(5); 901 clock_l(); udelay(5);
902 for (i = 128; i != 0; i >>= 1) { /* write command out */ 902 for (i = 128; i != 0; i >>= 1) { /* write command out */
903 tmp = (lanai->conf1 & ~CONFIG1_PROMDATA) | 903 tmp = (lanai->conf1 & ~CONFIG1_PROMDATA) |
904 (data & i) ? CONFIG1_PROMDATA : 0; 904 ((data & i) ? CONFIG1_PROMDATA : 0);
905 if (lanai->conf1 != tmp) { 905 if (lanai->conf1 != tmp) {
906 set_config1(tmp); 906 set_config1(tmp);
907 udelay(5); /* Let new data settle */ 907 udelay(5); /* Let new data settle */
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index d2cb67b61176..b5a061114630 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -3611,11 +3611,15 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
3611 schedule_timeout_uninterruptible(30*HZ); 3611 schedule_timeout_uninterruptible(30*HZ);
3612 3612
3613 /* Now try to get the controller to respond to a no-op */ 3613 /* Now try to get the controller to respond to a no-op */
3614 for (i=0; i<12; i++) { 3614 for (i=0; i<30; i++) {
3615 if (cciss_noop(pdev) == 0) 3615 if (cciss_noop(pdev) == 0)
3616 break; 3616 break;
3617 else 3617
3618 printk("cciss: no-op failed%s\n", (i < 11 ? "; re-trying" : "")); 3618 schedule_timeout_uninterruptible(HZ);
3619 }
3620 if (i == 30) {
3621 printk(KERN_ERR "cciss: controller seems dead\n");
3622 return -EBUSY;
3619 } 3623 }
3620 } 3624 }
3621 3625
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 918ef725de41..b6c8ce254359 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -40,6 +40,7 @@
40#include <linux/hdreg.h> 40#include <linux/hdreg.h>
41#include <linux/cdrom.h> 41#include <linux/cdrom.h>
42#include <linux/module.h> 42#include <linux/module.h>
43#include <linux/scatterlist.h>
43 44
44#include <xen/xenbus.h> 45#include <xen/xenbus.h>
45#include <xen/grant_table.h> 46#include <xen/grant_table.h>
@@ -82,6 +83,7 @@ struct blkfront_info
82 enum blkif_state connected; 83 enum blkif_state connected;
83 int ring_ref; 84 int ring_ref;
84 struct blkif_front_ring ring; 85 struct blkif_front_ring ring;
86 struct scatterlist sg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
85 unsigned int evtchn, irq; 87 unsigned int evtchn, irq;
86 struct request_queue *rq; 88 struct request_queue *rq;
87 struct work_struct work; 89 struct work_struct work;
@@ -204,12 +206,11 @@ static int blkif_queue_request(struct request *req)
204 struct blkfront_info *info = req->rq_disk->private_data; 206 struct blkfront_info *info = req->rq_disk->private_data;
205 unsigned long buffer_mfn; 207 unsigned long buffer_mfn;
206 struct blkif_request *ring_req; 208 struct blkif_request *ring_req;
207 struct req_iterator iter;
208 struct bio_vec *bvec;
209 unsigned long id; 209 unsigned long id;
210 unsigned int fsect, lsect; 210 unsigned int fsect, lsect;
211 int ref; 211 int i, ref;
212 grant_ref_t gref_head; 212 grant_ref_t gref_head;
213 struct scatterlist *sg;
213 214
214 if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) 215 if (unlikely(info->connected != BLKIF_STATE_CONNECTED))
215 return 1; 216 return 1;
@@ -238,12 +239,13 @@ static int blkif_queue_request(struct request *req)
238 if (blk_barrier_rq(req)) 239 if (blk_barrier_rq(req))
239 ring_req->operation = BLKIF_OP_WRITE_BARRIER; 240 ring_req->operation = BLKIF_OP_WRITE_BARRIER;
240 241
241 ring_req->nr_segments = 0; 242 ring_req->nr_segments = blk_rq_map_sg(req->q, req, info->sg);
242 rq_for_each_segment(bvec, req, iter) { 243 BUG_ON(ring_req->nr_segments > BLKIF_MAX_SEGMENTS_PER_REQUEST);
243 BUG_ON(ring_req->nr_segments == BLKIF_MAX_SEGMENTS_PER_REQUEST); 244
244 buffer_mfn = pfn_to_mfn(page_to_pfn(bvec->bv_page)); 245 for_each_sg(info->sg, sg, ring_req->nr_segments, i) {
245 fsect = bvec->bv_offset >> 9; 246 buffer_mfn = pfn_to_mfn(page_to_pfn(sg_page(sg)));
246 lsect = fsect + (bvec->bv_len >> 9) - 1; 247 fsect = sg->offset >> 9;
248 lsect = fsect + (sg->length >> 9) - 1;
247 /* install a grant reference. */ 249 /* install a grant reference. */
248 ref = gnttab_claim_grant_reference(&gref_head); 250 ref = gnttab_claim_grant_reference(&gref_head);
249 BUG_ON(ref == -ENOSPC); 251 BUG_ON(ref == -ENOSPC);
@@ -254,16 +256,12 @@ static int blkif_queue_request(struct request *req)
254 buffer_mfn, 256 buffer_mfn,
255 rq_data_dir(req) ); 257 rq_data_dir(req) );
256 258
257 info->shadow[id].frame[ring_req->nr_segments] = 259 info->shadow[id].frame[i] = mfn_to_pfn(buffer_mfn);
258 mfn_to_pfn(buffer_mfn); 260 ring_req->seg[i] =
259
260 ring_req->seg[ring_req->nr_segments] =
261 (struct blkif_request_segment) { 261 (struct blkif_request_segment) {
262 .gref = ref, 262 .gref = ref,
263 .first_sect = fsect, 263 .first_sect = fsect,
264 .last_sect = lsect }; 264 .last_sect = lsect };
265
266 ring_req->nr_segments++;
267 } 265 }
268 266
269 info->ring.req_prod_pvt++; 267 info->ring.req_prod_pvt++;
@@ -622,6 +620,8 @@ static int setup_blkring(struct xenbus_device *dev,
622 SHARED_RING_INIT(sring); 620 SHARED_RING_INIT(sring);
623 FRONT_RING_INIT(&info->ring, sring, PAGE_SIZE); 621 FRONT_RING_INIT(&info->ring, sring, PAGE_SIZE);
624 622
623 sg_init_table(info->sg, BLKIF_MAX_SEGMENTS_PER_REQUEST);
624
625 err = xenbus_grant_ring(dev, virt_to_mfn(info->ring.sring)); 625 err = xenbus_grant_ring(dev, virt_to_mfn(info->ring.sring));
626 if (err < 0) { 626 if (err < 0) {
627 free_page((unsigned long)sring); 627 free_page((unsigned long)sring);
diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
index 72c667f9bee1..12715d3c078d 100644
--- a/drivers/gpu/drm/drm_bufs.c
+++ b/drivers/gpu/drm/drm_bufs.c
@@ -420,7 +420,7 @@ int drm_rmmap_locked(struct drm_device *dev, drm_local_map_t *map)
420 dev->sigdata.lock = NULL; 420 dev->sigdata.lock = NULL;
421 master->lock.hw_lock = NULL; /* SHM removed */ 421 master->lock.hw_lock = NULL; /* SHM removed */
422 master->lock.file_priv = NULL; 422 master->lock.file_priv = NULL;
423 wake_up_interruptible(&master->lock.lock_queue); 423 wake_up_interruptible_all(&master->lock.lock_queue);
424 } 424 }
425 break; 425 break;
426 case _DRM_AGP: 426 case _DRM_AGP:
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 733028b4d45e..1c3a8c557140 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -452,6 +452,59 @@ static void drm_setup_crtcs(struct drm_device *dev)
452 kfree(modes); 452 kfree(modes);
453 kfree(enabled); 453 kfree(enabled);
454} 454}
455
456/**
457 * drm_encoder_crtc_ok - can a given crtc drive a given encoder?
458 * @encoder: encoder to test
459 * @crtc: crtc to test
460 *
461 * Return false if @encoder can't be driven by @crtc, true otherwise.
462 */
463static bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
464 struct drm_crtc *crtc)
465{
466 struct drm_device *dev;
467 struct drm_crtc *tmp;
468 int crtc_mask = 1;
469
470 WARN(!crtc, "checking null crtc?");
471
472 dev = crtc->dev;
473
474 list_for_each_entry(tmp, &dev->mode_config.crtc_list, head) {
475 if (tmp == crtc)
476 break;
477 crtc_mask <<= 1;
478 }
479
480 if (encoder->possible_crtcs & crtc_mask)
481 return true;
482 return false;
483}
484
485/*
486 * Check the CRTC we're going to map each output to vs. its current
487 * CRTC. If they don't match, we have to disable the output and the CRTC
488 * since the driver will have to re-route things.
489 */
490static void
491drm_crtc_prepare_encoders(struct drm_device *dev)
492{
493 struct drm_encoder_helper_funcs *encoder_funcs;
494 struct drm_encoder *encoder;
495
496 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
497 encoder_funcs = encoder->helper_private;
498 /* Disable unused encoders */
499 if (encoder->crtc == NULL)
500 (*encoder_funcs->dpms)(encoder, DRM_MODE_DPMS_OFF);
501 /* Disable encoders whose CRTC is about to change */
502 if (encoder_funcs->get_crtc &&
503 encoder->crtc != (*encoder_funcs->get_crtc)(encoder))
504 (*encoder_funcs->dpms)(encoder, DRM_MODE_DPMS_OFF);
505 }
506}
507
455/** 508/**
456 * drm_crtc_set_mode - set a mode 509 * drm_crtc_set_mode - set a mode
457 * @crtc: CRTC to program 510 * @crtc: CRTC to program
@@ -547,6 +600,8 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
547 encoder_funcs->prepare(encoder); 600 encoder_funcs->prepare(encoder);
548 } 601 }
549 602
603 drm_crtc_prepare_encoders(dev);
604
550 crtc_funcs->prepare(crtc); 605 crtc_funcs->prepare(crtc);
551 606
552 /* Set up the DPLL and any encoders state that needs to adjust or depend 607 /* Set up the DPLL and any encoders state that needs to adjust or depend
@@ -617,7 +672,7 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
617 struct drm_device *dev; 672 struct drm_device *dev;
618 struct drm_crtc **save_crtcs, *new_crtc; 673 struct drm_crtc **save_crtcs, *new_crtc;
619 struct drm_encoder **save_encoders, *new_encoder; 674 struct drm_encoder **save_encoders, *new_encoder;
620 struct drm_framebuffer *old_fb; 675 struct drm_framebuffer *old_fb = NULL;
621 bool save_enabled; 676 bool save_enabled;
622 bool mode_changed = false; 677 bool mode_changed = false;
623 bool fb_changed = false; 678 bool fb_changed = false;
@@ -668,9 +723,10 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
668 * and then just flip_or_move it */ 723 * and then just flip_or_move it */
669 if (set->crtc->fb != set->fb) { 724 if (set->crtc->fb != set->fb) {
670 /* If we have no fb then treat it as a full mode set */ 725 /* If we have no fb then treat it as a full mode set */
671 if (set->crtc->fb == NULL) 726 if (set->crtc->fb == NULL) {
727 DRM_DEBUG("crtc has no fb, full mode set\n");
672 mode_changed = true; 728 mode_changed = true;
673 else if ((set->fb->bits_per_pixel != 729 } else if ((set->fb->bits_per_pixel !=
674 set->crtc->fb->bits_per_pixel) || 730 set->crtc->fb->bits_per_pixel) ||
675 set->fb->depth != set->crtc->fb->depth) 731 set->fb->depth != set->crtc->fb->depth)
676 fb_changed = true; 732 fb_changed = true;
@@ -682,7 +738,7 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
682 fb_changed = true; 738 fb_changed = true;
683 739
684 if (set->mode && !drm_mode_equal(set->mode, &set->crtc->mode)) { 740 if (set->mode && !drm_mode_equal(set->mode, &set->crtc->mode)) {
685 DRM_DEBUG("modes are different\n"); 741 DRM_DEBUG("modes are different, full mode set\n");
686 drm_mode_debug_printmodeline(&set->crtc->mode); 742 drm_mode_debug_printmodeline(&set->crtc->mode);
687 drm_mode_debug_printmodeline(set->mode); 743 drm_mode_debug_printmodeline(set->mode);
688 mode_changed = true; 744 mode_changed = true;
@@ -708,6 +764,7 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
708 } 764 }
709 765
710 if (new_encoder != connector->encoder) { 766 if (new_encoder != connector->encoder) {
767 DRM_DEBUG("encoder changed, full mode switch\n");
711 mode_changed = true; 768 mode_changed = true;
712 connector->encoder = new_encoder; 769 connector->encoder = new_encoder;
713 } 770 }
@@ -734,10 +791,20 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
734 if (set->connectors[ro] == connector) 791 if (set->connectors[ro] == connector)
735 new_crtc = set->crtc; 792 new_crtc = set->crtc;
736 } 793 }
794
795 /* Make sure the new CRTC will work with the encoder */
796 if (new_crtc &&
797 !drm_encoder_crtc_ok(connector->encoder, new_crtc)) {
798 ret = -EINVAL;
799 goto fail_set_mode;
800 }
737 if (new_crtc != connector->encoder->crtc) { 801 if (new_crtc != connector->encoder->crtc) {
802 DRM_DEBUG("crtc changed, full mode switch\n");
738 mode_changed = true; 803 mode_changed = true;
739 connector->encoder->crtc = new_crtc; 804 connector->encoder->crtc = new_crtc;
740 } 805 }
806 DRM_DEBUG("setting connector %d crtc to %p\n",
807 connector->base.id, new_crtc);
741 } 808 }
742 809
743 /* mode_set_base is not a required function */ 810 /* mode_set_base is not a required function */
@@ -781,6 +848,7 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
781 848
782fail_set_mode: 849fail_set_mode:
783 set->crtc->enabled = save_enabled; 850 set->crtc->enabled = save_enabled;
851 set->crtc->fb = old_fb;
784 count = 0; 852 count = 0;
785 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 853 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
786 if (!connector->encoder) 854 if (!connector->encoder)
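
The helper added above, drm_encoder_crtc_ok(), relies on the convention that the i-th CRTC in the mode_config list corresponds to bit i of encoder->possible_crtcs; walking the list while shifting a one-bit mask reproduces that mapping. A small stand-alone check of the bit arithmetic (the mask value is made up):

    #include <stdio.h>

    int main(void)
    {
        unsigned int possible_crtcs = 0x5;      /* encoder wired to CRTCs 0 and 2 */
        int i;

        for (i = 0; i < 3; i++) {
            unsigned int crtc_mask = 1u << i;   /* bit for the i-th CRTC */

            printf("crtc %d: %s\n", i,
                   (possible_crtcs & crtc_mask) ? "ok" : "not wired");
        }
        return 0;
    }
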
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 5a4d3244758a..a839a28d8ee6 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -125,7 +125,7 @@ static bool edid_is_valid(struct edid *edid)
125 DRM_ERROR("EDID has major version %d, instead of 1\n", edid->version); 125 DRM_ERROR("EDID has major version %d, instead of 1\n", edid->version);
126 goto bad; 126 goto bad;
127 } 127 }
128 if (edid->revision <= 0 || edid->revision > 3) { 128 if (edid->revision > 3) {
129 DRM_ERROR("EDID has minor version %d, which is not between 0-3\n", edid->revision); 129 DRM_ERROR("EDID has minor version %d, which is not between 0-3\n", edid->revision);
130 goto bad; 130 goto bad;
131 } 131 }
@@ -320,10 +320,10 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
320 mode->htotal = mode->hdisplay + ((pt->hblank_hi << 8) | pt->hblank_lo); 320 mode->htotal = mode->hdisplay + ((pt->hblank_hi << 8) | pt->hblank_lo);
321 321
322 mode->vdisplay = (pt->vactive_hi << 8) | pt->vactive_lo; 322 mode->vdisplay = (pt->vactive_hi << 8) | pt->vactive_lo;
323 mode->vsync_start = mode->vdisplay + ((pt->vsync_offset_hi << 8) | 323 mode->vsync_start = mode->vdisplay + ((pt->vsync_offset_hi << 4) |
324 pt->vsync_offset_lo); 324 pt->vsync_offset_lo);
325 mode->vsync_end = mode->vsync_start + 325 mode->vsync_end = mode->vsync_start +
326 ((pt->vsync_pulse_width_hi << 8) | 326 ((pt->vsync_pulse_width_hi << 4) |
327 pt->vsync_pulse_width_lo); 327 pt->vsync_pulse_width_lo);
328 mode->vtotal = mode->vdisplay + ((pt->vblank_hi << 8) | pt->vblank_lo); 328 mode->vtotal = mode->vdisplay + ((pt->vblank_hi << 8) | pt->vblank_lo);
329 329
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index 6c020fe5431c..f52663ebe016 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -484,6 +484,7 @@ int drm_release(struct inode *inode, struct file *filp)
484 mutex_lock(&dev->struct_mutex); 484 mutex_lock(&dev->struct_mutex);
485 485
486 if (file_priv->is_master) { 486 if (file_priv->is_master) {
487 struct drm_master *master = file_priv->master;
487 struct drm_file *temp; 488 struct drm_file *temp;
488 list_for_each_entry(temp, &dev->filelist, lhead) { 489 list_for_each_entry(temp, &dev->filelist, lhead) {
489 if ((temp->master == file_priv->master) && 490 if ((temp->master == file_priv->master) &&
@@ -491,6 +492,19 @@ int drm_release(struct inode *inode, struct file *filp)
491 temp->authenticated = 0; 492 temp->authenticated = 0;
492 } 493 }
493 494
495 /**
496 * Since the master is disappearing, so is the
497 * possibility to lock.
498 */
499
500 if (master->lock.hw_lock) {
501 if (dev->sigdata.lock == master->lock.hw_lock)
502 dev->sigdata.lock = NULL;
503 master->lock.hw_lock = NULL;
504 master->lock.file_priv = NULL;
505 wake_up_interruptible_all(&master->lock.lock_queue);
506 }
507
494 if (file_priv->minor->master == file_priv->master) { 508 if (file_priv->minor->master == file_priv->master) {
495 /* drop the reference held my the minor */ 509 /* drop the reference held my the minor */
496 drm_master_put(&file_priv->minor->master); 510 drm_master_put(&file_priv->minor->master);
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 3795dbc0f50c..93e677a481f5 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -435,6 +435,8 @@ EXPORT_SYMBOL(drm_vblank_get);
435 */ 435 */
436void drm_vblank_put(struct drm_device *dev, int crtc) 436void drm_vblank_put(struct drm_device *dev, int crtc)
437{ 437{
438 BUG_ON (atomic_read (&dev->vblank_refcount[crtc]) == 0);
439
438 /* Last user schedules interrupt disable */ 440 /* Last user schedules interrupt disable */
439 if (atomic_dec_and_test(&dev->vblank_refcount[crtc])) 441 if (atomic_dec_and_test(&dev->vblank_refcount[crtc]))
440 mod_timer(&dev->vblank_disable_timer, jiffies + 5*DRM_HZ); 442 mod_timer(&dev->vblank_disable_timer, jiffies + 5*DRM_HZ);
@@ -460,8 +462,9 @@ void drm_vblank_pre_modeset(struct drm_device *dev, int crtc)
460 * so that interrupts remain enabled in the interim. 462 * so that interrupts remain enabled in the interim.
461 */ 463 */
462 if (!dev->vblank_inmodeset[crtc]) { 464 if (!dev->vblank_inmodeset[crtc]) {
463 dev->vblank_inmodeset[crtc] = 1; 465 dev->vblank_inmodeset[crtc] = 0x1;
464 drm_vblank_get(dev, crtc); 466 if (drm_vblank_get(dev, crtc) == 0)
467 dev->vblank_inmodeset[crtc] |= 0x2;
465 } 468 }
466} 469}
467EXPORT_SYMBOL(drm_vblank_pre_modeset); 470EXPORT_SYMBOL(drm_vblank_pre_modeset);
@@ -473,9 +476,12 @@ void drm_vblank_post_modeset(struct drm_device *dev, int crtc)
473 if (dev->vblank_inmodeset[crtc]) { 476 if (dev->vblank_inmodeset[crtc]) {
474 spin_lock_irqsave(&dev->vbl_lock, irqflags); 477 spin_lock_irqsave(&dev->vbl_lock, irqflags);
475 dev->vblank_disable_allowed = 1; 478 dev->vblank_disable_allowed = 1;
476 dev->vblank_inmodeset[crtc] = 0;
477 spin_unlock_irqrestore(&dev->vbl_lock, irqflags); 479 spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
478 drm_vblank_put(dev, crtc); 480
481 if (dev->vblank_inmodeset[crtc] & 0x2)
482 drm_vblank_put(dev, crtc);
483
484 dev->vblank_inmodeset[crtc] = 0;
479 } 485 }
480} 486}
481EXPORT_SYMBOL(drm_vblank_post_modeset); 487EXPORT_SYMBOL(drm_vblank_post_modeset);
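
The vblank_inmodeset change above splits the flag into two bits: bit 0 records that a modeset is in progress, bit 1 records that drm_vblank_get() actually succeeded, so drm_vblank_post_modeset() only drops a reference it really holds; an unbalanced drop is exactly what the new BUG_ON in drm_vblank_put() would trip on. The bookkeeping pattern in isolation (names and the toy refcount are invented):

    #include <stdio.h>

    #define IN_MODESET 0x1
    #define HOLDS_REF  0x2

    static int refcount;

    static int try_get_ref(void) { refcount++; return 0; }  /* 0 on success */

    static void put_ref(void)
    {
        if (refcount == 0) {            /* the imbalance a BUG_ON would catch */
            fprintf(stderr, "unbalanced put\n");
            return;
        }
        refcount--;
    }

    int main(void)
    {
        unsigned int flags = IN_MODESET;

        if (try_get_ref() == 0)
            flags |= HOLDS_REF;         /* remember that the get succeeded */

        /* ... modeset work happens here ... */

        if (flags & HOLDS_REF)          /* only drop a reference we hold */
            put_ref();
        flags = 0;
        return 0;
    }
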
diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
index 46e7b28f0707..e2f70a516c34 100644
--- a/drivers/gpu/drm/drm_lock.c
+++ b/drivers/gpu/drm/drm_lock.c
@@ -80,6 +80,7 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
80 __set_current_state(TASK_INTERRUPTIBLE); 80 __set_current_state(TASK_INTERRUPTIBLE);
81 if (!master->lock.hw_lock) { 81 if (!master->lock.hw_lock) {
82 /* Device has been unregistered */ 82 /* Device has been unregistered */
83 send_sig(SIGTERM, current, 0);
83 ret = -EINTR; 84 ret = -EINTR;
84 break; 85 break;
85 } 86 }
@@ -93,7 +94,7 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
93 /* Contention */ 94 /* Contention */
94 schedule(); 95 schedule();
95 if (signal_pending(current)) { 96 if (signal_pending(current)) {
96 ret = -ERESTARTSYS; 97 ret = -EINTR;
97 break; 98 break;
98 } 99 }
99 } 100 }
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
index 46bb923b097c..096e2a37446d 100644
--- a/drivers/gpu/drm/drm_stub.c
+++ b/drivers/gpu/drm/drm_stub.c
@@ -146,14 +146,6 @@ static void drm_master_destroy(struct kref *kref)
146 146
147 drm_ht_remove(&master->magiclist); 147 drm_ht_remove(&master->magiclist);
148 148
149 if (master->lock.hw_lock) {
150 if (dev->sigdata.lock == master->lock.hw_lock)
151 dev->sigdata.lock = NULL;
152 master->lock.hw_lock = NULL;
153 master->lock.file_priv = NULL;
154 wake_up_interruptible(&master->lock.lock_queue);
155 }
156
157 drm_free(master, sizeof(*master), DRM_MEM_DRIVER); 149 drm_free(master, sizeof(*master), DRM_MEM_DRIVER);
158} 150}
159 151
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 2d797ffe8137..6dab63bdc4c1 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -811,7 +811,7 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
811 dev_priv->hws_map.flags = 0; 811 dev_priv->hws_map.flags = 0;
812 dev_priv->hws_map.mtrr = 0; 812 dev_priv->hws_map.mtrr = 0;
813 813
814 drm_core_ioremap(&dev_priv->hws_map, dev); 814 drm_core_ioremap_wc(&dev_priv->hws_map, dev);
815 if (dev_priv->hws_map.handle == NULL) { 815 if (dev_priv->hws_map.handle == NULL) {
816 i915_dma_cleanup(dev); 816 i915_dma_cleanup(dev);
817 dev_priv->status_gfx_addr = 0; 817 dev_priv->status_gfx_addr = 0;
@@ -1090,6 +1090,11 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1090 dev_priv->mm.gtt_mapping = 1090 dev_priv->mm.gtt_mapping =
1091 io_mapping_create_wc(dev->agp->base, 1091 io_mapping_create_wc(dev->agp->base,
1092 dev->agp->agp_info.aper_size * 1024*1024); 1092 dev->agp->agp_info.aper_size * 1024*1024);
1093 if (dev_priv->mm.gtt_mapping == NULL) {
1094 ret = -EIO;
1095 goto out_rmmap;
1096 }
1097
1093 /* Set up a WC MTRR for non-PAT systems. This is more common than 1098 /* Set up a WC MTRR for non-PAT systems. This is more common than
1094 * one would think, because the kernel disables PAT on first 1099 * one would think, because the kernel disables PAT on first
1095 * generation Core chips because WC PAT gets overridden by a UC 1100 * generation Core chips because WC PAT gets overridden by a UC
@@ -1122,7 +1127,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1122 if (!I915_NEED_GFX_HWS(dev)) { 1127 if (!I915_NEED_GFX_HWS(dev)) {
1123 ret = i915_init_phys_hws(dev); 1128 ret = i915_init_phys_hws(dev);
1124 if (ret != 0) 1129 if (ret != 0)
1125 goto out_rmmap; 1130 goto out_iomapfree;
1126 } 1131 }
1127 1132
1128 /* On the 945G/GM, the chipset reports the MSI capability on the 1133 /* On the 945G/GM, the chipset reports the MSI capability on the
@@ -1161,6 +1166,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1161 1166
1162 return 0; 1167 return 0;
1163 1168
1169out_iomapfree:
1170 io_mapping_free(dev_priv->mm.gtt_mapping);
1164out_rmmap: 1171out_rmmap:
1165 iounmap(dev_priv->regs); 1172 iounmap(dev_priv->regs);
1166free_priv: 1173free_priv:
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 0692622ee2b3..b293ef0bae71 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -68,9 +68,11 @@ static int i915_suspend(struct drm_device *dev, pm_message_t state)
68 i915_save_state(dev); 68 i915_save_state(dev);
69 69
70 /* If KMS is active, we do the leavevt stuff here */ 70 /* If KMS is active, we do the leavevt stuff here */
71 if (drm_core_check_feature(dev, DRIVER_MODESET) && i915_gem_idle(dev)) { 71 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
72 dev_err(&dev->pdev->dev, "GEM idle failed, aborting suspend\n"); 72 if (i915_gem_idle(dev))
73 return -EBUSY; 73 dev_err(&dev->pdev->dev,
74 "GEM idle failed, resume may fail\n");
75 drm_irq_uninstall(dev);
74 } 76 }
75 77
76 intel_opregion_free(dev); 78 intel_opregion_free(dev);
@@ -108,6 +110,8 @@ static int i915_resume(struct drm_device *dev)
108 if (ret != 0) 110 if (ret != 0)
109 ret = -1; 111 ret = -1;
110 mutex_unlock(&dev->struct_mutex); 112 mutex_unlock(&dev->struct_mutex);
113
114 drm_irq_install(dev);
111 } 115 }
112 116
113 return ret; 117 return ret;
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 25b337438ca7..85685bfd12da 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1051,6 +1051,9 @@ i915_gem_retire_requests(struct drm_device *dev)
1051 drm_i915_private_t *dev_priv = dev->dev_private; 1051 drm_i915_private_t *dev_priv = dev->dev_private;
1052 uint32_t seqno; 1052 uint32_t seqno;
1053 1053
1054 if (!dev_priv->hw_status_page)
1055 return;
1056
1054 seqno = i915_get_gem_seqno(dev); 1057 seqno = i915_get_gem_seqno(dev);
1055 1058
1056 while (!list_empty(&dev_priv->mm.request_list)) { 1059 while (!list_empty(&dev_priv->mm.request_list)) {
@@ -3545,7 +3548,7 @@ i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
3545 user_data = (char __user *) (uintptr_t) args->data_ptr; 3548 user_data = (char __user *) (uintptr_t) args->data_ptr;
3546 obj_addr = obj_priv->phys_obj->handle->vaddr + args->offset; 3549 obj_addr = obj_priv->phys_obj->handle->vaddr + args->offset;
3547 3550
3548 DRM_ERROR("obj_addr %p, %lld\n", obj_addr, args->size); 3551 DRM_DEBUG("obj_addr %p, %lld\n", obj_addr, args->size);
3549 ret = copy_from_user(obj_addr, user_data, args->size); 3552 ret = copy_from_user(obj_addr, user_data, args->size);
3550 if (ret) 3553 if (ret)
3551 return -EFAULT; 3554 return -EFAULT;
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 548ff2c66431..87b6b603469e 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -383,12 +383,13 @@ int i915_irq_emit(struct drm_device *dev, void *data,
383 drm_i915_irq_emit_t *emit = data; 383 drm_i915_irq_emit_t *emit = data;
384 int result; 384 int result;
385 385
386 RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
387
388 if (!dev_priv) { 386 if (!dev_priv) {
389 DRM_ERROR("called with no initialization\n"); 387 DRM_ERROR("called with no initialization\n");
390 return -EINVAL; 388 return -EINVAL;
391 } 389 }
390
391 RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
392
392 mutex_lock(&dev->struct_mutex); 393 mutex_lock(&dev->struct_mutex);
393 result = i915_emit_irq(dev); 394 result = i915_emit_irq(dev);
394 mutex_unlock(&dev->struct_mutex); 395 mutex_unlock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 65be30dccc77..fc28e2bbd542 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -111,6 +111,12 @@ parse_panel_data(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
111 panel_fixed_mode->clock = dvo_timing->clock * 10; 111 panel_fixed_mode->clock = dvo_timing->clock * 10;
112 panel_fixed_mode->type = DRM_MODE_TYPE_PREFERRED; 112 panel_fixed_mode->type = DRM_MODE_TYPE_PREFERRED;
113 113
114 /* Some VBTs have bogus h/vtotal values */
115 if (panel_fixed_mode->hsync_end > panel_fixed_mode->htotal)
116 panel_fixed_mode->htotal = panel_fixed_mode->hsync_end + 1;
117 if (panel_fixed_mode->vsync_end > panel_fixed_mode->vtotal)
118 panel_fixed_mode->vtotal = panel_fixed_mode->vsync_end + 1;
119
114 drm_mode_set_name(panel_fixed_mode); 120 drm_mode_set_name(panel_fixed_mode);
115 121
116 dev_priv->vbt_mode = panel_fixed_mode; 122 dev_priv->vbt_mode = panel_fixed_mode;
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 65b635ce28c8..a2834276cb38 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -217,7 +217,7 @@ bool intel_pipe_has_type (struct drm_crtc *crtc, int type)
217 return false; 217 return false;
218} 218}
219 219
220#define INTELPllInvalid(s) do { DRM_DEBUG(s); return false; } while (0) 220#define INTELPllInvalid(s) do { /* DRM_DEBUG(s); */ return false; } while (0)
221/** 221/**
222 * Returns whether the given set of divisors are valid for a given refclk with 222 * Returns whether the given set of divisors are valid for a given refclk with
223 * the given connectors. 223 * the given connectors.
diff --git a/drivers/i2c/busses/i2c-acorn.c b/drivers/i2c/busses/i2c-acorn.c
index 9fee3ca17344..9aefb5e5864d 100644
--- a/drivers/i2c/busses/i2c-acorn.c
+++ b/drivers/i2c/busses/i2c-acorn.c
@@ -79,10 +79,11 @@ static struct i2c_algo_bit_data ioc_data = {
79 .getsda = ioc_getsda, 79 .getsda = ioc_getsda,
80 .getscl = ioc_getscl, 80 .getscl = ioc_getscl,
81 .udelay = 80, 81 .udelay = 80,
82 .timeout = 100 82 .timeout = HZ,
83}; 83};
84 84
85static struct i2c_adapter ioc_ops = { 85static struct i2c_adapter ioc_ops = {
86 .nr = 0,
86 .algo_data = &ioc_data, 87 .algo_data = &ioc_data,
87}; 88};
88 89
@@ -90,7 +91,7 @@ static int __init i2c_ioc_init(void)
90{ 91{
91 force_ones = FORCE_ONES | SCL | SDA; 92 force_ones = FORCE_ONES | SCL | SDA;
92 93
93 return i2c_bit_add_bus(&ioc_ops); 94 return i2c_bit_add_numbered_bus(&ioc_ops);
94} 95}
95 96
96module_init(i2c_ioc_init); 97module_init(i2c_ioc_init);
diff --git a/drivers/i2c/busses/i2c-amd8111.c b/drivers/i2c/busses/i2c-amd8111.c
index edab51973bf5..a7c59908c457 100644
--- a/drivers/i2c/busses/i2c-amd8111.c
+++ b/drivers/i2c/busses/i2c-amd8111.c
@@ -72,7 +72,7 @@ static unsigned int amd_ec_wait_write(struct amd_smbus *smbus)
72{ 72{
73 int timeout = 500; 73 int timeout = 500;
74 74
75 while (timeout-- && (inb(smbus->base + AMD_EC_SC) & AMD_EC_SC_IBF)) 75 while ((inb(smbus->base + AMD_EC_SC) & AMD_EC_SC_IBF) && --timeout)
76 udelay(1); 76 udelay(1);
77 77
78 if (!timeout) { 78 if (!timeout) {
@@ -88,7 +88,7 @@ static unsigned int amd_ec_wait_read(struct amd_smbus *smbus)
88{ 88{
89 int timeout = 500; 89 int timeout = 500;
90 90
91 while (timeout-- && (~inb(smbus->base + AMD_EC_SC) & AMD_EC_SC_OBF)) 91 while ((~inb(smbus->base + AMD_EC_SC) & AMD_EC_SC_OBF) && --timeout)
92 udelay(1); 92 udelay(1);
93 93
94 if (!timeout) { 94 if (!timeout) {
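
The two polling-loop fixes above address a classic off-by-one in timeout handling: with while (timeout-- && busy()), a wait that never completes leaves timeout at -1, so the following if (!timeout) check never reports the timeout; testing the condition first and pre-decrementing, while (busy() && --timeout), leaves timeout at exactly 0 when the wait expires. A quick demonstration (busy() is a stand-in that never becomes ready):

    #include <stdio.h>

    static int busy(void) { return 1; }     /* device never becomes ready */

    int main(void)
    {
        int timeout;

        timeout = 5;
        while (timeout-- && busy())
            ;
        printf("post-decrement: ends at %d, !timeout is %d (timeout missed)\n",
               timeout, !timeout);          /* ends at -1, check fails */

        timeout = 5;
        while (busy() && --timeout)
            ;
        printf("pre-decrement:  ends at %d, !timeout is %d (timeout detected)\n",
               timeout, !timeout);          /* ends at 0, check fires */
        return 0;
    }
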
diff --git a/drivers/i2c/busses/i2c-ixp2000.c b/drivers/i2c/busses/i2c-ixp2000.c
index 8e8467970481..c016f7a2c5fc 100644
--- a/drivers/i2c/busses/i2c-ixp2000.c
+++ b/drivers/i2c/busses/i2c-ixp2000.c
@@ -114,7 +114,7 @@ static int ixp2000_i2c_probe(struct platform_device *plat_dev)
114 drv_data->algo_data.getsda = ixp2000_bit_getsda; 114 drv_data->algo_data.getsda = ixp2000_bit_getsda;
115 drv_data->algo_data.getscl = ixp2000_bit_getscl; 115 drv_data->algo_data.getscl = ixp2000_bit_getscl;
116 drv_data->algo_data.udelay = 6; 116 drv_data->algo_data.udelay = 6;
117 drv_data->algo_data.timeout = 100; 117 drv_data->algo_data.timeout = HZ;
118 118
119 strlcpy(drv_data->adapter.name, plat_dev->dev.driver->name, 119 strlcpy(drv_data->adapter.name, plat_dev->dev.driver->name,
120 sizeof(drv_data->adapter.name)); 120 sizeof(drv_data->adapter.name));
diff --git a/drivers/i2c/busses/i2c-pxa.c b/drivers/i2c/busses/i2c-pxa.c
index 6af68146c342..bdb1f7510e91 100644
--- a/drivers/i2c/busses/i2c-pxa.c
+++ b/drivers/i2c/busses/i2c-pxa.c
@@ -644,7 +644,7 @@ static int i2c_pxa_do_pio_xfer(struct pxa_i2c *i2c,
644 644
645 i2c_pxa_start_message(i2c); 645 i2c_pxa_start_message(i2c);
646 646
647 while (timeout-- && i2c->msg_num > 0) { 647 while (i2c->msg_num > 0 && --timeout) {
648 i2c_pxa_handler(0, i2c); 648 i2c_pxa_handler(0, i2c);
649 udelay(10); 649 udelay(10);
650 } 650 }
diff --git a/drivers/i2c/busses/scx200_i2c.c b/drivers/i2c/busses/scx200_i2c.c
index 162b74a04886..42df0eca43d5 100644
--- a/drivers/i2c/busses/scx200_i2c.c
+++ b/drivers/i2c/busses/scx200_i2c.c
@@ -76,7 +76,7 @@ static struct i2c_algo_bit_data scx200_i2c_data = {
76 .getsda = scx200_i2c_getsda, 76 .getsda = scx200_i2c_getsda,
77 .getscl = scx200_i2c_getscl, 77 .getscl = scx200_i2c_getscl,
78 .udelay = 10, 78 .udelay = 10,
79 .timeout = 100, 79 .timeout = HZ,
80}; 80};
81 81
82static struct i2c_adapter scx200_i2c_ops = { 82static struct i2c_adapter scx200_i2c_ops = {
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index b1c9abe24c7b..e7d984866de0 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -1831,7 +1831,8 @@ static s32 i2c_smbus_xfer_emulated(struct i2c_adapter * adapter, u16 addr,
1831 case I2C_SMBUS_QUICK: 1831 case I2C_SMBUS_QUICK:
1832 msg[0].len = 0; 1832 msg[0].len = 0;
1833 /* Special case: The read/write field is used as data */ 1833 /* Special case: The read/write field is used as data */
1834 msg[0].flags = flags | (read_write==I2C_SMBUS_READ)?I2C_M_RD:0; 1834 msg[0].flags = flags | (read_write == I2C_SMBUS_READ ?
1835 I2C_M_RD : 0);
1835 num = 1; 1836 num = 1;
1836 break; 1837 break;
1837 case I2C_SMBUS_BYTE: 1838 case I2C_SMBUS_BYTE:
diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
index c171988a9f51..7e13d2df9af3 100644
--- a/drivers/i2c/i2c-dev.c
+++ b/drivers/i2c/i2c-dev.c
@@ -35,6 +35,7 @@
35#include <linux/i2c.h> 35#include <linux/i2c.h>
36#include <linux/i2c-dev.h> 36#include <linux/i2c-dev.h>
37#include <linux/smp_lock.h> 37#include <linux/smp_lock.h>
38#include <linux/jiffies.h>
38#include <asm/uaccess.h> 39#include <asm/uaccess.h>
39 40
40static struct i2c_driver i2cdev_driver; 41static struct i2c_driver i2cdev_driver;
@@ -422,7 +423,10 @@ static long i2cdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
422 client->adapter->retries = arg; 423 client->adapter->retries = arg;
423 break; 424 break;
424 case I2C_TIMEOUT: 425 case I2C_TIMEOUT:
425 client->adapter->timeout = arg; 426 /* For historical reasons, user-space sets the timeout
427 * value in units of 10 ms.
428 */
429 client->adapter->timeout = msecs_to_jiffies(arg * 10);
426 break; 430 break;
427 default: 431 default:
428 /* NOTE: returning a fault code here could cause trouble 432 /* NOTE: returning a fault code here could cause trouble
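
The i2c-dev change above keeps the historical user-space contract: the value handed to the I2C_TIMEOUT ioctl is still in 10 ms units, and only the kernel-side storage switches to jiffies via msecs_to_jiffies(). Nothing changes for callers; a minimal user-space example (the device path is illustrative):

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/i2c-dev.h>

    int main(void)
    {
        int fd = open("/dev/i2c-0", O_RDWR);

        if (fd < 0) {
            perror("open");
            return 1;
        }
        /* 5 * 10 ms = 50 ms bus timeout; the adapter now stores this
         * internally as msecs_to_jiffies(50) */
        if (ioctl(fd, I2C_TIMEOUT, 5) < 0)
            perror("I2C_TIMEOUT");
        close(fd);
        return 0;
    }
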
diff --git a/drivers/ide/Kconfig b/drivers/ide/Kconfig
index 3dad2299d9c5..e072903b12f0 100644
--- a/drivers/ide/Kconfig
+++ b/drivers/ide/Kconfig
@@ -46,7 +46,7 @@ menuconfig IDE
46 SMART parameters from disk drives. 46 SMART parameters from disk drives.
47 47
48 To compile this driver as a module, choose M here: the 48 To compile this driver as a module, choose M here: the
49 module will be called ide. 49 module will be called ide-core.ko.
50 50
51 For further information, please read <file:Documentation/ide/ide.txt>. 51 For further information, please read <file:Documentation/ide/ide.txt>.
52 52
diff --git a/drivers/ide/amd74xx.c b/drivers/ide/amd74xx.c
index 69660a431cd9..77267c859965 100644
--- a/drivers/ide/amd74xx.c
+++ b/drivers/ide/amd74xx.c
@@ -166,7 +166,7 @@ static unsigned int init_chipset_amd74xx(struct pci_dev *dev)
166 * Check for broken FIFO support. 166 * Check for broken FIFO support.
167 */ 167 */
168 if (dev->vendor == PCI_VENDOR_ID_AMD && 168 if (dev->vendor == PCI_VENDOR_ID_AMD &&
169 dev->vendor == PCI_DEVICE_ID_AMD_VIPER_7411) 169 dev->device == PCI_DEVICE_ID_AMD_VIPER_7411)
170 t &= 0x0f; 170 t &= 0x0f;
171 else 171 else
172 t |= 0xf0; 172 t |= 0xf0;
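
The amd74xx hunk fixes a condition that tested dev->vendor twice: a vendor ID can never equal both PCI_VENDOR_ID_AMD and PCI_DEVICE_ID_AMD_VIPER_7411, so the broken-FIFO quirk (t &= 0x0f) could never be applied. Comparing dev->device restores the intended match.
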
diff --git a/drivers/ide/atiixp.c b/drivers/ide/atiixp.c
index b2735d28f5cc..ecd1e62ca91a 100644
--- a/drivers/ide/atiixp.c
+++ b/drivers/ide/atiixp.c
@@ -52,7 +52,7 @@ static void atiixp_set_pio_mode(ide_drive_t *drive, const u8 pio)
52{ 52{
53 struct pci_dev *dev = to_pci_dev(drive->hwif->dev); 53 struct pci_dev *dev = to_pci_dev(drive->hwif->dev);
54 unsigned long flags; 54 unsigned long flags;
55 int timing_shift = (drive->dn & 2) ? 16 : 0 + (drive->dn & 1) ? 0 : 8; 55 int timing_shift = (drive->dn ^ 1) * 8;
56 u32 pio_timing_data; 56 u32 pio_timing_data;
57 u16 pio_mode_data; 57 u16 pio_mode_data;
58 58
@@ -85,7 +85,7 @@ static void atiixp_set_dma_mode(ide_drive_t *drive, const u8 speed)
85{ 85{
86 struct pci_dev *dev = to_pci_dev(drive->hwif->dev); 86 struct pci_dev *dev = to_pci_dev(drive->hwif->dev);
87 unsigned long flags; 87 unsigned long flags;
88 int timing_shift = (drive->dn & 2) ? 16 : 0 + (drive->dn & 1) ? 0 : 8; 88 int timing_shift = (drive->dn ^ 1) * 8;
89 u32 tmp32; 89 u32 tmp32;
90 u16 tmp16; 90 u16 tmp16;
91 u16 udma_ctl = 0; 91 u16 udma_ctl = 0;
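
Both atiixp hunks replace an expression whose precedence did not match its layout: '+' binds tighter than '?:', so the old line parsed as

        (dn & 2) ? 16 : ((0 + (dn & 1)) ? 0 : 8)

which yields shifts of 8, 0, 16, 16 for drive numbers 0..3. The replacement computes the per-drive bit offsets directly:

        int timing_shift = (drive->dn ^ 1) * 8;         /* dn 0..3 -> 8, 0, 24, 16 */

so only drive 2 actually changes behaviour (16 -> 24), matching what the original ?:-chain was presumably meant to produce.
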
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index 0bfeb0c79d6e..ddfbea41d296 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -55,7 +55,7 @@
55 55
56static DEFINE_MUTEX(idecd_ref_mutex); 56static DEFINE_MUTEX(idecd_ref_mutex);
57 57
58static void ide_cd_release(struct kref *); 58static void ide_cd_release(struct device *);
59 59
60static struct cdrom_info *ide_cd_get(struct gendisk *disk) 60static struct cdrom_info *ide_cd_get(struct gendisk *disk)
61{ 61{
@@ -67,7 +67,7 @@ static struct cdrom_info *ide_cd_get(struct gendisk *disk)
67 if (ide_device_get(cd->drive)) 67 if (ide_device_get(cd->drive))
68 cd = NULL; 68 cd = NULL;
69 else 69 else
70 kref_get(&cd->kref); 70 get_device(&cd->dev);
71 71
72 } 72 }
73 mutex_unlock(&idecd_ref_mutex); 73 mutex_unlock(&idecd_ref_mutex);
@@ -79,7 +79,7 @@ static void ide_cd_put(struct cdrom_info *cd)
79 ide_drive_t *drive = cd->drive; 79 ide_drive_t *drive = cd->drive;
80 80
81 mutex_lock(&idecd_ref_mutex); 81 mutex_lock(&idecd_ref_mutex);
82 kref_put(&cd->kref, ide_cd_release); 82 put_device(&cd->dev);
83 ide_device_put(drive); 83 ide_device_put(drive);
84 mutex_unlock(&idecd_ref_mutex); 84 mutex_unlock(&idecd_ref_mutex);
85} 85}
@@ -194,6 +194,14 @@ static void cdrom_analyze_sense_data(ide_drive_t *drive,
194 bio_sectors = max(bio_sectors(failed_command->bio), 4U); 194 bio_sectors = max(bio_sectors(failed_command->bio), 4U);
195 sector &= ~(bio_sectors - 1); 195 sector &= ~(bio_sectors - 1);
196 196
197 /*
198 * The SCSI specification allows for the value
199 * returned by READ CAPACITY to be up to 75 2K
200 * sectors past the last readable block.
201 * Therefore, if we hit a medium error within the
202 * last 75 2K sectors, we decrease the saved size
203 * value.
204 */
197 if (sector < get_capacity(info->disk) && 205 if (sector < get_capacity(info->disk) &&
198 drive->probed_capacity - sector < 4 * 75) 206 drive->probed_capacity - sector < 4 * 75)
199 set_capacity(info->disk, sector); 207 set_capacity(info->disk, sector);
@@ -1790,15 +1798,17 @@ static void ide_cd_remove(ide_drive_t *drive)
1790 ide_debug_log(IDE_DBG_FUNC, "Call %s\n", __func__); 1798 ide_debug_log(IDE_DBG_FUNC, "Call %s\n", __func__);
1791 1799
1792 ide_proc_unregister_driver(drive, info->driver); 1800 ide_proc_unregister_driver(drive, info->driver);
1793 1801 device_del(&info->dev);
1794 del_gendisk(info->disk); 1802 del_gendisk(info->disk);
1795 1803
1796 ide_cd_put(info); 1804 mutex_lock(&idecd_ref_mutex);
1805 put_device(&info->dev);
1806 mutex_unlock(&idecd_ref_mutex);
1797} 1807}
1798 1808
1799static void ide_cd_release(struct kref *kref) 1809static void ide_cd_release(struct device *dev)
1800{ 1810{
1801 struct cdrom_info *info = to_ide_drv(kref, cdrom_info); 1811 struct cdrom_info *info = to_ide_drv(dev, cdrom_info);
1802 struct cdrom_device_info *devinfo = &info->devinfo; 1812 struct cdrom_device_info *devinfo = &info->devinfo;
1803 ide_drive_t *drive = info->drive; 1813 ide_drive_t *drive = info->drive;
1804 struct gendisk *g = info->disk; 1814 struct gendisk *g = info->disk;
@@ -1997,7 +2007,12 @@ static int ide_cd_probe(ide_drive_t *drive)
1997 2007
1998 ide_init_disk(g, drive); 2008 ide_init_disk(g, drive);
1999 2009
2000 kref_init(&info->kref); 2010 info->dev.parent = &drive->gendev;
2011 info->dev.release = ide_cd_release;
2012 dev_set_name(&info->dev, dev_name(&drive->gendev));
2013
2014 if (device_register(&info->dev))
2015 goto out_free_disk;
2001 2016
2002 info->drive = drive; 2017 info->drive = drive;
2003 info->driver = &ide_cdrom_driver; 2018 info->driver = &ide_cdrom_driver;
@@ -2011,7 +2026,7 @@ static int ide_cd_probe(ide_drive_t *drive)
2011 g->driverfs_dev = &drive->gendev; 2026 g->driverfs_dev = &drive->gendev;
2012 g->flags = GENHD_FL_CD | GENHD_FL_REMOVABLE; 2027 g->flags = GENHD_FL_CD | GENHD_FL_REMOVABLE;
2013 if (ide_cdrom_setup(drive)) { 2028 if (ide_cdrom_setup(drive)) {
2014 ide_cd_release(&info->kref); 2029 put_device(&info->dev);
2015 goto failed; 2030 goto failed;
2016 } 2031 }
2017 2032
@@ -2021,6 +2036,8 @@ static int ide_cd_probe(ide_drive_t *drive)
2021 add_disk(g); 2036 add_disk(g);
2022 return 0; 2037 return 0;
2023 2038
2039out_free_disk:
2040 put_disk(g);
2024out_free_cd: 2041out_free_cd:
2025 kfree(info); 2042 kfree(info);
2026failed: 2043failed:
diff --git a/drivers/ide/ide-cd.h b/drivers/ide/ide-cd.h
index ac40d6cb90a2..c878bfcf1116 100644
--- a/drivers/ide/ide-cd.h
+++ b/drivers/ide/ide-cd.h
@@ -80,7 +80,7 @@ struct cdrom_info {
80 ide_drive_t *drive; 80 ide_drive_t *drive;
81 struct ide_driver *driver; 81 struct ide_driver *driver;
82 struct gendisk *disk; 82 struct gendisk *disk;
83 struct kref kref; 83 struct device dev;
84 84
85 /* Buffer for table of contents. NULL if we haven't allocated 85 /* Buffer for table of contents. NULL if we haven't allocated
86 a TOC buffer for this device yet. */ 86 a TOC buffer for this device yet. */
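
The ide-cd changes (and the matching ide-gd and ide-tape hunks below) convert the driver objects from a bare kref to an embedded struct device, so the driver core's refcounting and release callback manage their lifetime. Reduced to a sketch with illustrative names (struct my_obj and my_obj_release are not kernel API):

        struct my_obj {
                struct device dev;              /* embedded; owns the object's lifetime */
                /* ... driver state ... */
        };

        static void my_obj_release(struct device *dev)
        {
                struct my_obj *obj = container_of(dev, struct my_obj, dev);

                kfree(obj);                     /* runs when the last reference is dropped */
        }

        /* probe */
        obj->dev.parent  = parent_dev;
        obj->dev.release = my_obj_release;
        dev_set_name(&obj->dev, "%s", name);
        if (device_register(&obj->dev))
                goto out_err;                   /* error unwinding elided */

        /* users take and drop references with get_device()/put_device() instead of kref_get()/kref_put() */
        /* remove: device_del(&obj->dev), then put_device(&obj->dev) to drop the probe reference */
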
diff --git a/drivers/ide/ide-gd.c b/drivers/ide/ide-gd.c
index 7857b209c6df..047109419902 100644
--- a/drivers/ide/ide-gd.c
+++ b/drivers/ide/ide-gd.c
@@ -25,7 +25,7 @@ module_param(debug_mask, ulong, 0644);
25 25
26static DEFINE_MUTEX(ide_disk_ref_mutex); 26static DEFINE_MUTEX(ide_disk_ref_mutex);
27 27
28static void ide_disk_release(struct kref *); 28static void ide_disk_release(struct device *);
29 29
30static struct ide_disk_obj *ide_disk_get(struct gendisk *disk) 30static struct ide_disk_obj *ide_disk_get(struct gendisk *disk)
31{ 31{
@@ -37,7 +37,7 @@ static struct ide_disk_obj *ide_disk_get(struct gendisk *disk)
37 if (ide_device_get(idkp->drive)) 37 if (ide_device_get(idkp->drive))
38 idkp = NULL; 38 idkp = NULL;
39 else 39 else
40 kref_get(&idkp->kref); 40 get_device(&idkp->dev);
41 } 41 }
42 mutex_unlock(&ide_disk_ref_mutex); 42 mutex_unlock(&ide_disk_ref_mutex);
43 return idkp; 43 return idkp;
@@ -48,7 +48,7 @@ static void ide_disk_put(struct ide_disk_obj *idkp)
48 ide_drive_t *drive = idkp->drive; 48 ide_drive_t *drive = idkp->drive;
49 49
50 mutex_lock(&ide_disk_ref_mutex); 50 mutex_lock(&ide_disk_ref_mutex);
51 kref_put(&idkp->kref, ide_disk_release); 51 put_device(&idkp->dev);
52 ide_device_put(drive); 52 ide_device_put(drive);
53 mutex_unlock(&ide_disk_ref_mutex); 53 mutex_unlock(&ide_disk_ref_mutex);
54} 54}
@@ -66,17 +66,18 @@ static void ide_gd_remove(ide_drive_t *drive)
66 struct gendisk *g = idkp->disk; 66 struct gendisk *g = idkp->disk;
67 67
68 ide_proc_unregister_driver(drive, idkp->driver); 68 ide_proc_unregister_driver(drive, idkp->driver);
69 69 device_del(&idkp->dev);
70 del_gendisk(g); 70 del_gendisk(g);
71
72 drive->disk_ops->flush(drive); 71 drive->disk_ops->flush(drive);
73 72
74 ide_disk_put(idkp); 73 mutex_lock(&ide_disk_ref_mutex);
74 put_device(&idkp->dev);
75 mutex_unlock(&ide_disk_ref_mutex);
75} 76}
76 77
77static void ide_disk_release(struct kref *kref) 78static void ide_disk_release(struct device *dev)
78{ 79{
79 struct ide_disk_obj *idkp = to_ide_drv(kref, ide_disk_obj); 80 struct ide_disk_obj *idkp = to_ide_drv(dev, ide_disk_obj);
80 ide_drive_t *drive = idkp->drive; 81 ide_drive_t *drive = idkp->drive;
81 struct gendisk *g = idkp->disk; 82 struct gendisk *g = idkp->disk;
82 83
@@ -348,7 +349,12 @@ static int ide_gd_probe(ide_drive_t *drive)
348 349
349 ide_init_disk(g, drive); 350 ide_init_disk(g, drive);
350 351
351 kref_init(&idkp->kref); 352 idkp->dev.parent = &drive->gendev;
353 idkp->dev.release = ide_disk_release;
354 dev_set_name(&idkp->dev, dev_name(&drive->gendev));
355
356 if (device_register(&idkp->dev))
357 goto out_free_disk;
352 358
353 idkp->drive = drive; 359 idkp->drive = drive;
354 idkp->driver = &ide_gd_driver; 360 idkp->driver = &ide_gd_driver;
@@ -373,6 +379,8 @@ static int ide_gd_probe(ide_drive_t *drive)
373 add_disk(g); 379 add_disk(g);
374 return 0; 380 return 0;
375 381
382out_free_disk:
383 put_disk(g);
376out_free_idkp: 384out_free_idkp:
377 kfree(idkp); 385 kfree(idkp);
378failed: 386failed:
diff --git a/drivers/ide/ide-gd.h b/drivers/ide/ide-gd.h
index a86779f0756b..b604bdd318a1 100644
--- a/drivers/ide/ide-gd.h
+++ b/drivers/ide/ide-gd.h
@@ -17,7 +17,7 @@ struct ide_disk_obj {
17 ide_drive_t *drive; 17 ide_drive_t *drive;
18 struct ide_driver *driver; 18 struct ide_driver *driver;
19 struct gendisk *disk; 19 struct gendisk *disk;
20 struct kref kref; 20 struct device dev;
21 unsigned int openers; /* protected by BKL for now */ 21 unsigned int openers; /* protected by BKL for now */
22 22
23 /* Last failed packet command */ 23 /* Last failed packet command */
diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c
index d7ecd3c79757..bb450a7608c2 100644
--- a/drivers/ide/ide-tape.c
+++ b/drivers/ide/ide-tape.c
@@ -169,7 +169,7 @@ typedef struct ide_tape_obj {
169 ide_drive_t *drive; 169 ide_drive_t *drive;
170 struct ide_driver *driver; 170 struct ide_driver *driver;
171 struct gendisk *disk; 171 struct gendisk *disk;
172 struct kref kref; 172 struct device dev;
173 173
174 /* 174 /*
175 * failed_pc points to the last failed packet command, or contains 175 * failed_pc points to the last failed packet command, or contains
@@ -267,7 +267,7 @@ static DEFINE_MUTEX(idetape_ref_mutex);
267 267
268static struct class *idetape_sysfs_class; 268static struct class *idetape_sysfs_class;
269 269
270static void ide_tape_release(struct kref *); 270static void ide_tape_release(struct device *);
271 271
272static struct ide_tape_obj *ide_tape_get(struct gendisk *disk) 272static struct ide_tape_obj *ide_tape_get(struct gendisk *disk)
273{ 273{
@@ -279,7 +279,7 @@ static struct ide_tape_obj *ide_tape_get(struct gendisk *disk)
279 if (ide_device_get(tape->drive)) 279 if (ide_device_get(tape->drive))
280 tape = NULL; 280 tape = NULL;
281 else 281 else
282 kref_get(&tape->kref); 282 get_device(&tape->dev);
283 } 283 }
284 mutex_unlock(&idetape_ref_mutex); 284 mutex_unlock(&idetape_ref_mutex);
285 return tape; 285 return tape;
@@ -290,7 +290,7 @@ static void ide_tape_put(struct ide_tape_obj *tape)
290 ide_drive_t *drive = tape->drive; 290 ide_drive_t *drive = tape->drive;
291 291
292 mutex_lock(&idetape_ref_mutex); 292 mutex_lock(&idetape_ref_mutex);
293 kref_put(&tape->kref, ide_tape_release); 293 put_device(&tape->dev);
294 ide_device_put(drive); 294 ide_device_put(drive);
295 mutex_unlock(&idetape_ref_mutex); 295 mutex_unlock(&idetape_ref_mutex);
296} 296}
@@ -308,7 +308,7 @@ static struct ide_tape_obj *ide_tape_chrdev_get(unsigned int i)
308 mutex_lock(&idetape_ref_mutex); 308 mutex_lock(&idetape_ref_mutex);
309 tape = idetape_devs[i]; 309 tape = idetape_devs[i];
310 if (tape) 310 if (tape)
311 kref_get(&tape->kref); 311 get_device(&tape->dev);
312 mutex_unlock(&idetape_ref_mutex); 312 mutex_unlock(&idetape_ref_mutex);
313 return tape; 313 return tape;
314} 314}
@@ -2256,15 +2256,17 @@ static void ide_tape_remove(ide_drive_t *drive)
2256 idetape_tape_t *tape = drive->driver_data; 2256 idetape_tape_t *tape = drive->driver_data;
2257 2257
2258 ide_proc_unregister_driver(drive, tape->driver); 2258 ide_proc_unregister_driver(drive, tape->driver);
2259 2259 device_del(&tape->dev);
2260 ide_unregister_region(tape->disk); 2260 ide_unregister_region(tape->disk);
2261 2261
2262 ide_tape_put(tape); 2262 mutex_lock(&idetape_ref_mutex);
2263 put_device(&tape->dev);
2264 mutex_unlock(&idetape_ref_mutex);
2263} 2265}
2264 2266
2265static void ide_tape_release(struct kref *kref) 2267static void ide_tape_release(struct device *dev)
2266{ 2268{
2267 struct ide_tape_obj *tape = to_ide_drv(kref, ide_tape_obj); 2269 struct ide_tape_obj *tape = to_ide_drv(dev, ide_tape_obj);
2268 ide_drive_t *drive = tape->drive; 2270 ide_drive_t *drive = tape->drive;
2269 struct gendisk *g = tape->disk; 2271 struct gendisk *g = tape->disk;
2270 2272
@@ -2407,7 +2409,12 @@ static int ide_tape_probe(ide_drive_t *drive)
2407 2409
2408 ide_init_disk(g, drive); 2410 ide_init_disk(g, drive);
2409 2411
2410 kref_init(&tape->kref); 2412 tape->dev.parent = &drive->gendev;
2413 tape->dev.release = ide_tape_release;
2414 dev_set_name(&tape->dev, dev_name(&drive->gendev));
2415
2416 if (device_register(&tape->dev))
2417 goto out_free_disk;
2411 2418
2412 tape->drive = drive; 2419 tape->drive = drive;
2413 tape->driver = &idetape_driver; 2420 tape->driver = &idetape_driver;
@@ -2436,6 +2443,8 @@ static int ide_tape_probe(ide_drive_t *drive)
2436 2443
2437 return 0; 2444 return 0;
2438 2445
2446out_free_disk:
2447 put_disk(g);
2439out_free_tape: 2448out_free_tape:
2440 kfree(tape); 2449 kfree(tape);
2441failed: 2450failed:
diff --git a/drivers/ide/ide.c b/drivers/ide/ide.c
index 258805da15c3..0920e3b0c962 100644
--- a/drivers/ide/ide.c
+++ b/drivers/ide/ide.c
@@ -337,6 +337,7 @@ static int ide_set_dev_param_mask(const char *s, struct kernel_param *kp)
337 int a, b, i, j = 1; 337 int a, b, i, j = 1;
338 unsigned int *dev_param_mask = (unsigned int *)kp->arg; 338 unsigned int *dev_param_mask = (unsigned int *)kp->arg;
339 339
340 /* controller . device (0 or 1) [ : 1 (set) | 0 (clear) ] */
340 if (sscanf(s, "%d.%d:%d", &a, &b, &j) != 3 && 341 if (sscanf(s, "%d.%d:%d", &a, &b, &j) != 3 &&
341 sscanf(s, "%d.%d", &a, &b) != 2) 342 sscanf(s, "%d.%d", &a, &b) != 2)
342 return -EINVAL; 343 return -EINVAL;
@@ -349,7 +350,7 @@ static int ide_set_dev_param_mask(const char *s, struct kernel_param *kp)
349 if (j) 350 if (j)
350 *dev_param_mask |= (1 << i); 351 *dev_param_mask |= (1 << i);
351 else 352 else
352 *dev_param_mask &= (1 << i); 353 *dev_param_mask &= ~(1 << i);
353 354
354 return 0; 355 return 0;
355} 356}
@@ -392,6 +393,8 @@ static int ide_set_disk_chs(const char *str, struct kernel_param *kp)
392{ 393{
393 int a, b, c = 0, h = 0, s = 0, i, j = 1; 394 int a, b, c = 0, h = 0, s = 0, i, j = 1;
394 395
396 /* controller . device (0 or 1) : Cylinders , Heads , Sectors */
397 /* controller . device (0 or 1) : 1 (use CHS) | 0 (ignore CHS) */
395 if (sscanf(str, "%d.%d:%d,%d,%d", &a, &b, &c, &h, &s) != 5 && 398 if (sscanf(str, "%d.%d:%d,%d,%d", &a, &b, &c, &h, &s) != 5 &&
396 sscanf(str, "%d.%d:%d", &a, &b, &j) != 3) 399 sscanf(str, "%d.%d:%d", &a, &b, &j) != 3)
397 return -EINVAL; 400 return -EINVAL;
@@ -407,7 +410,7 @@ static int ide_set_disk_chs(const char *str, struct kernel_param *kp)
407 if (j) 410 if (j)
408 ide_disks |= (1 << i); 411 ide_disks |= (1 << i);
409 else 412 else
410 ide_disks &= (1 << i); 413 ide_disks &= ~(1 << i);
411 414
412 ide_disks_chs[i].cyl = c; 415 ide_disks_chs[i].cyl = c;
413 ide_disks_chs[i].head = h; 416 ide_disks_chs[i].head = h;
@@ -469,6 +472,8 @@ static int ide_set_ignore_cable(const char *s, struct kernel_param *kp)
469{ 472{
470 int i, j = 1; 473 int i, j = 1;
471 474
475 /* controller (ignore) */
476 /* controller : 1 (ignore) | 0 (use) */
472 if (sscanf(s, "%d:%d", &i, &j) != 2 && sscanf(s, "%d", &i) != 1) 477 if (sscanf(s, "%d:%d", &i, &j) != 2 && sscanf(s, "%d", &i) != 1)
473 return -EINVAL; 478 return -EINVAL;
474 479
@@ -478,7 +483,7 @@ static int ide_set_ignore_cable(const char *s, struct kernel_param *kp)
478 if (j) 483 if (j)
479 ide_ignore_cable |= (1 << i); 484 ide_ignore_cable |= (1 << i);
480 else 485 else
481 ide_ignore_cable &= (1 << i); 486 ide_ignore_cable &= ~(1 << i);
482 487
483 return 0; 488 return 0;
484} 489}
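
All three ide.c hunks fix the same slip: 'mask &= (1 << i)' does not clear bit i at all; it keeps bit i as-is and wipes every other bit. Clearing needs the complement:

        mask |=  (1 << i);      /* set bit i */
        mask &= ~(1 << i);      /* clear bit i */

so with the old code a ':0' (clear) request for one controller left that bit untouched and silently discarded the bits recorded for all the others. The added comments also document the accepted parameter formats next to their sscanf() patterns.
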
diff --git a/drivers/ide/it821x.c b/drivers/ide/it821x.c
index e1c4f5437396..13b8153112ed 100644
--- a/drivers/ide/it821x.c
+++ b/drivers/ide/it821x.c
@@ -5,9 +5,8 @@
5 * May be copied or modified under the terms of the GNU General Public License 5 * May be copied or modified under the terms of the GNU General Public License
6 * Based in part on the ITE vendor provided SCSI driver. 6 * Based in part on the ITE vendor provided SCSI driver.
7 * 7 *
8 * Documentation available from 8 * Documentation:
9 * http://www.ite.com.tw/pc/IT8212F_V04.pdf 9 * Datasheet is freely available, some other documents under NDA.
10 * Some other documents are NDA.
11 * 10 *
12 * The ITE8212 isn't exactly a standard IDE controller. It has two 11 * The ITE8212 isn't exactly a standard IDE controller. It has two
13 * modes. In pass through mode then it is an IDE controller. In its smart 12 * modes. In pass through mode then it is an IDE controller. In its smart
diff --git a/drivers/ieee1394/dma.h b/drivers/ieee1394/dma.h
index 2727bcd24194..467373cab8e5 100644
--- a/drivers/ieee1394/dma.h
+++ b/drivers/ieee1394/dma.h
@@ -12,6 +12,7 @@
12 12
13#include <asm/types.h> 13#include <asm/types.h>
14 14
15struct file;
15struct pci_dev; 16struct pci_dev;
16struct scatterlist; 17struct scatterlist;
17struct vm_area_struct; 18struct vm_area_struct;
diff --git a/drivers/ieee1394/ieee1394_core.c b/drivers/ieee1394/ieee1394_core.c
index 2beb8d94f7bd..872338003721 100644
--- a/drivers/ieee1394/ieee1394_core.c
+++ b/drivers/ieee1394/ieee1394_core.c
@@ -1275,7 +1275,7 @@ static void __exit ieee1394_cleanup(void)
1275 unregister_chrdev_region(IEEE1394_CORE_DEV, 256); 1275 unregister_chrdev_region(IEEE1394_CORE_DEV, 256);
1276} 1276}
1277 1277
1278module_init(ieee1394_init); 1278fs_initcall(ieee1394_init);
1279module_exit(ieee1394_cleanup); 1279module_exit(ieee1394_cleanup);
1280 1280
1281/* Exported symbols */ 1281/* Exported symbols */
@@ -1314,6 +1314,7 @@ EXPORT_SYMBOL(hpsb_make_lock64packet);
1314EXPORT_SYMBOL(hpsb_make_phypacket); 1314EXPORT_SYMBOL(hpsb_make_phypacket);
1315EXPORT_SYMBOL(hpsb_read); 1315EXPORT_SYMBOL(hpsb_read);
1316EXPORT_SYMBOL(hpsb_write); 1316EXPORT_SYMBOL(hpsb_write);
1317EXPORT_SYMBOL(hpsb_lock);
1317EXPORT_SYMBOL(hpsb_packet_success); 1318EXPORT_SYMBOL(hpsb_packet_success);
1318 1319
1319/** highlevel.c **/ 1320/** highlevel.c **/
diff --git a/drivers/ieee1394/ieee1394_transactions.c b/drivers/ieee1394/ieee1394_transactions.c
index 10c3d9f8c038..675b3135d5f1 100644
--- a/drivers/ieee1394/ieee1394_transactions.c
+++ b/drivers/ieee1394/ieee1394_transactions.c
@@ -501,8 +501,6 @@ int hpsb_read(struct hpsb_host *host, nodeid_t node, unsigned int generation,
501 if (length == 0) 501 if (length == 0)
502 return -EINVAL; 502 return -EINVAL;
503 503
504 BUG_ON(in_interrupt()); // We can't be called in an interrupt, yet
505
506 packet = hpsb_make_readpacket(host, node, addr, length); 504 packet = hpsb_make_readpacket(host, node, addr, length);
507 505
508 if (!packet) { 506 if (!packet) {
@@ -550,8 +548,6 @@ int hpsb_write(struct hpsb_host *host, nodeid_t node, unsigned int generation,
550 if (length == 0) 548 if (length == 0)
551 return -EINVAL; 549 return -EINVAL;
552 550
553 BUG_ON(in_interrupt()); // We can't be called in an interrupt, yet
554
555 packet = hpsb_make_writepacket(host, node, addr, buffer, length); 551 packet = hpsb_make_writepacket(host, node, addr, buffer, length);
556 552
557 if (!packet) 553 if (!packet)
@@ -570,3 +566,30 @@ int hpsb_write(struct hpsb_host *host, nodeid_t node, unsigned int generation,
570 566
571 return retval; 567 return retval;
572} 568}
569
570int hpsb_lock(struct hpsb_host *host, nodeid_t node, unsigned int generation,
571 u64 addr, int extcode, quadlet_t *data, quadlet_t arg)
572{
573 struct hpsb_packet *packet;
574 int retval = 0;
575
576 packet = hpsb_make_lockpacket(host, node, addr, extcode, data, arg);
577 if (!packet)
578 return -ENOMEM;
579
580 packet->generation = generation;
581 retval = hpsb_send_packet_and_wait(packet);
582 if (retval < 0)
583 goto hpsb_lock_fail;
584
585 retval = hpsb_packet_success(packet);
586
587 if (retval == 0)
588 *data = packet->data[0];
589
590hpsb_lock_fail:
591 hpsb_free_tlabel(packet);
592 hpsb_free_packet(packet);
593
594 return retval;
595}
diff --git a/drivers/ieee1394/ieee1394_transactions.h b/drivers/ieee1394/ieee1394_transactions.h
index d2d5bc3546d7..20b693be14b2 100644
--- a/drivers/ieee1394/ieee1394_transactions.h
+++ b/drivers/ieee1394/ieee1394_transactions.h
@@ -30,6 +30,8 @@ int hpsb_read(struct hpsb_host *host, nodeid_t node, unsigned int generation,
30 u64 addr, quadlet_t *buffer, size_t length); 30 u64 addr, quadlet_t *buffer, size_t length);
31int hpsb_write(struct hpsb_host *host, nodeid_t node, unsigned int generation, 31int hpsb_write(struct hpsb_host *host, nodeid_t node, unsigned int generation,
32 u64 addr, quadlet_t *buffer, size_t length); 32 u64 addr, quadlet_t *buffer, size_t length);
33int hpsb_lock(struct hpsb_host *host, nodeid_t node, unsigned int generation,
34 u64 addr, int extcode, quadlet_t *data, quadlet_t arg);
33 35
34#ifdef HPSB_DEBUG_TLABELS 36#ifdef HPSB_DEBUG_TLABELS
35extern spinlock_t hpsb_tlabel_lock; 37extern spinlock_t hpsb_tlabel_lock;
diff --git a/drivers/ieee1394/iso.h b/drivers/ieee1394/iso.h
index b5de5f21ef78..c2089c093aa7 100644
--- a/drivers/ieee1394/iso.h
+++ b/drivers/ieee1394/iso.h
@@ -13,6 +13,7 @@
13#define IEEE1394_ISO_H 13#define IEEE1394_ISO_H
14 14
15#include <linux/spinlock_types.h> 15#include <linux/spinlock_types.h>
16#include <linux/wait.h>
16#include <asm/atomic.h> 17#include <asm/atomic.h>
17#include <asm/types.h> 18#include <asm/types.h>
18 19
diff --git a/drivers/ieee1394/nodemgr.c b/drivers/ieee1394/nodemgr.c
index 906c5a98d814..53aada5bbe1e 100644
--- a/drivers/ieee1394/nodemgr.c
+++ b/drivers/ieee1394/nodemgr.c
@@ -971,6 +971,9 @@ static struct unit_directory *nodemgr_process_unit_directory
971 ud->ud_kv = ud_kv; 971 ud->ud_kv = ud_kv;
972 ud->id = (*id)++; 972 ud->id = (*id)++;
973 973
974 /* inherit vendor_id from root directory if none exists in unit dir */
975 ud->vendor_id = ne->vendor_id;
976
974 csr1212_for_each_dir_entry(ne->csr, kv, ud_kv, dentry) { 977 csr1212_for_each_dir_entry(ne->csr, kv, ud_kv, dentry) {
975 switch (kv->key.id) { 978 switch (kv->key.id) {
976 case CSR1212_KV_ID_VENDOR: 979 case CSR1212_KV_ID_VENDOR:
@@ -1265,7 +1268,8 @@ static void nodemgr_update_node(struct node_entry *ne, struct csr1212_csr *csr,
1265 csr1212_destroy_csr(csr); 1268 csr1212_destroy_csr(csr);
1266 } 1269 }
1267 1270
1268 /* Mark the node current */ 1271 /* Finally, mark the node current */
1272 smp_wmb();
1269 ne->generation = generation; 1273 ne->generation = generation;
1270 1274
1271 if (ne->in_limbo) { 1275 if (ne->in_limbo) {
@@ -1798,7 +1802,7 @@ void hpsb_node_fill_packet(struct node_entry *ne, struct hpsb_packet *packet)
1798{ 1802{
1799 packet->host = ne->host; 1803 packet->host = ne->host;
1800 packet->generation = ne->generation; 1804 packet->generation = ne->generation;
1801 barrier(); 1805 smp_rmb();
1802 packet->node_id = ne->nodeid; 1806 packet->node_id = ne->nodeid;
1803} 1807}
1804 1808
@@ -1807,7 +1811,7 @@ int hpsb_node_write(struct node_entry *ne, u64 addr,
1807{ 1811{
1808 unsigned int generation = ne->generation; 1812 unsigned int generation = ne->generation;
1809 1813
1810 barrier(); 1814 smp_rmb();
1811 return hpsb_write(ne->host, ne->nodeid, generation, 1815 return hpsb_write(ne->host, ne->nodeid, generation,
1812 addr, buffer, length); 1816 addr, buffer, length);
1813} 1817}
diff --git a/drivers/ieee1394/nodemgr.h b/drivers/ieee1394/nodemgr.h
index 15ea09733e84..ee5acdbd114a 100644
--- a/drivers/ieee1394/nodemgr.h
+++ b/drivers/ieee1394/nodemgr.h
@@ -21,9 +21,11 @@
21#define _IEEE1394_NODEMGR_H 21#define _IEEE1394_NODEMGR_H
22 22
23#include <linux/device.h> 23#include <linux/device.h>
24#include <asm/system.h>
24#include <asm/types.h> 25#include <asm/types.h>
25 26
26#include "ieee1394_core.h" 27#include "ieee1394_core.h"
28#include "ieee1394_transactions.h"
27#include "ieee1394_types.h" 29#include "ieee1394_types.h"
28 30
29struct csr1212_csr; 31struct csr1212_csr;
@@ -154,6 +156,22 @@ static inline int hpsb_node_entry_valid(struct node_entry *ne)
154void hpsb_node_fill_packet(struct node_entry *ne, struct hpsb_packet *packet); 156void hpsb_node_fill_packet(struct node_entry *ne, struct hpsb_packet *packet);
155int hpsb_node_write(struct node_entry *ne, u64 addr, 157int hpsb_node_write(struct node_entry *ne, u64 addr,
156 quadlet_t *buffer, size_t length); 158 quadlet_t *buffer, size_t length);
159static inline int hpsb_node_read(struct node_entry *ne, u64 addr,
160 quadlet_t *buffer, size_t length)
161{
162 unsigned int g = ne->generation;
163
164 smp_rmb();
165 return hpsb_read(ne->host, ne->nodeid, g, addr, buffer, length);
166}
167static inline int hpsb_node_lock(struct node_entry *ne, u64 addr, int extcode,
168 quadlet_t *buffer, quadlet_t arg)
169{
170 unsigned int g = ne->generation;
171
172 smp_rmb();
173 return hpsb_lock(ne->host, ne->nodeid, g, addr, extcode, buffer, arg);
174}
157int nodemgr_for_each_host(void *data, int (*cb)(struct hpsb_host *, void *)); 175int nodemgr_for_each_host(void *data, int (*cb)(struct hpsb_host *, void *));
158 176
159int init_ieee1394_nodemgr(void); 177int init_ieee1394_nodemgr(void);
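
The nodemgr hunks replace plain compiler barrier()s with a paired SMP ordering: nodemgr_update_node() now issues smp_wmb() before bumping ne->generation, and the packet-fill/read/write/lock paths issue smp_rmb() after sampling it, so a reader that sees the new generation also sees the node fields written before it. The shape of the pairing, as a sketch (new_id and do_io() are stand-ins):

        /* writer: update the node fields, then publish */
        ne->nodeid = new_id;            /* ... and the other node fields ... */
        smp_wmb();
        ne->generation = generation;    /* readers key off this */

        /* reader: sample the generation, then the fields */
        unsigned int g = ne->generation;
        smp_rmb();
        do_io(ne->host, ne->nodeid, g, addr, buffer, length);

The new hpsb_lock() (exported from ieee1394_core.c) and the hpsb_node_read()/hpsb_node_lock() inlines round out the transaction helpers with a synchronous lock operation alongside hpsb_read()/hpsb_write(); extcode selects the lock variant (compare-swap and friends).
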
diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c
index c3c8b9bc40ae..45470f18d7e9 100644
--- a/drivers/input/keyboard/atkbd.c
+++ b/drivers/input/keyboard/atkbd.c
@@ -839,7 +839,7 @@ static void atkbd_disconnect(struct serio *serio)
839 */ 839 */
840static void atkbd_dell_laptop_keymap_fixup(struct atkbd *atkbd) 840static void atkbd_dell_laptop_keymap_fixup(struct atkbd *atkbd)
841{ 841{
842 const unsigned int forced_release_keys[] = { 842 static const unsigned int forced_release_keys[] = {
843 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a, 0x8b, 0x8f, 0x93, 843 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a, 0x8b, 0x8f, 0x93,
844 }; 844 };
845 int i; 845 int i;
@@ -856,7 +856,7 @@ static void atkbd_dell_laptop_keymap_fixup(struct atkbd *atkbd)
856 */ 856 */
857static void atkbd_hp_keymap_fixup(struct atkbd *atkbd) 857static void atkbd_hp_keymap_fixup(struct atkbd *atkbd)
858{ 858{
859 const unsigned int forced_release_keys[] = { 859 static const unsigned int forced_release_keys[] = {
860 0x94, 860 0x94,
861 }; 861 };
862 int i; 862 int i;
diff --git a/drivers/input/keyboard/bf54x-keys.c b/drivers/input/keyboard/bf54x-keys.c
index 19284016e0f4..ee855c5202e8 100644
--- a/drivers/input/keyboard/bf54x-keys.c
+++ b/drivers/input/keyboard/bf54x-keys.c
@@ -209,8 +209,8 @@ static int __devinit bfin_kpad_probe(struct platform_device *pdev)
209 goto out; 209 goto out;
210 } 210 }
211 211
212 if (!pdata->debounce_time || !pdata->debounce_time > MAX_MULT || 212 if (!pdata->debounce_time || pdata->debounce_time > MAX_MULT ||
213 !pdata->coldrive_time || !pdata->coldrive_time > MAX_MULT) { 213 !pdata->coldrive_time || pdata->coldrive_time > MAX_MULT) {
214 printk(KERN_ERR DRV_NAME 214 printk(KERN_ERR DRV_NAME
215 ": Invalid Debounce/Columdrive Time from pdata\n"); 215 ": Invalid Debounce/Columdrive Time from pdata\n");
216 bfin_write_KPAD_MSEL(0xFF0); /* Default MSEL */ 216 bfin_write_KPAD_MSEL(0xFF0); /* Default MSEL */
diff --git a/drivers/input/keyboard/corgikbd.c b/drivers/input/keyboard/corgikbd.c
index c8ed065ea0cb..abb04c82c622 100644
--- a/drivers/input/keyboard/corgikbd.c
+++ b/drivers/input/keyboard/corgikbd.c
@@ -288,7 +288,7 @@ static int corgikbd_resume(struct platform_device *dev)
288#define corgikbd_resume NULL 288#define corgikbd_resume NULL
289#endif 289#endif
290 290
291static int __init corgikbd_probe(struct platform_device *pdev) 291static int __devinit corgikbd_probe(struct platform_device *pdev)
292{ 292{
293 struct corgikbd *corgikbd; 293 struct corgikbd *corgikbd;
294 struct input_dev *input_dev; 294 struct input_dev *input_dev;
@@ -368,7 +368,7 @@ static int __init corgikbd_probe(struct platform_device *pdev)
368 return err; 368 return err;
369} 369}
370 370
371static int corgikbd_remove(struct platform_device *pdev) 371static int __devexit corgikbd_remove(struct platform_device *pdev)
372{ 372{
373 int i; 373 int i;
374 struct corgikbd *corgikbd = platform_get_drvdata(pdev); 374 struct corgikbd *corgikbd = platform_get_drvdata(pdev);
@@ -388,7 +388,7 @@ static int corgikbd_remove(struct platform_device *pdev)
388 388
389static struct platform_driver corgikbd_driver = { 389static struct platform_driver corgikbd_driver = {
390 .probe = corgikbd_probe, 390 .probe = corgikbd_probe,
391 .remove = corgikbd_remove, 391 .remove = __devexit_p(corgikbd_remove),
392 .suspend = corgikbd_suspend, 392 .suspend = corgikbd_suspend,
393 .resume = corgikbd_resume, 393 .resume = corgikbd_resume,
394 .driver = { 394 .driver = {
@@ -397,7 +397,7 @@ static struct platform_driver corgikbd_driver = {
397 }, 397 },
398}; 398};
399 399
400static int __devinit corgikbd_init(void) 400static int __init corgikbd_init(void)
401{ 401{
402 return platform_driver_register(&corgikbd_driver); 402 return platform_driver_register(&corgikbd_driver);
403} 403}
diff --git a/drivers/input/keyboard/omap-keypad.c b/drivers/input/keyboard/omap-keypad.c
index 3f3d1198cdb1..058fa8b02c21 100644
--- a/drivers/input/keyboard/omap-keypad.c
+++ b/drivers/input/keyboard/omap-keypad.c
@@ -279,7 +279,7 @@ static int omap_kp_resume(struct platform_device *dev)
279#define omap_kp_resume NULL 279#define omap_kp_resume NULL
280#endif 280#endif
281 281
282static int __init omap_kp_probe(struct platform_device *pdev) 282static int __devinit omap_kp_probe(struct platform_device *pdev)
283{ 283{
284 struct omap_kp *omap_kp; 284 struct omap_kp *omap_kp;
285 struct input_dev *input_dev; 285 struct input_dev *input_dev;
@@ -422,7 +422,7 @@ err1:
422 return -EINVAL; 422 return -EINVAL;
423} 423}
424 424
425static int omap_kp_remove(struct platform_device *pdev) 425static int __devexit omap_kp_remove(struct platform_device *pdev)
426{ 426{
427 struct omap_kp *omap_kp = platform_get_drvdata(pdev); 427 struct omap_kp *omap_kp = platform_get_drvdata(pdev);
428 428
@@ -454,7 +454,7 @@ static int omap_kp_remove(struct platform_device *pdev)
454 454
455static struct platform_driver omap_kp_driver = { 455static struct platform_driver omap_kp_driver = {
456 .probe = omap_kp_probe, 456 .probe = omap_kp_probe,
457 .remove = omap_kp_remove, 457 .remove = __devexit_p(omap_kp_remove),
458 .suspend = omap_kp_suspend, 458 .suspend = omap_kp_suspend,
459 .resume = omap_kp_resume, 459 .resume = omap_kp_resume,
460 .driver = { 460 .driver = {
@@ -463,7 +463,7 @@ static struct platform_driver omap_kp_driver = {
463 }, 463 },
464}; 464};
465 465
466static int __devinit omap_kp_init(void) 466static int __init omap_kp_init(void)
467{ 467{
468 printk(KERN_INFO "OMAP Keypad Driver\n"); 468 printk(KERN_INFO "OMAP Keypad Driver\n");
469 return platform_driver_register(&omap_kp_driver); 469 return platform_driver_register(&omap_kp_driver);
diff --git a/drivers/input/keyboard/spitzkbd.c b/drivers/input/keyboard/spitzkbd.c
index c48b76a46a58..9d1781a618e9 100644
--- a/drivers/input/keyboard/spitzkbd.c
+++ b/drivers/input/keyboard/spitzkbd.c
@@ -343,7 +343,7 @@ static int spitzkbd_resume(struct platform_device *dev)
343#define spitzkbd_resume NULL 343#define spitzkbd_resume NULL
344#endif 344#endif
345 345
346static int __init spitzkbd_probe(struct platform_device *dev) 346static int __devinit spitzkbd_probe(struct platform_device *dev)
347{ 347{
348 struct spitzkbd *spitzkbd; 348 struct spitzkbd *spitzkbd;
349 struct input_dev *input_dev; 349 struct input_dev *input_dev;
@@ -444,7 +444,7 @@ static int __init spitzkbd_probe(struct platform_device *dev)
444 return err; 444 return err;
445} 445}
446 446
447static int spitzkbd_remove(struct platform_device *dev) 447static int __devexit spitzkbd_remove(struct platform_device *dev)
448{ 448{
449 int i; 449 int i;
450 struct spitzkbd *spitzkbd = platform_get_drvdata(dev); 450 struct spitzkbd *spitzkbd = platform_get_drvdata(dev);
@@ -470,7 +470,7 @@ static int spitzkbd_remove(struct platform_device *dev)
470 470
471static struct platform_driver spitzkbd_driver = { 471static struct platform_driver spitzkbd_driver = {
472 .probe = spitzkbd_probe, 472 .probe = spitzkbd_probe,
473 .remove = spitzkbd_remove, 473 .remove = __devexit_p(spitzkbd_remove),
474 .suspend = spitzkbd_suspend, 474 .suspend = spitzkbd_suspend,
475 .resume = spitzkbd_resume, 475 .resume = spitzkbd_resume,
476 .driver = { 476 .driver = {
@@ -479,7 +479,7 @@ static struct platform_driver spitzkbd_driver = {
479 }, 479 },
480}; 480};
481 481
482static int __devinit spitzkbd_init(void) 482static int __init spitzkbd_init(void)
483{ 483{
484 return platform_driver_register(&spitzkbd_driver); 484 return platform_driver_register(&spitzkbd_driver);
485} 485}
diff --git a/drivers/input/mouse/Kconfig b/drivers/input/mouse/Kconfig
index 9bef935ef19f..4f38e6f7dfdd 100644
--- a/drivers/input/mouse/Kconfig
+++ b/drivers/input/mouse/Kconfig
@@ -70,7 +70,7 @@ config MOUSE_PS2_SYNAPTICS
70config MOUSE_PS2_LIFEBOOK 70config MOUSE_PS2_LIFEBOOK
71 bool "Fujitsu Lifebook PS/2 mouse protocol extension" if EMBEDDED 71 bool "Fujitsu Lifebook PS/2 mouse protocol extension" if EMBEDDED
72 default y 72 default y
73 depends on MOUSE_PS2 73 depends on MOUSE_PS2 && X86
74 help 74 help
75 Say Y here if you have a Fujitsu B-series Lifebook PS/2 75 Say Y here if you have a Fujitsu B-series Lifebook PS/2
76 TouchScreen connected to your system. 76 TouchScreen connected to your system.
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index b9a25d57bc5e..6ab0eb1ada1c 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -542,7 +542,7 @@ int elantech_detect(struct psmouse *psmouse, int set_properties)
542 ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE11) || 542 ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE11) ||
543 ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE11) || 543 ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE11) ||
544 ps2_command(ps2dev, param, PSMOUSE_CMD_GETINFO)) { 544 ps2_command(ps2dev, param, PSMOUSE_CMD_GETINFO)) {
545 pr_err("elantech.c: sending Elantech magic knock failed.\n"); 545 pr_debug("elantech.c: sending Elantech magic knock failed.\n");
546 return -1; 546 return -1;
547 } 547 }
548 548
@@ -551,8 +551,27 @@ int elantech_detect(struct psmouse *psmouse, int set_properties)
551 * set of magic numbers 551 * set of magic numbers
552 */ 552 */
553 if (param[0] != 0x3c || param[1] != 0x03 || param[2] != 0xc8) { 553 if (param[0] != 0x3c || param[1] != 0x03 || param[2] != 0xc8) {
554 pr_info("elantech.c: unexpected magic knock result 0x%02x, 0x%02x, 0x%02x.\n", 554 pr_debug("elantech.c: "
555 param[0], param[1], param[2]); 555 "unexpected magic knock result 0x%02x, 0x%02x, 0x%02x.\n",
556 param[0], param[1], param[2]);
557 return -1;
558 }
559
560 /*
561 * Query touchpad's firmware version and see if it reports known
562 * value to avoid mis-detection. Logitech mice are known to respond
563 * to Elantech magic knock and there might be more.
564 */
565 if (synaptics_send_cmd(psmouse, ETP_FW_VERSION_QUERY, param)) {
566 pr_debug("elantech.c: failed to query firmware version.\n");
567 return -1;
568 }
569
570 pr_debug("elantech.c: Elantech version query result 0x%02x, 0x%02x, 0x%02x.\n",
571 param[0], param[1], param[2]);
572
573 if (param[0] == 0 || param[1] != 0) {
574 pr_debug("elantech.c: Probably not a real Elantech touchpad. Aborting.\n");
556 return -1; 575 return -1;
557 } 576 }
558 577
@@ -600,8 +619,7 @@ int elantech_init(struct psmouse *psmouse)
600 int i, error; 619 int i, error;
601 unsigned char param[3]; 620 unsigned char param[3];
602 621
603 etd = kzalloc(sizeof(struct elantech_data), GFP_KERNEL); 622 psmouse->private = etd = kzalloc(sizeof(struct elantech_data), GFP_KERNEL);
604 psmouse->private = etd;
605 if (!etd) 623 if (!etd)
606 return -1; 624 return -1;
607 625
@@ -610,14 +628,12 @@ int elantech_init(struct psmouse *psmouse)
610 etd->parity[i] = etd->parity[i & (i - 1)] ^ 1; 628 etd->parity[i] = etd->parity[i & (i - 1)] ^ 1;
611 629
612 /* 630 /*
613 * Find out what version hardware this is 631 * Do the version query again so we can store the result
614 */ 632 */
615 if (synaptics_send_cmd(psmouse, ETP_FW_VERSION_QUERY, param)) { 633 if (synaptics_send_cmd(psmouse, ETP_FW_VERSION_QUERY, param)) {
616 pr_err("elantech.c: failed to query firmware version.\n"); 634 pr_err("elantech.c: failed to query firmware version.\n");
617 goto init_fail; 635 goto init_fail;
618 } 636 }
619 pr_info("elantech.c: Elantech version query result 0x%02x, 0x%02x, 0x%02x.\n",
620 param[0], param[1], param[2]);
621 etd->fw_version_maj = param[0]; 637 etd->fw_version_maj = param[0];
622 etd->fw_version_min = param[2]; 638 etd->fw_version_min = param[2];
623 639
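
The elantech changes harden detection rather than alter behaviour on real hardware: the probe-time messages drop from pr_err/pr_info to pr_debug (failing the magic knock is normal for non-Elantech mice), and elantech_detect() now also queries ETP_FW_VERSION_QUERY and rejects devices whose reply looks implausible (param[0] == 0 or param[1] != 0), since Logitech mice are known to answer the knock. elantech_init() then repeats the query to store the firmware version instead of printing it at info level.
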
diff --git a/drivers/input/mouse/pxa930_trkball.c b/drivers/input/mouse/pxa930_trkball.c
index d297accf9a7f..1e827ad0afbe 100644
--- a/drivers/input/mouse/pxa930_trkball.c
+++ b/drivers/input/mouse/pxa930_trkball.c
@@ -83,7 +83,7 @@ static int write_tbcr(struct pxa930_trkball *trkball, int v)
83 83
84 __raw_writel(v, trkball->mmio_base + TBCR); 84 __raw_writel(v, trkball->mmio_base + TBCR);
85 85
86 while (i--) { 86 while (--i) {
87 if (__raw_readl(trkball->mmio_base + TBCR) == v) 87 if (__raw_readl(trkball->mmio_base + TBCR) == v)
88 break; 88 break;
89 msleep(1); 89 msleep(1);
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index 865fc69e9bc3..f3e4f7b0240d 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -182,11 +182,6 @@ static int synaptics_identify(struct psmouse *psmouse)
182 182
183static int synaptics_query_hardware(struct psmouse *psmouse) 183static int synaptics_query_hardware(struct psmouse *psmouse)
184{ 184{
185 int retries = 0;
186
187 while ((retries++ < 3) && psmouse_reset(psmouse))
188 /* empty */;
189
190 if (synaptics_identify(psmouse)) 185 if (synaptics_identify(psmouse))
191 return -1; 186 return -1;
192 if (synaptics_model_id(psmouse)) 187 if (synaptics_model_id(psmouse))
@@ -582,6 +577,8 @@ static int synaptics_reconnect(struct psmouse *psmouse)
582 struct synaptics_data *priv = psmouse->private; 577 struct synaptics_data *priv = psmouse->private;
583 struct synaptics_data old_priv = *priv; 578 struct synaptics_data old_priv = *priv;
584 579
580 psmouse_reset(psmouse);
581
585 if (synaptics_detect(psmouse, 0)) 582 if (synaptics_detect(psmouse, 0))
586 return -1; 583 return -1;
587 584
@@ -640,6 +637,8 @@ int synaptics_init(struct psmouse *psmouse)
640 if (!priv) 637 if (!priv)
641 return -1; 638 return -1;
642 639
640 psmouse_reset(psmouse);
641
643 if (synaptics_query_hardware(psmouse)) { 642 if (synaptics_query_hardware(psmouse)) {
644 printk(KERN_ERR "Unable to query Synaptics hardware.\n"); 643 printk(KERN_ERR "Unable to query Synaptics hardware.\n");
645 goto init_fail; 644 goto init_fail;
diff --git a/drivers/input/serio/ambakmi.c b/drivers/input/serio/ambakmi.c
index b10ffae7c39b..e29cdc13a199 100644
--- a/drivers/input/serio/ambakmi.c
+++ b/drivers/input/serio/ambakmi.c
@@ -57,7 +57,7 @@ static int amba_kmi_write(struct serio *io, unsigned char val)
57 struct amba_kmi_port *kmi = io->port_data; 57 struct amba_kmi_port *kmi = io->port_data;
58 unsigned int timeleft = 10000; /* timeout in 100ms */ 58 unsigned int timeleft = 10000; /* timeout in 100ms */
59 59
60 while ((readb(KMISTAT) & KMISTAT_TXEMPTY) == 0 && timeleft--) 60 while ((readb(KMISTAT) & KMISTAT_TXEMPTY) == 0 && --timeleft)
61 udelay(10); 61 udelay(10);
62 62
63 if (timeleft) 63 if (timeleft)
@@ -129,8 +129,8 @@ static int amba_kmi_probe(struct amba_device *dev, void *id)
129 io->write = amba_kmi_write; 129 io->write = amba_kmi_write;
130 io->open = amba_kmi_open; 130 io->open = amba_kmi_open;
131 io->close = amba_kmi_close; 131 io->close = amba_kmi_close;
132 strlcpy(io->name, dev->dev.bus_id, sizeof(io->name)); 132 strlcpy(io->name, dev_name(&dev->dev), sizeof(io->name));
133 strlcpy(io->phys, dev->dev.bus_id, sizeof(io->phys)); 133 strlcpy(io->phys, dev_name(&dev->dev), sizeof(io->phys));
134 io->port_data = kmi; 134 io->port_data = kmi;
135 io->dev.parent = &dev->dev; 135 io->dev.parent = &dev->dev;
136 136
diff --git a/drivers/input/serio/gscps2.c b/drivers/input/serio/gscps2.c
index adc3bd6e7f7b..bd0f92d9f40f 100644
--- a/drivers/input/serio/gscps2.c
+++ b/drivers/input/serio/gscps2.c
@@ -359,7 +359,7 @@ static int __init gscps2_probe(struct parisc_device *dev)
359 359
360 snprintf(serio->name, sizeof(serio->name), "GSC PS/2 %s", 360 snprintf(serio->name, sizeof(serio->name), "GSC PS/2 %s",
361 (ps2port->id == GSC_ID_KEYBOARD) ? "keyboard" : "mouse"); 361 (ps2port->id == GSC_ID_KEYBOARD) ? "keyboard" : "mouse");
362 strlcpy(serio->phys, dev->dev.bus_id, sizeof(serio->phys)); 362 strlcpy(serio->phys, dev_name(&dev->dev), sizeof(serio->phys));
363 serio->id.type = SERIO_8042; 363 serio->id.type = SERIO_8042;
364 serio->write = gscps2_write; 364 serio->write = gscps2_write;
365 serio->open = gscps2_open; 365 serio->open = gscps2_open;
diff --git a/drivers/input/serio/sa1111ps2.c b/drivers/input/serio/sa1111ps2.c
index 2ad88780a170..57953c0eb82f 100644
--- a/drivers/input/serio/sa1111ps2.c
+++ b/drivers/input/serio/sa1111ps2.c
@@ -246,8 +246,8 @@ static int __devinit ps2_probe(struct sa1111_dev *dev)
246 serio->write = ps2_write; 246 serio->write = ps2_write;
247 serio->open = ps2_open; 247 serio->open = ps2_open;
248 serio->close = ps2_close; 248 serio->close = ps2_close;
249 strlcpy(serio->name, dev->dev.bus_id, sizeof(serio->name)); 249 strlcpy(serio->name, dev_name(&dev->dev), sizeof(serio->name));
250 strlcpy(serio->phys, dev->dev.bus_id, sizeof(serio->phys)); 250 strlcpy(serio->phys, dev_name(&dev->dev), sizeof(serio->phys));
251 serio->port_data = ps2if; 251 serio->port_data = ps2if;
252 serio->dev.parent = &dev->dev; 252 serio->dev.parent = &dev->dev;
253 ps2if->io = serio; 253 ps2if->io = serio;
diff --git a/drivers/input/touchscreen/atmel_tsadcc.c b/drivers/input/touchscreen/atmel_tsadcc.c
index a89a6a8f05e6..055969e8be13 100644
--- a/drivers/input/touchscreen/atmel_tsadcc.c
+++ b/drivers/input/touchscreen/atmel_tsadcc.c
@@ -236,7 +236,7 @@ static int __devinit atmel_tsadcc_probe(struct platform_device *pdev)
236 ts_dev->bufferedmeasure = 0; 236 ts_dev->bufferedmeasure = 0;
237 237
238 snprintf(ts_dev->phys, sizeof(ts_dev->phys), 238 snprintf(ts_dev->phys, sizeof(ts_dev->phys),
239 "%s/input0", pdev->dev.bus_id); 239 "%s/input0", dev_name(&pdev->dev));
240 240
241 input_dev->name = "atmel touch screen controller"; 241 input_dev->name = "atmel touch screen controller";
242 input_dev->phys = ts_dev->phys; 242 input_dev->phys = ts_dev->phys;
diff --git a/drivers/input/touchscreen/corgi_ts.c b/drivers/input/touchscreen/corgi_ts.c
index 65202c9f63ff..3fb51b54fe61 100644
--- a/drivers/input/touchscreen/corgi_ts.c
+++ b/drivers/input/touchscreen/corgi_ts.c
@@ -268,7 +268,7 @@ static int corgits_resume(struct platform_device *dev)
268#define corgits_resume NULL 268#define corgits_resume NULL
269#endif 269#endif
270 270
271static int __init corgits_probe(struct platform_device *pdev) 271static int __devinit corgits_probe(struct platform_device *pdev)
272{ 272{
273 struct corgi_ts *corgi_ts; 273 struct corgi_ts *corgi_ts;
274 struct input_dev *input_dev; 274 struct input_dev *input_dev;
@@ -343,7 +343,7 @@ static int __init corgits_probe(struct platform_device *pdev)
343 return err; 343 return err;
344} 344}
345 345
346static int corgits_remove(struct platform_device *pdev) 346static int __devexit corgits_remove(struct platform_device *pdev)
347{ 347{
348 struct corgi_ts *corgi_ts = platform_get_drvdata(pdev); 348 struct corgi_ts *corgi_ts = platform_get_drvdata(pdev);
349 349
@@ -352,12 +352,13 @@ static int corgits_remove(struct platform_device *pdev)
352 corgi_ts->machinfo->put_hsync(); 352 corgi_ts->machinfo->put_hsync();
353 input_unregister_device(corgi_ts->input); 353 input_unregister_device(corgi_ts->input);
354 kfree(corgi_ts); 354 kfree(corgi_ts);
355
355 return 0; 356 return 0;
356} 357}
357 358
358static struct platform_driver corgits_driver = { 359static struct platform_driver corgits_driver = {
359 .probe = corgits_probe, 360 .probe = corgits_probe,
360 .remove = corgits_remove, 361 .remove = __devexit_p(corgits_remove),
361 .suspend = corgits_suspend, 362 .suspend = corgits_suspend,
362 .resume = corgits_resume, 363 .resume = corgits_resume,
363 .driver = { 364 .driver = {
@@ -366,7 +367,7 @@ static struct platform_driver corgits_driver = {
366 }, 367 },
367}; 368};
368 369
369static int __devinit corgits_init(void) 370static int __init corgits_init(void)
370{ 371{
371 return platform_driver_register(&corgits_driver); 372 return platform_driver_register(&corgits_driver);
372} 373}
diff --git a/drivers/input/touchscreen/tsc2007.c b/drivers/input/touchscreen/tsc2007.c
index b75dc2990574..4ab070246892 100644
--- a/drivers/input/touchscreen/tsc2007.c
+++ b/drivers/input/touchscreen/tsc2007.c
@@ -289,7 +289,8 @@ static int tsc2007_probe(struct i2c_client *client,
289 289
290 pdata->init_platform_hw(); 290 pdata->init_platform_hw();
291 291
292 snprintf(ts->phys, sizeof(ts->phys), "%s/input0", client->dev.bus_id); 292 snprintf(ts->phys, sizeof(ts->phys),
293 "%s/input0", dev_name(&client->dev));
293 294
294 input_dev->name = "TSC2007 Touchscreen"; 295 input_dev->name = "TSC2007 Touchscreen";
295 input_dev->phys = ts->phys; 296 input_dev->phys = ts->phys;
diff --git a/drivers/input/touchscreen/usbtouchscreen.c b/drivers/input/touchscreen/usbtouchscreen.c
index 5080b26ba160..fb7cb9bdfbd5 100644
--- a/drivers/input/touchscreen/usbtouchscreen.c
+++ b/drivers/input/touchscreen/usbtouchscreen.c
@@ -60,6 +60,10 @@ static int swap_xy;
60module_param(swap_xy, bool, 0644); 60module_param(swap_xy, bool, 0644);
61MODULE_PARM_DESC(swap_xy, "If set X and Y axes are swapped."); 61MODULE_PARM_DESC(swap_xy, "If set X and Y axes are swapped.");
62 62
63static int hwcalib_xy;
64module_param(hwcalib_xy, bool, 0644);
65MODULE_PARM_DESC(hwcalib_xy, "If set hw-calibrated X/Y are used if available");
66
63/* device specifc data/functions */ 67/* device specifc data/functions */
64struct usbtouch_usb; 68struct usbtouch_usb;
65struct usbtouch_device_info { 69struct usbtouch_device_info {
@@ -118,6 +122,7 @@ enum {
118 122
119#define USB_DEVICE_HID_CLASS(vend, prod) \ 123#define USB_DEVICE_HID_CLASS(vend, prod) \
120 .match_flags = USB_DEVICE_ID_MATCH_INT_CLASS \ 124 .match_flags = USB_DEVICE_ID_MATCH_INT_CLASS \
125 | USB_DEVICE_ID_MATCH_INT_PROTOCOL \
121 | USB_DEVICE_ID_MATCH_DEVICE, \ 126 | USB_DEVICE_ID_MATCH_DEVICE, \
122 .idVendor = (vend), \ 127 .idVendor = (vend), \
123 .idProduct = (prod), \ 128 .idProduct = (prod), \
@@ -260,8 +265,13 @@ static int panjit_read_data(struct usbtouch_usb *dev, unsigned char *pkt)
260 265
261static int mtouch_read_data(struct usbtouch_usb *dev, unsigned char *pkt) 266static int mtouch_read_data(struct usbtouch_usb *dev, unsigned char *pkt)
262{ 267{
263 dev->x = (pkt[8] << 8) | pkt[7]; 268 if (hwcalib_xy) {
264 dev->y = (pkt[10] << 8) | pkt[9]; 269 dev->x = (pkt[4] << 8) | pkt[3];
270 dev->y = 0xffff - ((pkt[6] << 8) | pkt[5]);
271 } else {
272 dev->x = (pkt[8] << 8) | pkt[7];
273 dev->y = (pkt[10] << 8) | pkt[9];
274 }
265 dev->touch = (pkt[2] & 0x40) ? 1 : 0; 275 dev->touch = (pkt[2] & 0x40) ? 1 : 0;
266 276
267 return 1; 277 return 1;
@@ -294,6 +304,12 @@ static int mtouch_init(struct usbtouch_usb *usbtouch)
294 return ret; 304 return ret;
295 } 305 }
296 306
307 /* Default min/max xy are the raw values, override if using hw-calib */
308 if (hwcalib_xy) {
309 input_set_abs_params(usbtouch->input, ABS_X, 0, 0xffff, 0, 0);
310 input_set_abs_params(usbtouch->input, ABS_Y, 0, 0xffff, 0, 0);
311 }
312
297 return 0; 313 return 0;
298} 314}
299#endif 315#endif
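
The usbtouchscreen hunk adds a hwcalib_xy module parameter (e.g. loading the driver with hwcalib_xy=1): when set, the MicroTouch/3M report parser takes the hardware-calibrated coordinates from bytes 3-6 of the packet (with Y inverted against 0xffff) instead of the raw values in bytes 7-10, and mtouch_init() widens the reported ABS_X/ABS_Y range to 0-0xffff to match. The USB_DEVICE_HID_CLASS() match now also requires the interface protocol to agree, not just the class.
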
diff --git a/drivers/isdn/sc/shmem.c b/drivers/isdn/sc/shmem.c
index 712220cef139..7f16d75d2d89 100644
--- a/drivers/isdn/sc/shmem.c
+++ b/drivers/isdn/sc/shmem.c
@@ -54,7 +54,7 @@ void memcpy_toshmem(int card, void *dest, const void *src, size_t n)
54 spin_unlock_irqrestore(&sc_adapter[card]->lock, flags); 54 spin_unlock_irqrestore(&sc_adapter[card]->lock, flags);
55 pr_debug("%s: set page to %#x\n",sc_adapter[card]->devicename, 55 pr_debug("%s: set page to %#x\n",sc_adapter[card]->devicename,
56 ((sc_adapter[card]->shmem_magic + ch * SRAM_PAGESIZE)>>14)|0x80); 56 ((sc_adapter[card]->shmem_magic + ch * SRAM_PAGESIZE)>>14)|0x80);
57 pr_debug("%s: copying %d bytes from %#lx to %#lx\n", 57 pr_debug("%s: copying %zu bytes from %#lx to %#lx\n",
58 sc_adapter[card]->devicename, n, 58 sc_adapter[card]->devicename, n,
59 (unsigned long) src, 59 (unsigned long) src,
60 sc_adapter[card]->rambase + ((unsigned long) dest %0x4000)); 60 sc_adapter[card]->rambase + ((unsigned long) dest %0x4000));
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 01e3cffd03b8..e2466425d9ca 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1237,8 +1237,9 @@ static void end_sync_write(struct bio *bio, int error)
1237 update_head_pos(mirror, r1_bio); 1237 update_head_pos(mirror, r1_bio);
1238 1238
1239 if (atomic_dec_and_test(&r1_bio->remaining)) { 1239 if (atomic_dec_and_test(&r1_bio->remaining)) {
1240 md_done_sync(mddev, r1_bio->sectors, uptodate); 1240 sector_t s = r1_bio->sectors;
1241 put_buf(r1_bio); 1241 put_buf(r1_bio);
1242 md_done_sync(mddev, s, uptodate);
1242 } 1243 }
1243} 1244}
1244 1245
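
The raid1 hunk captures r1_bio->sectors in a local before put_buf() releases the buffer, and only then calls md_done_sync(); the r1_bio may be reused once put_buf() returns, and completing the sync accounting while the buffer is still held is presumably what the reordering is meant to avoid. The same pattern appears in the raid10 end_sync_write() change below:

        if (atomic_dec_and_test(&r1_bio->remaining)) {
                sector_t s = r1_bio->sectors;   /* capture before the buffer goes away */
                put_buf(r1_bio);
                md_done_sync(mddev, s, uptodate);
        }
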
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 6736d6dff981..7301631abe04 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1236,6 +1236,7 @@ static void end_sync_read(struct bio *bio, int error)
1236 /* for reconstruct, we always reschedule after a read. 1236 /* for reconstruct, we always reschedule after a read.
1237 * for resync, only after all reads 1237 * for resync, only after all reads
1238 */ 1238 */
1239 rdev_dec_pending(conf->mirrors[d].rdev, conf->mddev);
1239 if (test_bit(R10BIO_IsRecover, &r10_bio->state) || 1240 if (test_bit(R10BIO_IsRecover, &r10_bio->state) ||
1240 atomic_dec_and_test(&r10_bio->remaining)) { 1241 atomic_dec_and_test(&r10_bio->remaining)) {
1241 /* we have read all the blocks, 1242 /* we have read all the blocks,
@@ -1243,7 +1244,6 @@ static void end_sync_read(struct bio *bio, int error)
1243 */ 1244 */
1244 reschedule_retry(r10_bio); 1245 reschedule_retry(r10_bio);
1245 } 1246 }
1246 rdev_dec_pending(conf->mirrors[d].rdev, conf->mddev);
1247} 1247}
1248 1248
1249static void end_sync_write(struct bio *bio, int error) 1249static void end_sync_write(struct bio *bio, int error)
@@ -1264,11 +1264,13 @@ static void end_sync_write(struct bio *bio, int error)
1264 1264
1265 update_head_pos(i, r10_bio); 1265 update_head_pos(i, r10_bio);
1266 1266
1267 rdev_dec_pending(conf->mirrors[d].rdev, mddev);
1267 while (atomic_dec_and_test(&r10_bio->remaining)) { 1268 while (atomic_dec_and_test(&r10_bio->remaining)) {
1268 if (r10_bio->master_bio == NULL) { 1269 if (r10_bio->master_bio == NULL) {
1269 /* the primary of several recovery bios */ 1270 /* the primary of several recovery bios */
1270 md_done_sync(mddev, r10_bio->sectors, 1); 1271 sector_t s = r10_bio->sectors;
1271 put_buf(r10_bio); 1272 put_buf(r10_bio);
1273 md_done_sync(mddev, s, 1);
1272 break; 1274 break;
1273 } else { 1275 } else {
1274 r10bio_t *r10_bio2 = (r10bio_t *)r10_bio->master_bio; 1276 r10bio_t *r10_bio2 = (r10bio_t *)r10_bio->master_bio;
@@ -1276,7 +1278,6 @@ static void end_sync_write(struct bio *bio, int error)
1276 r10_bio = r10_bio2; 1278 r10_bio = r10_bio2;
1277 } 1279 }
1278 } 1280 }
1279 rdev_dec_pending(conf->mirrors[d].rdev, mddev);
1280} 1281}
1281 1282
1282/* 1283/*
@@ -1749,8 +1750,6 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
1749 if (!go_faster && conf->nr_waiting) 1750 if (!go_faster && conf->nr_waiting)
1750 msleep_interruptible(1000); 1751 msleep_interruptible(1000);
1751 1752
1752 bitmap_cond_end_sync(mddev->bitmap, sector_nr);
1753
1754 /* Again, very different code for resync and recovery. 1753 /* Again, very different code for resync and recovery.
1755 * Both must result in an r10bio with a list of bios that 1754 * Both must result in an r10bio with a list of bios that
1756 * have bi_end_io, bi_sector, bi_bdev set, 1755 * have bi_end_io, bi_sector, bi_bdev set,
@@ -1886,6 +1885,8 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
1886 /* resync. Schedule a read for every block at this virt offset */ 1885 /* resync. Schedule a read for every block at this virt offset */
1887 int count = 0; 1886 int count = 0;
1888 1887
1888 bitmap_cond_end_sync(mddev->bitmap, sector_nr);
1889
1889 if (!bitmap_start_sync(mddev->bitmap, sector_nr, 1890 if (!bitmap_start_sync(mddev->bitmap, sector_nr,
1890 &sync_blocks, mddev->degraded) && 1891 &sync_blocks, mddev->degraded) &&
1891 !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) { 1892 !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
@@ -2010,13 +2011,13 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
2010 /* There is nowhere to write, so all non-sync 2011 /* There is nowhere to write, so all non-sync
2011 * drives must be failed, so try the next chunk... 2012 * drives must be failed, so try the next chunk...
2012 */ 2013 */
2013 { 2014 if (sector_nr + max_sync < max_sector)
2014 sector_t sec = max_sector - sector_nr; 2015 max_sector = sector_nr + max_sync;
2015 sectors_skipped += sec; 2016
2017 sectors_skipped += (max_sector - sector_nr);
2016 chunks_skipped ++; 2018 chunks_skipped ++;
2017 sector_nr = max_sector; 2019 sector_nr = max_sector;
2018 goto skipped; 2020 goto skipped;
2019 }
2020} 2021}
2021 2022
2022static int run(mddev_t *mddev) 2023static int run(mddev_t *mddev)
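
The raid10 hunks make several related adjustments: rdev_dec_pending() moves ahead of the remaining-count handling in both sync completion paths, so the rdev reference is dropped before the r10_bio can be rescheduled or completed; end_sync_write() saves r10_bio->sectors before put_buf(), as in raid1; bitmap_cond_end_sync() now runs only in the resync branch rather than on every recovery pass; and the "nowhere to write" path clamps how much is skipped to one sync window:

        if (sector_nr + max_sync < max_sector)
                max_sector = sector_nr + max_sync;      /* skip at most max_sync sectors */
        sectors_skipped += (max_sector - sector_nr);

so a fully-failed chunk no longer skips straight to the end of the array in one step.
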
diff --git a/drivers/media/dvb/Kconfig b/drivers/media/dvb/Kconfig
index 40ebde53b3ce..b0198691892a 100644
--- a/drivers/media/dvb/Kconfig
+++ b/drivers/media/dvb/Kconfig
@@ -51,6 +51,10 @@ comment "Supported SDMC DM1105 Adapters"
51 depends on DVB_CORE && PCI && I2C 51 depends on DVB_CORE && PCI && I2C
52source "drivers/media/dvb/dm1105/Kconfig" 52source "drivers/media/dvb/dm1105/Kconfig"
53 53
54comment "Supported FireWire (IEEE 1394) Adapters"
55 depends on DVB_CORE && IEEE1394
56source "drivers/media/dvb/firewire/Kconfig"
57
54comment "Supported DVB Frontends" 58comment "Supported DVB Frontends"
55 depends on DVB_CORE 59 depends on DVB_CORE
56source "drivers/media/dvb/frontends/Kconfig" 60source "drivers/media/dvb/frontends/Kconfig"
diff --git a/drivers/media/dvb/Makefile b/drivers/media/dvb/Makefile
index f91e9eb15e52..6092a5bb5a7d 100644
--- a/drivers/media/dvb/Makefile
+++ b/drivers/media/dvb/Makefile
@@ -3,3 +3,5 @@
3# 3#
4 4
5obj-y := dvb-core/ frontends/ ttpci/ ttusb-dec/ ttusb-budget/ b2c2/ bt8xx/ dvb-usb/ pluto2/ siano/ dm1105/ 5obj-y := dvb-core/ frontends/ ttpci/ ttusb-dec/ ttusb-budget/ b2c2/ bt8xx/ dvb-usb/ pluto2/ siano/ dm1105/
6
7obj-$(CONFIG_DVB_FIREDTV) += firewire/
diff --git a/drivers/media/dvb/b2c2/flexcop-hw-filter.c b/drivers/media/dvb/b2c2/flexcop-hw-filter.c
index b386cc66c6b3..451974ba32f3 100644
--- a/drivers/media/dvb/b2c2/flexcop-hw-filter.c
+++ b/drivers/media/dvb/b2c2/flexcop-hw-filter.c
@@ -192,6 +192,7 @@ int flexcop_pid_feed_control(struct flexcop_device *fc, struct dvb_demux_feed *d
192 192
193 return 0; 193 return 0;
194} 194}
195EXPORT_SYMBOL(flexcop_pid_feed_control);
195 196
196void flexcop_hw_filter_init(struct flexcop_device *fc) 197void flexcop_hw_filter_init(struct flexcop_device *fc)
197{ 198{
diff --git a/drivers/media/dvb/b2c2/flexcop-pci.c b/drivers/media/dvb/b2c2/flexcop-pci.c
index 5b30dfc7846b..76e37fd96bb6 100644
--- a/drivers/media/dvb/b2c2/flexcop-pci.c
+++ b/drivers/media/dvb/b2c2/flexcop-pci.c
@@ -13,9 +13,9 @@ static int enable_pid_filtering = 1;
13module_param(enable_pid_filtering, int, 0444); 13module_param(enable_pid_filtering, int, 0444);
14MODULE_PARM_DESC(enable_pid_filtering, "enable hardware pid filtering: supported values: 0 (fullts), 1"); 14MODULE_PARM_DESC(enable_pid_filtering, "enable hardware pid filtering: supported values: 0 (fullts), 1");
15 15
16static int irq_chk_intv; 16static int irq_chk_intv = 100;
17module_param(irq_chk_intv, int, 0644); 17module_param(irq_chk_intv, int, 0644);
18MODULE_PARM_DESC(irq_chk_intv, "set the interval for IRQ watchdog (currently just debugging)."); 18MODULE_PARM_DESC(irq_chk_intv, "set the interval for IRQ streaming watchdog.");
19 19
20#ifdef CONFIG_DVB_B2C2_FLEXCOP_DEBUG 20#ifdef CONFIG_DVB_B2C2_FLEXCOP_DEBUG
21#define dprintk(level,args...) \ 21#define dprintk(level,args...) \
@@ -34,7 +34,9 @@ MODULE_PARM_DESC(irq_chk_intv, "set the interval for IRQ watchdog (currently jus
34 34
35static int debug; 35static int debug;
36module_param(debug, int, 0644); 36module_param(debug, int, 0644);
37MODULE_PARM_DESC(debug, "set debug level (1=info,2=regs,4=TS,8=irqdma (|-able))." DEBSTATUS); 37MODULE_PARM_DESC(debug,
38 "set debug level (1=info,2=regs,4=TS,8=irqdma,16=check (|-able))."
39 DEBSTATUS);
38 40
39#define DRIVER_VERSION "0.1" 41#define DRIVER_VERSION "0.1"
40#define DRIVER_NAME "Technisat/B2C2 FlexCop II/IIb/III Digital TV PCI Driver" 42#define DRIVER_NAME "Technisat/B2C2 FlexCop II/IIb/III Digital TV PCI Driver"
@@ -58,6 +60,8 @@ struct flexcop_pci {
58 int active_dma1_addr; /* 0 = addr0 of dma1; 1 = addr1 of dma1 */ 60 int active_dma1_addr; /* 0 = addr0 of dma1; 1 = addr1 of dma1 */
59 u32 last_dma1_cur_pos; /* position of the pointer last time the timer/packet irq occurred */ 61 u32 last_dma1_cur_pos; /* position of the pointer last time the timer/packet irq occurred */
60 int count; 62 int count;
63 int count_prev;
64 int stream_problem;
61 65
62 spinlock_t irq_lock; 66 spinlock_t irq_lock;
63 67
@@ -103,18 +107,32 @@ static void flexcop_pci_irq_check_work(struct work_struct *work)
103 container_of(work, struct flexcop_pci, irq_check_work.work); 107 container_of(work, struct flexcop_pci, irq_check_work.work);
104 struct flexcop_device *fc = fc_pci->fc_dev; 108 struct flexcop_device *fc = fc_pci->fc_dev;
105 109
106 flexcop_ibi_value v = fc->read_ibi_reg(fc,sram_dest_reg_714); 110 if (fc->feedcount) {
107 111
108 flexcop_dump_reg(fc_pci->fc_dev,dma1_000,4); 112 if (fc_pci->count == fc_pci->count_prev) {
109 113 deb_chk("no IRQ since the last check\n");
110 if (v.sram_dest_reg_714.net_ovflow_error) 114 if (fc_pci->stream_problem++ == 3) {
111 deb_chk("sram net_ovflow_error\n"); 115 struct dvb_demux_feed *feed;
112 if (v.sram_dest_reg_714.media_ovflow_error) 116
113 deb_chk("sram media_ovflow_error\n"); 117 spin_lock_irq(&fc->demux.lock);
114 if (v.sram_dest_reg_714.cai_ovflow_error) 118 list_for_each_entry(feed, &fc->demux.feed_list,
115 deb_chk("sram cai_ovflow_error\n"); 119 list_head) {
116 if (v.sram_dest_reg_714.cai_ovflow_error) 120 flexcop_pid_feed_control(fc, feed, 0);
117 deb_chk("sram cai_ovflow_error\n"); 121 }
122
123 list_for_each_entry(feed, &fc->demux.feed_list,
124 list_head) {
125 flexcop_pid_feed_control(fc, feed, 1);
126 }
127 spin_unlock_irq(&fc->demux.lock);
128
129 fc_pci->stream_problem = 0;
130 }
131 } else {
132 fc_pci->stream_problem = 0;
133 fc_pci->count_prev = fc_pci->count;
134 }
135 }
118 136
119 schedule_delayed_work(&fc_pci->irq_check_work, 137 schedule_delayed_work(&fc_pci->irq_check_work,
120 msecs_to_jiffies(irq_chk_intv < 100 ? 100 : irq_chk_intv)); 138 msecs_to_jiffies(irq_chk_intv < 100 ? 100 : irq_chk_intv));
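The reworked check above detects a stalled stream by comparing the IRQ counter with its value at the previous run; only after several consecutive runs without progress does it cycle all active PID feeds off and on. A standalone sketch of the stall-detection logic, with illustrative names rather than the driver's structures:

#include <stdbool.h>
#include <stdio.h>

struct watchdog {
	unsigned long count;		/* incremented by the data path */
	unsigned long count_prev;	/* snapshot taken at the last check */
	int stream_problem;		/* consecutive checks without progress */
};

/* Returns true when the caller should restart the feeds. */
static bool watchdog_check(struct watchdog *w, bool streaming)
{
	if (!streaming)
		return false;

	if (w->count == w->count_prev) {
		/* no IRQ progress since the last check */
		if (++w->stream_problem >= 3) {
			w->stream_problem = 0;
			return true;
		}
	} else {
		w->stream_problem = 0;
		w->count_prev = w->count;
	}
	return false;
}

int main(void)
{
	struct watchdog w = { 0, 0, 0 };
	int i;

	for (i = 0; i < 5; i++)
		printf("check %d -> restart=%d\n", i, watchdog_check(&w, true));
	return 0;
}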
@@ -216,16 +234,12 @@ static int flexcop_pci_stream_control(struct flexcop_device *fc, int onoff)
216 flexcop_dma_control_timer_irq(fc,FC_DMA_1,1); 234 flexcop_dma_control_timer_irq(fc,FC_DMA_1,1);
217 deb_irq("IRQ enabled\n"); 235 deb_irq("IRQ enabled\n");
218 236
237 fc_pci->count_prev = fc_pci->count;
238
219// fc_pci->active_dma1_addr = 0; 239// fc_pci->active_dma1_addr = 0;
220// flexcop_dma_control_size_irq(fc,FC_DMA_1,1); 240// flexcop_dma_control_size_irq(fc,FC_DMA_1,1);
221 241
222 if (irq_chk_intv > 0)
223 schedule_delayed_work(&fc_pci->irq_check_work,
224 msecs_to_jiffies(irq_chk_intv < 100 ? 100 : irq_chk_intv));
225 } else { 242 } else {
226 if (irq_chk_intv > 0)
227 cancel_delayed_work(&fc_pci->irq_check_work);
228
229 flexcop_dma_control_timer_irq(fc,FC_DMA_1,0); 243 flexcop_dma_control_timer_irq(fc,FC_DMA_1,0);
230 deb_irq("IRQ disabled\n"); 244 deb_irq("IRQ disabled\n");
231 245
@@ -299,8 +313,6 @@ static int flexcop_pci_init(struct flexcop_pci *fc_pci)
299 IRQF_SHARED, DRIVER_NAME, fc_pci)) != 0) 313 IRQF_SHARED, DRIVER_NAME, fc_pci)) != 0)
300 goto err_pci_iounmap; 314 goto err_pci_iounmap;
301 315
302
303
304 fc_pci->init_state |= FC_PCI_INIT; 316 fc_pci->init_state |= FC_PCI_INIT;
305 return ret; 317 return ret;
306 318
@@ -375,6 +387,10 @@ static int flexcop_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
375 387
376 INIT_DELAYED_WORK(&fc_pci->irq_check_work, flexcop_pci_irq_check_work); 388 INIT_DELAYED_WORK(&fc_pci->irq_check_work, flexcop_pci_irq_check_work);
377 389
390 if (irq_chk_intv > 0)
391 schedule_delayed_work(&fc_pci->irq_check_work,
392 msecs_to_jiffies(irq_chk_intv < 100 ? 100 : irq_chk_intv));
393
378 return ret; 394 return ret;
379 395
380err_fc_exit: 396err_fc_exit:
@@ -393,6 +409,9 @@ static void flexcop_pci_remove(struct pci_dev *pdev)
393{ 409{
394 struct flexcop_pci *fc_pci = pci_get_drvdata(pdev); 410 struct flexcop_pci *fc_pci = pci_get_drvdata(pdev);
395 411
412 if (irq_chk_intv > 0)
413 cancel_delayed_work(&fc_pci->irq_check_work);
414
396 flexcop_pci_dma_exit(fc_pci); 415 flexcop_pci_dma_exit(fc_pci);
397 flexcop_device_exit(fc_pci->fc_dev); 416 flexcop_device_exit(fc_pci->fc_dev);
398 flexcop_pci_exit(fc_pci); 417 flexcop_pci_exit(fc_pci);
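With this change the watchdog work is armed once in probe and torn down in remove instead of being tied to stream start and stop. A kernel-style sketch of that delayed-work lifecycle, using hypothetical foo_* names (cancel_delayed_work_sync() is shown here, which additionally waits for a running instance to finish):

#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/workqueue.h>

struct foo_dev {
	struct delayed_work check_work;
	unsigned int interval_ms;	/* would come from a module parameter */
};

static void foo_check_work(struct work_struct *work)
{
	struct foo_dev *dev =
		container_of(work, struct foo_dev, check_work.work);

	/* ... periodic housekeeping ... */

	/* the handler re-arms itself */
	schedule_delayed_work(&dev->check_work,
			      msecs_to_jiffies(dev->interval_ms));
}

static void foo_probe(struct foo_dev *dev)
{
	INIT_DELAYED_WORK(&dev->check_work, foo_check_work);
	if (dev->interval_ms)
		schedule_delayed_work(&dev->check_work,
				      msecs_to_jiffies(dev->interval_ms));
}

static void foo_remove(struct foo_dev *dev)
{
	cancel_delayed_work_sync(&dev->check_work);
}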
diff --git a/drivers/media/dvb/b2c2/flexcop.c b/drivers/media/dvb/b2c2/flexcop.c
index 676413a915b4..91068952b502 100644
--- a/drivers/media/dvb/b2c2/flexcop.c
+++ b/drivers/media/dvb/b2c2/flexcop.c
@@ -212,8 +212,7 @@ void flexcop_reset_block_300(struct flexcop_device *fc)
212 v210.sw_reset_210.Block_reset_enable = 0xb2; 212 v210.sw_reset_210.Block_reset_enable = 0xb2;
213 213
214 fc->write_ibi_reg(fc,sw_reset_210,v210); 214 fc->write_ibi_reg(fc,sw_reset_210,v210);
215 msleep(1); 215 udelay(1000);
216
217 fc->write_ibi_reg(fc,ctrl_208,v208_save); 216 fc->write_ibi_reg(fc,ctrl_208,v208_save);
218} 217}
219 218
diff --git a/drivers/media/dvb/firewire/Kconfig b/drivers/media/dvb/firewire/Kconfig
new file mode 100644
index 000000000000..69028253e984
--- /dev/null
+++ b/drivers/media/dvb/firewire/Kconfig
@@ -0,0 +1,22 @@
1config DVB_FIREDTV
2 tristate "FireDTV and FloppyDTV"
3 depends on DVB_CORE && IEEE1394
4 help
5 Support for DVB receivers from Digital Everywhere
6 which are connected via IEEE 1394 (FireWire).
7
8 These devices don't have an MPEG decoder built in,
9 so you need an external software decoder to watch TV.
10
11 To compile this driver as a module, say M here:
12 the module will be called firedtv.
13
14if DVB_FIREDTV
15
16config DVB_FIREDTV_IEEE1394
17 def_bool IEEE1394
18
19config DVB_FIREDTV_INPUT
20 def_bool INPUT = y || (INPUT = m && DVB_FIREDTV = m)
21
22endif # DVB_FIREDTV
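The help text above spells out the dependencies: the DVB core, the ieee1394 stack, and optional input support for the remote control. As a rough illustration only, a .config fragment for such a tree might look like the following; the exact values depend on the rest of the configuration:

# illustrative .config fragment
CONFIG_IEEE1394=m
CONFIG_DVB_CORE=m
CONFIG_INPUT=y
CONFIG_DVB_FIREDTV=m
# derived automatically by the def_bool lines above:
# CONFIG_DVB_FIREDTV_IEEE1394=y
# CONFIG_DVB_FIREDTV_INPUT=y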
diff --git a/drivers/media/dvb/firewire/Makefile b/drivers/media/dvb/firewire/Makefile
new file mode 100644
index 000000000000..2034695ba194
--- /dev/null
+++ b/drivers/media/dvb/firewire/Makefile
@@ -0,0 +1,8 @@
1obj-$(CONFIG_DVB_FIREDTV) += firedtv.o
2
3firedtv-y := firedtv-avc.o firedtv-ci.o firedtv-dvb.o firedtv-fe.o
4firedtv-$(CONFIG_DVB_FIREDTV_IEEE1394) += firedtv-1394.o
5firedtv-$(CONFIG_DVB_FIREDTV_INPUT) += firedtv-rc.o
6
7ccflags-y += -Idrivers/media/dvb/dvb-core
8ccflags-$(CONFIG_DVB_FIREDTV_IEEE1394) += -Idrivers/ieee1394
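The Makefile relies on kbuild's composite-object idiom: firedtv-y objects are always linked into firedtv.ko, while firedtv-$(CONFIG_...) entries are included only when the corresponding bool symbol is y. A generic sketch of the same idiom with a hypothetical driver called bar:

# generic sketch of the composite-object idiom (hypothetical "bar" driver)
obj-$(CONFIG_BAR)     += bar.o

bar-y                 := bar-core.o bar-fe.o   # always part of bar.ko
bar-$(CONFIG_BAR_RC)  += bar-rc.o              # only when CONFIG_BAR_RC=y

ccflags-y             += -Idrivers/media/dvb/dvb-core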
diff --git a/drivers/media/dvb/firewire/firedtv-1394.c b/drivers/media/dvb/firewire/firedtv-1394.c
new file mode 100644
index 000000000000..4e207658c5d9
--- /dev/null
+++ b/drivers/media/dvb/firewire/firedtv-1394.c
@@ -0,0 +1,285 @@
1/*
2 * FireDTV driver (formerly known as FireSAT)
3 *
4 * Copyright (C) 2004 Andreas Monitzer <andy@monitzer.com>
5 * Copyright (C) 2007-2008 Ben Backx <ben@bbackx.com>
6 * Copyright (C) 2008 Henrik Kurelid <henrik@kurelid.se>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License as
10 * published by the Free Software Foundation; either version 2 of
11 * the License, or (at your option) any later version.
12 */
13
14#include <linux/device.h>
15#include <linux/errno.h>
16#include <linux/kernel.h>
17#include <linux/list.h>
18#include <linux/spinlock.h>
19#include <linux/types.h>
20
21#include <dma.h>
22#include <csr1212.h>
23#include <highlevel.h>
24#include <hosts.h>
25#include <ieee1394.h>
26#include <iso.h>
27#include <nodemgr.h>
28
29#include "firedtv.h"
30
31static LIST_HEAD(node_list);
32static DEFINE_SPINLOCK(node_list_lock);
33
34#define FIREWIRE_HEADER_SIZE 4
35#define CIP_HEADER_SIZE 8
36
37static void rawiso_activity_cb(struct hpsb_iso *iso)
38{
39 struct firedtv *f, *fdtv = NULL;
40 unsigned int i, num, packet;
41 unsigned char *buf;
42 unsigned long flags;
43 int count;
44
45 spin_lock_irqsave(&node_list_lock, flags);
46 list_for_each_entry(f, &node_list, list)
47 if (f->backend_data == iso) {
48 fdtv = f;
49 break;
50 }
51 spin_unlock_irqrestore(&node_list_lock, flags);
52
53 packet = iso->first_packet;
54 num = hpsb_iso_n_ready(iso);
55
56 if (!fdtv) {
57 printk(KERN_ERR "firedtv: received at unknown iso channel\n"); /* fdtv is NULL here, do not dereference it */
58 goto out;
59 }
60
61 for (i = 0; i < num; i++, packet = (packet + 1) % iso->buf_packets) {
62 buf = dma_region_i(&iso->data_buf, unsigned char,
63 iso->infos[packet].offset + CIP_HEADER_SIZE);
64 count = (iso->infos[packet].len - CIP_HEADER_SIZE) /
65 (188 + FIREWIRE_HEADER_SIZE);
66
67 /* ignore empty packet */
68 if (iso->infos[packet].len <= CIP_HEADER_SIZE)
69 continue;
70
71 while (count--) {
72 if (buf[FIREWIRE_HEADER_SIZE] == 0x47)
73 dvb_dmx_swfilter_packets(&fdtv->demux,
74 &buf[FIREWIRE_HEADER_SIZE], 1);
75 else
76 dev_err(fdtv->device,
77 "skipping invalid packet\n");
78 buf += 188 + FIREWIRE_HEADER_SIZE;
79 }
80 }
81out:
82 hpsb_iso_recv_release_packets(iso, num);
83}
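rawiso_activity_cb() above strips the 8-byte CIP header from each isochronous packet and then walks the payload in 192-byte steps: a 4-byte FireWire source-packet header followed by one 188-byte MPEG-TS packet that must start with the 0x47 sync byte. A standalone sketch of that walk, with illustrative names:

#include <stddef.h>
#include <stdio.h>

#define CIP_HEADER_SIZE	8
#define SPH_SIZE	4	/* FireWire source-packet header */
#define TS_PACKET_SIZE	188
#define TS_SYNC_BYTE	0x47

/* Returns the number of valid TS packets found in one iso packet. */
static int extract_ts_packets(const unsigned char *pkt, size_t len)
{
	int found = 0;

	if (len <= CIP_HEADER_SIZE)
		return 0;			/* empty packet, nothing to do */

	pkt += CIP_HEADER_SIZE;
	len -= CIP_HEADER_SIZE;

	while (len >= SPH_SIZE + TS_PACKET_SIZE) {
		if (pkt[SPH_SIZE] == TS_SYNC_BYTE)
			found++;		/* would be fed to the demux */
		else
			fprintf(stderr, "skipping invalid packet\n");

		pkt += SPH_SIZE + TS_PACKET_SIZE;
		len -= SPH_SIZE + TS_PACKET_SIZE;
	}
	return found;
}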
84
85static inline struct node_entry *node_of(struct firedtv *fdtv)
86{
87 return container_of(fdtv->device, struct unit_directory, device)->ne;
88}
89
90static int node_lock(struct firedtv *fdtv, u64 addr, void *data, __be32 arg)
91{
92 return hpsb_node_lock(node_of(fdtv), addr, EXTCODE_COMPARE_SWAP, data,
93 (__force quadlet_t)arg);
94}
95
96static int node_read(struct firedtv *fdtv, u64 addr, void *data, size_t len)
97{
98 return hpsb_node_read(node_of(fdtv), addr, data, len);
99}
100
101static int node_write(struct firedtv *fdtv, u64 addr, void *data, size_t len)
102{
103 return hpsb_node_write(node_of(fdtv), addr, data, len);
104}
105
106#define FDTV_ISO_BUFFER_PACKETS 256
107#define FDTV_ISO_BUFFER_SIZE (FDTV_ISO_BUFFER_PACKETS * 200)
108
109static int start_iso(struct firedtv *fdtv)
110{
111 struct hpsb_iso *iso_handle;
112 int ret;
113
114 iso_handle = hpsb_iso_recv_init(node_of(fdtv)->host,
115 FDTV_ISO_BUFFER_SIZE, FDTV_ISO_BUFFER_PACKETS,
116 fdtv->isochannel, HPSB_ISO_DMA_DEFAULT,
117 -1, /* stat.config.irq_interval */
118 rawiso_activity_cb);
119 if (iso_handle == NULL) {
120 dev_err(fdtv->device, "cannot initialize iso receive\n");
121 return -ENOMEM;
122 }
123 fdtv->backend_data = iso_handle;
124
125 ret = hpsb_iso_recv_start(iso_handle, -1, -1, 0);
126 if (ret != 0) {
127 dev_err(fdtv->device, "cannot start iso receive\n");
128 hpsb_iso_shutdown(iso_handle);
129 fdtv->backend_data = NULL;
130 }
131 return ret;
132}
133
134static void stop_iso(struct firedtv *fdtv)
135{
136 struct hpsb_iso *iso_handle = fdtv->backend_data;
137
138 if (iso_handle != NULL) {
139 hpsb_iso_stop(iso_handle);
140 hpsb_iso_shutdown(iso_handle);
141 }
142 fdtv->backend_data = NULL;
143}
144
145static const struct firedtv_backend fdtv_1394_backend = {
146 .lock = node_lock,
147 .read = node_read,
148 .write = node_write,
149 .start_iso = start_iso,
150 .stop_iso = stop_iso,
151};
152
153static void fcp_request(struct hpsb_host *host, int nodeid, int direction,
154 int cts, u8 *data, size_t length)
155{
156 struct firedtv *f, *fdtv = NULL;
157 unsigned long flags;
158 int su;
159
160 if (length == 0 || (data[0] & 0xf0) != 0)
161 return;
162
163 su = data[1] & 0x7;
164
165 spin_lock_irqsave(&node_list_lock, flags);
166 list_for_each_entry(f, &node_list, list)
167 if (node_of(f)->host == host &&
168 node_of(f)->nodeid == nodeid &&
169 (f->subunit == su || (f->subunit == 0 && su == 0x7))) {
170 fdtv = f;
171 break;
172 }
173 spin_unlock_irqrestore(&node_list_lock, flags);
174
175 if (fdtv)
176 avc_recv(fdtv, data, length);
177}
178
179static int node_probe(struct device *dev)
180{
181 struct unit_directory *ud =
182 container_of(dev, struct unit_directory, device);
183 struct firedtv *fdtv;
184 int kv_len, err;
185 void *kv_str;
186
187 kv_len = (ud->model_name_kv->value.leaf.len - 2) * sizeof(quadlet_t);
188 kv_str = CSR1212_TEXTUAL_DESCRIPTOR_LEAF_DATA(ud->model_name_kv);
189
190 fdtv = fdtv_alloc(dev, &fdtv_1394_backend, kv_str, kv_len);
191 if (!fdtv)
192 return -ENOMEM;
193
194 /*
195 * Work around a bug in udev's path_id script: Use the fw-host's dev
196 * instead of the unit directory's dev as parent of the input device.
197 */
198 err = fdtv_register_rc(fdtv, dev->parent->parent);
199 if (err)
200 goto fail_free;
201
202 spin_lock_irq(&node_list_lock);
203 list_add_tail(&fdtv->list, &node_list);
204 spin_unlock_irq(&node_list_lock);
205
206 err = avc_identify_subunit(fdtv);
207 if (err)
208 goto fail;
209
210 err = fdtv_dvb_register(fdtv);
211 if (err)
212 goto fail;
213
214 avc_register_remote_control(fdtv);
215 return 0;
216fail:
217 spin_lock_irq(&node_list_lock);
218 list_del(&fdtv->list);
219 spin_unlock_irq(&node_list_lock);
220 fdtv_unregister_rc(fdtv);
221fail_free:
222 kfree(fdtv);
223 return err;
224}
225
226static int node_remove(struct device *dev)
227{
228 struct firedtv *fdtv = dev->driver_data;
229
230 fdtv_dvb_unregister(fdtv);
231
232 spin_lock_irq(&node_list_lock);
233 list_del(&fdtv->list);
234 spin_unlock_irq(&node_list_lock);
235
236 cancel_work_sync(&fdtv->remote_ctrl_work);
237 fdtv_unregister_rc(fdtv);
238
239 kfree(fdtv);
240 return 0;
241}
242
243static int node_update(struct unit_directory *ud)
244{
245 struct firedtv *fdtv = ud->device.driver_data;
246
247 if (fdtv->isochannel >= 0)
248 cmp_establish_pp_connection(fdtv, fdtv->subunit,
249 fdtv->isochannel);
250 return 0;
251}
252
253static struct hpsb_protocol_driver fdtv_driver = {
254 .name = "firedtv",
255 .update = node_update,
256 .driver = {
257 .probe = node_probe,
258 .remove = node_remove,
259 },
260};
261
262static struct hpsb_highlevel fdtv_highlevel = {
263 .name = "firedtv",
264 .fcp_request = fcp_request,
265};
266
267int __init fdtv_1394_init(struct ieee1394_device_id id_table[])
268{
269 int ret;
270
271 hpsb_register_highlevel(&fdtv_highlevel);
272 fdtv_driver.id_table = id_table;
273 ret = hpsb_register_protocol(&fdtv_driver);
274 if (ret) {
275 printk(KERN_ERR "firedtv: failed to register protocol\n");
276 hpsb_unregister_highlevel(&fdtv_highlevel);
277 }
278 return ret;
279}
280
281void __exit fdtv_1394_exit(void)
282{
283 hpsb_unregister_protocol(&fdtv_driver);
284 hpsb_unregister_highlevel(&fdtv_highlevel);
285}
diff --git a/drivers/media/dvb/firewire/firedtv-avc.c b/drivers/media/dvb/firewire/firedtv-avc.c
new file mode 100644
index 000000000000..b55d9ccaf33e
--- /dev/null
+++ b/drivers/media/dvb/firewire/firedtv-avc.c
@@ -0,0 +1,1315 @@
1/*
2 * FireDTV driver (formerly known as FireSAT)
3 *
4 * Copyright (C) 2004 Andreas Monitzer <andy@monitzer.com>
5 * Copyright (C) 2008 Ben Backx <ben@bbackx.com>
6 * Copyright (C) 2008 Henrik Kurelid <henrik@kurelid.se>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License as
10 * published by the Free Software Foundation; either version 2 of
11 * the License, or (at your option) any later version.
12 */
13
14#include <linux/bug.h>
15#include <linux/crc32.h>
16#include <linux/delay.h>
17#include <linux/device.h>
18#include <linux/jiffies.h>
19#include <linux/kernel.h>
20#include <linux/moduleparam.h>
21#include <linux/mutex.h>
22#include <linux/string.h>
23#include <linux/stringify.h>
24#include <linux/wait.h>
25#include <linux/workqueue.h>
26
27#include "firedtv.h"
28
29#define FCP_COMMAND_REGISTER 0xfffff0000b00ULL
30
31#define AVC_CTYPE_CONTROL 0x0
32#define AVC_CTYPE_STATUS 0x1
33#define AVC_CTYPE_NOTIFY 0x3
34
35#define AVC_RESPONSE_ACCEPTED 0x9
36#define AVC_RESPONSE_STABLE 0xc
37#define AVC_RESPONSE_CHANGED 0xd
38#define AVC_RESPONSE_INTERIM 0xf
39
40#define AVC_SUBUNIT_TYPE_TUNER (0x05 << 3)
41#define AVC_SUBUNIT_TYPE_UNIT (0x1f << 3)
42
43#define AVC_OPCODE_VENDOR 0x00
44#define AVC_OPCODE_READ_DESCRIPTOR 0x09
45#define AVC_OPCODE_DSIT 0xc8
46#define AVC_OPCODE_DSD 0xcb
47
48#define DESCRIPTOR_TUNER_STATUS 0x80
49#define DESCRIPTOR_SUBUNIT_IDENTIFIER 0x00
50
51#define SFE_VENDOR_DE_COMPANYID_0 0x00 /* OUI of Digital Everywhere */
52#define SFE_VENDOR_DE_COMPANYID_1 0x12
53#define SFE_VENDOR_DE_COMPANYID_2 0x87
54
55#define SFE_VENDOR_OPCODE_REGISTER_REMOTE_CONTROL 0x0a
56#define SFE_VENDOR_OPCODE_LNB_CONTROL 0x52
57#define SFE_VENDOR_OPCODE_TUNE_QPSK 0x58 /* for DVB-S */
58
59#define SFE_VENDOR_OPCODE_GET_FIRMWARE_VERSION 0x00
60#define SFE_VENDOR_OPCODE_HOST2CA 0x56
61#define SFE_VENDOR_OPCODE_CA2HOST 0x57
62#define SFE_VENDOR_OPCODE_CISTATUS 0x59
63#define SFE_VENDOR_OPCODE_TUNE_QPSK2 0x60 /* for DVB-S2 */
64
65#define SFE_VENDOR_TAG_CA_RESET 0x00
66#define SFE_VENDOR_TAG_CA_APPLICATION_INFO 0x01
67#define SFE_VENDOR_TAG_CA_PMT 0x02
68#define SFE_VENDOR_TAG_CA_DATE_TIME 0x04
69#define SFE_VENDOR_TAG_CA_MMI 0x05
70#define SFE_VENDOR_TAG_CA_ENTER_MENU 0x07
71
72#define EN50221_LIST_MANAGEMENT_ONLY 0x03
73#define EN50221_TAG_APP_INFO 0x9f8021
74#define EN50221_TAG_CA_INFO 0x9f8031
75
76struct avc_command_frame {
77 int length;
78 u8 ctype;
79 u8 subunit;
80 u8 opcode;
81 u8 operand[509];
82};
83
84struct avc_response_frame {
85 int length;
86 u8 response;
87 u8 subunit;
88 u8 opcode;
89 u8 operand[509];
90};
91
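In these frames the int length member is host-side bookkeeping only; as __avc_write() further down shows, the bytes actually sent to the FCP command register start at ctype and run for length bytes (ctype, subunit, opcode, then the operands). A small standalone sketch of that layout assumption, reusing the field names above:

#include <stddef.h>
#include <stdio.h>

struct avc_command_frame {
	int length;		/* not transmitted */
	unsigned char ctype;	/* first byte on the wire */
	unsigned char subunit;
	unsigned char opcode;
	unsigned char operand[509];
};

int main(void)
{
	struct avc_command_frame c = { .length = 8, .ctype = 0x00 /* CONTROL */ };
	const void *wire = &c.ctype;	/* what gets handed to the backend write */

	printf("wire frame starts at offset %zu, %d bytes long\n",
	       offsetof(struct avc_command_frame, ctype), c.length);
	(void)wire;
	return 0;
}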
92#define AVC_DEBUG_FCP_SUBACTIONS 1
93#define AVC_DEBUG_FCP_PAYLOADS 2
94
95static int avc_debug;
96module_param_named(debug, avc_debug, int, 0644);
97MODULE_PARM_DESC(debug, "Verbose logging (default = 0"
98 ", FCP subactions = " __stringify(AVC_DEBUG_FCP_SUBACTIONS)
99 ", FCP payloads = " __stringify(AVC_DEBUG_FCP_PAYLOADS)
100 ", or all = -1)");
101
102static const char *debug_fcp_ctype(unsigned int ctype)
103{
104 static const char *ctypes[] = {
105 [0x0] = "CONTROL", [0x1] = "STATUS",
106 [0x2] = "SPECIFIC INQUIRY", [0x3] = "NOTIFY",
107 [0x4] = "GENERAL INQUIRY", [0x8] = "NOT IMPLEMENTED",
108 [0x9] = "ACCEPTED", [0xa] = "REJECTED",
109 [0xb] = "IN TRANSITION", [0xc] = "IMPLEMENTED/STABLE",
110 [0xd] = "CHANGED", [0xf] = "INTERIM",
111 };
112 const char *ret = ctype < ARRAY_SIZE(ctypes) ? ctypes[ctype] : NULL;
113
114 return ret ? ret : "?";
115}
116
117static const char *debug_fcp_opcode(unsigned int opcode,
118 const u8 *data, size_t length)
119{
120 switch (opcode) {
121 case AVC_OPCODE_VENDOR: break;
122 case AVC_OPCODE_READ_DESCRIPTOR: return "ReadDescriptor";
123 case AVC_OPCODE_DSIT: return "DirectSelectInfo.Type";
124 case AVC_OPCODE_DSD: return "DirectSelectData";
125 default: return "?";
126 }
127
128 if (length < 7 ||
129 data[3] != SFE_VENDOR_DE_COMPANYID_0 ||
130 data[4] != SFE_VENDOR_DE_COMPANYID_1 ||
131 data[5] != SFE_VENDOR_DE_COMPANYID_2)
132 return "Vendor";
133
134 switch (data[6]) {
135 case SFE_VENDOR_OPCODE_REGISTER_REMOTE_CONTROL: return "RegisterRC";
136 case SFE_VENDOR_OPCODE_LNB_CONTROL: return "LNBControl";
137 case SFE_VENDOR_OPCODE_TUNE_QPSK: return "TuneQPSK";
138 case SFE_VENDOR_OPCODE_HOST2CA: return "Host2CA";
139 case SFE_VENDOR_OPCODE_CA2HOST: return "CA2Host";
140 }
141 return "Vendor";
142}
143
144static void debug_fcp(const u8 *data, size_t length)
145{
146 unsigned int subunit_type, subunit_id, op;
147 const char *prefix = data[0] > 7 ? "FCP <- " : "FCP -> ";
148
149 if (avc_debug & AVC_DEBUG_FCP_SUBACTIONS) {
150 subunit_type = data[1] >> 3;
151 subunit_id = data[1] & 7;
152 op = subunit_type == 0x1e || subunit_id == 5 ? ~0 : data[2];
153 printk(KERN_INFO "%ssu=%x.%x l=%zu: %-8s - %s\n",
154 prefix, subunit_type, subunit_id, length,
155 debug_fcp_ctype(data[0]),
156 debug_fcp_opcode(op, data, length));
157 }
158
159 if (avc_debug & AVC_DEBUG_FCP_PAYLOADS)
160 print_hex_dump(KERN_INFO, prefix, DUMP_PREFIX_NONE, 16, 1,
161 data, length, false);
162}
163
164static int __avc_write(struct firedtv *fdtv,
165 const struct avc_command_frame *c, struct avc_response_frame *r)
166{
167 int err, retry;
168
169 if (r)
170 fdtv->avc_reply_received = false;
171
172 for (retry = 0; retry < 6; retry++) {
173 if (unlikely(avc_debug))
174 debug_fcp(&c->ctype, c->length);
175
176 err = fdtv->backend->write(fdtv, FCP_COMMAND_REGISTER,
177 (void *)&c->ctype, c->length);
178 if (err) {
179 fdtv->avc_reply_received = true;
180 dev_err(fdtv->device, "FCP command write failed\n");
181 return err;
182 }
183
184 if (!r)
185 return 0;
186
187 /*
188 * AV/C specs say that answers should be sent within 150 ms.
189 * Time out after 200 ms.
190 */
191 if (wait_event_timeout(fdtv->avc_wait,
192 fdtv->avc_reply_received,
193 msecs_to_jiffies(200)) != 0) {
194 r->length = fdtv->response_length;
195 memcpy(&r->response, fdtv->response, r->length);
196
197 return 0;
198 }
199 }
200 dev_err(fdtv->device, "FCP response timed out\n");
201 return -ETIMEDOUT;
202}
203
204static int avc_write(struct firedtv *fdtv,
205 const struct avc_command_frame *c, struct avc_response_frame *r)
206{
207 int ret;
208
209 if (mutex_lock_interruptible(&fdtv->avc_mutex))
210 return -EINTR;
211
212 ret = __avc_write(fdtv, c, r);
213
214 mutex_unlock(&fdtv->avc_mutex);
215 return ret;
216}
217
218int avc_recv(struct firedtv *fdtv, void *data, size_t length)
219{
220 struct avc_response_frame *r =
221 data - offsetof(struct avc_response_frame, response);
222
223 if (unlikely(avc_debug))
224 debug_fcp(data, length);
225
226 if (length >= 8 &&
227 r->operand[0] == SFE_VENDOR_DE_COMPANYID_0 &&
228 r->operand[1] == SFE_VENDOR_DE_COMPANYID_1 &&
229 r->operand[2] == SFE_VENDOR_DE_COMPANYID_2 &&
230 r->operand[3] == SFE_VENDOR_OPCODE_REGISTER_REMOTE_CONTROL) {
231 if (r->response == AVC_RESPONSE_CHANGED) {
232 fdtv_handle_rc(fdtv,
233 r->operand[4] << 8 | r->operand[5]);
234 schedule_work(&fdtv->remote_ctrl_work);
235 } else if (r->response != AVC_RESPONSE_INTERIM) {
236 dev_info(fdtv->device,
237 "remote control result = %d\n", r->response);
238 }
239 return 0;
240 }
241
242 if (fdtv->avc_reply_received) {
243 dev_err(fdtv->device, "out-of-order AVC response, ignored\n");
244 return -EIO;
245 }
246
247 memcpy(fdtv->response, data, length);
248 fdtv->response_length = length;
249
250 fdtv->avc_reply_received = true;
251 wake_up(&fdtv->avc_wait);
252
253 return 0;
254}
255
256/*
257 * tuning command for setting the relative LNB frequency
258 * (not supported by the AVC standard)
259 */
260static void avc_tuner_tuneqpsk(struct firedtv *fdtv,
261 struct dvb_frontend_parameters *params,
262 struct avc_command_frame *c)
263{
264 c->opcode = AVC_OPCODE_VENDOR;
265
266 c->operand[0] = SFE_VENDOR_DE_COMPANYID_0;
267 c->operand[1] = SFE_VENDOR_DE_COMPANYID_1;
268 c->operand[2] = SFE_VENDOR_DE_COMPANYID_2;
269 c->operand[3] = SFE_VENDOR_OPCODE_TUNE_QPSK;
270
271 c->operand[4] = (params->frequency >> 24) & 0xff;
272 c->operand[5] = (params->frequency >> 16) & 0xff;
273 c->operand[6] = (params->frequency >> 8) & 0xff;
274 c->operand[7] = params->frequency & 0xff;
275
276 c->operand[8] = ((params->u.qpsk.symbol_rate / 1000) >> 8) & 0xff;
277 c->operand[9] = (params->u.qpsk.symbol_rate / 1000) & 0xff;
278
279 switch (params->u.qpsk.fec_inner) {
280 case FEC_1_2: c->operand[10] = 0x1; break;
281 case FEC_2_3: c->operand[10] = 0x2; break;
282 case FEC_3_4: c->operand[10] = 0x3; break;
283 case FEC_5_6: c->operand[10] = 0x4; break;
284 case FEC_7_8: c->operand[10] = 0x5; break;
285 case FEC_4_5:
286 case FEC_8_9:
287 case FEC_AUTO:
288 default: c->operand[10] = 0x0;
289 }
290
291 if (fdtv->voltage == 0xff)
292 c->operand[11] = 0xff;
293 else if (fdtv->voltage == SEC_VOLTAGE_18) /* polarisation */
294 c->operand[11] = 0;
295 else
296 c->operand[11] = 1;
297
298 if (fdtv->tone == 0xff)
299 c->operand[12] = 0xff;
300 else if (fdtv->tone == SEC_TONE_ON) /* band */
301 c->operand[12] = 1;
302 else
303 c->operand[12] = 0;
304
305 if (fdtv->type == FIREDTV_DVB_S2) {
306 c->operand[13] = 0x1;
307 c->operand[14] = 0xff;
308 c->operand[15] = 0xff;
309 c->length = 20;
310 } else {
311 c->length = 16;
312 }
313}
314
315static void avc_tuner_dsd_dvb_c(struct dvb_frontend_parameters *params,
316 struct avc_command_frame *c)
317{
318 c->opcode = AVC_OPCODE_DSD;
319
320 c->operand[0] = 0; /* source plug */
321 c->operand[1] = 0xd2; /* subfunction replace */
322 c->operand[2] = 0x20; /* system id = DVB */
323 c->operand[3] = 0x00; /* antenna number */
324 c->operand[4] = 0x11; /* system_specific_multiplex selection_length */
325
326 /* multiplex_valid_flags, high byte */
327 c->operand[5] = 0 << 7 /* reserved */
328 | 0 << 6 /* Polarisation */
329 | 0 << 5 /* Orbital_Pos */
330 | 1 << 4 /* Frequency */
331 | 1 << 3 /* Symbol_Rate */
332 | 0 << 2 /* FEC_outer */
333 | (params->u.qam.fec_inner != FEC_AUTO ? 1 << 1 : 0)
334 | (params->u.qam.modulation != QAM_AUTO ? 1 << 0 : 0);
335
336 /* multiplex_valid_flags, low byte */
337 c->operand[6] = 0 << 7 /* NetworkID */
338 | 0 << 0 /* reserved */ ;
339
340 c->operand[7] = 0x00;
341 c->operand[8] = 0x00;
342 c->operand[9] = 0x00;
343 c->operand[10] = 0x00;
344
345 c->operand[11] = (((params->frequency / 4000) >> 16) & 0xff) | (2 << 6);
346 c->operand[12] = ((params->frequency / 4000) >> 8) & 0xff;
347 c->operand[13] = (params->frequency / 4000) & 0xff;
348 c->operand[14] = ((params->u.qpsk.symbol_rate / 1000) >> 12) & 0xff;
349 c->operand[15] = ((params->u.qpsk.symbol_rate / 1000) >> 4) & 0xff;
350 c->operand[16] = ((params->u.qpsk.symbol_rate / 1000) << 4) & 0xf0;
351 c->operand[17] = 0x00;
352
353 switch (params->u.qpsk.fec_inner) {
354 case FEC_1_2: c->operand[18] = 0x1; break;
355 case FEC_2_3: c->operand[18] = 0x2; break;
356 case FEC_3_4: c->operand[18] = 0x3; break;
357 case FEC_5_6: c->operand[18] = 0x4; break;
358 case FEC_7_8: c->operand[18] = 0x5; break;
359 case FEC_8_9: c->operand[18] = 0x6; break;
360 case FEC_4_5: c->operand[18] = 0x8; break;
361 case FEC_AUTO:
362 default: c->operand[18] = 0x0;
363 }
364
365 switch (params->u.qam.modulation) {
366 case QAM_16: c->operand[19] = 0x08; break;
367 case QAM_32: c->operand[19] = 0x10; break;
368 case QAM_64: c->operand[19] = 0x18; break;
369 case QAM_128: c->operand[19] = 0x20; break;
370 case QAM_256: c->operand[19] = 0x28; break;
371 case QAM_AUTO:
372 default: c->operand[19] = 0x00;
373 }
374
375 c->operand[20] = 0x00;
376 c->operand[21] = 0x00;
377 /* Nr_of_dsd_sel_specs = 0 -> no PIDs are transmitted */
378 c->operand[22] = 0x00;
379
380 c->length = 28;
381}
382
383static void avc_tuner_dsd_dvb_t(struct dvb_frontend_parameters *params,
384 struct avc_command_frame *c)
385{
386 struct dvb_ofdm_parameters *ofdm = &params->u.ofdm;
387
388 c->opcode = AVC_OPCODE_DSD;
389
390 c->operand[0] = 0; /* source plug */
391 c->operand[1] = 0xd2; /* subfunction replace */
392 c->operand[2] = 0x20; /* system id = DVB */
393 c->operand[3] = 0x00; /* antenna number */
394 c->operand[4] = 0x0c; /* system_specific_multiplex selection_length */
395
396 /* multiplex_valid_flags, high byte */
397 c->operand[5] =
398 0 << 7 /* reserved */
399 | 1 << 6 /* CenterFrequency */
400 | (ofdm->bandwidth != BANDWIDTH_AUTO ? 1 << 5 : 0)
401 | (ofdm->constellation != QAM_AUTO ? 1 << 4 : 0)
402 | (ofdm->hierarchy_information != HIERARCHY_AUTO ? 1 << 3 : 0)
403 | (ofdm->code_rate_HP != FEC_AUTO ? 1 << 2 : 0)
404 | (ofdm->code_rate_LP != FEC_AUTO ? 1 << 1 : 0)
405 | (ofdm->guard_interval != GUARD_INTERVAL_AUTO ? 1 << 0 : 0);
406
407 /* multiplex_valid_flags, low byte */
408 c->operand[6] =
409 0 << 7 /* NetworkID */
410 | (ofdm->transmission_mode != TRANSMISSION_MODE_AUTO ? 1 << 6 : 0)
411 | 0 << 5 /* OtherFrequencyFlag */
412 | 0 << 0 /* reserved */ ;
413
414 c->operand[7] = 0x0;
415 c->operand[8] = (params->frequency / 10) >> 24;
416 c->operand[9] = ((params->frequency / 10) >> 16) & 0xff;
417 c->operand[10] = ((params->frequency / 10) >> 8) & 0xff;
418 c->operand[11] = (params->frequency / 10) & 0xff;
419
420 switch (ofdm->bandwidth) {
421 case BANDWIDTH_7_MHZ: c->operand[12] = 0x20; break;
422 case BANDWIDTH_8_MHZ:
423 case BANDWIDTH_6_MHZ: /* not defined by AVC spec */
424 case BANDWIDTH_AUTO:
425 default: c->operand[12] = 0x00;
426 }
427
428 switch (ofdm->constellation) {
429 case QAM_16: c->operand[13] = 1 << 6; break;
430 case QAM_64: c->operand[13] = 2 << 6; break;
431 case QPSK:
432 default: c->operand[13] = 0x00;
433 }
434
435 switch (ofdm->hierarchy_information) {
436 case HIERARCHY_1: c->operand[13] |= 1 << 3; break;
437 case HIERARCHY_2: c->operand[13] |= 2 << 3; break;
438 case HIERARCHY_4: c->operand[13] |= 3 << 3; break;
439 case HIERARCHY_AUTO:
440 case HIERARCHY_NONE:
441 default: break;
442 }
443
444 switch (ofdm->code_rate_HP) {
445 case FEC_2_3: c->operand[13] |= 1; break;
446 case FEC_3_4: c->operand[13] |= 2; break;
447 case FEC_5_6: c->operand[13] |= 3; break;
448 case FEC_7_8: c->operand[13] |= 4; break;
449 case FEC_1_2:
450 default: break;
451 }
452
453 switch (ofdm->code_rate_LP) {
454 case FEC_2_3: c->operand[14] = 1 << 5; break;
455 case FEC_3_4: c->operand[14] = 2 << 5; break;
456 case FEC_5_6: c->operand[14] = 3 << 5; break;
457 case FEC_7_8: c->operand[14] = 4 << 5; break;
458 case FEC_1_2:
459 default: c->operand[14] = 0x00; break;
460 }
461
462 switch (ofdm->guard_interval) {
463 case GUARD_INTERVAL_1_16: c->operand[14] |= 1 << 3; break;
464 case GUARD_INTERVAL_1_8: c->operand[14] |= 2 << 3; break;
465 case GUARD_INTERVAL_1_4: c->operand[14] |= 3 << 3; break;
466 case GUARD_INTERVAL_1_32:
467 case GUARD_INTERVAL_AUTO:
468 default: break;
469 }
470
471 switch (ofdm->transmission_mode) {
472 case TRANSMISSION_MODE_8K: c->operand[14] |= 1 << 1; break;
473 case TRANSMISSION_MODE_2K:
474 case TRANSMISSION_MODE_AUTO:
475 default: break;
476 }
477
478 c->operand[15] = 0x00; /* network_ID[0] */
479 c->operand[16] = 0x00; /* network_ID[1] */
480 /* Nr_of_dsd_sel_specs = 0 -> no PIDs are transmitted */
481 c->operand[17] = 0x00;
482
483 c->length = 24;
484}
485
486int avc_tuner_dsd(struct firedtv *fdtv,
487 struct dvb_frontend_parameters *params)
488{
489 char buffer[sizeof(struct avc_command_frame)];
490 struct avc_command_frame *c = (void *)buffer;
491 struct avc_response_frame *r = (void *)buffer; /* FIXME: unused */
492
493 memset(c, 0, sizeof(*c));
494
495 c->ctype = AVC_CTYPE_CONTROL;
496 c->subunit = AVC_SUBUNIT_TYPE_TUNER | fdtv->subunit;
497
498 switch (fdtv->type) {
499 case FIREDTV_DVB_S:
500 case FIREDTV_DVB_S2: avc_tuner_tuneqpsk(fdtv, params, c); break;
501 case FIREDTV_DVB_C: avc_tuner_dsd_dvb_c(params, c); break;
502 case FIREDTV_DVB_T: avc_tuner_dsd_dvb_t(params, c); break;
503 default:
504 BUG();
505 }
506
507 if (avc_write(fdtv, c, r) < 0)
508 return -EIO;
509
510 msleep(500);
511#if 0
512 /* FIXME: */
513 /* u8 *status was an out-parameter of avc_tuner_dsd, unused by caller */
514 if (status)
515 *status = r->operand[2];
516#endif
517 return 0;
518}
519
520int avc_tuner_set_pids(struct firedtv *fdtv, unsigned char pidc, u16 pid[])
521{
522 char buffer[sizeof(struct avc_command_frame)];
523 struct avc_command_frame *c = (void *)buffer;
524 struct avc_response_frame *r = (void *)buffer; /* FIXME: unused */
525 int pos, k;
526
527 if (pidc > 16 && pidc != 0xff)
528 return -EINVAL;
529
530 memset(c, 0, sizeof(*c));
531
532 c->ctype = AVC_CTYPE_CONTROL;
533 c->subunit = AVC_SUBUNIT_TYPE_TUNER | fdtv->subunit;
534 c->opcode = AVC_OPCODE_DSD;
535
536 c->operand[0] = 0; /* source plug */
537 c->operand[1] = 0xd2; /* subfunction replace */
538 c->operand[2] = 0x20; /* system id = DVB */
539 c->operand[3] = 0x00; /* antenna number */
540 c->operand[4] = 0x00; /* system_specific_multiplex selection_length */
541 c->operand[5] = pidc; /* Nr_of_dsd_sel_specs */
542
543 pos = 6;
544 if (pidc != 0xff)
545 for (k = 0; k < pidc; k++) {
546 c->operand[pos++] = 0x13; /* flowfunction relay */
547 c->operand[pos++] = 0x80; /* dsd_sel_spec_valid_flags -> PID */
548 c->operand[pos++] = (pid[k] >> 8) & 0x1f;
549 c->operand[pos++] = pid[k] & 0xff;
550 c->operand[pos++] = 0x00; /* tableID */
551 c->operand[pos++] = 0x00; /* filter_length */
552 }
553
554 c->length = ALIGN(3 + pos, 4);
555
556 if (avc_write(fdtv, c, r) < 0)
557 return -EIO;
558
559 msleep(50);
560 return 0;
561}
562
563int avc_tuner_get_ts(struct firedtv *fdtv)
564{
565 char buffer[sizeof(struct avc_command_frame)];
566 struct avc_command_frame *c = (void *)buffer;
567 struct avc_response_frame *r = (void *)buffer; /* FIXME: unused */
568 int sl;
569
570 memset(c, 0, sizeof(*c));
571
572 c->ctype = AVC_CTYPE_CONTROL;
573 c->subunit = AVC_SUBUNIT_TYPE_TUNER | fdtv->subunit;
574 c->opcode = AVC_OPCODE_DSIT;
575
576 sl = fdtv->type == FIREDTV_DVB_T ? 0x0c : 0x11;
577
578 c->operand[0] = 0; /* source plug */
579 c->operand[1] = 0xd2; /* subfunction replace */
580 c->operand[2] = 0xff; /* status */
581 c->operand[3] = 0x20; /* system id = DVB */
582 c->operand[4] = 0x00; /* antenna number */
583 c->operand[5] = 0x0; /* system_specific_search_flags */
584 c->operand[6] = sl; /* system_specific_multiplex selection_length */
585 c->operand[7] = 0x00; /* valid_flags [0] */
586 c->operand[8] = 0x00; /* valid_flags [1] */
587 c->operand[7 + sl] = 0x00; /* nr_of_dsit_sel_specs (always 0) */
588
589 c->length = fdtv->type == FIREDTV_DVB_T ? 24 : 28;
590
591 if (avc_write(fdtv, c, r) < 0)
592 return -EIO;
593
594 msleep(250);
595 return 0;
596}
597
598int avc_identify_subunit(struct firedtv *fdtv)
599{
600 char buffer[sizeof(struct avc_command_frame)];
601 struct avc_command_frame *c = (void *)buffer;
602 struct avc_response_frame *r = (void *)buffer;
603
604 memset(c, 0, sizeof(*c));
605
606 c->ctype = AVC_CTYPE_CONTROL;
607 c->subunit = AVC_SUBUNIT_TYPE_TUNER | fdtv->subunit;
608 c->opcode = AVC_OPCODE_READ_DESCRIPTOR;
609
610 c->operand[0] = DESCRIPTOR_SUBUNIT_IDENTIFIER;
611 c->operand[1] = 0xff;
612 c->operand[2] = 0x00;
613 c->operand[3] = 0x00; /* length highbyte */
614 c->operand[4] = 0x08; /* length lowbyte */
615 c->operand[5] = 0x00; /* offset highbyte */
616 c->operand[6] = 0x0d; /* offset lowbyte */
617
618 c->length = 12;
619
620 if (avc_write(fdtv, c, r) < 0)
621 return -EIO;
622
623 if ((r->response != AVC_RESPONSE_STABLE &&
624 r->response != AVC_RESPONSE_ACCEPTED) ||
625 (r->operand[3] << 8) + r->operand[4] != 8) {
626 dev_err(fdtv->device, "cannot read subunit identifier\n");
627 return -EINVAL;
628 }
629 return 0;
630}
631
632#define SIZEOF_ANTENNA_INPUT_INFO 22
633
634int avc_tuner_status(struct firedtv *fdtv, struct firedtv_tuner_status *stat)
635{
636 char buffer[sizeof(struct avc_command_frame)];
637 struct avc_command_frame *c = (void *)buffer;
638 struct avc_response_frame *r = (void *)buffer;
639 int length;
640
641 memset(c, 0, sizeof(*c));
642
643 c->ctype = AVC_CTYPE_CONTROL;
644 c->subunit = AVC_SUBUNIT_TYPE_TUNER | fdtv->subunit;
645 c->opcode = AVC_OPCODE_READ_DESCRIPTOR;
646
647 c->operand[0] = DESCRIPTOR_TUNER_STATUS;
648 c->operand[1] = 0xff; /* read_result_status */
649 c->operand[2] = 0x00; /* reserved */
650 c->operand[3] = 0; /* SIZEOF_ANTENNA_INPUT_INFO >> 8; */
651 c->operand[4] = 0; /* SIZEOF_ANTENNA_INPUT_INFO & 0xff; */
652 c->operand[5] = 0x00;
653 c->operand[6] = 0x00;
654
655 c->length = 12;
656
657 if (avc_write(fdtv, c, r) < 0)
658 return -EIO;
659
660 if (r->response != AVC_RESPONSE_STABLE &&
661 r->response != AVC_RESPONSE_ACCEPTED) {
662 dev_err(fdtv->device, "cannot read tuner status\n");
663 return -EINVAL;
664 }
665
666 length = r->operand[9];
667 if (r->operand[1] != 0x10 || length != SIZEOF_ANTENNA_INPUT_INFO) {
668 dev_err(fdtv->device, "got invalid tuner status\n");
669 return -EINVAL;
670 }
671
672 stat->active_system = r->operand[10];
673 stat->searching = r->operand[11] >> 7 & 1;
674 stat->moving = r->operand[11] >> 6 & 1;
675 stat->no_rf = r->operand[11] >> 5 & 1;
676 stat->input = r->operand[12] >> 7 & 1;
677 stat->selected_antenna = r->operand[12] & 0x7f;
678 stat->ber = r->operand[13] << 24 |
679 r->operand[14] << 16 |
680 r->operand[15] << 8 |
681 r->operand[16];
682 stat->signal_strength = r->operand[17];
683 stat->raster_frequency = r->operand[18] >> 6 & 2;
684 stat->rf_frequency = (r->operand[18] & 0x3f) << 16 |
685 r->operand[19] << 8 |
686 r->operand[20];
687 stat->man_dep_info_length = r->operand[21];
688 stat->front_end_error = r->operand[22] >> 4 & 1;
689 stat->antenna_error = r->operand[22] >> 3 & 1;
690 stat->front_end_power_status = r->operand[22] >> 1 & 1;
691 stat->power_supply = r->operand[22] & 1;
692 stat->carrier_noise_ratio = r->operand[23] << 8 |
693 r->operand[24];
694 stat->power_supply_voltage = r->operand[27];
695 stat->antenna_voltage = r->operand[28];
696 stat->firewire_bus_voltage = r->operand[29];
697 stat->ca_mmi = r->operand[30] & 1;
698 stat->ca_pmt_reply = r->operand[31] >> 7 & 1;
699 stat->ca_date_time_request = r->operand[31] >> 6 & 1;
700 stat->ca_application_info = r->operand[31] >> 5 & 1;
701 stat->ca_module_present_status = r->operand[31] >> 4 & 1;
702 stat->ca_dvb_flag = r->operand[31] >> 3 & 1;
703 stat->ca_error_flag = r->operand[31] >> 2 & 1;
704 stat->ca_initialization_status = r->operand[31] >> 1 & 1;
705
706 return 0;
707}
708
709int avc_lnb_control(struct firedtv *fdtv, char voltage, char burst,
710 char conttone, char nrdiseq,
711 struct dvb_diseqc_master_cmd *diseqcmd)
712{
713 char buffer[sizeof(struct avc_command_frame)];
714 struct avc_command_frame *c = (void *)buffer;
715 struct avc_response_frame *r = (void *)buffer;
716 int i, j, k;
717
718 memset(c, 0, sizeof(*c));
719
720 c->ctype = AVC_CTYPE_CONTROL;
721 c->subunit = AVC_SUBUNIT_TYPE_TUNER | fdtv->subunit;
722 c->opcode = AVC_OPCODE_VENDOR;
723
724 c->operand[0] = SFE_VENDOR_DE_COMPANYID_0;
725 c->operand[1] = SFE_VENDOR_DE_COMPANYID_1;
726 c->operand[2] = SFE_VENDOR_DE_COMPANYID_2;
727 c->operand[3] = SFE_VENDOR_OPCODE_LNB_CONTROL;
728
729 c->operand[4] = voltage;
730 c->operand[5] = nrdiseq;
731
732 i = 6;
733
734 for (j = 0; j < nrdiseq; j++) {
735 c->operand[i++] = diseqcmd[j].msg_len;
736
737 for (k = 0; k < diseqcmd[j].msg_len; k++)
738 c->operand[i++] = diseqcmd[j].msg[k];
739 }
740
741 c->operand[i++] = burst;
742 c->operand[i++] = conttone;
743
744 c->length = ALIGN(3 + i, 4);
745
746 if (avc_write(fdtv, c, r) < 0)
747 return -EIO;
748
749 if (r->response != AVC_RESPONSE_ACCEPTED) {
750 dev_err(fdtv->device, "LNB control failed\n");
751 return -EINVAL;
752 }
753
754 return 0;
755}
756
757int avc_register_remote_control(struct firedtv *fdtv)
758{
759 char buffer[sizeof(struct avc_command_frame)];
760 struct avc_command_frame *c = (void *)buffer;
761
762 memset(c, 0, sizeof(*c));
763
764 c->ctype = AVC_CTYPE_NOTIFY;
765 c->subunit = AVC_SUBUNIT_TYPE_UNIT | 7;
766 c->opcode = AVC_OPCODE_VENDOR;
767
768 c->operand[0] = SFE_VENDOR_DE_COMPANYID_0;
769 c->operand[1] = SFE_VENDOR_DE_COMPANYID_1;
770 c->operand[2] = SFE_VENDOR_DE_COMPANYID_2;
771 c->operand[3] = SFE_VENDOR_OPCODE_REGISTER_REMOTE_CONTROL;
772
773 c->length = 8;
774
775 return avc_write(fdtv, c, NULL);
776}
777
778void avc_remote_ctrl_work(struct work_struct *work)
779{
780 struct firedtv *fdtv =
781 container_of(work, struct firedtv, remote_ctrl_work);
782
783 /* Should it be rescheduled in failure cases? */
784 avc_register_remote_control(fdtv);
785}
786
787#if 0 /* FIXME: unused */
788int avc_tuner_host2ca(struct firedtv *fdtv)
789{
790 char buffer[sizeof(struct avc_command_frame)];
791 struct avc_command_frame *c = (void *)buffer;
792 struct avc_response_frame *r = (void *)buffer; /* FIXME: unused */
793
794 memset(c, 0, sizeof(*c));
795
796 c->ctype = AVC_CTYPE_CONTROL;
797 c->subunit = AVC_SUBUNIT_TYPE_TUNER | fdtv->subunit;
798 c->opcode = AVC_OPCODE_VENDOR;
799
800 c->operand[0] = SFE_VENDOR_DE_COMPANYID_0;
801 c->operand[1] = SFE_VENDOR_DE_COMPANYID_1;
802 c->operand[2] = SFE_VENDOR_DE_COMPANYID_2;
803 c->operand[3] = SFE_VENDOR_OPCODE_HOST2CA;
804 c->operand[4] = 0; /* slot */
805 c->operand[5] = SFE_VENDOR_TAG_CA_APPLICATION_INFO; /* ca tag */
806 c->operand[6] = 0; /* more/last */
807 c->operand[7] = 0; /* length */
808
809 c->length = 12;
810
811 if (avc_write(fdtv, c, r) < 0)
812 return -EIO;
813
814 return 0;
815}
816#endif
817
818static int get_ca_object_pos(struct avc_response_frame *r)
819{
820 int length = 1;
821
822 /* Check length of length field */
823 if (r->operand[7] & 0x80)
824 length = (r->operand[7] & 0x7f) + 1;
825 return length + 7;
826}
827
828static int get_ca_object_length(struct avc_response_frame *r)
829{
830#if 0 /* FIXME: unused */
831 int size = 0;
832 int i;
833
834 if (r->operand[7] & 0x80)
835 for (i = 0; i < (r->operand[7] & 0x7f); i++) {
836 size <<= 8;
837 size += r->operand[8 + i];
838 }
839#endif
840 return r->operand[7];
841}
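get_ca_object_pos() and get_ca_object_length() above decode an ASN.1 BER style length field as used by EN 50221: a value below 0x80 is the length itself, otherwise the low seven bits give the number of following length bytes (the arithmetic kept under #if 0). A standalone decoder sketch of the same encoding:

#include <stddef.h>
#include <stdio.h>

/* Returns the decoded length; *field_len receives the number of bytes
 * that the length field itself occupies. */
static size_t decode_ber_length(const unsigned char *buf, size_t *field_len)
{
	size_t len, i, n;

	if (!(buf[0] & 0x80)) {		/* short form */
		*field_len = 1;
		return buf[0];
	}

	n = buf[0] & 0x7f;		/* long form: n length bytes follow */
	len = 0;
	for (i = 0; i < n; i++)
		len = (len << 8) | buf[1 + i];

	*field_len = 1 + n;
	return len;
}

int main(void)
{
	const unsigned char shortform[] = { 0x05 };
	const unsigned char longform[]  = { 0x82, 0x01, 0x2c };	/* 300 */
	size_t fl;

	printf("%zu\n", decode_ber_length(shortform, &fl));	/* prints 5 */
	printf("%zu\n", decode_ber_length(longform, &fl));	/* prints 300 */
	return 0;
}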
842
843int avc_ca_app_info(struct firedtv *fdtv, char *app_info, unsigned int *len)
844{
845 char buffer[sizeof(struct avc_command_frame)];
846 struct avc_command_frame *c = (void *)buffer;
847 struct avc_response_frame *r = (void *)buffer;
848 int pos;
849
850 memset(c, 0, sizeof(*c));
851
852 c->ctype = AVC_CTYPE_STATUS;
853 c->subunit = AVC_SUBUNIT_TYPE_TUNER | fdtv->subunit;
854 c->opcode = AVC_OPCODE_VENDOR;
855
856 c->operand[0] = SFE_VENDOR_DE_COMPANYID_0;
857 c->operand[1] = SFE_VENDOR_DE_COMPANYID_1;
858 c->operand[2] = SFE_VENDOR_DE_COMPANYID_2;
859 c->operand[3] = SFE_VENDOR_OPCODE_CA2HOST;
860 c->operand[4] = 0; /* slot */
861 c->operand[5] = SFE_VENDOR_TAG_CA_APPLICATION_INFO; /* ca tag */
862
863 c->length = 12;
864
865 if (avc_write(fdtv, c, r) < 0)
866 return -EIO;
867
868 /* FIXME: check response code and validate response data */
869
870 pos = get_ca_object_pos(r);
871 app_info[0] = (EN50221_TAG_APP_INFO >> 16) & 0xff;
872 app_info[1] = (EN50221_TAG_APP_INFO >> 8) & 0xff;
873 app_info[2] = (EN50221_TAG_APP_INFO >> 0) & 0xff;
874 app_info[3] = 6 + r->operand[pos + 4];
875 app_info[4] = 0x01;
876 memcpy(&app_info[5], &r->operand[pos], 5 + r->operand[pos + 4]);
877 *len = app_info[3] + 4;
878
879 return 0;
880}
881
882int avc_ca_info(struct firedtv *fdtv, char *app_info, unsigned int *len)
883{
884 char buffer[sizeof(struct avc_command_frame)];
885 struct avc_command_frame *c = (void *)buffer;
886 struct avc_response_frame *r = (void *)buffer;
887 int pos;
888
889 memset(c, 0, sizeof(*c));
890
891 c->ctype = AVC_CTYPE_STATUS;
892 c->subunit = AVC_SUBUNIT_TYPE_TUNER | fdtv->subunit;
893 c->opcode = AVC_OPCODE_VENDOR;
894
895 c->operand[0] = SFE_VENDOR_DE_COMPANYID_0;
896 c->operand[1] = SFE_VENDOR_DE_COMPANYID_1;
897 c->operand[2] = SFE_VENDOR_DE_COMPANYID_2;
898 c->operand[3] = SFE_VENDOR_OPCODE_CA2HOST;
899 c->operand[4] = 0; /* slot */
900 c->operand[5] = SFE_VENDOR_TAG_CA_APPLICATION_INFO; /* ca tag */
901
902 c->length = 12;
903
904 if (avc_write(fdtv, c, r) < 0)
905 return -EIO;
906
907 pos = get_ca_object_pos(r);
908 app_info[0] = (EN50221_TAG_CA_INFO >> 16) & 0xff;
909 app_info[1] = (EN50221_TAG_CA_INFO >> 8) & 0xff;
910 app_info[2] = (EN50221_TAG_CA_INFO >> 0) & 0xff;
911 app_info[3] = 2;
912 app_info[4] = r->operand[pos + 0];
913 app_info[5] = r->operand[pos + 1];
914 *len = app_info[3] + 4;
915
916 return 0;
917}
918
919int avc_ca_reset(struct firedtv *fdtv)
920{
921 char buffer[sizeof(struct avc_command_frame)];
922 struct avc_command_frame *c = (void *)buffer;
923 struct avc_response_frame *r = (void *)buffer; /* FIXME: unused */
924
925 memset(c, 0, sizeof(*c));
926
927 c->ctype = AVC_CTYPE_CONTROL;
928 c->subunit = AVC_SUBUNIT_TYPE_TUNER | fdtv->subunit;
929 c->opcode = AVC_OPCODE_VENDOR;
930
931 c->operand[0] = SFE_VENDOR_DE_COMPANYID_0;
932 c->operand[1] = SFE_VENDOR_DE_COMPANYID_1;
933 c->operand[2] = SFE_VENDOR_DE_COMPANYID_2;
934 c->operand[3] = SFE_VENDOR_OPCODE_HOST2CA;
935 c->operand[4] = 0; /* slot */
936 c->operand[5] = SFE_VENDOR_TAG_CA_RESET; /* ca tag */
937 c->operand[6] = 0; /* more/last */
938 c->operand[7] = 1; /* length */
939 c->operand[8] = 0; /* force hardware reset */
940
941 c->length = 12;
942
943 if (avc_write(fdtv, c, r) < 0)
944 return -EIO;
945
946 return 0;
947}
948
949int avc_ca_pmt(struct firedtv *fdtv, char *msg, int length)
950{
951 char buffer[sizeof(struct avc_command_frame)];
952 struct avc_command_frame *c = (void *)buffer;
953 struct avc_response_frame *r = (void *)buffer;
954 int list_management;
955 int program_info_length;
956 int pmt_cmd_id;
957 int read_pos;
958 int write_pos;
959 int es_info_length;
960 int crc32_csum;
961
962 memset(c, 0, sizeof(*c));
963
964 c->ctype = AVC_CTYPE_CONTROL;
965 c->subunit = AVC_SUBUNIT_TYPE_TUNER | fdtv->subunit;
966 c->opcode = AVC_OPCODE_VENDOR;
967
968 if (msg[0] != EN50221_LIST_MANAGEMENT_ONLY) {
969 dev_info(fdtv->device, "forcing list_management to ONLY\n");
970 msg[0] = EN50221_LIST_MANAGEMENT_ONLY;
971 }
972 /* We take the cmd_id from the programme level only! */
973 list_management = msg[0];
974 program_info_length = ((msg[4] & 0x0f) << 8) + msg[5];
975 if (program_info_length > 0)
976 program_info_length--; /* Remove pmt_cmd_id */
977 pmt_cmd_id = msg[6];
978
979 c->operand[0] = SFE_VENDOR_DE_COMPANYID_0;
980 c->operand[1] = SFE_VENDOR_DE_COMPANYID_1;
981 c->operand[2] = SFE_VENDOR_DE_COMPANYID_2;
982 c->operand[3] = SFE_VENDOR_OPCODE_HOST2CA;
983 c->operand[4] = 0; /* slot */
984 c->operand[5] = SFE_VENDOR_TAG_CA_PMT; /* ca tag */
985 c->operand[6] = 0; /* more/last */
986 /* c->operand[7] = XXXprogram_info_length + 17; */ /* length */
987 c->operand[8] = list_management;
988 c->operand[9] = 0x01; /* pmt_cmd=OK_descramble */
989
990 /* TS program map table */
991
992 c->operand[10] = 0x02; /* Table id=2 */
993 c->operand[11] = 0x80; /* Section syntax + length */
994 /* c->operand[12] = XXXprogram_info_length + 12; */
995 c->operand[13] = msg[1]; /* Program number */
996 c->operand[14] = msg[2];
997 c->operand[15] = 0x01; /* Version number=0 + current/next=1 */
998 c->operand[16] = 0x00; /* Section number=0 */
999 c->operand[17] = 0x00; /* Last section number=0 */
1000 c->operand[18] = 0x1f; /* PCR_PID=1FFF */
1001 c->operand[19] = 0xff;
1002 c->operand[20] = (program_info_length >> 8); /* Program info length */
1003 c->operand[21] = (program_info_length & 0xff);
1004
1005 /* CA descriptors at programme level */
1006 read_pos = 6;
1007 write_pos = 22;
1008 if (program_info_length > 0) {
1009 pmt_cmd_id = msg[read_pos++];
1010 if (pmt_cmd_id != 1 && pmt_cmd_id != 4)
1011 dev_err(fdtv->device,
1012 "invalid pmt_cmd_id %d\n", pmt_cmd_id);
1013
1014 memcpy(&c->operand[write_pos], &msg[read_pos],
1015 program_info_length);
1016 read_pos += program_info_length;
1017 write_pos += program_info_length;
1018 }
1019 while (read_pos < length) {
1020 c->operand[write_pos++] = msg[read_pos++];
1021 c->operand[write_pos++] = msg[read_pos++];
1022 c->operand[write_pos++] = msg[read_pos++];
1023 es_info_length =
1024 ((msg[read_pos] & 0x0f) << 8) + msg[read_pos + 1];
1025 read_pos += 2;
1026 if (es_info_length > 0)
1027 es_info_length--; /* Remove pmt_cmd_id */
1028 c->operand[write_pos++] = es_info_length >> 8;
1029 c->operand[write_pos++] = es_info_length & 0xff;
1030 if (es_info_length > 0) {
1031 pmt_cmd_id = msg[read_pos++];
1032 if (pmt_cmd_id != 1 && pmt_cmd_id != 4)
1033 dev_err(fdtv->device, "invalid pmt_cmd_id %d "
1034 "at stream level\n", pmt_cmd_id);
1035
1036 memcpy(&c->operand[write_pos], &msg[read_pos],
1037 es_info_length);
1038 read_pos += es_info_length;
1039 write_pos += es_info_length;
1040 }
1041 }
1042
1043 /* CRC */
1044 c->operand[write_pos++] = 0x00;
1045 c->operand[write_pos++] = 0x00;
1046 c->operand[write_pos++] = 0x00;
1047 c->operand[write_pos++] = 0x00;
1048
1049 c->operand[7] = write_pos - 8;
1050 c->operand[12] = write_pos - 13;
1051
1052 crc32_csum = crc32_be(0, &c->operand[10], c->operand[12] - 1);
1053 c->operand[write_pos - 4] = (crc32_csum >> 24) & 0xff;
1054 c->operand[write_pos - 3] = (crc32_csum >> 16) & 0xff;
1055 c->operand[write_pos - 2] = (crc32_csum >> 8) & 0xff;
1056 c->operand[write_pos - 1] = (crc32_csum >> 0) & 0xff;
1057
1058 c->length = ALIGN(3 + write_pos, 4);
1059
1060 if (avc_write(fdtv, c, r) < 0)
1061 return -EIO;
1062
1063 if (r->response != AVC_RESPONSE_ACCEPTED) {
1064 dev_err(fdtv->device,
1065 "CA PMT failed with response 0x%x\n", r->response);
1066 return -EFAULT;
1067 }
1068
1069 return 0;
1070}
1071
1072int avc_ca_get_time_date(struct firedtv *fdtv, int *interval)
1073{
1074 char buffer[sizeof(struct avc_command_frame)];
1075 struct avc_command_frame *c = (void *)buffer;
1076 struct avc_response_frame *r = (void *)buffer;
1077
1078 memset(c, 0, sizeof(*c));
1079
1080 c->ctype = AVC_CTYPE_STATUS;
1081 c->subunit = AVC_SUBUNIT_TYPE_TUNER | fdtv->subunit;
1082 c->opcode = AVC_OPCODE_VENDOR;
1083
1084 c->operand[0] = SFE_VENDOR_DE_COMPANYID_0;
1085 c->operand[1] = SFE_VENDOR_DE_COMPANYID_1;
1086 c->operand[2] = SFE_VENDOR_DE_COMPANYID_2;
1087 c->operand[3] = SFE_VENDOR_OPCODE_CA2HOST;
1088 c->operand[4] = 0; /* slot */
1089 c->operand[5] = SFE_VENDOR_TAG_CA_DATE_TIME; /* ca tag */
1090 c->operand[6] = 0; /* more/last */
1091 c->operand[7] = 0; /* length */
1092
1093 c->length = 12;
1094
1095 if (avc_write(fdtv, c, r) < 0)
1096 return -EIO;
1097
1098 /* FIXME: check response code and validate response data */
1099
1100 *interval = r->operand[get_ca_object_pos(r)];
1101
1102 return 0;
1103}
1104
1105int avc_ca_enter_menu(struct firedtv *fdtv)
1106{
1107 char buffer[sizeof(struct avc_command_frame)];
1108 struct avc_command_frame *c = (void *)buffer;
1109 struct avc_response_frame *r = (void *)buffer; /* FIXME: unused */
1110
1111 memset(c, 0, sizeof(*c));
1112
1113 c->ctype = AVC_CTYPE_STATUS;
1114 c->subunit = AVC_SUBUNIT_TYPE_TUNER | fdtv->subunit;
1115 c->opcode = AVC_OPCODE_VENDOR;
1116
1117 c->operand[0] = SFE_VENDOR_DE_COMPANYID_0;
1118 c->operand[1] = SFE_VENDOR_DE_COMPANYID_1;
1119 c->operand[2] = SFE_VENDOR_DE_COMPANYID_2;
1120 c->operand[3] = SFE_VENDOR_OPCODE_HOST2CA;
1121 c->operand[4] = 0; /* slot */
1122 c->operand[5] = SFE_VENDOR_TAG_CA_ENTER_MENU;
1123 c->operand[6] = 0; /* more/last */
1124 c->operand[7] = 0; /* length */
1125
1126 c->length = 12;
1127
1128 if (avc_write(fdtv, c, r) < 0)
1129 return -EIO;
1130
1131 return 0;
1132}
1133
1134int avc_ca_get_mmi(struct firedtv *fdtv, char *mmi_object, unsigned int *len)
1135{
1136 char buffer[sizeof(struct avc_command_frame)];
1137 struct avc_command_frame *c = (void *)buffer;
1138 struct avc_response_frame *r = (void *)buffer;
1139
1140 memset(c, 0, sizeof(*c));
1141
1142 c->ctype = AVC_CTYPE_STATUS;
1143 c->subunit = AVC_SUBUNIT_TYPE_TUNER | fdtv->subunit;
1144 c->opcode = AVC_OPCODE_VENDOR;
1145
1146 c->operand[0] = SFE_VENDOR_DE_COMPANYID_0;
1147 c->operand[1] = SFE_VENDOR_DE_COMPANYID_1;
1148 c->operand[2] = SFE_VENDOR_DE_COMPANYID_2;
1149 c->operand[3] = SFE_VENDOR_OPCODE_CA2HOST;
1150 c->operand[4] = 0; /* slot */
1151 c->operand[5] = SFE_VENDOR_TAG_CA_MMI;
1152 c->operand[6] = 0; /* more/last */
1153 c->operand[7] = 0; /* length */
1154
1155 c->length = 12;
1156
1157 if (avc_write(fdtv, c, r) < 0)
1158 return -EIO;
1159
1160 /* FIXME: check response code and validate response data */
1161
1162 *len = get_ca_object_length(r);
1163 memcpy(mmi_object, &r->operand[get_ca_object_pos(r)], *len);
1164
1165 return 0;
1166}
1167
1168#define CMP_OUTPUT_PLUG_CONTROL_REG_0 0xfffff0000904ULL
1169
1170static int cmp_read(struct firedtv *fdtv, void *buf, u64 addr, size_t len)
1171{
1172 int ret;
1173
1174 if (mutex_lock_interruptible(&fdtv->avc_mutex))
1175 return -EINTR;
1176
1177 ret = fdtv->backend->read(fdtv, addr, buf, len);
1178 if (ret < 0)
1179 dev_err(fdtv->device, "CMP: read I/O error\n");
1180
1181 mutex_unlock(&fdtv->avc_mutex);
1182 return ret;
1183}
1184
1185static int cmp_lock(struct firedtv *fdtv, void *data, u64 addr, __be32 arg)
1186{
1187 int ret;
1188
1189 if (mutex_lock_interruptible(&fdtv->avc_mutex))
1190 return -EINTR;
1191
1192 ret = fdtv->backend->lock(fdtv, addr, data, arg);
1193 if (ret < 0)
1194 dev_err(fdtv->device, "CMP: lock I/O error\n");
1195
1196 mutex_unlock(&fdtv->avc_mutex);
1197 return ret;
1198}
1199
1200static inline u32 get_opcr(__be32 opcr, u32 mask, u32 shift)
1201{
1202 return (be32_to_cpu(opcr) >> shift) & mask;
1203}
1204
1205static inline void set_opcr(__be32 *opcr, u32 value, u32 mask, u32 shift)
1206{
1207 *opcr &= ~cpu_to_be32(mask << shift);
1208 *opcr |= cpu_to_be32((value & mask) << shift);
1209}
1210
1211#define get_opcr_online(v) get_opcr((v), 0x1, 31)
1212#define get_opcr_p2p_connections(v) get_opcr((v), 0x3f, 24)
1213#define get_opcr_channel(v) get_opcr((v), 0x3f, 16)
1214
1215#define set_opcr_p2p_connections(p, v) set_opcr((p), (v), 0x3f, 24)
1216#define set_opcr_channel(p, v) set_opcr((p), (v), 0x3f, 16)
1217#define set_opcr_data_rate(p, v) set_opcr((p), (v), 0x3, 14)
1218#define set_opcr_overhead_id(p, v) set_opcr((p), (v), 0xf, 10)
1219
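The helpers above treat the IEC 61883 output plug control register (oPCR) as packed bit fields in a 32-bit word: online in bit 31, point-to-point connection count in bits 29..24, channel in bits 21..16, data rate and overhead ID below that. A host-order sketch of the same mask-and-shift arithmetic, leaving the big-endian conversion aside:

#include <stdint.h>
#include <stdio.h>

static uint32_t get_field(uint32_t reg, uint32_t mask, unsigned int shift)
{
	return (reg >> shift) & mask;
}

static void set_field(uint32_t *reg, uint32_t val, uint32_t mask,
		      unsigned int shift)
{
	*reg &= ~(mask << shift);
	*reg |= (val & mask) << shift;
}

int main(void)
{
	uint32_t opcr = 0;

	set_field(&opcr, 1, 0x1, 31);	/* online */
	set_field(&opcr, 1, 0x3f, 24);	/* one p2p connection */
	set_field(&opcr, 42, 0x3f, 16);	/* iso channel 42 */
	set_field(&opcr, 2, 0x3, 14);	/* S400 data rate */

	printf("opcr=0x%08x channel=%u p2p=%u\n", opcr,
	       get_field(opcr, 0x3f, 16), get_field(opcr, 0x3f, 24));
	return 0;
}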
1220int cmp_establish_pp_connection(struct firedtv *fdtv, int plug, int channel)
1221{
1222 __be32 old_opcr, opcr;
1223 u64 opcr_address = CMP_OUTPUT_PLUG_CONTROL_REG_0 + (plug << 2);
1224 int attempts = 0;
1225 int ret;
1226
1227 ret = cmp_read(fdtv, &opcr, opcr_address, 4);
1228 if (ret < 0)
1229 return ret;
1230
1231repeat:
1232 if (!get_opcr_online(opcr)) {
1233 dev_err(fdtv->device, "CMP: output offline\n");
1234 return -EBUSY;
1235 }
1236
1237 old_opcr = opcr;
1238
1239 if (get_opcr_p2p_connections(opcr)) {
1240 if (get_opcr_channel(opcr) != channel) {
1241 dev_err(fdtv->device, "CMP: cannot change channel\n");
1242 return -EBUSY;
1243 }
1244 dev_info(fdtv->device, "CMP: overlaying connection\n");
1245
1246 /* We don't allocate isochronous resources. */
1247 } else {
1248 set_opcr_channel(&opcr, channel);
1249 set_opcr_data_rate(&opcr, 2); /* S400 */
1250
1251 /* FIXME: this is for the worst case - optimize */
1252 set_opcr_overhead_id(&opcr, 0);
1253
1254 /*
1255 * FIXME: allocate isochronous channel and bandwidth at IRM
1256 * fdtv->backend->alloc_resources(fdtv, channels_mask, bw);
1257 */
1258 }
1259
1260 set_opcr_p2p_connections(&opcr, get_opcr_p2p_connections(opcr) + 1);
1261
1262 ret = cmp_lock(fdtv, &opcr, opcr_address, old_opcr);
1263 if (ret < 0)
1264 return ret;
1265
1266 if (old_opcr != opcr) {
1267 /*
1268 * FIXME: if old_opcr.P2P_Connections > 0,
1269 * deallocate isochronous channel and bandwidth at IRM
1270 * if (...)
1271 * fdtv->backend->dealloc_resources(fdtv, channel, bw);
1272 */
1273
1274 if (++attempts < 6) /* arbitrary limit */
1275 goto repeat;
1276 return -EBUSY;
1277 }
1278
1279 return 0;
1280}
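cmp_establish_pp_connection() updates the oPCR with a read/modify/compare-swap loop: if another node changed the register between the read and the lock, the lock returns the current value and the whole computation is redone, up to an arbitrary retry limit. A generic sketch of that pattern with a simulated lock primitive standing in for the 1394 lock request:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t shared_reg = 5;	/* stands in for the remote oPCR */

/* Compare-swap: if *cur matches the register, install newval and return
 * true; otherwise copy the current register value back into *cur. */
static bool reg_compare_swap(uint32_t *cur, uint32_t newval)
{
	if (shared_reg == *cur) {
		shared_reg = newval;
		return true;
	}
	*cur = shared_reg;
	return false;
}

static int bump_connection_count(void)
{
	uint32_t old = shared_reg;	/* initial read */
	int attempts = 0;

	do {
		uint32_t updated = old + 1;	/* recompute from the latest value */

		if (reg_compare_swap(&old, updated))
			return 0;
	} while (++attempts < 6);	/* arbitrary limit, as in the driver */

	return -1;			/* still contended, give up */
}

int main(void)
{
	printf("result=%d reg=%u\n", bump_connection_count(), shared_reg);
	return 0;
}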
1281
1282void cmp_break_pp_connection(struct firedtv *fdtv, int plug, int channel)
1283{
1284 __be32 old_opcr, opcr;
1285 u64 opcr_address = CMP_OUTPUT_PLUG_CONTROL_REG_0 + (plug << 2);
1286 int attempts = 0;
1287
1288 if (cmp_read(fdtv, &opcr, opcr_address, 4) < 0)
1289 return;
1290
1291repeat:
1292 if (!get_opcr_online(opcr) || !get_opcr_p2p_connections(opcr) ||
1293 get_opcr_channel(opcr) != channel) {
1294 dev_err(fdtv->device, "CMP: no connection to break\n");
1295 return;
1296 }
1297
1298 old_opcr = opcr;
1299 set_opcr_p2p_connections(&opcr, get_opcr_p2p_connections(opcr) - 1);
1300
1301 if (cmp_lock(fdtv, &opcr, opcr_address, old_opcr) < 0)
1302 return;
1303
1304 if (old_opcr != opcr) {
1305 /*
1306 * FIXME: if old_opcr.P2P_Connections == 1, i.e. we were last
1307 * owner, deallocate isochronous channel and bandwidth at IRM
1308 * if (...)
1309 * fdtv->backend->dealloc_resources(fdtv, channel, bw);
1310 */
1311
1312 if (++attempts < 6) /* arbitrary limit */
1313 goto repeat;
1314 }
1315}
diff --git a/drivers/media/dvb/firewire/firedtv-ci.c b/drivers/media/dvb/firewire/firedtv-ci.c
new file mode 100644
index 000000000000..eeb80d0ea3ff
--- /dev/null
+++ b/drivers/media/dvb/firewire/firedtv-ci.c
@@ -0,0 +1,260 @@
1/*
2 * FireDTV driver (formerly known as FireSAT)
3 *
4 * Copyright (C) 2004 Andreas Monitzer <andy@monitzer.com>
5 * Copyright (C) 2008 Henrik Kurelid <henrik@kurelid.se>
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License as
9 * published by the Free Software Foundation; either version 2 of
10 * the License, or (at your option) any later version.
11 */
12
13#include <linux/device.h>
14#include <linux/dvb/ca.h>
15#include <linux/fs.h>
16#include <linux/module.h>
17
18#include <dvbdev.h>
19
20#include "firedtv.h"
21
22#define EN50221_TAG_APP_INFO_ENQUIRY 0x9f8020
23#define EN50221_TAG_CA_INFO_ENQUIRY 0x9f8030
24#define EN50221_TAG_CA_PMT 0x9f8032
25#define EN50221_TAG_ENTER_MENU 0x9f8022
26
27static int fdtv_ca_ready(struct firedtv_tuner_status *stat)
28{
29 return stat->ca_initialization_status == 1 &&
30 stat->ca_error_flag == 0 &&
31 stat->ca_dvb_flag == 1 &&
32 stat->ca_module_present_status == 1;
33}
34
35static int fdtv_get_ca_flags(struct firedtv_tuner_status *stat)
36{
37 int flags = 0;
38
39 if (stat->ca_module_present_status == 1)
40 flags |= CA_CI_MODULE_PRESENT;
41 if (stat->ca_initialization_status == 1 &&
42 stat->ca_error_flag == 0 &&
43 stat->ca_dvb_flag == 1)
44 flags |= CA_CI_MODULE_READY;
45 return flags;
46}
47
48static int fdtv_ca_reset(struct firedtv *fdtv)
49{
50 return avc_ca_reset(fdtv) ? -EFAULT : 0;
51}
52
53static int fdtv_ca_get_caps(void *arg)
54{
55 struct ca_caps *cap = arg;
56
57 cap->slot_num = 1;
58 cap->slot_type = CA_CI;
59 cap->descr_num = 1;
60 cap->descr_type = CA_ECD;
61 return 0;
62}
63
64static int fdtv_ca_get_slot_info(struct firedtv *fdtv, void *arg)
65{
66 struct firedtv_tuner_status stat;
67 struct ca_slot_info *slot = arg;
68
69 if (avc_tuner_status(fdtv, &stat))
70 return -EFAULT;
71
72 if (slot->num != 0)
73 return -EFAULT;
74
75 slot->type = CA_CI;
76 slot->flags = fdtv_get_ca_flags(&stat);
77 return 0;
78}
79
80static int fdtv_ca_app_info(struct firedtv *fdtv, void *arg)
81{
82 struct ca_msg *reply = arg;
83
84 return avc_ca_app_info(fdtv, reply->msg, &reply->length) ? -EFAULT : 0;
85}
86
87static int fdtv_ca_info(struct firedtv *fdtv, void *arg)
88{
89 struct ca_msg *reply = arg;
90
91 return avc_ca_info(fdtv, reply->msg, &reply->length) ? -EFAULT : 0;
92}
93
94static int fdtv_ca_get_mmi(struct firedtv *fdtv, void *arg)
95{
96 struct ca_msg *reply = arg;
97
98 return avc_ca_get_mmi(fdtv, reply->msg, &reply->length) ? -EFAULT : 0;
99}
100
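/*
 * CA_GET_MSG returns the answer to whatever enquiry was last issued via
 * CA_SEND_MSG (recorded in ca_last_command); if nothing is pending, poll
 * the tuner and fetch an MMI object instead.
 */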
101static int fdtv_ca_get_msg(struct firedtv *fdtv, void *arg)
102{
103 struct firedtv_tuner_status stat;
104 int err;
105
106 switch (fdtv->ca_last_command) {
107 case EN50221_TAG_APP_INFO_ENQUIRY:
108 err = fdtv_ca_app_info(fdtv, arg);
109 break;
110 case EN50221_TAG_CA_INFO_ENQUIRY:
111 err = fdtv_ca_info(fdtv, arg);
112 break;
113 default:
114 if (avc_tuner_status(fdtv, &stat))
115 err = -EFAULT;
116 else if (stat.ca_mmi == 1)
117 err = fdtv_ca_get_mmi(fdtv, arg);
118 else {
119 dev_info(fdtv->device, "unhandled CA message 0x%08x\n",
120 fdtv->ca_last_command);
121 err = -EFAULT;
122 }
123 }
124 fdtv->ca_last_command = 0;
125 return err;
126}
127
128static int fdtv_ca_pmt(struct firedtv *fdtv, void *arg)
129{
130 struct ca_msg *msg = arg;
131 int data_pos;
132 int data_length;
133 int i;
134
135 data_pos = 4;
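	/*
	 * The length field at msg[3] uses ASN.1 BER coding: if the top bit
	 * is set, the low seven bits give the number of length bytes that
	 * follow; otherwise the byte itself is the object length.
	 */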
136 if (msg->msg[3] & 0x80) {
137 data_length = 0;
138 for (i = 0; i < (msg->msg[3] & 0x7f); i++)
139 data_length = (data_length << 8) + msg->msg[data_pos++];
140 } else {
141 data_length = msg->msg[3];
142 }
143
144 return avc_ca_pmt(fdtv, &msg->msg[data_pos], data_length) ? -EFAULT : 0;
145}
146
147static int fdtv_ca_send_msg(struct firedtv *fdtv, void *arg)
148{
149 struct ca_msg *msg = arg;
150 int err;
151
152 /* Do we need a semaphore for this? */
153 fdtv->ca_last_command =
154 (msg->msg[0] << 16) + (msg->msg[1] << 8) + msg->msg[2];
155 switch (fdtv->ca_last_command) {
156 case EN50221_TAG_CA_PMT:
157 err = fdtv_ca_pmt(fdtv, arg);
158 break;
159 case EN50221_TAG_APP_INFO_ENQUIRY:
160 /* handled in ca_get_msg */
161 err = 0;
162 break;
163 case EN50221_TAG_CA_INFO_ENQUIRY:
164 /* handled in ca_get_msg */
165 err = 0;
166 break;
167 case EN50221_TAG_ENTER_MENU:
168 err = avc_ca_enter_menu(fdtv);
169 break;
170 default:
171 dev_err(fdtv->device, "unhandled CA message 0x%08x\n",
172 fdtv->ca_last_command);
173 err = -EFAULT;
174 }
175 return err;
176}
177
178static int fdtv_ca_ioctl(struct inode *inode, struct file *file,
179 unsigned int cmd, void *arg)
180{
181 struct dvb_device *dvbdev = file->private_data;
182 struct firedtv *fdtv = dvbdev->priv;
183 struct firedtv_tuner_status stat;
184 int err;
185
186 switch (cmd) {
187 case CA_RESET:
188 err = fdtv_ca_reset(fdtv);
189 break;
190 case CA_GET_CAP:
191 err = fdtv_ca_get_caps(arg);
192 break;
193 case CA_GET_SLOT_INFO:
194 err = fdtv_ca_get_slot_info(fdtv, arg);
195 break;
196 case CA_GET_MSG:
197 err = fdtv_ca_get_msg(fdtv, arg);
198 break;
199 case CA_SEND_MSG:
200 err = fdtv_ca_send_msg(fdtv, arg);
201 break;
202 default:
203 dev_info(fdtv->device, "unhandled CA ioctl %u\n", cmd);
204 err = -EOPNOTSUPP;
205 }
206
207 /* FIXME Is this necessary? */
208 avc_tuner_status(fdtv, &stat);
209
210 return err;
211}
212
213static unsigned int fdtv_ca_io_poll(struct file *file, poll_table *wait)
214{
215 return POLLIN;
216}
217
218static struct file_operations fdtv_ca_fops = {
219 .owner = THIS_MODULE,
220 .ioctl = dvb_generic_ioctl,
221 .open = dvb_generic_open,
222 .release = dvb_generic_release,
223 .poll = fdtv_ca_io_poll,
224};
225
226static struct dvb_device fdtv_ca = {
227 .users = 1,
228 .readers = 1,
229 .writers = 1,
230 .fops = &fdtv_ca_fops,
231 .kernel_ioctl = fdtv_ca_ioctl,
232};
233
234int fdtv_ca_register(struct firedtv *fdtv)
235{
236 struct firedtv_tuner_status stat;
237 int err;
238
239 if (avc_tuner_status(fdtv, &stat))
240 return -EINVAL;
241
242 if (!fdtv_ca_ready(&stat))
243 return -EFAULT;
244
245 err = dvb_register_device(&fdtv->adapter, &fdtv->cadev,
246 &fdtv_ca, fdtv, DVB_DEVICE_CA);
247
248 if (stat.ca_application_info == 0)
249 dev_err(fdtv->device, "CaApplicationInfo is not set\n");
250 if (stat.ca_date_time_request == 1)
251 avc_ca_get_time_date(fdtv, &fdtv->ca_time_interval);
252
253 return err;
254}
255
256void fdtv_ca_release(struct firedtv *fdtv)
257{
258 if (fdtv->cadev)
259 dvb_unregister_device(fdtv->cadev);
260}
diff --git a/drivers/media/dvb/firewire/firedtv-dvb.c b/drivers/media/dvb/firewire/firedtv-dvb.c
new file mode 100644
index 000000000000..9d308dd32a5c
--- /dev/null
+++ b/drivers/media/dvb/firewire/firedtv-dvb.c
@@ -0,0 +1,364 @@
1/*
2 * FireDTV driver (formerly known as FireSAT)
3 *
4 * Copyright (C) 2004 Andreas Monitzer <andy@monitzer.com>
5 * Copyright (C) 2008 Henrik Kurelid <henrik@kurelid.se>
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License as
9 * published by the Free Software Foundation; either version 2 of
10 * the License, or (at your option) any later version.
11 */
12
13#include <linux/bitops.h>
14#include <linux/device.h>
15#include <linux/errno.h>
16#include <linux/kernel.h>
17#include <linux/mod_devicetable.h>
18#include <linux/module.h>
19#include <linux/mutex.h>
20#include <linux/slab.h>
21#include <linux/string.h>
22#include <linux/types.h>
23#include <linux/wait.h>
24#include <linux/workqueue.h>
25
26#include <dmxdev.h>
27#include <dvb_demux.h>
28#include <dvbdev.h>
29#include <dvb_frontend.h>
30
31#include "firedtv.h"
32
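/*
 * The driver tracks at most 16 concurrently filtered PIDs
 * (channel_active/channel_pid).  alloc_channel() hands out the first free
 * slot and returns 16 when all slots are taken; callers treat any index
 * above 15 as "busy".
 */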
33static int alloc_channel(struct firedtv *fdtv)
34{
35 int i;
36
37 for (i = 0; i < 16; i++)
38 if (!__test_and_set_bit(i, &fdtv->channel_active))
39 break;
40 return i;
41}
42
43static void collect_channels(struct firedtv *fdtv, int *pidc, u16 pid[])
44{
45 int i, n;
46
47 for (i = 0, n = 0; i < 16; i++)
48 if (test_bit(i, &fdtv->channel_active))
49 pid[n++] = fdtv->channel_pid[i];
50 *pidc = n;
51}
52
53static inline void dealloc_channel(struct firedtv *fdtv, int i)
54{
55 __clear_bit(i, &fdtv->channel_active);
56}
57
58int fdtv_start_feed(struct dvb_demux_feed *dvbdmxfeed)
59{
60 struct firedtv *fdtv = dvbdmxfeed->demux->priv;
61 int pidc, c, ret;
62 u16 pids[16];
63
64 switch (dvbdmxfeed->type) {
65 case DMX_TYPE_TS:
66 case DMX_TYPE_SEC:
67 break;
68 default:
69 dev_err(fdtv->device, "can't start dmx feed: invalid type %u\n",
70 dvbdmxfeed->type);
71 return -EINVAL;
72 }
73
74 if (mutex_lock_interruptible(&fdtv->demux_mutex))
75 return -EINTR;
76
77 if (dvbdmxfeed->type == DMX_TYPE_TS) {
78 switch (dvbdmxfeed->pes_type) {
79 case DMX_TS_PES_VIDEO:
80 case DMX_TS_PES_AUDIO:
81 case DMX_TS_PES_TELETEXT:
82 case DMX_TS_PES_PCR:
83 case DMX_TS_PES_OTHER:
84 c = alloc_channel(fdtv);
85 break;
86 default:
87 dev_err(fdtv->device,
88 "can't start dmx feed: invalid pes type %u\n",
89 dvbdmxfeed->pes_type);
90 ret = -EINVAL;
91 goto out;
92 }
93 } else {
94 c = alloc_channel(fdtv);
95 }
96
97 if (c > 15) {
98 dev_err(fdtv->device, "can't start dmx feed: busy\n");
99 ret = -EBUSY;
100 goto out;
101 }
102
103 dvbdmxfeed->priv = (typeof(dvbdmxfeed->priv))(unsigned long)c;
104 fdtv->channel_pid[c] = dvbdmxfeed->pid;
105 collect_channels(fdtv, &pidc, pids);
106
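	/*
	 * PID 0x2000 (8192) is the DVB API convention for "deliver the whole
	 * transport stream", so request full TS output instead of programming
	 * individual PID filters.
	 */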
107 if (dvbdmxfeed->pid == 8192) {
108 ret = avc_tuner_get_ts(fdtv);
109 if (ret) {
110 dealloc_channel(fdtv, c);
111 dev_err(fdtv->device, "can't get TS\n");
112 goto out;
113 }
114 } else {
115 ret = avc_tuner_set_pids(fdtv, pidc, pids);
116 if (ret) {
117 dealloc_channel(fdtv, c);
118 dev_err(fdtv->device, "can't set PIDs\n");
119 goto out;
120 }
121 }
122out:
123 mutex_unlock(&fdtv->demux_mutex);
124
125 return ret;
126}
127
128int fdtv_stop_feed(struct dvb_demux_feed *dvbdmxfeed)
129{
130 struct dvb_demux *demux = dvbdmxfeed->demux;
131 struct firedtv *fdtv = demux->priv;
132 int pidc, c, ret;
133 u16 pids[16];
134
135 if (dvbdmxfeed->type == DMX_TYPE_TS &&
136 !((dvbdmxfeed->ts_type & TS_PACKET) &&
137 (demux->dmx.frontend->source != DMX_MEMORY_FE))) {
138
139 if (dvbdmxfeed->ts_type & TS_DECODER) {
140 if (dvbdmxfeed->pes_type >= DMX_TS_PES_OTHER ||
141 !demux->pesfilter[dvbdmxfeed->pes_type])
142 return -EINVAL;
143
144 demux->pids[dvbdmxfeed->pes_type] |= 0x8000;
145 demux->pesfilter[dvbdmxfeed->pes_type] = NULL;
146 }
147
148 if (!(dvbdmxfeed->ts_type & TS_DECODER &&
149 dvbdmxfeed->pes_type < DMX_TS_PES_OTHER))
150 return 0;
151 }
152
153 if (mutex_lock_interruptible(&fdtv->demux_mutex))
154 return -EINTR;
155
156 c = (unsigned long)dvbdmxfeed->priv;
157 dealloc_channel(fdtv, c);
158 collect_channels(fdtv, &pidc, pids);
159
160 ret = avc_tuner_set_pids(fdtv, pidc, pids);
161
162 mutex_unlock(&fdtv->demux_mutex);
163
164 return ret;
165}
166
167DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
168
169int fdtv_dvb_register(struct firedtv *fdtv)
170{
171 int err;
172
173 err = dvb_register_adapter(&fdtv->adapter, fdtv_model_names[fdtv->type],
174 THIS_MODULE, fdtv->device, adapter_nr);
175 if (err < 0)
176 goto fail_log;
177
178 /*DMX_TS_FILTERING | DMX_SECTION_FILTERING*/
179 fdtv->demux.dmx.capabilities = 0;
180
181 fdtv->demux.priv = fdtv;
182 fdtv->demux.filternum = 16;
183 fdtv->demux.feednum = 16;
184 fdtv->demux.start_feed = fdtv_start_feed;
185 fdtv->demux.stop_feed = fdtv_stop_feed;
186 fdtv->demux.write_to_decoder = NULL;
187
188 err = dvb_dmx_init(&fdtv->demux);
189 if (err)
190 goto fail_unreg_adapter;
191
192 fdtv->dmxdev.filternum = 16;
193 fdtv->dmxdev.demux = &fdtv->demux.dmx;
194 fdtv->dmxdev.capabilities = 0;
195
196 err = dvb_dmxdev_init(&fdtv->dmxdev, &fdtv->adapter);
197 if (err)
198 goto fail_dmx_release;
199
200 fdtv->frontend.source = DMX_FRONTEND_0;
201
202 err = fdtv->demux.dmx.add_frontend(&fdtv->demux.dmx, &fdtv->frontend);
203 if (err)
204 goto fail_dmxdev_release;
205
206 err = fdtv->demux.dmx.connect_frontend(&fdtv->demux.dmx,
207 &fdtv->frontend);
208 if (err)
209 goto fail_rem_frontend;
210
211 dvb_net_init(&fdtv->adapter, &fdtv->dvbnet, &fdtv->demux.dmx);
212
213 fdtv_frontend_init(fdtv);
214 err = dvb_register_frontend(&fdtv->adapter, &fdtv->fe);
215 if (err)
216 goto fail_net_release;
217
218 err = fdtv_ca_register(fdtv);
219 if (err)
220 dev_info(fdtv->device,
221 "Conditional Access Module not enabled\n");
222 return 0;
223
224fail_net_release:
225 dvb_net_release(&fdtv->dvbnet);
226 fdtv->demux.dmx.close(&fdtv->demux.dmx);
227fail_rem_frontend:
228 fdtv->demux.dmx.remove_frontend(&fdtv->demux.dmx, &fdtv->frontend);
229fail_dmxdev_release:
230 dvb_dmxdev_release(&fdtv->dmxdev);
231fail_dmx_release:
232 dvb_dmx_release(&fdtv->demux);
233fail_unreg_adapter:
234 dvb_unregister_adapter(&fdtv->adapter);
235fail_log:
236 dev_err(fdtv->device, "DVB initialization failed\n");
237 return err;
238}
239
240void fdtv_dvb_unregister(struct firedtv *fdtv)
241{
242 fdtv_ca_release(fdtv);
243 dvb_unregister_frontend(&fdtv->fe);
244 dvb_net_release(&fdtv->dvbnet);
245 fdtv->demux.dmx.close(&fdtv->demux.dmx);
246 fdtv->demux.dmx.remove_frontend(&fdtv->demux.dmx, &fdtv->frontend);
247 dvb_dmxdev_release(&fdtv->dmxdev);
248 dvb_dmx_release(&fdtv->demux);
249 dvb_unregister_adapter(&fdtv->adapter);
250}
251
252const char *fdtv_model_names[] = {
253 [FIREDTV_UNKNOWN] = "unknown type",
254 [FIREDTV_DVB_S] = "FireDTV S/CI",
255 [FIREDTV_DVB_C] = "FireDTV C/CI",
256 [FIREDTV_DVB_T] = "FireDTV T/CI",
257 [FIREDTV_DVB_S2] = "FireDTV S2 ",
258};
259
260struct firedtv *fdtv_alloc(struct device *dev,
261 const struct firedtv_backend *backend,
262 const char *name, size_t name_len)
263{
264 struct firedtv *fdtv;
265 int i;
266
267 fdtv = kzalloc(sizeof(*fdtv), GFP_KERNEL);
268 if (!fdtv)
269 return NULL;
270
271 dev->driver_data = fdtv;
272 fdtv->device = dev;
273 fdtv->isochannel = -1;
274 fdtv->voltage = 0xff;
275 fdtv->tone = 0xff;
276 fdtv->backend = backend;
277
278 mutex_init(&fdtv->avc_mutex);
279 init_waitqueue_head(&fdtv->avc_wait);
280 fdtv->avc_reply_received = true;
281 mutex_init(&fdtv->demux_mutex);
282 INIT_WORK(&fdtv->remote_ctrl_work, avc_remote_ctrl_work);
283
284 for (i = ARRAY_SIZE(fdtv_model_names); --i; )
285 if (strlen(fdtv_model_names[i]) <= name_len &&
286 strncmp(name, fdtv_model_names[i], name_len) == 0)
287 break;
288 fdtv->type = i;
289
290 return fdtv;
291}
292
293#define MATCH_FLAGS (IEEE1394_MATCH_VENDOR_ID | IEEE1394_MATCH_MODEL_ID | \
294 IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION)
295
296#define DIGITAL_EVERYWHERE_OUI 0x001287
297#define AVC_UNIT_SPEC_ID_ENTRY 0x00a02d
298#define AVC_SW_VERSION_ENTRY 0x010001
299
300static struct ieee1394_device_id fdtv_id_table[] = {
301 {
302 /* FloppyDTV S/CI and FloppyDTV S2 */
303 .match_flags = MATCH_FLAGS,
304 .vendor_id = DIGITAL_EVERYWHERE_OUI,
305 .model_id = 0x000024,
306 .specifier_id = AVC_UNIT_SPEC_ID_ENTRY,
307 .version = AVC_SW_VERSION_ENTRY,
308 }, {
309 /* FloppyDTV T/CI */
310 .match_flags = MATCH_FLAGS,
311 .vendor_id = DIGITAL_EVERYWHERE_OUI,
312 .model_id = 0x000025,
313 .specifier_id = AVC_UNIT_SPEC_ID_ENTRY,
314 .version = AVC_SW_VERSION_ENTRY,
315 }, {
316 /* FloppyDTV C/CI */
317 .match_flags = MATCH_FLAGS,
318 .vendor_id = DIGITAL_EVERYWHERE_OUI,
319 .model_id = 0x000026,
320 .specifier_id = AVC_UNIT_SPEC_ID_ENTRY,
321 .version = AVC_SW_VERSION_ENTRY,
322 }, {
323 /* FireDTV S/CI and FloppyDTV S2 */
324 .match_flags = MATCH_FLAGS,
325 .vendor_id = DIGITAL_EVERYWHERE_OUI,
326 .model_id = 0x000034,
327 .specifier_id = AVC_UNIT_SPEC_ID_ENTRY,
328 .version = AVC_SW_VERSION_ENTRY,
329 }, {
330 /* FireDTV T/CI */
331 .match_flags = MATCH_FLAGS,
332 .vendor_id = DIGITAL_EVERYWHERE_OUI,
333 .model_id = 0x000035,
334 .specifier_id = AVC_UNIT_SPEC_ID_ENTRY,
335 .version = AVC_SW_VERSION_ENTRY,
336 }, {
337 /* FireDTV C/CI */
338 .match_flags = MATCH_FLAGS,
339 .vendor_id = DIGITAL_EVERYWHERE_OUI,
340 .model_id = 0x000036,
341 .specifier_id = AVC_UNIT_SPEC_ID_ENTRY,
342 .version = AVC_SW_VERSION_ENTRY,
343 }, {}
344};
345MODULE_DEVICE_TABLE(ieee1394, fdtv_id_table);
346
347static int __init fdtv_init(void)
348{
349 return fdtv_1394_init(fdtv_id_table);
350}
351
352static void __exit fdtv_exit(void)
353{
354 fdtv_1394_exit();
355}
356
357module_init(fdtv_init);
358module_exit(fdtv_exit);
359
360MODULE_AUTHOR("Andreas Monitzer <andy@monitzer.com>");
361MODULE_AUTHOR("Ben Backx <ben@bbackx.com>");
362MODULE_DESCRIPTION("FireDTV DVB Driver");
363MODULE_LICENSE("GPL");
364MODULE_SUPPORTED_DEVICE("FireDTV DVB");
diff --git a/drivers/media/dvb/firewire/firedtv-fe.c b/drivers/media/dvb/firewire/firedtv-fe.c
new file mode 100644
index 000000000000..7ba43630a25d
--- /dev/null
+++ b/drivers/media/dvb/firewire/firedtv-fe.c
@@ -0,0 +1,247 @@
1/*
2 * FireDTV driver (formerly known as FireSAT)
3 *
4 * Copyright (C) 2004 Andreas Monitzer <andy@monitzer.com>
5 * Copyright (C) 2008 Henrik Kurelid <henrik@kurelid.se>
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License as
9 * published by the Free Software Foundation; either version 2 of
10 * the License, or (at your option) any later version.
11 */
12
13#include <linux/device.h>
14#include <linux/errno.h>
15#include <linux/kernel.h>
16#include <linux/string.h>
17#include <linux/types.h>
18
19#include <dvb_frontend.h>
20
21#include "firedtv.h"
22
23static int fdtv_dvb_init(struct dvb_frontend *fe)
24{
25 struct firedtv *fdtv = fe->sec_priv;
26 int err;
27
28 /* FIXME - allocate free channel at IRM */
29 fdtv->isochannel = fdtv->adapter.num;
30
31 err = cmp_establish_pp_connection(fdtv, fdtv->subunit,
32 fdtv->isochannel);
33 if (err) {
34 dev_err(fdtv->device,
35 "could not establish point to point connection\n");
36 return err;
37 }
38
39 return fdtv->backend->start_iso(fdtv);
40}
41
42static int fdtv_sleep(struct dvb_frontend *fe)
43{
44 struct firedtv *fdtv = fe->sec_priv;
45
46 fdtv->backend->stop_iso(fdtv);
47 cmp_break_pp_connection(fdtv, fdtv->subunit, fdtv->isochannel);
48 fdtv->isochannel = -1;
49 return 0;
50}
51
52#define LNBCONTROL_DONTCARE 0xff
53
54static int fdtv_diseqc_send_master_cmd(struct dvb_frontend *fe,
55 struct dvb_diseqc_master_cmd *cmd)
56{
57 struct firedtv *fdtv = fe->sec_priv;
58
59 return avc_lnb_control(fdtv, LNBCONTROL_DONTCARE, LNBCONTROL_DONTCARE,
60 LNBCONTROL_DONTCARE, 1, cmd);
61}
62
63static int fdtv_diseqc_send_burst(struct dvb_frontend *fe,
64 fe_sec_mini_cmd_t minicmd)
65{
66 return 0;
67}
68
69static int fdtv_set_tone(struct dvb_frontend *fe, fe_sec_tone_mode_t tone)
70{
71 struct firedtv *fdtv = fe->sec_priv;
72
73 fdtv->tone = tone;
74 return 0;
75}
76
77static int fdtv_set_voltage(struct dvb_frontend *fe,
78 fe_sec_voltage_t voltage)
79{
80 struct firedtv *fdtv = fe->sec_priv;
81
82 fdtv->voltage = voltage;
83 return 0;
84}
85
86static int fdtv_read_status(struct dvb_frontend *fe, fe_status_t *status)
87{
88 struct firedtv *fdtv = fe->sec_priv;
89 struct firedtv_tuner_status stat;
90
91 if (avc_tuner_status(fdtv, &stat))
92 return -EINVAL;
93
94 if (stat.no_rf)
95 *status = 0;
96 else
97 *status = FE_HAS_SIGNAL | FE_HAS_VITERBI | FE_HAS_SYNC |
98 FE_HAS_CARRIER | FE_HAS_LOCK;
99 return 0;
100}
101
102static int fdtv_read_ber(struct dvb_frontend *fe, u32 *ber)
103{
104 struct firedtv *fdtv = fe->sec_priv;
105 struct firedtv_tuner_status stat;
106
107 if (avc_tuner_status(fdtv, &stat))
108 return -EINVAL;
109
110 *ber = stat.ber;
111 return 0;
112}
113
114static int fdtv_read_signal_strength(struct dvb_frontend *fe, u16 *strength)
115{
116 struct firedtv *fdtv = fe->sec_priv;
117 struct firedtv_tuner_status stat;
118
119 if (avc_tuner_status(fdtv, &stat))
120 return -EINVAL;
121
122 *strength = stat.signal_strength << 8;
123 return 0;
124}
125
126static int fdtv_read_snr(struct dvb_frontend *fe, u16 *snr)
127{
128 struct firedtv *fdtv = fe->sec_priv;
129 struct firedtv_tuner_status stat;
130
131 if (avc_tuner_status(fdtv, &stat))
132 return -EINVAL;
133
134 /* C/N[dB] = -10 * log10(snr / 65535) */
135 *snr = stat.carrier_noise_ratio * 257;
136 return 0;
137}
138
139static int fdtv_read_uncorrected_blocks(struct dvb_frontend *fe, u32 *ucblocks)
140{
141 return -EOPNOTSUPP;
142}
143
144#define ACCEPTED 0x9
145
146static int fdtv_set_frontend(struct dvb_frontend *fe,
147 struct dvb_frontend_parameters *params)
148{
149 struct firedtv *fdtv = fe->sec_priv;
150
151 /* FIXME: avc_tuner_dsd never returns ACCEPTED. Check status? */
152 if (avc_tuner_dsd(fdtv, params) != ACCEPTED)
153 return -EINVAL;
154 else
155 return 0; /* not sure of this... */
156}
157
158static int fdtv_get_frontend(struct dvb_frontend *fe,
159 struct dvb_frontend_parameters *params)
160{
161 return -EOPNOTSUPP;
162}
163
164void fdtv_frontend_init(struct firedtv *fdtv)
165{
166 struct dvb_frontend_ops *ops = &fdtv->fe.ops;
167 struct dvb_frontend_info *fi = &ops->info;
168
169 ops->init = fdtv_dvb_init;
170 ops->sleep = fdtv_sleep;
171
172 ops->set_frontend = fdtv_set_frontend;
173 ops->get_frontend = fdtv_get_frontend;
174
175 ops->read_status = fdtv_read_status;
176 ops->read_ber = fdtv_read_ber;
177 ops->read_signal_strength = fdtv_read_signal_strength;
178 ops->read_snr = fdtv_read_snr;
179 ops->read_ucblocks = fdtv_read_uncorrected_blocks;
180
181 ops->diseqc_send_master_cmd = fdtv_diseqc_send_master_cmd;
182 ops->diseqc_send_burst = fdtv_diseqc_send_burst;
183 ops->set_tone = fdtv_set_tone;
184 ops->set_voltage = fdtv_set_voltage;
185
186 switch (fdtv->type) {
187 case FIREDTV_DVB_S:
188 case FIREDTV_DVB_S2:
189 fi->type = FE_QPSK;
190
191 fi->frequency_min = 950000;
192 fi->frequency_max = 2150000;
193 fi->frequency_stepsize = 125;
194 fi->symbol_rate_min = 1000000;
195 fi->symbol_rate_max = 40000000;
196
197 fi->caps = FE_CAN_INVERSION_AUTO |
198 FE_CAN_FEC_1_2 |
199 FE_CAN_FEC_2_3 |
200 FE_CAN_FEC_3_4 |
201 FE_CAN_FEC_5_6 |
202 FE_CAN_FEC_7_8 |
203 FE_CAN_FEC_AUTO |
204 FE_CAN_QPSK;
205 break;
206
207 case FIREDTV_DVB_C:
208 fi->type = FE_QAM;
209
210 fi->frequency_min = 47000000;
211 fi->frequency_max = 866000000;
212 fi->frequency_stepsize = 62500;
213 fi->symbol_rate_min = 870000;
214 fi->symbol_rate_max = 6900000;
215
216 fi->caps = FE_CAN_INVERSION_AUTO |
217 FE_CAN_QAM_16 |
218 FE_CAN_QAM_32 |
219 FE_CAN_QAM_64 |
220 FE_CAN_QAM_128 |
221 FE_CAN_QAM_256 |
222 FE_CAN_QAM_AUTO;
223 break;
224
225 case FIREDTV_DVB_T:
226 fi->type = FE_OFDM;
227
228 fi->frequency_min = 49000000;
229 fi->frequency_max = 861000000;
230 fi->frequency_stepsize = 62500;
231
232 fi->caps = FE_CAN_INVERSION_AUTO |
233 FE_CAN_FEC_2_3 |
234 FE_CAN_TRANSMISSION_MODE_AUTO |
235 FE_CAN_GUARD_INTERVAL_AUTO |
236 FE_CAN_HIERARCHY_AUTO;
237 break;
238
239 default:
240 dev_err(fdtv->device, "no frontend for model type %d\n",
241 fdtv->type);
242 }
243 strcpy(fi->name, fdtv_model_names[fdtv->type]);
244
245 fdtv->fe.dvb = &fdtv->adapter;
246 fdtv->fe.sec_priv = fdtv;
247}
diff --git a/drivers/media/dvb/firewire/firedtv-rc.c b/drivers/media/dvb/firewire/firedtv-rc.c
new file mode 100644
index 000000000000..46a6324d7b73
--- /dev/null
+++ b/drivers/media/dvb/firewire/firedtv-rc.c
@@ -0,0 +1,190 @@
1/*
2 * FireDTV driver (formerly known as FireSAT)
3 *
4 * Copyright (C) 2004 Andreas Monitzer <andy@monitzer.com>
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License as
8 * published by the Free Software Foundation; either version 2 of
9 * the License, or (at your option) any later version.
10 */
11
12#include <linux/bitops.h>
13#include <linux/input.h>
14#include <linux/kernel.h>
15#include <linux/string.h>
16#include <linux/types.h>
17
18#include "firedtv.h"
19
20/* fixed table with older keycodes, geared towards MythTV */
 21static const u16 oldtable[] = {
22
23 /* code from device: 0x4501...0x451f */
24
25 KEY_ESC,
26 KEY_F9,
27 KEY_1,
28 KEY_2,
29 KEY_3,
30 KEY_4,
31 KEY_5,
32 KEY_6,
33 KEY_7,
34 KEY_8,
35 KEY_9,
36 KEY_I,
37 KEY_0,
38 KEY_ENTER,
39 KEY_RED,
40 KEY_UP,
41 KEY_GREEN,
42 KEY_F10,
43 KEY_SPACE,
44 KEY_F11,
45 KEY_YELLOW,
46 KEY_DOWN,
47 KEY_BLUE,
48 KEY_Z,
49 KEY_P,
50 KEY_PAGEDOWN,
51 KEY_LEFT,
52 KEY_W,
53 KEY_RIGHT,
54 KEY_P,
55 KEY_M,
56
57 /* code from device: 0x4540...0x4542 */
58
59 KEY_R,
60 KEY_V,
61 KEY_C,
62};
63
64/* user-modifiable table for a remote as sold in 2008 */
 65static const u16 keytable[] = {
66
67 /* code from device: 0x0300...0x031f */
68
69 [0x00] = KEY_POWER,
70 [0x01] = KEY_SLEEP,
71 [0x02] = KEY_STOP,
72 [0x03] = KEY_OK,
73 [0x04] = KEY_RIGHT,
74 [0x05] = KEY_1,
75 [0x06] = KEY_2,
76 [0x07] = KEY_3,
77 [0x08] = KEY_LEFT,
78 [0x09] = KEY_4,
79 [0x0a] = KEY_5,
80 [0x0b] = KEY_6,
81 [0x0c] = KEY_UP,
82 [0x0d] = KEY_7,
83 [0x0e] = KEY_8,
84 [0x0f] = KEY_9,
85 [0x10] = KEY_DOWN,
86 [0x11] = KEY_TITLE, /* "OSD" - fixme */
87 [0x12] = KEY_0,
88 [0x13] = KEY_F20, /* "16:9" - fixme */
89 [0x14] = KEY_SCREEN, /* "FULL" - fixme */
90 [0x15] = KEY_MUTE,
91 [0x16] = KEY_SUBTITLE,
92 [0x17] = KEY_RECORD,
93 [0x18] = KEY_TEXT,
94 [0x19] = KEY_AUDIO,
95 [0x1a] = KEY_RED,
96 [0x1b] = KEY_PREVIOUS,
97 [0x1c] = KEY_REWIND,
98 [0x1d] = KEY_PLAYPAUSE,
99 [0x1e] = KEY_NEXT,
100 [0x1f] = KEY_VOLUMEUP,
101
102 /* code from device: 0x0340...0x0354 */
103
104 [0x20] = KEY_CHANNELUP,
105 [0x21] = KEY_F21, /* "4:3" - fixme */
106 [0x22] = KEY_TV,
107 [0x23] = KEY_DVD,
108 [0x24] = KEY_VCR,
109 [0x25] = KEY_AUX,
110 [0x26] = KEY_GREEN,
111 [0x27] = KEY_YELLOW,
112 [0x28] = KEY_BLUE,
113 [0x29] = KEY_CHANNEL, /* "CH.LIST" */
114 [0x2a] = KEY_VENDOR, /* "CI" - fixme */
115 [0x2b] = KEY_VOLUMEDOWN,
116 [0x2c] = KEY_CHANNELDOWN,
117 [0x2d] = KEY_LAST,
118 [0x2e] = KEY_INFO,
119 [0x2f] = KEY_FORWARD,
120 [0x30] = KEY_LIST,
121 [0x31] = KEY_FAVORITES,
122 [0x32] = KEY_MENU,
123 [0x33] = KEY_EPG,
124 [0x34] = KEY_EXIT,
125};
126
127int fdtv_register_rc(struct firedtv *fdtv, struct device *dev)
128{
129 struct input_dev *idev;
130 int i, err;
131
132 idev = input_allocate_device();
133 if (!idev)
134 return -ENOMEM;
135
136 fdtv->remote_ctrl_dev = idev;
137 idev->name = "FireDTV remote control";
138 idev->dev.parent = dev;
139 idev->evbit[0] = BIT_MASK(EV_KEY);
140 idev->keycode = kmemdup(keytable, sizeof(keytable), GFP_KERNEL);
141 if (!idev->keycode) {
142 err = -ENOMEM;
143 goto fail;
144 }
145 idev->keycodesize = sizeof(keytable[0]);
146 idev->keycodemax = ARRAY_SIZE(keytable);
147
148 for (i = 0; i < ARRAY_SIZE(keytable); i++)
149 set_bit(keytable[i], idev->keybit);
150
151 err = input_register_device(idev);
152 if (err)
153 goto fail_free_keymap;
154
155 return 0;
156
157fail_free_keymap:
158 kfree(idev->keycode);
159fail:
160 input_free_device(idev);
161 return err;
162}
163
164void fdtv_unregister_rc(struct firedtv *fdtv)
165{
166 kfree(fdtv->remote_ctrl_dev->keycode);
167 input_unregister_device(fdtv->remote_ctrl_dev);
168}
169
170void fdtv_handle_rc(struct firedtv *fdtv, unsigned int code)
171{
172 u16 *keycode = fdtv->remote_ctrl_dev->keycode;
173
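	/*
	 * Scancode ranges 0x0300-0x031f and 0x0340-0x0354 index keytable
	 * (the second range continues at offset 0x20); 0x45xx codes come
	 * from older remotes and are looked up in the fixed oldtable.
	 */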
174 if (code >= 0x0300 && code <= 0x031f)
175 code = keycode[code - 0x0300];
176 else if (code >= 0x0340 && code <= 0x0354)
177 code = keycode[code - 0x0320];
178 else if (code >= 0x4501 && code <= 0x451f)
179 code = oldtable[code - 0x4501];
180 else if (code >= 0x4540 && code <= 0x4542)
181 code = oldtable[code - 0x4521];
182 else {
183 printk(KERN_DEBUG "firedtv: invalid key code 0x%04x "
184 "from remote control\n", code);
185 return;
186 }
187
188 input_report_key(fdtv->remote_ctrl_dev, code, 1);
189 input_report_key(fdtv->remote_ctrl_dev, code, 0);
190}
diff --git a/drivers/media/dvb/firewire/firedtv.h b/drivers/media/dvb/firewire/firedtv.h
new file mode 100644
index 000000000000..d48530b81e61
--- /dev/null
+++ b/drivers/media/dvb/firewire/firedtv.h
@@ -0,0 +1,182 @@
1/*
2 * FireDTV driver (formerly known as FireSAT)
3 *
4 * Copyright (C) 2004 Andreas Monitzer <andy@monitzer.com>
5 * Copyright (C) 2008 Henrik Kurelid <henrik@kurelid.se>
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License as
9 * published by the Free Software Foundation; either version 2 of
10 * the License, or (at your option) any later version.
11 */
12
13#ifndef _FIREDTV_H
14#define _FIREDTV_H
15
16#include <linux/dvb/dmx.h>
17#include <linux/dvb/frontend.h>
18#include <linux/list.h>
19#include <linux/mutex.h>
20#include <linux/spinlock_types.h>
21#include <linux/types.h>
22#include <linux/wait.h>
23#include <linux/workqueue.h>
24
25#include <demux.h>
26#include <dmxdev.h>
27#include <dvb_demux.h>
28#include <dvb_frontend.h>
29#include <dvb_net.h>
30#include <dvbdev.h>
31
32struct firedtv_tuner_status {
33 unsigned active_system:8;
34 unsigned searching:1;
35 unsigned moving:1;
36 unsigned no_rf:1;
37 unsigned input:1;
38 unsigned selected_antenna:7;
39 unsigned ber:32;
40 unsigned signal_strength:8;
41 unsigned raster_frequency:2;
42 unsigned rf_frequency:22;
43 unsigned man_dep_info_length:8;
44 unsigned front_end_error:1;
45 unsigned antenna_error:1;
46 unsigned front_end_power_status:1;
47 unsigned power_supply:1;
48 unsigned carrier_noise_ratio:16;
49 unsigned power_supply_voltage:8;
50 unsigned antenna_voltage:8;
51 unsigned firewire_bus_voltage:8;
52 unsigned ca_mmi:1;
53 unsigned ca_pmt_reply:1;
54 unsigned ca_date_time_request:1;
55 unsigned ca_application_info:1;
56 unsigned ca_module_present_status:1;
57 unsigned ca_dvb_flag:1;
58 unsigned ca_error_flag:1;
59 unsigned ca_initialization_status:1;
60};
61
62enum model_type {
63 FIREDTV_UNKNOWN = 0,
64 FIREDTV_DVB_S = 1,
65 FIREDTV_DVB_C = 2,
66 FIREDTV_DVB_T = 3,
67 FIREDTV_DVB_S2 = 4,
68};
69
70struct device;
71struct input_dev;
72struct firedtv;
73
74struct firedtv_backend {
75 int (*lock)(struct firedtv *fdtv, u64 addr, void *data, __be32 arg);
76 int (*read)(struct firedtv *fdtv, u64 addr, void *data, size_t len);
77 int (*write)(struct firedtv *fdtv, u64 addr, void *data, size_t len);
78 int (*start_iso)(struct firedtv *fdtv);
79 void (*stop_iso)(struct firedtv *fdtv);
80};
81
82struct firedtv {
83 struct device *device;
84 struct list_head list;
85
86 struct dvb_adapter adapter;
87 struct dmxdev dmxdev;
88 struct dvb_demux demux;
89 struct dmx_frontend frontend;
90 struct dvb_net dvbnet;
91 struct dvb_frontend fe;
92
93 struct dvb_device *cadev;
94 int ca_last_command;
95 int ca_time_interval;
96
97 struct mutex avc_mutex;
98 wait_queue_head_t avc_wait;
99 bool avc_reply_received;
100 struct work_struct remote_ctrl_work;
101 struct input_dev *remote_ctrl_dev;
102
103 enum model_type type;
104 char subunit;
105 char isochannel;
106 fe_sec_voltage_t voltage;
107 fe_sec_tone_mode_t tone;
108
109 const struct firedtv_backend *backend;
110 void *backend_data;
111
112 struct mutex demux_mutex;
113 unsigned long channel_active;
114 u16 channel_pid[16];
115
116 size_t response_length;
117 u8 response[512];
118};
119
120/* firedtv-1394.c */
121#ifdef CONFIG_DVB_FIREDTV_IEEE1394
122int fdtv_1394_init(struct ieee1394_device_id id_table[]);
123void fdtv_1394_exit(void);
124#else
125static inline int fdtv_1394_init(struct ieee1394_device_id it[]) { return 0; }
126static inline void fdtv_1394_exit(void) {}
127#endif
128
129/* firedtv-avc.c */
130int avc_recv(struct firedtv *fdtv, void *data, size_t length);
131int avc_tuner_status(struct firedtv *fdtv, struct firedtv_tuner_status *stat);
132struct dvb_frontend_parameters;
133int avc_tuner_dsd(struct firedtv *fdtv, struct dvb_frontend_parameters *params);
134int avc_tuner_set_pids(struct firedtv *fdtv, unsigned char pidc, u16 pid[]);
135int avc_tuner_get_ts(struct firedtv *fdtv);
136int avc_identify_subunit(struct firedtv *fdtv);
137struct dvb_diseqc_master_cmd;
138int avc_lnb_control(struct firedtv *fdtv, char voltage, char burst,
139 char conttone, char nrdiseq,
140 struct dvb_diseqc_master_cmd *diseqcmd);
141void avc_remote_ctrl_work(struct work_struct *work);
142int avc_register_remote_control(struct firedtv *fdtv);
143int avc_ca_app_info(struct firedtv *fdtv, char *app_info, unsigned int *len);
144int avc_ca_info(struct firedtv *fdtv, char *app_info, unsigned int *len);
145int avc_ca_reset(struct firedtv *fdtv);
146int avc_ca_pmt(struct firedtv *fdtv, char *app_info, int length);
147int avc_ca_get_time_date(struct firedtv *fdtv, int *interval);
148int avc_ca_enter_menu(struct firedtv *fdtv);
149int avc_ca_get_mmi(struct firedtv *fdtv, char *mmi_object, unsigned int *len);
150int cmp_establish_pp_connection(struct firedtv *fdtv, int plug, int channel);
151void cmp_break_pp_connection(struct firedtv *fdtv, int plug, int channel);
152
153/* firedtv-ci.c */
154int fdtv_ca_register(struct firedtv *fdtv);
155void fdtv_ca_release(struct firedtv *fdtv);
156
157/* firedtv-dvb.c */
158int fdtv_start_feed(struct dvb_demux_feed *dvbdmxfeed);
159int fdtv_stop_feed(struct dvb_demux_feed *dvbdmxfeed);
160int fdtv_dvb_register(struct firedtv *fdtv);
161void fdtv_dvb_unregister(struct firedtv *fdtv);
162struct firedtv *fdtv_alloc(struct device *dev,
163 const struct firedtv_backend *backend,
164 const char *name, size_t name_len);
165extern const char *fdtv_model_names[];
166
167/* firedtv-fe.c */
168void fdtv_frontend_init(struct firedtv *fdtv);
169
170/* firedtv-rc.c */
171#ifdef CONFIG_DVB_FIREDTV_INPUT
172int fdtv_register_rc(struct firedtv *fdtv, struct device *dev);
173void fdtv_unregister_rc(struct firedtv *fdtv);
174void fdtv_handle_rc(struct firedtv *fdtv, unsigned int code);
175#else
176static inline int fdtv_register_rc(struct firedtv *fdtv,
177 struct device *dev) { return 0; }
178static inline void fdtv_unregister_rc(struct firedtv *fdtv) {}
179static inline void fdtv_handle_rc(struct firedtv *fdtv, unsigned int code) {}
180#endif
181
182#endif /* _FIREDTV_H */
diff --git a/drivers/media/video/em28xx/em28xx-audio.c b/drivers/media/video/em28xx/em28xx-audio.c
index 5d882a44e3ee..2ac738fa6a07 100644
--- a/drivers/media/video/em28xx/em28xx-audio.c
+++ b/drivers/media/video/em28xx/em28xx-audio.c
@@ -463,6 +463,8 @@ static int em28xx_audio_init(struct em28xx *dev)
 	pcm->info_flags = 0;
 	pcm->private_data = dev;
 	strcpy(pcm->name, "Empia 28xx Capture");
+
+	snd_card_set_dev(card, &dev->udev->dev);
 	strcpy(card->driver, "Empia Em28xx Audio");
 	strcpy(card->shortname, "Em28xx Audio");
 	strcpy(card->longname, "Empia Em28xx Audio");
diff --git a/drivers/media/video/pxa_camera.c b/drivers/media/video/pxa_camera.c
index a1d6008efcbb..07c334f25aae 100644
--- a/drivers/media/video/pxa_camera.c
+++ b/drivers/media/video/pxa_camera.c
@@ -1155,23 +1155,23 @@ static int pxa_camera_set_fmt(struct soc_camera_device *icd,
 {
 	struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
 	struct pxa_camera_dev *pcdev = ici->priv;
-	const struct soc_camera_data_format *host_fmt, *cam_fmt = NULL;
-	const struct soc_camera_format_xlate *xlate;
+	const struct soc_camera_data_format *cam_fmt = NULL;
+	const struct soc_camera_format_xlate *xlate = NULL;
 	struct soc_camera_sense sense = {
 		.master_clock = pcdev->mclk,
 		.pixel_clock_max = pcdev->ciclk / 4,
 	};
-	int ret, buswidth;
+	int ret;
 
-	xlate = soc_camera_xlate_by_fourcc(icd, pixfmt);
-	if (!xlate) {
-		dev_warn(&ici->dev, "Format %x not found\n", pixfmt);
-		return -EINVAL;
-	}
+	if (pixfmt) {
+		xlate = soc_camera_xlate_by_fourcc(icd, pixfmt);
+		if (!xlate) {
+			dev_warn(&ici->dev, "Format %x not found\n", pixfmt);
+			return -EINVAL;
+		}
 
-	buswidth = xlate->buswidth;
-	host_fmt = xlate->host_fmt;
-	cam_fmt = xlate->cam_fmt;
+		cam_fmt = xlate->cam_fmt;
+	}
 
 	/* If PCLK is used to latch data from the sensor, check sense */
 	if (pcdev->platform_flags & PXA_CAMERA_PCLK_EN)
@@ -1201,8 +1201,8 @@ static int pxa_camera_set_fmt(struct soc_camera_device *icd,
 	}
 
 	if (pixfmt && !ret) {
-		icd->buswidth = buswidth;
-		icd->current_fmt = host_fmt;
+		icd->buswidth = xlate->buswidth;
+		icd->current_fmt = xlate->host_fmt;
 	}
 
 	return ret;
diff --git a/drivers/media/video/sh_mobile_ceu_camera.c b/drivers/media/video/sh_mobile_ceu_camera.c
index 9a2586b07a05..ddcb81d0b81a 100644
--- a/drivers/media/video/sh_mobile_ceu_camera.c
+++ b/drivers/media/video/sh_mobile_ceu_camera.c
@@ -603,21 +603,18 @@ static int sh_mobile_ceu_set_fmt(struct soc_camera_device *icd,
 	const struct soc_camera_format_xlate *xlate;
 	int ret;
 
+	if (!pixfmt)
+		return icd->ops->set_fmt(icd, pixfmt, rect);
+
 	xlate = soc_camera_xlate_by_fourcc(icd, pixfmt);
 	if (!xlate) {
 		dev_warn(&ici->dev, "Format %x not found\n", pixfmt);
 		return -EINVAL;
 	}
 
-	switch (pixfmt) {
-	case 0: /* Only geometry change */
-		ret = icd->ops->set_fmt(icd, pixfmt, rect);
-		break;
-	default:
-		ret = icd->ops->set_fmt(icd, xlate->cam_fmt->fourcc, rect);
-	}
+	ret = icd->ops->set_fmt(icd, xlate->cam_fmt->fourcc, rect);
 
-	if (pixfmt && !ret) {
+	if (!ret) {
 		icd->buswidth = xlate->buswidth;
 		icd->current_fmt = xlate->host_fmt;
 		pcdev->camera_fmt = xlate->cam_fmt;
diff --git a/drivers/media/video/uvc/uvc_status.c b/drivers/media/video/uvc/uvc_status.c
index c1e4ae27c613..c705f248da88 100644
--- a/drivers/media/video/uvc/uvc_status.c
+++ b/drivers/media/video/uvc/uvc_status.c
@@ -46,8 +46,8 @@ static int uvc_input_init(struct uvc_device *dev)
 	usb_to_input_id(udev, &input->id);
 	input->dev.parent = &dev->intf->dev;
 
-	set_bit(EV_KEY, input->evbit);
-	set_bit(BTN_0, input->keybit);
+	__set_bit(EV_KEY, input->evbit);
+	__set_bit(KEY_CAMERA, input->keybit);
 
 	if ((ret = input_register_device(input)) < 0)
 		goto error;
@@ -70,8 +70,10 @@ static void uvc_input_cleanup(struct uvc_device *dev)
 static void uvc_input_report_key(struct uvc_device *dev, unsigned int code,
 	int value)
 {
-	if (dev->input)
+	if (dev->input) {
 		input_report_key(dev->input, code, value);
+		input_sync(dev->input);
+	}
 }
 
 #else
@@ -96,7 +98,7 @@ static void uvc_event_streaming(struct uvc_device *dev, __u8 *data, int len)
 			return;
 		uvc_trace(UVC_TRACE_STATUS, "Button (intf %u) %s len %d\n",
 			data[1], data[3] ? "pressed" : "released", len);
-		uvc_input_report_key(dev, BTN_0, data[3]);
+		uvc_input_report_key(dev, KEY_CAMERA, data[3]);
 	} else {
 		uvc_trace(UVC_TRACE_STATUS, "Stream %u error event %02x %02x "
 			"len %d.\n", data[1], data[2], data[3], len);
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index 96ac88317b8e..ea3aafbbda44 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -91,9 +91,9 @@ MODULE_PARM_DESC(mpt_msi_enable_fc, " Enable MSI Support for FC \
 		controllers (default=0)");
 
 static int mpt_msi_enable_sas;
-module_param(mpt_msi_enable_sas, int, 1);
+module_param(mpt_msi_enable_sas, int, 0);
 MODULE_PARM_DESC(mpt_msi_enable_sas, " Enable MSI Support for SAS \
-		controllers (default=1)");
+		controllers (default=0)");
 
 
 static int mpt_channel_mapping;
diff --git a/drivers/misc/hpilo.c b/drivers/misc/hpilo.c
index f26667a7abf7..cf991850f01b 100644
--- a/drivers/misc/hpilo.c
+++ b/drivers/misc/hpilo.c
@@ -710,6 +710,7 @@ out:
 
 static struct pci_device_id ilo_devices[] = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_COMPAQ, 0xB204) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_HP, 0x3307) },
 	{ }
 };
 MODULE_DEVICE_TABLE(pci, ilo_devices);
@@ -758,7 +759,7 @@ static void __exit ilo_exit(void)
 	class_destroy(ilo_class);
 }
 
-MODULE_VERSION("0.06");
+MODULE_VERSION("1.0");
 MODULE_ALIAS(ILO_NAME);
 MODULE_DESCRIPTION(ILO_NAME);
 MODULE_AUTHOR("David Altobelli <david.altobelli@hp.com>");
diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
index 8cff5f5e7f86..406da9a8d453 100644
--- a/drivers/mmc/host/sdhci-pci.c
+++ b/drivers/mmc/host/sdhci-pci.c
@@ -107,6 +107,7 @@ static const struct sdhci_pci_fixes sdhci_ene_714 = {
 
 static const struct sdhci_pci_fixes sdhci_cafe = {
 	.quirks = SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER |
+		  SDHCI_QUIRK_NO_BUSY_IRQ |
 		  SDHCI_QUIRK_BROKEN_TIMEOUT_VAL,
 };
 
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index f52f3053ed92..accb592764ed 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -1291,8 +1291,11 @@ static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask)
 		if (host->cmd->data)
 			DBG("Cannot wait for busy signal when also "
 				"doing a data transfer");
-		else
+		else if (!(host->quirks & SDHCI_QUIRK_NO_BUSY_IRQ))
 			return;
+
+		/* The controller does not support the end-of-busy IRQ,
+		 * fall through and take the SDHCI_INT_RESPONSE */
 	}
 
 	if (intmask & SDHCI_INT_RESPONSE)
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index ebb83657e27a..43c37c68d07a 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -208,6 +208,8 @@ struct sdhci_host {
 #define SDHCI_QUIRK_BROKEN_TIMEOUT_VAL (1<<12)
 /* Controller has an issue with buffer bits for small transfers */
 #define SDHCI_QUIRK_BROKEN_SMALL_PIO (1<<13)
+/* Controller does not provide transfer-complete interrupt when not busy */
+#define SDHCI_QUIRK_NO_BUSY_IRQ (1<<14)
 
 	int irq;		/* Device IRQ */
 	void __iomem *ioaddr;	/* Mapped address */
diff --git a/drivers/mtd/chips/map_rom.c b/drivers/mtd/chips/map_rom.c
index 821d0ed6bae3..c76d6e5f47ee 100644
--- a/drivers/mtd/chips/map_rom.c
+++ b/drivers/mtd/chips/map_rom.c
@@ -19,6 +19,7 @@ static int maprom_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
 static int maprom_write (struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
 static void maprom_nop (struct mtd_info *);
 static struct mtd_info *map_rom_probe(struct map_info *map);
+static int maprom_erase (struct mtd_info *mtd, struct erase_info *info);
 
 static struct mtd_chip_driver maprom_chipdrv = {
 	.probe = map_rom_probe,
@@ -42,6 +43,7 @@ static struct mtd_info *map_rom_probe(struct map_info *map)
 	mtd->read = maprom_read;
 	mtd->write = maprom_write;
 	mtd->sync = maprom_nop;
+	mtd->erase = maprom_erase;
 	mtd->flags = MTD_CAP_ROM;
 	mtd->erasesize = map->size;
 	mtd->writesize = 1;
@@ -71,6 +73,12 @@ static int maprom_write (struct mtd_info *mtd, loff_t to, size_t len, size_t *re
 	return -EIO;
 }
 
+static int maprom_erase (struct mtd_info *mtd, struct erase_info *info)
+{
+	/* We do our best 8) */
+	return -EROFS;
+}
+
 static int __init map_rom_init(void)
 {
 	register_mtd_chip_driver(&maprom_chipdrv);
diff --git a/drivers/mtd/devices/slram.c b/drivers/mtd/devices/slram.c
index a425d09f35a0..00248e81ecd5 100644
--- a/drivers/mtd/devices/slram.c
+++ b/drivers/mtd/devices/slram.c
@@ -267,22 +267,28 @@ static int parse_cmdline(char *devname, char *szstart, char *szlength)
 	if (*(szlength) != '+') {
 		devlength = simple_strtoul(szlength, &buffer, 0);
 		devlength = handle_unit(devlength, buffer) - devstart;
+		if (devlength < devstart)
+			goto err_out;
+
+		devlength -= devstart;
 	} else {
 		devlength = simple_strtoul(szlength + 1, &buffer, 0);
 		devlength = handle_unit(devlength, buffer);
 	}
 	T("slram: devname=%s, devstart=0x%lx, devlength=0x%lx\n",
 		devname, devstart, devlength);
-	if ((devstart < 0) || (devlength < 0) || (devlength % SLRAM_BLK_SZ != 0)) {
-		E("slram: Illegal start / length parameter.\n");
-		return(-EINVAL);
-	}
+	if (devlength % SLRAM_BLK_SZ != 0)
+		goto err_out;
 
 	if ((devstart = register_device(devname, devstart, devlength))){
 		unregister_devices();
 		return((int)devstart);
 	}
 	return(0);
+
+err_out:
+	E("slram: Illegal length parameter.\n");
+	return(-EINVAL);
 }
 
 #ifndef MODULE
diff --git a/drivers/mtd/lpddr/Kconfig b/drivers/mtd/lpddr/Kconfig
index acd4ea9b2278..5a401d8047ab 100644
--- a/drivers/mtd/lpddr/Kconfig
+++ b/drivers/mtd/lpddr/Kconfig
@@ -12,6 +12,7 @@ config MTD_LPDDR
 	  DDR memories, intended for battery-operated systems.
 
 config MTD_QINFO_PROBE
+	depends on MTD_LPDDR
 	tristate "Detect flash chips by QINFO probe"
 	help
 	  Device Information for LPDDR chips is offered through the Overlay
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig
index 0225cbbf22de..043d50fb6ef6 100644
--- a/drivers/mtd/maps/Kconfig
+++ b/drivers/mtd/maps/Kconfig
@@ -491,7 +491,7 @@ config MTD_PCMCIA_ANONYMOUS
 
 config MTD_BFIN_ASYNC
 	tristate "Blackfin BF533-STAMP Flash Chip Support"
-	depends on BFIN533_STAMP && MTD_CFI
+	depends on BFIN533_STAMP && MTD_CFI && MTD_COMPLEX_MAPPINGS
 	select MTD_PARTITIONS
 	default y
 	help
diff --git a/drivers/mtd/maps/bfin-async-flash.c b/drivers/mtd/maps/bfin-async-flash.c
index 6fec86aaed7e..576611f605db 100644
--- a/drivers/mtd/maps/bfin-async-flash.c
+++ b/drivers/mtd/maps/bfin-async-flash.c
@@ -152,14 +152,18 @@ static int __devinit bfin_flash_probe(struct platform_device *pdev)
 
 	if (gpio_request(state->enet_flash_pin, DRIVER_NAME)) {
 		pr_devinit(KERN_ERR DRIVER_NAME ": Failed to request gpio %d\n", state->enet_flash_pin);
+		kfree(state);
 		return -EBUSY;
 	}
 	gpio_direction_output(state->enet_flash_pin, 1);
 
 	pr_devinit(KERN_NOTICE DRIVER_NAME ": probing %d-bit flash bus\n", state->map.bankwidth * 8);
 	state->mtd = do_map_probe(memory->name, &state->map);
-	if (!state->mtd)
+	if (!state->mtd) {
+		gpio_free(state->enet_flash_pin);
+		kfree(state);
 		return -ENXIO;
+	}
 
 #ifdef CONFIG_MTD_PARTITIONS
 	ret = parse_mtd_partitions(state->mtd, part_probe_types, &pdata->parts, 0);
diff --git a/drivers/mtd/maps/ck804xrom.c b/drivers/mtd/maps/ck804xrom.c
index 5f7a245ed132..424f17d6ffd1 100644
--- a/drivers/mtd/maps/ck804xrom.c
+++ b/drivers/mtd/maps/ck804xrom.c
@@ -342,9 +342,9 @@ static struct pci_device_id ck804xrom_pci_tbl[] = {
 	{ 0, }
 };
 
+#if 0
 MODULE_DEVICE_TABLE(pci, ck804xrom_pci_tbl);
 
-#if 0
 static struct pci_driver ck804xrom_driver = {
 	.name = MOD_NAME,
 	.id_table = ck804xrom_pci_tbl,
diff --git a/drivers/mtd/maps/physmap.c b/drivers/mtd/maps/physmap.c
index 87743661d48e..4b122e7ab4b3 100644
--- a/drivers/mtd/maps/physmap.c
+++ b/drivers/mtd/maps/physmap.c
@@ -29,6 +29,7 @@ struct physmap_flash_info {
 	struct map_info map[MAX_RESOURCES];
 #ifdef CONFIG_MTD_PARTITIONS
 	int nr_parts;
+	struct mtd_partition *parts;
 #endif
 };
 
@@ -45,25 +46,26 @@ static int physmap_flash_remove(struct platform_device *dev)
 
 	physmap_data = dev->dev.platform_data;
 
-#ifdef CONFIG_MTD_CONCAT
-	if (info->cmtd != info->mtd[0]) {
+#ifdef CONFIG_MTD_PARTITIONS
+	if (info->nr_parts) {
+		del_mtd_partitions(info->cmtd);
+		kfree(info->parts);
+	} else if (physmap_data->nr_parts)
+		del_mtd_partitions(info->cmtd);
+	else
 		del_mtd_device(info->cmtd);
+#else
+	del_mtd_device(info->cmtd);
+#endif
+
+#ifdef CONFIG_MTD_CONCAT
+	if (info->cmtd != info->mtd[0])
 		mtd_concat_destroy(info->cmtd);
-	}
 #endif
 
 	for (i = 0; i < MAX_RESOURCES; i++) {
-		if (info->mtd[i] != NULL) {
-#ifdef CONFIG_MTD_PARTITIONS
-			if (info->nr_parts || physmap_data->nr_parts)
-				del_mtd_partitions(info->mtd[i]);
-			else
-				del_mtd_device(info->mtd[i]);
-#else
-			del_mtd_device(info->mtd[i]);
-#endif
+		if (info->mtd[i] != NULL)
 			map_destroy(info->mtd[i]);
-		}
 	}
 	return 0;
 }
@@ -86,9 +88,6 @@ static int physmap_flash_probe(struct platform_device *dev)
 	int err = 0;
 	int i;
 	int devices_found = 0;
-#ifdef CONFIG_MTD_PARTITIONS
-	struct mtd_partition *parts;
-#endif
 
 	physmap_data = dev->dev.platform_data;
 	if (physmap_data == NULL)
@@ -167,10 +166,11 @@ static int physmap_flash_probe(struct platform_device *dev)
 		goto err_out;
 
 #ifdef CONFIG_MTD_PARTITIONS
-	err = parse_mtd_partitions(info->cmtd, part_probe_types, &parts, 0);
+	err = parse_mtd_partitions(info->cmtd, part_probe_types,
+				   &info->parts, 0);
 	if (err > 0) {
-		add_mtd_partitions(info->cmtd, parts, err);
-		kfree(parts);
+		add_mtd_partitions(info->cmtd, info->parts, err);
+		info->nr_parts = err;
 		return 0;
 	}
 
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 6bdfd47d679d..a2f185fd7072 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -2342,6 +2342,17 @@ config ATL1E
 	  To compile this driver as a module, choose M here. The module
 	  will be called atl1e.
 
+config ATL1C
+	tristate "Atheros L1C Gigabit Ethernet support (EXPERIMENTAL)"
+	depends on PCI && EXPERIMENTAL
+	select CRC32
+	select MII
+	help
+	  This driver supports the Atheros L1C gigabit ethernet adapter.
+
+	  To compile this driver as a module, choose M here. The module
+	  will be called atl1c.
+
 config JME
 	tristate "JMicron(R) PCI-Express Gigabit Ethernet support"
 	depends on PCI
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index a3c5c002f224..aca8492db654 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -17,6 +17,7 @@ obj-$(CONFIG_BONDING) += bonding/
 obj-$(CONFIG_ATL1) += atlx/
 obj-$(CONFIG_ATL2) += atlx/
 obj-$(CONFIG_ATL1E) += atl1e/
+obj-$(CONFIG_ATL1C) += atl1c/
 obj-$(CONFIG_GIANFAR) += gianfar_driver.o
 obj-$(CONFIG_TEHUTI) += tehuti.o
 obj-$(CONFIG_ENIC) += enic/
diff --git a/drivers/net/atl1c/Makefile b/drivers/net/atl1c/Makefile
new file mode 100644
index 000000000000..c37d966952ee
--- /dev/null
+++ b/drivers/net/atl1c/Makefile
@@ -0,0 +1,2 @@
1obj-$(CONFIG_ATL1C) += atl1c.o
2atl1c-objs := atl1c_main.o atl1c_hw.o atl1c_ethtool.o
diff --git a/drivers/net/atl1c/atl1c.h b/drivers/net/atl1c/atl1c.h
new file mode 100644
index 000000000000..ac11b84b8377
--- /dev/null
+++ b/drivers/net/atl1c/atl1c.h
@@ -0,0 +1,606 @@
1/*
2 * Copyright(c) 2008 - 2009 Atheros Corporation. All rights reserved.
3 *
4 * Derived from Intel e1000 driver
5 * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License as published by the Free
9 * Software Foundation; either version 2 of the License, or (at your option)
10 * any later version.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc., 59
19 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
20 */
21
22#ifndef _ATL1C_H_
23#define _ATL1C_H_
24
25#include <linux/version.h>
26#include <linux/init.h>
27#include <linux/types.h>
28#include <linux/errno.h>
29#include <linux/module.h>
30#include <linux/pci.h>
31#include <linux/netdevice.h>
32#include <linux/etherdevice.h>
33#include <linux/skbuff.h>
34#include <linux/ioport.h>
35#include <linux/slab.h>
36#include <linux/list.h>
37#include <linux/delay.h>
38#include <linux/sched.h>
39#include <linux/in.h>
40#include <linux/ip.h>
41#include <linux/ipv6.h>
42#include <linux/udp.h>
43#include <linux/mii.h>
44#include <linux/io.h>
45#include <linux/vmalloc.h>
46#include <linux/pagemap.h>
47#include <linux/tcp.h>
48#include <linux/mii.h>
49#include <linux/ethtool.h>
50#include <linux/if_vlan.h>
51#include <linux/workqueue.h>
52#include <net/checksum.h>
53#include <net/ip6_checksum.h>
54
55#include "atl1c_hw.h"
56
57/* Wake Up Filter Control */
58#define AT_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */
59#define AT_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */
60#define AT_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */
61#define AT_WUFC_MC 0x00000008 /* Multicast Wakeup Enable */
62#define AT_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */
63
64#define AT_VLAN_TO_TAG(_vlan, _tag) \
65 _tag = ((((_vlan) >> 8) & 0xFF) |\
66 (((_vlan) & 0xFF) << 8))
67
68#define AT_TAG_TO_VLAN(_tag, _vlan) \
69 _vlan = ((((_tag) >> 8) & 0xFF) |\
70 (((_tag) & 0xFF) << 8))
71
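For reference, the two VLAN macros above simply swap the bytes of a 16-bit tag between CPU order and the order the hardware expects in the descriptor. A minimal illustration (not part of the patch):

	u16 vlan = 0x1234, tag, back;

	AT_VLAN_TO_TAG(vlan, tag);	/* tag  == 0x3412 */
	AT_TAG_TO_VLAN(tag, back);	/* back == 0x1234 */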
72#define SPEED_0 0xffff
73#define HALF_DUPLEX 1
74#define FULL_DUPLEX 2
75
76#define AT_RX_BUF_SIZE (ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN)
77#define MAX_JUMBO_FRAME_SIZE (9*1024)
78#define MAX_TX_OFFLOAD_THRESH (9*1024)
79
80#define AT_MAX_RECEIVE_QUEUE 4
81#define AT_DEF_RECEIVE_QUEUE 1
82#define AT_MAX_TRANSMIT_QUEUE 2
83
84#define AT_DMA_HI_ADDR_MASK 0xffffffff00000000ULL
85#define AT_DMA_LO_ADDR_MASK 0x00000000ffffffffULL
86
87#define AT_TX_WATCHDOG (5 * HZ)
88#define AT_MAX_INT_WORK 5
89#define AT_TWSI_EEPROM_TIMEOUT 100
90#define AT_HW_MAX_IDLE_DELAY 10
91#define AT_SUSPEND_LINK_TIMEOUT 28
92
93#define AT_ASPM_L0S_TIMER 6
94#define AT_ASPM_L1_TIMER 12
95
96#define ATL1C_PCIE_L0S_L1_DISABLE 0x01
97#define ATL1C_PCIE_PHY_RESET 0x02
98
99#define ATL1C_ASPM_L0s_ENABLE 0x0001
100#define ATL1C_ASPM_L1_ENABLE 0x0002
101
102#define AT_REGS_LEN (75 * sizeof(u32))
103#define AT_EEPROM_LEN 512
104
105#define ATL1C_GET_DESC(R, i, type) (&(((type *)((R)->desc))[i]))
106#define ATL1C_RFD_DESC(R, i) ATL1C_GET_DESC(R, i, struct atl1c_rx_free_desc)
107#define ATL1C_TPD_DESC(R, i) ATL1C_GET_DESC(R, i, struct atl1c_tpd_desc)
108#define ATL1C_RRD_DESC(R, i) ATL1C_GET_DESC(R, i, struct atl1c_recv_ret_status)
109
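For reference, the ATL1C_*_DESC() helpers above just index a ring's untyped descriptor memory with the right element type. A minimal sketch of the intended use (tpd_ring, dma_handle and skb are assumed to exist; struct atl1c_tpd_ring is defined later in this header):

	struct atl1c_tpd_desc *tpd;
	u16 idx = tpd_ring->next_to_use;

	tpd = ATL1C_TPD_DESC(tpd_ring, idx);
	memset(tpd, 0, sizeof(*tpd));
	tpd->buffer_addr = cpu_to_le64(dma_handle);	/* hypothetical DMA mapping */
	tpd->buffer_len  = cpu_to_le16(skb->len);	/* hypothetical skb */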
110/* tpd word 1 bit 0:7 General Checksum task offload */
111#define TPD_L4HDR_OFFSET_MASK 0x00FF
112#define TPD_L4HDR_OFFSET_SHIFT 0
113
114/* tpd word 1 bit 0:7 Large Send task offload (IPv4/IPV6) */
115#define TPD_TCPHDR_OFFSET_MASK 0x00FF
116#define TPD_TCPHDR_OFFSET_SHIFT 0
117
118/* tpd word 1 bit 0:7 Custom Checksum task offload */
119#define TPD_PLOADOFFSET_MASK 0x00FF
120#define TPD_PLOADOFFSET_SHIFT 0
121
122/* tpd word 1 bit 8:17 */
123#define TPD_CCSUM_EN_MASK 0x0001
124#define TPD_CCSUM_EN_SHIFT 8
125#define TPD_IP_CSUM_MASK 0x0001
126#define TPD_IP_CSUM_SHIFT 9
127#define TPD_TCP_CSUM_MASK 0x0001
128#define TPD_TCP_CSUM_SHIFT 10
129#define TPD_UDP_CSUM_MASK 0x0001
130#define TPD_UDP_CSUM_SHIFT 11
131#define TPD_LSO_EN_MASK 0x0001 /* TCP Large Send Offload */
132#define TPD_LSO_EN_SHIFT 12
133#define TPD_LSO_VER_MASK 0x0001
134#define TPD_LSO_VER_SHIFT 13 /* 0 : ipv4; 1 : ipv4/ipv6 */
135#define TPD_CON_VTAG_MASK 0x0001
136#define TPD_CON_VTAG_SHIFT 14
137#define TPD_INS_VTAG_MASK 0x0001
138#define TPD_INS_VTAG_SHIFT 15
139#define TPD_IPV4_PACKET_MASK 0x0001 /* valid when LSO VER is 1 */
140#define TPD_IPV4_PACKET_SHIFT 16
141#define TPD_ETH_TYPE_MASK 0x0001
142#define TPD_ETH_TYPE_SHIFT 17 /* 0 : 802.3 frame; 1 : Ethernet */
143
144/* tpd word 18:25 Custom Checksum task offload */
145#define TPD_CCSUM_OFFSET_MASK 0x00FF
146#define TPD_CCSUM_OFFSET_SHIFT 18
147#define TPD_CCSUM_EPAD_MASK 0x0001
148#define TPD_CCSUM_EPAD_SHIFT 30
149
150/* tpd word 18:30 Large Send task offload (IPv4/IPV6) */
151#define TPD_MSS_MASK 0x1FFF
152#define TPD_MSS_SHIFT 18
153
154#define TPD_EOP_MASK 0x0001
155#define TPD_EOP_SHIFT 31
156
157struct atl1c_tpd_desc {
158 __le16 buffer_len; /* include 4-byte CRC */
159 __le16 vlan_tag;
160 __le32 word1;
161 __le64 buffer_addr;
162};
163
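The TPD_*_MASK/TPD_*_SHIFT pairs above are meant to be OR-ed into word1 of the descriptor. A hedged sketch of how a checksum-offload word1 could be assembled (the helper below is hypothetical and not part of the patch):

	static inline __le32 atl1c_tpd_csum_word1(u8 tcp_hdr_offset)
	{
		u32 word1 = 0;

		word1 |= (tcp_hdr_offset & TPD_TCPHDR_OFFSET_MASK) <<
			  TPD_TCPHDR_OFFSET_SHIFT;
		word1 |= (1 & TPD_IP_CSUM_MASK)  << TPD_IP_CSUM_SHIFT;
		word1 |= (1 & TPD_TCP_CSUM_MASK) << TPD_TCP_CSUM_SHIFT;
		word1 |= (1 & TPD_EOP_MASK)      << TPD_EOP_SHIFT;

		return cpu_to_le32(word1);
	}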
164struct atl1c_tpd_ext_desc {
165 u32 reservd_0;
166 __le32 word1;
167 __le32 pkt_len;
168 u32 reservd_1;
169};
170/* rrs word 0 bit 0:31 */
171#define RRS_RX_CSUM_MASK 0xFFFF
172#define RRS_RX_CSUM_SHIFT 0
173#define RRS_RX_RFD_CNT_MASK 0x000F
174#define RRS_RX_RFD_CNT_SHIFT 16
175#define RRS_RX_RFD_INDEX_MASK 0x0FFF
176#define RRS_RX_RFD_INDEX_SHIFT 20
177
178/* rrs flag bit 0:16 */
179#define RRS_HEAD_LEN_MASK 0x00FF
180#define RRS_HEAD_LEN_SHIFT 0
181#define RRS_HDS_TYPE_MASK 0x0003
182#define RRS_HDS_TYPE_SHIFT 8
183#define RRS_CPU_NUM_MASK 0x0003
184#define RRS_CPU_NUM_SHIFT 10
185#define RRS_HASH_FLG_MASK 0x000F
186#define RRS_HASH_FLG_SHIFT 12
187
188#define RRS_HDS_TYPE_HEAD 1
189#define RRS_HDS_TYPE_DATA 2
190
191#define RRS_IS_NO_HDS_TYPE(flag) \
192	((((flag) >> (RRS_HDS_TYPE_SHIFT)) & RRS_HDS_TYPE_MASK) == 0)
193
194#define RRS_IS_HDS_HEAD(flag) \
195	((((flag) >> (RRS_HDS_TYPE_SHIFT)) & RRS_HDS_TYPE_MASK) == \
196	RRS_HDS_TYPE_HEAD)
197
198#define RRS_IS_HDS_DATA(flag) \
199	((((flag) >> (RRS_HDS_TYPE_SHIFT)) & RRS_HDS_TYPE_MASK) == \
200	RRS_HDS_TYPE_DATA)
201
202/* rrs word 3 bit 0:31 */
203#define RRS_PKT_SIZE_MASK 0x3FFF
204#define RRS_PKT_SIZE_SHIFT 0
205#define RRS_ERR_L4_CSUM_MASK 0x0001
206#define RRS_ERR_L4_CSUM_SHIFT 14
207#define RRS_ERR_IP_CSUM_MASK 0x0001
208#define RRS_ERR_IP_CSUM_SHIFT 15
209#define RRS_VLAN_INS_MASK 0x0001
210#define RRS_VLAN_INS_SHIFT 16
211#define RRS_PROT_ID_MASK 0x0007
212#define RRS_PROT_ID_SHIFT 17
213#define RRS_RX_ERR_SUM_MASK 0x0001
214#define RRS_RX_ERR_SUM_SHIFT 20
215#define RRS_RX_ERR_CRC_MASK 0x0001
216#define RRS_RX_ERR_CRC_SHIFT 21
217#define RRS_RX_ERR_FAE_MASK 0x0001
218#define RRS_RX_ERR_FAE_SHIFT 22
219#define RRS_RX_ERR_TRUNC_MASK 0x0001
220#define RRS_RX_ERR_TRUNC_SHIFT 23
221#define RRS_RX_ERR_RUNC_MASK 0x0001
222#define RRS_RX_ERR_RUNC_SHIFT 24
223#define RRS_RX_ERR_ICMP_MASK 0x0001
224#define RRS_RX_ERR_ICMP_SHIFT 25
225#define RRS_PACKET_BCAST_MASK 0x0001
226#define RRS_PACKET_BCAST_SHIFT 26
227#define RRS_PACKET_MCAST_MASK 0x0001
228#define RRS_PACKET_MCAST_SHIFT 27
229#define RRS_PACKET_TYPE_MASK 0x0001
230#define RRS_PACKET_TYPE_SHIFT 28
231#define RRS_FIFO_FULL_MASK 0x0001
232#define RRS_FIFO_FULL_SHIFT 29
233#define RRS_802_3_LEN_ERR_MASK 0x0001
234#define RRS_802_3_LEN_ERR_SHIFT 30
235#define RRS_RXD_UPDATED_MASK 0x0001
236#define RRS_RXD_UPDATED_SHIFT 31
237
238#define RRS_ERR_L4_CSUM 0x00004000
239#define RRS_ERR_IP_CSUM 0x00008000
240#define RRS_VLAN_INS 0x00010000
241#define RRS_RX_ERR_SUM 0x00100000
242#define RRS_RX_ERR_CRC 0x00200000
243#define RRS_802_3_LEN_ERR 0x40000000
244#define RRS_RXD_UPDATED 0x80000000
245
246#define RRS_PACKET_TYPE_802_3 1
247#define RRS_PACKET_TYPE_ETH 0
248#define RRS_PACKET_IS_ETH(word) \
249	((((word) >> RRS_PACKET_TYPE_SHIFT) & RRS_PACKET_TYPE_MASK) == \
250	RRS_PACKET_TYPE_ETH)
251#define RRS_RXD_IS_VALID(word) \
252 ((((word) >> RRS_RXD_UPDATED_SHIFT) & RRS_RXD_UPDATED_MASK) == 1)
253
254#define RRS_PACKET_PROT_IS_IPV4_ONLY(word) \
255 ((((word) >> RRS_PROT_ID_SHIFT) & RRS_PROT_ID_MASK) == 1)
256#define RRS_PACKET_PROT_IS_IPV6_ONLY(word) \
257 ((((word) >> RRS_PROT_ID_SHIFT) & RRS_PROT_ID_MASK) == 6)
258
259struct atl1c_recv_ret_status {
260 __le32 word0;
261 __le32 rss_hash;
262 __le16 vlan_tag;
263 __le16 flag;
264 __le32 word3;
265};
266
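A sketch of how the word3 fields above would be consumed in an RX clean path (rrs is a hypothetical pointer to a completed struct atl1c_recv_ret_status; error handling is shown only in comments):

	u32 word3 = le32_to_cpu(rrs->word3);
	u16 pkt_len;

	if (!RRS_RXD_IS_VALID(word3))
		return;		/* hardware has not written this entry back yet */

	pkt_len = (word3 >> RRS_PKT_SIZE_SHIFT) & RRS_PKT_SIZE_MASK;
	if (word3 & RRS_RX_ERR_SUM) {
		/* bad frame: drop it and recycle the RFD buffer */
	}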
267/* RFD descriptor */
268struct atl1c_rx_free_desc {
269 __le64 buffer_addr;
270};
271
272/* DMA Order Settings */
273enum atl1c_dma_order {
274 atl1c_dma_ord_in = 1,
275 atl1c_dma_ord_enh = 2,
276 atl1c_dma_ord_out = 4
277};
278
279enum atl1c_dma_rcb {
280 atl1c_rcb_64 = 0,
281 atl1c_rcb_128 = 1
282};
283
284enum atl1c_mac_speed {
285 atl1c_mac_speed_0 = 0,
286 atl1c_mac_speed_10_100 = 1,
287 atl1c_mac_speed_1000 = 2
288};
289
290enum atl1c_dma_req_block {
291 atl1c_dma_req_128 = 0,
292 atl1c_dma_req_256 = 1,
293 atl1c_dma_req_512 = 2,
294 atl1c_dma_req_1024 = 3,
295 atl1c_dma_req_2048 = 4,
296 atl1c_dma_req_4096 = 5
297};
298
299enum atl1c_rss_mode {
300 atl1c_rss_mode_disable = 0,
301 atl1c_rss_sig_que = 1,
302 atl1c_rss_mul_que_sig_int = 2,
303 atl1c_rss_mul_que_mul_int = 4,
304};
305
306enum atl1c_rss_type {
307 atl1c_rss_disable = 0,
308 atl1c_rss_ipv4 = 1,
309 atl1c_rss_ipv4_tcp = 2,
310 atl1c_rss_ipv6 = 4,
311 atl1c_rss_ipv6_tcp = 8
312};
313
314enum atl1c_nic_type {
315 athr_l1c = 0,
316 athr_l2c = 1,
317};
318
319enum atl1c_trans_queue {
320 atl1c_trans_normal = 0,
321 atl1c_trans_high = 1
322};
323
324struct atl1c_hw_stats {
325 /* rx */
326	unsigned long rx_ok;		/* The number of good packets received. */
327	unsigned long rx_bcast;		/* The number of good broadcast packets received. */
328	unsigned long rx_mcast;		/* The number of good multicast packets received. */
329	unsigned long rx_pause;		/* The number of Pause packets received. */
330	unsigned long rx_ctrl;		/* The number of control packets received other than Pause frames. */
331	unsigned long rx_fcs_err;	/* The number of packets with a bad FCS. */
332	unsigned long rx_len_err;	/* The number of packets whose length field does not match the actual size. */
333	unsigned long rx_byte_cnt;	/* The number of bytes of good packets received. FCS is NOT included. */
334	unsigned long rx_runt;		/* The number of packets received that are less than 64 bytes long with a good FCS. */
335	unsigned long rx_frag;		/* The number of packets received that are less than 64 bytes long with a bad FCS. */
336	unsigned long rx_sz_64;		/* The number of good and bad packets received that are 64 bytes long. */
337	unsigned long rx_sz_65_127;	/* The number of good and bad packets received that are between 65 and 127 bytes long. */
338	unsigned long rx_sz_128_255;	/* The number of good and bad packets received that are between 128 and 255 bytes long. */
339	unsigned long rx_sz_256_511;	/* The number of good and bad packets received that are between 256 and 511 bytes long. */
340	unsigned long rx_sz_512_1023;	/* The number of good and bad packets received that are between 512 and 1023 bytes long. */
341	unsigned long rx_sz_1024_1518;	/* The number of good and bad packets received that are between 1024 and 1518 bytes long. */
342	unsigned long rx_sz_1519_max;	/* The number of good and bad packets received that are between 1519 bytes and the MTU. */
343	unsigned long rx_sz_ov;		/* The number of good and bad packets received that are larger than the MTU and truncated by Selene. */
344	unsigned long rx_rxf_ov;	/* The number of frames dropped due to RX FIFO overflow. */
345	unsigned long rx_rrd_ov;	/* The number of frames dropped due to RRD overflow. */
346	unsigned long rx_align_err;	/* Alignment errors. */
347	unsigned long rx_bcast_byte_cnt; /* The byte count of broadcast packets received, excluding FCS. */
348	unsigned long rx_mcast_byte_cnt; /* The byte count of multicast packets received, excluding FCS. */
349	unsigned long rx_err_addr;	/* The number of packets dropped due to address filtering. */
350
351	/* tx */
352	unsigned long tx_ok;		/* The number of good packets transmitted. */
353	unsigned long tx_bcast;		/* The number of good broadcast packets transmitted. */
354	unsigned long tx_mcast;		/* The number of good multicast packets transmitted. */
355	unsigned long tx_pause;		/* The number of Pause packets transmitted. */
356	unsigned long tx_exc_defer;	/* The number of packets transmitted with excessive deferral. */
357	unsigned long tx_ctrl;		/* The number of control frames transmitted, excluding Pause frames. */
358	unsigned long tx_defer;		/* The number of packets transmitted that are deferred. */
359	unsigned long tx_byte_cnt;	/* The number of bytes of data transmitted. FCS is NOT included. */
360	unsigned long tx_sz_64;		/* The number of good and bad packets transmitted that are 64 bytes long. */
361	unsigned long tx_sz_65_127;	/* The number of good and bad packets transmitted that are between 65 and 127 bytes long. */
362	unsigned long tx_sz_128_255;	/* The number of good and bad packets transmitted that are between 128 and 255 bytes long. */
363	unsigned long tx_sz_256_511;	/* The number of good and bad packets transmitted that are between 256 and 511 bytes long. */
364	unsigned long tx_sz_512_1023;	/* The number of good and bad packets transmitted that are between 512 and 1023 bytes long. */
365	unsigned long tx_sz_1024_1518;	/* The number of good and bad packets transmitted that are between 1024 and 1518 bytes long. */
366	unsigned long tx_sz_1519_max;	/* The number of good and bad packets transmitted that are between 1519 bytes and the MTU. */
367	unsigned long tx_1_col;		/* The number of packets subsequently transmitted successfully with a single prior collision. */
368	unsigned long tx_2_col;		/* The number of packets subsequently transmitted successfully with multiple prior collisions. */
369	unsigned long tx_late_col;	/* The number of packets transmitted with late collisions. */
370	unsigned long tx_abort_col;	/* The number of transmit packets aborted due to excessive collisions. */
371	unsigned long tx_underrun;	/* The number of transmit packets aborted due to transmit FIFO underrun or TRD FIFO underrun. */
372	unsigned long tx_rd_eop;	/* The number of times a read went beyond the EOP into the next frame area because the TRD was not written in time. */
373	unsigned long tx_len_err;	/* The number of transmit packets whose length field does NOT match the actual frame size. */
374	unsigned long tx_trunc;		/* The number of transmit packets truncated because their size exceeds the MTU. */
375	unsigned long tx_bcast_byte;	/* The byte count of broadcast packets transmitted, excluding FCS. */
376	unsigned long tx_mcast_byte;	/* The byte count of multicast packets transmitted, excluding FCS. */
377};
378
379struct atl1c_hw {
380 u8 __iomem *hw_addr; /* inner register address */
381 struct atl1c_adapter *adapter;
382 enum atl1c_nic_type nic_type;
383 enum atl1c_dma_order dma_order;
384 enum atl1c_dma_rcb rcb_value;
385 enum atl1c_dma_req_block dmar_block;
386 enum atl1c_dma_req_block dmaw_block;
387
388 u16 device_id;
389 u16 vendor_id;
390 u16 subsystem_id;
391 u16 subsystem_vendor_id;
392 u8 revision_id;
393
394 u32 intr_mask;
395 u8 dmaw_dly_cnt;
396 u8 dmar_dly_cnt;
397
398 u8 preamble_len;
399 u16 max_frame_size;
400 u16 min_frame_size;
401
402 enum atl1c_mac_speed mac_speed;
403 bool mac_duplex;
404 bool hibernate;
405 u16 media_type;
406#define MEDIA_TYPE_AUTO_SENSOR 0
407#define MEDIA_TYPE_100M_FULL 1
408#define MEDIA_TYPE_100M_HALF 2
409#define MEDIA_TYPE_10M_FULL 3
410#define MEDIA_TYPE_10M_HALF 4
411
412 u16 autoneg_advertised;
413 u16 mii_autoneg_adv_reg;
414 u16 mii_1000t_ctrl_reg;
415
416 u16 tx_imt; /* TX Interrupt Moderator timer ( 2us resolution) */
417 u16 rx_imt; /* RX Interrupt Moderator timer ( 2us resolution) */
418 u16 ict; /* Interrupt Clear timer (2us resolution) */
419 u16 ctrl_flags;
420#define ATL1C_INTR_CLEAR_ON_READ 0x0001
421#define ATL1C_INTR_MODRT_ENABLE 0x0002
422#define ATL1C_CMB_ENABLE 0x0004
423#define ATL1C_SMB_ENABLE 0x0010
424#define ATL1C_TXQ_MODE_ENHANCE 0x0020
425#define ATL1C_RX_IPV6_CHKSUM 0x0040
426#define ATL1C_ASPM_L0S_SUPPORT 0x0080
427#define ATL1C_ASPM_L1_SUPPORT 0x0100
428#define ATL1C_ASPM_CTRL_MON 0x0200
429#define ATL1C_HIB_DISABLE 0x0400
430#define ATL1C_LINK_CAP_1000M 0x0800
431#define ATL1C_FPGA_VERSION 0x8000
432 u16 cmb_tpd;
433 u16 cmb_rrd;
434 u16 cmb_rx_timer; /* 2us resolution */
435 u16 cmb_tx_timer;
436 u32 smb_timer;
437
438 u16 rrd_thresh; /* Threshold of number of RRD produced to trigger
439 interrupt request */
440 u16 tpd_thresh;
441 u8 tpd_burst; /* Number of TPD to prefetch in cache-aligned burst. */
442 u8 rfd_burst;
443 enum atl1c_rss_type rss_type;
444 enum atl1c_rss_mode rss_mode;
445 u8 rss_hash_bits;
446 u32 base_cpu;
447 u32 indirect_tab;
448 u8 mac_addr[ETH_ALEN];
449 u8 perm_mac_addr[ETH_ALEN];
450
451 bool phy_configured;
452 bool re_autoneg;
453 bool emi_ca;
454};
455
456/*
457 * atl1c_ring_header represents a single, contiguous block of DMA space
458 * mapped for the three descriptor rings (tpd, rfd, rrd) and the two
459 * message blocks (cmb, smb) described below
460 */
461struct atl1c_ring_header {
462 void *desc; /* virtual address */
463 dma_addr_t dma; /* physical address*/
464 unsigned int size; /* length in bytes */
465};
466
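A hedged sketch of what the single-allocation layout implies: all rings and message blocks are carved out of one coherent DMA block (sizes here are illustrative; the real carve-up lives in atl1c_main.c, which is not shown in this hunk):

	ring_header->size = tpd_size + rfd_size + rrd_size + cmb_size + smb_size;
	ring_header->desc = pci_alloc_consistent(pdev, ring_header->size,
						 &ring_header->dma);
	if (!ring_header->desc)
		return -ENOMEM;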
467/*
468 * atl1c_buffer is wrapper around a pointer to a socket buffer
469 * so a DMA handle can be stored along with the skb
470 */
471struct atl1c_buffer {
472 struct sk_buff *skb; /* socket buffer */
473 u16 length; /* rx buffer length */
474 u16 state; /* state of buffer */
475#define ATL1_BUFFER_FREE 0
476#define ATL1_BUFFER_BUSY 1
477 dma_addr_t dma;
478};
479
480/* transmit packet descriptor (tpd) ring */
481struct atl1c_tpd_ring {
482 void *desc; /* descriptor ring virtual address */
483 dma_addr_t dma; /* descriptor ring physical address */
484 u16 size; /* descriptor ring length in bytes */
485 u16 count; /* number of descriptors in the ring */
486	u16 next_to_use; 	/* this is protected by adapter->tx_lock */
487 atomic_t next_to_clean;
488 struct atl1c_buffer *buffer_info;
489};
490
491/* receive free descriptor (rfd) ring */
492struct atl1c_rfd_ring {
493 void *desc; /* descriptor ring virtual address */
494 dma_addr_t dma; /* descriptor ring physical address */
495 u16 size; /* descriptor ring length in bytes */
496 u16 count; /* number of descriptors in the ring */
497 u16 next_to_use;
498 u16 next_to_clean;
499 struct atl1c_buffer *buffer_info;
500};
501
502/* receive return descriptor (rrd) ring */
503struct atl1c_rrd_ring {
504 void *desc; /* descriptor ring virtual address */
505 dma_addr_t dma; /* descriptor ring physical address */
506 u16 size; /* descriptor ring length in bytes */
507 u16 count; /* number of descriptors in the ring */
508 u16 next_to_use;
509 u16 next_to_clean;
510};
511
512struct atl1c_cmb {
513 void *cmb;
514 dma_addr_t dma;
515};
516
517struct atl1c_smb {
518 void *smb;
519 dma_addr_t dma;
520};
521
522/* board specific private data structure */
523struct atl1c_adapter {
524 struct net_device *netdev;
525 struct pci_dev *pdev;
526 struct vlan_group *vlgrp;
527 struct napi_struct napi;
528 struct atl1c_hw hw;
529 struct atl1c_hw_stats hw_stats;
530 struct net_device_stats net_stats;
531 struct mii_if_info mii; /* MII interface info */
532 u16 rx_buffer_len;
533
534 unsigned long flags;
535#define __AT_TESTING 0x0001
536#define __AT_RESETTING 0x0002
537#define __AT_DOWN 0x0003
538 u32 msg_enable;
539
540 bool have_msi;
541 u32 wol;
542 u16 link_speed;
543 u16 link_duplex;
544
545 spinlock_t mdio_lock;
546 spinlock_t tx_lock;
547 atomic_t irq_sem;
548
549 struct work_struct reset_task;
550 struct work_struct link_chg_task;
551 struct timer_list watchdog_timer;
552 struct timer_list phy_config_timer;
553
554 /* All Descriptor memory */
555 struct atl1c_ring_header ring_header;
556 struct atl1c_tpd_ring tpd_ring[AT_MAX_TRANSMIT_QUEUE];
557 struct atl1c_rfd_ring rfd_ring[AT_MAX_RECEIVE_QUEUE];
558 struct atl1c_rrd_ring rrd_ring[AT_MAX_RECEIVE_QUEUE];
559 struct atl1c_cmb cmb;
560 struct atl1c_smb smb;
561 int num_rx_queues;
562	u32 bd_number;     /* board number */
563};
564
565#define AT_WRITE_REG(a, reg, value) ( \
566 writel((value), ((a)->hw_addr + reg)))
567
568#define AT_WRITE_FLUSH(a) (\
569 readl((a)->hw_addr))
570
571#define AT_READ_REG(a, reg, pdata) do { \
572 if (unlikely((a)->hibernate)) { \
573 readl((a)->hw_addr + reg); \
574 *(u32 *)pdata = readl((a)->hw_addr + reg); \
575 } else { \
576 *(u32 *)pdata = readl((a)->hw_addr + reg); \
577 } \
578 } while (0)
579
580#define AT_WRITE_REGB(a, reg, value) (\
581 writeb((value), ((a)->hw_addr + reg)))
582
583#define AT_READ_REGB(a, reg) (\
584 readb((a)->hw_addr + reg))
585
586#define AT_WRITE_REGW(a, reg, value) (\
587 writew((value), ((a)->hw_addr + reg)))
588
589#define AT_READ_REGW(a, reg) (\
590 readw((a)->hw_addr + reg))
591
592#define AT_WRITE_REG_ARRAY(a, reg, offset, value) ( \
593 writel((value), (((a)->hw_addr + reg) + ((offset) << 2))))
594
595#define AT_READ_REG_ARRAY(a, reg, offset) ( \
596 readl(((a)->hw_addr + reg) + ((offset) << 2)))
597
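A sketch of the read/modify/write pattern these accessors are designed for (hw points at an initialized struct atl1c_hw; REG_MAC_CTRL and its bits come from atl1c_hw.h below):

	u32 mac_ctrl;

	AT_READ_REG(hw, REG_MAC_CTRL, &mac_ctrl);
	mac_ctrl |= MAC_CTRL_TX_EN | MAC_CTRL_RX_EN;
	AT_WRITE_REG(hw, REG_MAC_CTRL, mac_ctrl);
	AT_WRITE_FLUSH(hw);	/* read back to post the write */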
598extern char atl1c_driver_name[];
599extern char atl1c_driver_version[];
600
601extern int atl1c_up(struct atl1c_adapter *adapter);
602extern void atl1c_down(struct atl1c_adapter *adapter);
603extern void atl1c_reinit_locked(struct atl1c_adapter *adapter);
604extern s32 atl1c_reset_hw(struct atl1c_hw *hw);
605extern void atl1c_set_ethtool_ops(struct net_device *netdev);
606#endif /* _ATL1C_H_ */
diff --git a/drivers/net/atl1c/atl1c_ethtool.c b/drivers/net/atl1c/atl1c_ethtool.c
new file mode 100644
index 000000000000..45c5b7332cd3
--- /dev/null
+++ b/drivers/net/atl1c/atl1c_ethtool.c
@@ -0,0 +1,317 @@
1/*
2 * Copyright(c) 2009 - 2009 Atheros Corporation. All rights reserved.
3 *
4 * Derived from Intel e1000 driver
5 * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License as published by the Free
9 * Software Foundation; either version 2 of the License, or (at your option)
10 * any later version.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc., 59
19 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
20 *
21 */
22
23#include <linux/netdevice.h>
24#include <linux/ethtool.h>
25
26#include "atl1c.h"
27
28static int atl1c_get_settings(struct net_device *netdev,
29 struct ethtool_cmd *ecmd)
30{
31 struct atl1c_adapter *adapter = netdev_priv(netdev);
32 struct atl1c_hw *hw = &adapter->hw;
33
34 ecmd->supported = (SUPPORTED_10baseT_Half |
35 SUPPORTED_10baseT_Full |
36 SUPPORTED_100baseT_Half |
37 SUPPORTED_100baseT_Full |
38 SUPPORTED_Autoneg |
39 SUPPORTED_TP);
40 if (hw->ctrl_flags & ATL1C_LINK_CAP_1000M)
41 ecmd->supported |= SUPPORTED_1000baseT_Full;
42
43 ecmd->advertising = ADVERTISED_TP;
44
45 ecmd->advertising |= hw->autoneg_advertised;
46
47 ecmd->port = PORT_TP;
48 ecmd->phy_address = 0;
49 ecmd->transceiver = XCVR_INTERNAL;
50
51 if (adapter->link_speed != SPEED_0) {
52 ecmd->speed = adapter->link_speed;
53 if (adapter->link_duplex == FULL_DUPLEX)
54 ecmd->duplex = DUPLEX_FULL;
55 else
56 ecmd->duplex = DUPLEX_HALF;
57 } else {
58 ecmd->speed = -1;
59 ecmd->duplex = -1;
60 }
61
62 ecmd->autoneg = AUTONEG_ENABLE;
63 return 0;
64}
65
66static int atl1c_set_settings(struct net_device *netdev,
67 struct ethtool_cmd *ecmd)
68{
69 struct atl1c_adapter *adapter = netdev_priv(netdev);
70 struct atl1c_hw *hw = &adapter->hw;
71 u16 autoneg_advertised;
72
73 while (test_and_set_bit(__AT_RESETTING, &adapter->flags))
74 msleep(1);
75
76 if (ecmd->autoneg == AUTONEG_ENABLE) {
77 autoneg_advertised = ADVERTISED_Autoneg;
78 } else {
79 if (ecmd->speed == SPEED_1000) {
80 if (ecmd->duplex != DUPLEX_FULL) {
81 if (netif_msg_link(adapter))
82 dev_warn(&adapter->pdev->dev,
83 "1000M half is invalid\n");
84 clear_bit(__AT_RESETTING, &adapter->flags);
85 return -EINVAL;
86 }
87 autoneg_advertised = ADVERTISED_1000baseT_Full;
88 } else if (ecmd->speed == SPEED_100) {
89 if (ecmd->duplex == DUPLEX_FULL)
90 autoneg_advertised = ADVERTISED_100baseT_Full;
91 else
92 autoneg_advertised = ADVERTISED_100baseT_Half;
93 } else {
94 if (ecmd->duplex == DUPLEX_FULL)
95 autoneg_advertised = ADVERTISED_10baseT_Full;
96 else
97 autoneg_advertised = ADVERTISED_10baseT_Half;
98 }
99 }
100
101 if (hw->autoneg_advertised != autoneg_advertised) {
102 hw->autoneg_advertised = autoneg_advertised;
103 if (atl1c_restart_autoneg(hw) != 0) {
104 if (netif_msg_link(adapter))
105 dev_warn(&adapter->pdev->dev,
106 "ethtool speed/duplex setting failed\n");
107 clear_bit(__AT_RESETTING, &adapter->flags);
108 return -EINVAL;
109 }
110 }
111 clear_bit(__AT_RESETTING, &adapter->flags);
112 return 0;
113}
114
115static u32 atl1c_get_tx_csum(struct net_device *netdev)
116{
117 return (netdev->features & NETIF_F_HW_CSUM) != 0;
118}
119
120static u32 atl1c_get_msglevel(struct net_device *netdev)
121{
122 struct atl1c_adapter *adapter = netdev_priv(netdev);
123 return adapter->msg_enable;
124}
125
126static void atl1c_set_msglevel(struct net_device *netdev, u32 data)
127{
128 struct atl1c_adapter *adapter = netdev_priv(netdev);
129 adapter->msg_enable = data;
130}
131
132static int atl1c_get_regs_len(struct net_device *netdev)
133{
134 return AT_REGS_LEN;
135}
136
137static void atl1c_get_regs(struct net_device *netdev,
138 struct ethtool_regs *regs, void *p)
139{
140 struct atl1c_adapter *adapter = netdev_priv(netdev);
141 struct atl1c_hw *hw = &adapter->hw;
142 u32 *regs_buff = p;
143 u16 phy_data;
144
145 memset(p, 0, AT_REGS_LEN);
146
147 regs->version = 0;
148 AT_READ_REG(hw, REG_VPD_CAP, p++);
149 AT_READ_REG(hw, REG_PM_CTRL, p++);
150 AT_READ_REG(hw, REG_MAC_HALF_DUPLX_CTRL, p++);
151 AT_READ_REG(hw, REG_TWSI_CTRL, p++);
152 AT_READ_REG(hw, REG_PCIE_DEV_MISC_CTRL, p++);
153 AT_READ_REG(hw, REG_MASTER_CTRL, p++);
154 AT_READ_REG(hw, REG_MANUAL_TIMER_INIT, p++);
155 AT_READ_REG(hw, REG_IRQ_MODRT_TIMER_INIT, p++);
156 AT_READ_REG(hw, REG_GPHY_CTRL, p++);
157 AT_READ_REG(hw, REG_LINK_CTRL, p++);
158 AT_READ_REG(hw, REG_IDLE_STATUS, p++);
159 AT_READ_REG(hw, REG_MDIO_CTRL, p++);
160 AT_READ_REG(hw, REG_SERDES_LOCK, p++);
161 AT_READ_REG(hw, REG_MAC_CTRL, p++);
162 AT_READ_REG(hw, REG_MAC_IPG_IFG, p++);
163 AT_READ_REG(hw, REG_MAC_STA_ADDR, p++);
164 AT_READ_REG(hw, REG_MAC_STA_ADDR+4, p++);
165 AT_READ_REG(hw, REG_RX_HASH_TABLE, p++);
166 AT_READ_REG(hw, REG_RX_HASH_TABLE+4, p++);
167 AT_READ_REG(hw, REG_RXQ_CTRL, p++);
168 AT_READ_REG(hw, REG_TXQ_CTRL, p++);
169 AT_READ_REG(hw, REG_MTU, p++);
170 AT_READ_REG(hw, REG_WOL_CTRL, p++);
171
172 atl1c_read_phy_reg(hw, MII_BMCR, &phy_data);
173 regs_buff[73] = (u32) phy_data;
174 atl1c_read_phy_reg(hw, MII_BMSR, &phy_data);
175 regs_buff[74] = (u32) phy_data;
176}
177
178static int atl1c_get_eeprom_len(struct net_device *netdev)
179{
180 struct atl1c_adapter *adapter = netdev_priv(netdev);
181
182 if (atl1c_check_eeprom_exist(&adapter->hw))
183 return AT_EEPROM_LEN;
184 else
185 return 0;
186}
187
188static int atl1c_get_eeprom(struct net_device *netdev,
189 struct ethtool_eeprom *eeprom, u8 *bytes)
190{
191 struct atl1c_adapter *adapter = netdev_priv(netdev);
192 struct atl1c_hw *hw = &adapter->hw;
193 u32 *eeprom_buff;
194 int first_dword, last_dword;
195 int ret_val = 0;
196 int i;
197
198 if (eeprom->len == 0)
199 return -EINVAL;
200
201 if (!atl1c_check_eeprom_exist(hw)) /* not exist */
202 return -EINVAL;
203
204 eeprom->magic = adapter->pdev->vendor |
205 (adapter->pdev->device << 16);
206
207 first_dword = eeprom->offset >> 2;
208 last_dword = (eeprom->offset + eeprom->len - 1) >> 2;
209
210 eeprom_buff = kmalloc(sizeof(u32) *
211 (last_dword - first_dword + 1), GFP_KERNEL);
212 if (eeprom_buff == NULL)
213 return -ENOMEM;
214
215	for (i = first_dword; i <= last_dword; i++) {
216 if (!atl1c_read_eeprom(hw, i * 4, &(eeprom_buff[i-first_dword]))) {
217 kfree(eeprom_buff);
218 return -EIO;
219 }
220 }
221
222 memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 3),
223 eeprom->len);
224 kfree(eeprom_buff);
225
226 return ret_val;
227
228}
229
230static void atl1c_get_drvinfo(struct net_device *netdev,
231 struct ethtool_drvinfo *drvinfo)
232{
233 struct atl1c_adapter *adapter = netdev_priv(netdev);
234
235 strncpy(drvinfo->driver, atl1c_driver_name, sizeof(drvinfo->driver));
236 strncpy(drvinfo->version, atl1c_driver_version,
237 sizeof(drvinfo->version));
238 strncpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
239 strncpy(drvinfo->bus_info, pci_name(adapter->pdev),
240 sizeof(drvinfo->bus_info));
241 drvinfo->n_stats = 0;
242 drvinfo->testinfo_len = 0;
243 drvinfo->regdump_len = atl1c_get_regs_len(netdev);
244 drvinfo->eedump_len = atl1c_get_eeprom_len(netdev);
245}
246
247static void atl1c_get_wol(struct net_device *netdev,
248 struct ethtool_wolinfo *wol)
249{
250 struct atl1c_adapter *adapter = netdev_priv(netdev);
251
252 wol->supported = WAKE_MAGIC | WAKE_PHY;
253 wol->wolopts = 0;
254
255 if (adapter->wol & AT_WUFC_EX)
256 wol->wolopts |= WAKE_UCAST;
257 if (adapter->wol & AT_WUFC_MC)
258 wol->wolopts |= WAKE_MCAST;
259 if (adapter->wol & AT_WUFC_BC)
260 wol->wolopts |= WAKE_BCAST;
261 if (adapter->wol & AT_WUFC_MAG)
262 wol->wolopts |= WAKE_MAGIC;
263 if (adapter->wol & AT_WUFC_LNKC)
264 wol->wolopts |= WAKE_PHY;
265
266 return;
267}
268
269static int atl1c_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
270{
271 struct atl1c_adapter *adapter = netdev_priv(netdev);
272
273 if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE |
274			    WAKE_UCAST | WAKE_BCAST | WAKE_MCAST))
275 return -EOPNOTSUPP;
276 /* these settings will always override what we currently have */
277 adapter->wol = 0;
278
279 if (wol->wolopts & WAKE_MAGIC)
280 adapter->wol |= AT_WUFC_MAG;
281 if (wol->wolopts & WAKE_PHY)
282 adapter->wol |= AT_WUFC_LNKC;
283
284 return 0;
285}
286
287static int atl1c_nway_reset(struct net_device *netdev)
288{
289 struct atl1c_adapter *adapter = netdev_priv(netdev);
290 if (netif_running(netdev))
291 atl1c_reinit_locked(adapter);
292 return 0;
293}
294
295static struct ethtool_ops atl1c_ethtool_ops = {
296 .get_settings = atl1c_get_settings,
297 .set_settings = atl1c_set_settings,
298 .get_drvinfo = atl1c_get_drvinfo,
299 .get_regs_len = atl1c_get_regs_len,
300 .get_regs = atl1c_get_regs,
301 .get_wol = atl1c_get_wol,
302 .set_wol = atl1c_set_wol,
303 .get_msglevel = atl1c_get_msglevel,
304 .set_msglevel = atl1c_set_msglevel,
305 .nway_reset = atl1c_nway_reset,
306 .get_link = ethtool_op_get_link,
307 .get_eeprom_len = atl1c_get_eeprom_len,
308 .get_eeprom = atl1c_get_eeprom,
309 .get_tx_csum = atl1c_get_tx_csum,
310 .get_sg = ethtool_op_get_sg,
311 .set_sg = ethtool_op_set_sg,
312};
313
314void atl1c_set_ethtool_ops(struct net_device *netdev)
315{
316 SET_ETHTOOL_OPS(netdev, &atl1c_ethtool_ops);
317}
diff --git a/drivers/net/atl1c/atl1c_hw.c b/drivers/net/atl1c/atl1c_hw.c
new file mode 100644
index 000000000000..3e69b940b8f7
--- /dev/null
+++ b/drivers/net/atl1c/atl1c_hw.c
@@ -0,0 +1,527 @@
1/*
2 * Copyright(c) 2007 Atheros Corporation. All rights reserved.
3 *
4 * Derived from Intel e1000 driver
5 * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License as published by the Free
9 * Software Foundation; either version 2 of the License, or (at your option)
10 * any later version.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc., 59
19 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
20 */
21#include <linux/pci.h>
22#include <linux/delay.h>
23#include <linux/mii.h>
24#include <linux/crc32.h>
25
26#include "atl1c.h"
27
28/*
29 * check_eeprom_exist
30 * return 1 if the eeprom exists
31 */
32int atl1c_check_eeprom_exist(struct atl1c_hw *hw)
33{
34 u32 data;
35
36 AT_READ_REG(hw, REG_TWSI_DEBUG, &data);
37 if (data & TWSI_DEBUG_DEV_EXIST)
38 return 1;
39
40 return 0;
41}
42
43void atl1c_hw_set_mac_addr(struct atl1c_hw *hw)
44{
45 u32 value;
46 /*
47	 * e.g. for MAC address 00-0B-6A-F6-00-DC:
48	 * word 0 = 6AF600DC (low dword), word 1 = 000B (high word)
49	 * the low dword is written first
50 */
51 value = (((u32)hw->mac_addr[2]) << 24) |
52 (((u32)hw->mac_addr[3]) << 16) |
53 (((u32)hw->mac_addr[4]) << 8) |
54 (((u32)hw->mac_addr[5])) ;
55 AT_WRITE_REG_ARRAY(hw, REG_MAC_STA_ADDR, 0, value);
56	/* high dword */
57 value = (((u32)hw->mac_addr[0]) << 8) |
58 (((u32)hw->mac_addr[1])) ;
59 AT_WRITE_REG_ARRAY(hw, REG_MAC_STA_ADDR, 1, value);
60}
61
62/*
63 * atl1c_get_permanent_address
64 * return 0 if a valid MAC address was obtained
65 */
66static int atl1c_get_permanent_address(struct atl1c_hw *hw)
67{
68 u32 addr[2];
69 u32 i;
70 u32 otp_ctrl_data;
71 u32 twsi_ctrl_data;
72 u8 eth_addr[ETH_ALEN];
73
74 /* init */
75 addr[0] = addr[1] = 0;
76 AT_READ_REG(hw, REG_OTP_CTRL, &otp_ctrl_data);
77 if (atl1c_check_eeprom_exist(hw)) {
78 /* Enable OTP CLK */
79 if (!(otp_ctrl_data & OTP_CTRL_CLK_EN)) {
80 otp_ctrl_data |= OTP_CTRL_CLK_EN;
81 AT_WRITE_REG(hw, REG_OTP_CTRL, otp_ctrl_data);
82 AT_WRITE_FLUSH(hw);
83 msleep(1);
84 }
85
86 AT_READ_REG(hw, REG_TWSI_CTRL, &twsi_ctrl_data);
87 twsi_ctrl_data |= TWSI_CTRL_SW_LDSTART;
88 AT_WRITE_REG(hw, REG_TWSI_CTRL, twsi_ctrl_data);
89 for (i = 0; i < AT_TWSI_EEPROM_TIMEOUT; i++) {
90 msleep(10);
91 AT_READ_REG(hw, REG_TWSI_CTRL, &twsi_ctrl_data);
92 if ((twsi_ctrl_data & TWSI_CTRL_SW_LDSTART) == 0)
93 break;
94 }
95 if (i >= AT_TWSI_EEPROM_TIMEOUT)
96 return -1;
97 }
98 /* Disable OTP_CLK */
99 if (otp_ctrl_data & OTP_CTRL_CLK_EN) {
100 otp_ctrl_data &= ~OTP_CTRL_CLK_EN;
101 AT_WRITE_REG(hw, REG_OTP_CTRL, otp_ctrl_data);
102 AT_WRITE_FLUSH(hw);
103 msleep(1);
104 }
105
106 /* maybe MAC-address is from BIOS */
107 AT_READ_REG(hw, REG_MAC_STA_ADDR, &addr[0]);
108 AT_READ_REG(hw, REG_MAC_STA_ADDR + 4, &addr[1]);
109 *(u32 *) &eth_addr[2] = swab32(addr[0]);
110 *(u16 *) &eth_addr[0] = swab16(*(u16 *)&addr[1]);
111
112 if (is_valid_ether_addr(eth_addr)) {
113 memcpy(hw->perm_mac_addr, eth_addr, ETH_ALEN);
114 return 0;
115 }
116
117 return -1;
118}
119
120bool atl1c_read_eeprom(struct atl1c_hw *hw, u32 offset, u32 *p_value)
121{
122 int i;
123 int ret = false;
124 u32 otp_ctrl_data;
125 u32 control;
126 u32 data;
127
128 if (offset & 3)
129		return ret; /* address is not aligned */
130
131 AT_READ_REG(hw, REG_OTP_CTRL, &otp_ctrl_data);
132 if (!(otp_ctrl_data & OTP_CTRL_CLK_EN))
133 AT_WRITE_REG(hw, REG_OTP_CTRL,
134 (otp_ctrl_data | OTP_CTRL_CLK_EN));
135
136 AT_WRITE_REG(hw, REG_EEPROM_DATA_LO, 0);
137 control = (offset & EEPROM_CTRL_ADDR_MASK) << EEPROM_CTRL_ADDR_SHIFT;
138 AT_WRITE_REG(hw, REG_EEPROM_CTRL, control);
139
140 for (i = 0; i < 10; i++) {
141 udelay(100);
142 AT_READ_REG(hw, REG_EEPROM_CTRL, &control);
143 if (control & EEPROM_CTRL_RW)
144 break;
145 }
146 if (control & EEPROM_CTRL_RW) {
147 AT_READ_REG(hw, REG_EEPROM_CTRL, &data);
148 AT_READ_REG(hw, REG_EEPROM_DATA_LO, p_value);
149 data = data & 0xFFFF;
150 *p_value = swab32((data << 16) | (*p_value >> 16));
151 ret = true;
152 }
153 if (!(otp_ctrl_data & OTP_CTRL_CLK_EN))
154 AT_WRITE_REG(hw, REG_OTP_CTRL, otp_ctrl_data);
155
156 return ret;
157}
158/*
159 * Reads the adapter's MAC address from the EEPROM
160 *
161 * hw - Struct containing variables accessed by shared code
162 */
163int atl1c_read_mac_addr(struct atl1c_hw *hw)
164{
165 int err = 0;
166
167 err = atl1c_get_permanent_address(hw);
168 if (err)
169 random_ether_addr(hw->perm_mac_addr);
170
171 memcpy(hw->mac_addr, hw->perm_mac_addr, sizeof(hw->perm_mac_addr));
172 return 0;
173}
174
175/*
176 * atl1c_hash_mc_addr
177 * purpose
178 *    compute the hash value for a multicast address
179 *  hash calculation:
180 *      1. calculate a 32-bit CRC of the multicast address
181 *      2. reverse the CRC bit order (MSB to LSB)
182 */
183u32 atl1c_hash_mc_addr(struct atl1c_hw *hw, u8 *mc_addr)
184{
185 u32 crc32;
186 u32 value = 0;
187 int i;
188
189 crc32 = ether_crc_le(6, mc_addr);
190 for (i = 0; i < 32; i++)
191 value |= (((crc32 >> i) & 1) << (31 - i));
192
193 return value;
194}
195
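For reference, the two hash helpers compose like this in a set-multicast path (mc_addr is a hypothetical 6-byte multicast address; the real caller is in atl1c_main.c, not shown here):

	u32 hash = atl1c_hash_mc_addr(hw, mc_addr);

	atl1c_hash_set(hw, hash);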
196/*
197 * Sets the bit in the multicast table corresponding to the hash value.
198 * hw - Struct containing variables accessed by shared code
199 * hash_value - Multicast address hash value
200 */
201void atl1c_hash_set(struct atl1c_hw *hw, u32 hash_value)
202{
203 u32 hash_bit, hash_reg;
204 u32 mta;
205
206 /*
207 * The HASH Table is a register array of 2 32-bit registers.
208 * It is treated like an array of 64 bits. We want to set
209 * bit BitArray[hash_value]. So we figure out what register
210 * the bit is in, read it, OR in the new bit, then write
211	 * back the new value.  The register is selected by the
212	 * top bit of the hash value and the bit within that
213	 * register is selected by the next 5 bits (30:26) of the value.
214 */
215 hash_reg = (hash_value >> 31) & 0x1;
216 hash_bit = (hash_value >> 26) & 0x1F;
217
218 mta = AT_READ_REG_ARRAY(hw, REG_RX_HASH_TABLE, hash_reg);
219
220 mta |= (1 << hash_bit);
221
222 AT_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, hash_reg, mta);
223}
224
225/*
226 * Reads the value from a PHY register
227 * hw - Struct containing variables accessed by shared code
228 * reg_addr - address of the PHY register to read
229 */
230int atl1c_read_phy_reg(struct atl1c_hw *hw, u16 reg_addr, u16 *phy_data)
231{
232 u32 val;
233 int i;
234
235 val = ((u32)(reg_addr & MDIO_REG_ADDR_MASK)) << MDIO_REG_ADDR_SHIFT |
236 MDIO_START | MDIO_SUP_PREAMBLE | MDIO_RW |
237 MDIO_CLK_25_4 << MDIO_CLK_SEL_SHIFT;
238
239 AT_WRITE_REG(hw, REG_MDIO_CTRL, val);
240
241 for (i = 0; i < MDIO_WAIT_TIMES; i++) {
242 udelay(2);
243 AT_READ_REG(hw, REG_MDIO_CTRL, &val);
244 if (!(val & (MDIO_START | MDIO_BUSY)))
245 break;
246 }
247 if (!(val & (MDIO_START | MDIO_BUSY))) {
248 *phy_data = (u16)val;
249 return 0;
250 }
251
252 return -1;
253}
254
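A minimal usage sketch for the MDIO helper above, e.g. polling link status through the standard MII status register (the constants come from <linux/mii.h>, which atl1c.h already includes):

	u16 bmsr;

	if (atl1c_read_phy_reg(hw, MII_BMSR, &bmsr) == 0 &&
	    (bmsr & BMSR_LSTATUS)) {
		/* link is up */
	}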
255/*
256 * Writes a value to a PHY register
257 * hw - Struct containing variables accessed by shared code
258 * reg_addr - address of the PHY register to write
259 * data - data to write to the PHY
260 */
261int atl1c_write_phy_reg(struct atl1c_hw *hw, u32 reg_addr, u16 phy_data)
262{
263 int i;
264 u32 val;
265
266 val = ((u32)(phy_data & MDIO_DATA_MASK)) << MDIO_DATA_SHIFT |
267 (reg_addr & MDIO_REG_ADDR_MASK) << MDIO_REG_ADDR_SHIFT |
268 MDIO_SUP_PREAMBLE | MDIO_START |
269 MDIO_CLK_25_4 << MDIO_CLK_SEL_SHIFT;
270
271 AT_WRITE_REG(hw, REG_MDIO_CTRL, val);
272
273 for (i = 0; i < MDIO_WAIT_TIMES; i++) {
274 udelay(2);
275 AT_READ_REG(hw, REG_MDIO_CTRL, &val);
276 if (!(val & (MDIO_START | MDIO_BUSY)))
277 break;
278 }
279
280 if (!(val & (MDIO_START | MDIO_BUSY)))
281 return 0;
282
283 return -1;
284}
285
286/*
287 * Configures PHY autoneg and flow control advertisement settings
288 *
289 * hw - Struct containing variables accessed by shared code
290 */
291static int atl1c_phy_setup_adv(struct atl1c_hw *hw)
292{
293 u16 mii_adv_data = ADVERTISE_DEFAULT_CAP & ~ADVERTISE_SPEED_MASK;
294 u16 mii_giga_ctrl_data = GIGA_CR_1000T_DEFAULT_CAP &
295 ~GIGA_CR_1000T_SPEED_MASK;
296
297 if (hw->autoneg_advertised & ADVERTISED_10baseT_Half)
298 mii_adv_data |= ADVERTISE_10HALF;
299 if (hw->autoneg_advertised & ADVERTISED_10baseT_Full)
300 mii_adv_data |= ADVERTISE_10FULL;
301 if (hw->autoneg_advertised & ADVERTISED_100baseT_Half)
302 mii_adv_data |= ADVERTISE_100HALF;
303 if (hw->autoneg_advertised & ADVERTISED_100baseT_Full)
304 mii_adv_data |= ADVERTISE_100FULL;
305
306 if (hw->autoneg_advertised & ADVERTISED_Autoneg)
307 mii_adv_data |= ADVERTISE_10HALF | ADVERTISE_10FULL |
308 ADVERTISE_100HALF | ADVERTISE_100FULL;
309
310 if (hw->ctrl_flags & ATL1C_LINK_CAP_1000M) {
311 if (hw->autoneg_advertised & ADVERTISED_1000baseT_Half)
312 mii_giga_ctrl_data |= ADVERTISE_1000HALF;
313 if (hw->autoneg_advertised & ADVERTISED_1000baseT_Full)
314 mii_giga_ctrl_data |= ADVERTISE_1000FULL;
315 if (hw->autoneg_advertised & ADVERTISED_Autoneg)
316 mii_giga_ctrl_data |= ADVERTISE_1000HALF |
317 ADVERTISE_1000FULL;
318 }
319
320 if (atl1c_write_phy_reg(hw, MII_ADVERTISE, mii_adv_data) != 0 ||
321 atl1c_write_phy_reg(hw, MII_GIGA_CR, mii_giga_ctrl_data) != 0)
322 return -1;
323 return 0;
324}
325
326void atl1c_phy_disable(struct atl1c_hw *hw)
327{
328 AT_WRITE_REGW(hw, REG_GPHY_CTRL,
329 GPHY_CTRL_PW_WOL_DIS | GPHY_CTRL_EXT_RESET);
330}
331
332static void atl1c_phy_magic_data(struct atl1c_hw *hw)
333{
334 u16 data;
335
336 data = ANA_LOOP_SEL_10BT | ANA_EN_MASK_TB | ANA_EN_10BT_IDLE |
337 ((1 & ANA_INTERVAL_SEL_TIMER_MASK) <<
338 ANA_INTERVAL_SEL_TIMER_SHIFT);
339
340 atl1c_write_phy_reg(hw, MII_DBG_ADDR, MII_ANA_CTRL_18);
341 atl1c_write_phy_reg(hw, MII_DBG_DATA, data);
342
343 data = (2 & ANA_SERDES_CDR_BW_MASK) | ANA_MS_PAD_DBG |
344 ANA_SERDES_EN_DEEM | ANA_SERDES_SEL_HSP | ANA_SERDES_EN_PLL |
345 ANA_SERDES_EN_LCKDT;
346
347 atl1c_write_phy_reg(hw, MII_DBG_ADDR, MII_ANA_CTRL_5);
348 atl1c_write_phy_reg(hw, MII_DBG_DATA, data);
349
350 data = (44 & ANA_LONG_CABLE_TH_100_MASK) |
351 ((33 & ANA_SHORT_CABLE_TH_100_MASK) <<
352 ANA_SHORT_CABLE_TH_100_SHIFT) | ANA_BP_BAD_LINK_ACCUM |
353 ANA_BP_SMALL_BW;
354
355 atl1c_write_phy_reg(hw, MII_DBG_ADDR, MII_ANA_CTRL_54);
356 atl1c_write_phy_reg(hw, MII_DBG_DATA, data);
357
358 data = (11 & ANA_IECHO_ADJ_MASK) | ((11 & ANA_IECHO_ADJ_MASK) <<
359 ANA_IECHO_ADJ_2_SHIFT) | ((8 & ANA_IECHO_ADJ_MASK) <<
360 ANA_IECHO_ADJ_1_SHIFT) | ((8 & ANA_IECHO_ADJ_MASK) <<
361 ANA_IECHO_ADJ_0_SHIFT);
362
363 atl1c_write_phy_reg(hw, MII_DBG_ADDR, MII_ANA_CTRL_4);
364 atl1c_write_phy_reg(hw, MII_DBG_DATA, data);
365
366 data = ANA_RESTART_CAL | ((7 & ANA_MANUL_SWICH_ON_MASK) <<
367 ANA_MANUL_SWICH_ON_SHIFT) | ANA_MAN_ENABLE |
368 ANA_SEL_HSP | ANA_EN_HB | ANA_OEN_125M;
369
370 atl1c_write_phy_reg(hw, MII_DBG_ADDR, MII_ANA_CTRL_0);
371 atl1c_write_phy_reg(hw, MII_DBG_DATA, data);
372
373 if (hw->ctrl_flags & ATL1C_HIB_DISABLE) {
374 atl1c_write_phy_reg(hw, MII_DBG_ADDR, MII_ANA_CTRL_41);
375 if (atl1c_read_phy_reg(hw, MII_DBG_DATA, &data) != 0)
376 return;
377 data &= ~ANA_TOP_PS_EN;
378 atl1c_write_phy_reg(hw, MII_DBG_DATA, data);
379
380 atl1c_write_phy_reg(hw, MII_DBG_ADDR, MII_ANA_CTRL_11);
381 if (atl1c_read_phy_reg(hw, MII_DBG_DATA, &data) != 0)
382 return;
383 data &= ~ANA_PS_HIB_EN;
384 atl1c_write_phy_reg(hw, MII_DBG_DATA, data);
385 }
386}
387
388int atl1c_phy_reset(struct atl1c_hw *hw)
389{
390 struct atl1c_adapter *adapter = hw->adapter;
391 struct pci_dev *pdev = adapter->pdev;
392 u32 phy_ctrl_data = GPHY_CTRL_DEFAULT;
393 u32 mii_ier_data = IER_LINK_UP | IER_LINK_DOWN;
394 int err;
395
396 if (hw->ctrl_flags & ATL1C_HIB_DISABLE)
397 phy_ctrl_data &= ~GPHY_CTRL_HIB_EN;
398
399 AT_WRITE_REG(hw, REG_GPHY_CTRL, phy_ctrl_data);
400 AT_WRITE_FLUSH(hw);
401 msleep(40);
402 phy_ctrl_data |= GPHY_CTRL_EXT_RESET;
403 AT_WRITE_REG(hw, REG_GPHY_CTRL, phy_ctrl_data);
404 AT_WRITE_FLUSH(hw);
405 msleep(10);
406
407	/* Enable PHY link change interrupt */
408 err = atl1c_write_phy_reg(hw, MII_IER, mii_ier_data);
409 if (err) {
410 if (netif_msg_hw(adapter))
411 dev_err(&pdev->dev,
412 "Error enable PHY linkChange Interrupt\n");
413 return err;
414 }
415 if (!(hw->ctrl_flags & ATL1C_FPGA_VERSION))
416 atl1c_phy_magic_data(hw);
417 return 0;
418}
419
420int atl1c_phy_init(struct atl1c_hw *hw)
421{
422 struct atl1c_adapter *adapter = (struct atl1c_adapter *)hw->adapter;
423 struct pci_dev *pdev = adapter->pdev;
424 int ret_val;
425 u16 mii_bmcr_data = BMCR_RESET;
426 u16 phy_id1, phy_id2;
427
428 if ((atl1c_read_phy_reg(hw, MII_PHYSID1, &phy_id1) != 0) ||
429 (atl1c_read_phy_reg(hw, MII_PHYSID2, &phy_id2) != 0)) {
430 if (netif_msg_link(adapter))
431			dev_err(&pdev->dev, "Error getting PHY ID\n");
432 return -1;
433 }
434 switch (hw->media_type) {
435 case MEDIA_TYPE_AUTO_SENSOR:
436 ret_val = atl1c_phy_setup_adv(hw);
437 if (ret_val) {
438 if (netif_msg_link(adapter))
439 dev_err(&pdev->dev,
440 "Error Setting up Auto-Negotiation\n");
441 return ret_val;
442 }
443 mii_bmcr_data |= BMCR_AUTO_NEG_EN | BMCR_RESTART_AUTO_NEG;
444 break;
445 case MEDIA_TYPE_100M_FULL:
446 mii_bmcr_data |= BMCR_SPEED_100 | BMCR_FULL_DUPLEX;
447 break;
448 case MEDIA_TYPE_100M_HALF:
449 mii_bmcr_data |= BMCR_SPEED_100;
450 break;
451 case MEDIA_TYPE_10M_FULL:
452 mii_bmcr_data |= BMCR_SPEED_10 | BMCR_FULL_DUPLEX;
453 break;
454 case MEDIA_TYPE_10M_HALF:
455 mii_bmcr_data |= BMCR_SPEED_10;
456 break;
457 default:
458 if (netif_msg_link(adapter))
459 dev_err(&pdev->dev, "Wrong Media type %d\n",
460 hw->media_type);
461 return -1;
462 break;
463 }
464
465 ret_val = atl1c_write_phy_reg(hw, MII_BMCR, mii_bmcr_data);
466 if (ret_val)
467 return ret_val;
468 hw->phy_configured = true;
469
470 return 0;
471}
472
473/*
474 * Detects the current speed and duplex settings of the hardware.
475 *
476 * hw - Struct containing variables accessed by shared code
477 * speed - Speed of the connection
478 * duplex - Duplex setting of the connection
479 */
480int atl1c_get_speed_and_duplex(struct atl1c_hw *hw, u16 *speed, u16 *duplex)
481{
482 int err;
483 u16 phy_data;
484
485 /* Read PHY Specific Status Register (17) */
486 err = atl1c_read_phy_reg(hw, MII_GIGA_PSSR, &phy_data);
487 if (err)
488 return err;
489
490 if (!(phy_data & GIGA_PSSR_SPD_DPLX_RESOLVED))
491 return -1;
492
493 switch (phy_data & GIGA_PSSR_SPEED) {
494 case GIGA_PSSR_1000MBS:
495 *speed = SPEED_1000;
496 break;
497 case GIGA_PSSR_100MBS:
498 *speed = SPEED_100;
499 break;
500 case GIGA_PSSR_10MBS:
501 *speed = SPEED_10;
502 break;
503 default:
504 return -1;
505 break;
506 }
507
508 if (phy_data & GIGA_PSSR_DPLX)
509 *duplex = FULL_DUPLEX;
510 else
511 *duplex = HALF_DUPLEX;
512
513 return 0;
514}
515
516int atl1c_restart_autoneg(struct atl1c_hw *hw)
517{
518 int err = 0;
519 u16 mii_bmcr_data = BMCR_RESET;
520
521 err = atl1c_phy_setup_adv(hw);
522 if (err)
523 return err;
524 mii_bmcr_data |= BMCR_AUTO_NEG_EN | BMCR_RESTART_AUTO_NEG;
525
526 return atl1c_write_phy_reg(hw, MII_BMCR, mii_bmcr_data);
527}
diff --git a/drivers/net/atl1c/atl1c_hw.h b/drivers/net/atl1c/atl1c_hw.h
new file mode 100644
index 000000000000..c2c738df5c63
--- /dev/null
+++ b/drivers/net/atl1c/atl1c_hw.h
@@ -0,0 +1,859 @@
1/*
2 * Copyright(c) 2008 - 2009 Atheros Corporation. All rights reserved.
3 *
4 * Derived from Intel e1000 driver
5 * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License as published by the Free
9 * Software Foundation; either version 2 of the License, or (at your option)
10 * any later version.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc., 59
19 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
20 */
21
22#ifndef _ATL1C_HW_H_
23#define _ATL1C_HW_H_
24
25#include <linux/types.h>
26#include <linux/mii.h>
27
28struct atl1c_adapter;
29struct atl1c_hw;
30
31/* function prototype */
32void atl1c_phy_disable(struct atl1c_hw *hw);
33void atl1c_hw_set_mac_addr(struct atl1c_hw *hw);
34int atl1c_phy_reset(struct atl1c_hw *hw);
35int atl1c_read_mac_addr(struct atl1c_hw *hw);
36int atl1c_get_speed_and_duplex(struct atl1c_hw *hw, u16 *speed, u16 *duplex);
37u32 atl1c_hash_mc_addr(struct atl1c_hw *hw, u8 *mc_addr);
38void atl1c_hash_set(struct atl1c_hw *hw, u32 hash_value);
39int atl1c_read_phy_reg(struct atl1c_hw *hw, u16 reg_addr, u16 *phy_data);
40int atl1c_write_phy_reg(struct atl1c_hw *hw, u32 reg_addr, u16 phy_data);
41bool atl1c_read_eeprom(struct atl1c_hw *hw, u32 offset, u32 *p_value);
42int atl1c_phy_init(struct atl1c_hw *hw);
43int atl1c_check_eeprom_exist(struct atl1c_hw *hw);
44int atl1c_restart_autoneg(struct atl1c_hw *hw);
45
46/* register definition */
47#define REG_DEVICE_CAP 0x5C
48#define DEVICE_CAP_MAX_PAYLOAD_MASK 0x7
49#define DEVICE_CAP_MAX_PAYLOAD_SHIFT 0
50
51#define REG_DEVICE_CTRL 0x60
52#define DEVICE_CTRL_MAX_PAYLOAD_MASK 0x7
53#define DEVICE_CTRL_MAX_PAYLOAD_SHIFT 5
54#define DEVICE_CTRL_MAX_RREQ_SZ_MASK 0x7
55#define DEVICE_CTRL_MAX_RREQ_SZ_SHIFT 12
56
57#define REG_LINK_CTRL 0x68
58#define LINK_CTRL_L0S_EN 0x01
59#define LINK_CTRL_L1_EN 0x02
60
61#define REG_VPD_CAP 0x6C
62#define VPD_CAP_ID_MASK 0xff
63#define VPD_CAP_ID_SHIFT 0
64#define VPD_CAP_NEXT_PTR_MASK 0xFF
65#define VPD_CAP_NEXT_PTR_SHIFT 8
66#define VPD_CAP_VPD_ADDR_MASK 0x7FFF
67#define VPD_CAP_VPD_ADDR_SHIFT 16
68#define VPD_CAP_VPD_FLAG 0x80000000
69
70#define REG_VPD_DATA 0x70
71
72#define REG_PCIE_UC_SEVERITY 0x10C
73#define PCIE_UC_SERVRITY_TRN 0x00000001
74#define PCIE_UC_SERVRITY_DLP 0x00000010
75#define PCIE_UC_SERVRITY_PSN_TLP 0x00001000
76#define PCIE_UC_SERVRITY_FCP 0x00002000
77#define PCIE_UC_SERVRITY_CPL_TO 0x00004000
78#define PCIE_UC_SERVRITY_CA 0x00008000
79#define PCIE_UC_SERVRITY_UC 0x00010000
80#define PCIE_UC_SERVRITY_ROV 0x00020000
81#define PCIE_UC_SERVRITY_MLFP 0x00040000
82#define PCIE_UC_SERVRITY_ECRC 0x00080000
83#define PCIE_UC_SERVRITY_UR 0x00100000
84
85#define REG_DEV_SERIALNUM_CTRL 0x200
86#define REG_DEV_MAC_SEL_MASK 0x0 /* 0:EUI; 1:MAC */
87#define REG_DEV_MAC_SEL_SHIFT 0
88#define REG_DEV_SERIAL_NUM_EN_MASK 0x1
89#define REG_DEV_SERIAL_NUM_EN_SHIFT 1
90
91#define REG_TWSI_CTRL 0x218
92#define TWSI_CTRL_LD_OFFSET_MASK 0xFF
93#define TWSI_CTRL_LD_OFFSET_SHIFT 0
94#define TWSI_CTRL_LD_SLV_ADDR_MASK 0x7
95#define TWSI_CTRL_LD_SLV_ADDR_SHIFT 8
96#define TWSI_CTRL_SW_LDSTART 0x800
97#define TWSI_CTRL_HW_LDSTART 0x1000
98#define TWSI_CTRL_SMB_SLV_ADDR_MASK 0x7F
99#define TWSI_CTRL_SMB_SLV_ADDR_SHIFT 15
100#define TWSI_CTRL_LD_EXIST 0x400000
101#define TWSI_CTRL_READ_FREQ_SEL_MASK 0x3
102#define TWSI_CTRL_READ_FREQ_SEL_SHIFT 23
103#define TWSI_CTRL_FREQ_SEL_100K 0
104#define TWSI_CTRL_FREQ_SEL_200K 1
105#define TWSI_CTRL_FREQ_SEL_300K 2
106#define TWSI_CTRL_FREQ_SEL_400K 3
107#define TWSI_CTRL_SMB_SLV_ADDR
108#define TWSI_CTRL_WRITE_FREQ_SEL_MASK 0x3
109#define TWSI_CTRL_WRITE_FREQ_SEL_SHIFT 24
110
111
112#define REG_PCIE_DEV_MISC_CTRL 0x21C
113#define PCIE_DEV_MISC_EXT_PIPE 0x2
114#define PCIE_DEV_MISC_RETRY_BUFDIS 0x1
115#define PCIE_DEV_MISC_SPIROM_EXIST 0x4
116#define PCIE_DEV_MISC_SERDES_ENDIAN 0x8
117#define PCIE_DEV_MISC_SERDES_SEL_DIN 0x10
118
119#define REG_PCIE_PHYMISC 0x1000
120#define PCIE_PHYMISC_FORCE_RCV_DET 0x4
121
122#define REG_TWSI_DEBUG 0x1108
123#define TWSI_DEBUG_DEV_EXIST 0x20000000
124
125#define REG_EEPROM_CTRL 0x12C0
126#define EEPROM_CTRL_DATA_HI_MASK 0xFFFF
127#define EEPROM_CTRL_DATA_HI_SHIFT 0
128#define EEPROM_CTRL_ADDR_MASK 0x3FF
129#define EEPROM_CTRL_ADDR_SHIFT 16
130#define EEPROM_CTRL_ACK 0x40000000
131#define EEPROM_CTRL_RW 0x80000000
132
133#define REG_EEPROM_DATA_LO 0x12C4
134
135#define REG_OTP_CTRL 0x12F0
136#define OTP_CTRL_CLK_EN 0x0002
137
138#define REG_PM_CTRL 0x12F8
139#define PM_CTRL_SDES_EN 0x00000001
140#define PM_CTRL_RBER_EN 0x00000002
141#define PM_CTRL_CLK_REQ_EN 0x00000004
142#define PM_CTRL_ASPM_L1_EN 0x00000008
143#define PM_CTRL_SERDES_L1_EN 0x00000010
144#define PM_CTRL_SERDES_PLL_L1_EN 0x00000020
145#define PM_CTRL_SERDES_PD_EX_L1 0x00000040
146#define PM_CTRL_SERDES_BUDS_RX_L1_EN 0x00000080
147#define PM_CTRL_L0S_ENTRY_TIMER_MASK 0xF
148#define PM_CTRL_L0S_ENTRY_TIMER_SHIFT 8
149#define PM_CTRL_ASPM_L0S_EN 0x00001000
150#define PM_CTRL_CLK_SWH_L1 0x00002000
151#define PM_CTRL_CLK_PWM_VER1_1 0x00004000
152#define PM_CTRL_PCIE_RECV 0x00008000
153#define PM_CTRL_L1_ENTRY_TIMER_MASK 0xF
154#define PM_CTRL_L1_ENTRY_TIMER_SHIFT 16
155#define PM_CTRL_PM_REQ_TIMER_MASK 0xF
156#define PM_CTRL_PM_REQ_TIMER_SHIFT 20
157#define PM_CTRL_LCKDET_TIMER_MASK 0x3F
158#define PM_CTRL_LCKDET_TIMER_SHIFT 24
159#define PM_CTRL_MAC_ASPM_CHK 0x40000000
160#define PM_CTRL_HOTRST 0x80000000
161
162/* Selene Master Control Register */
163#define REG_MASTER_CTRL 0x1400
164#define MASTER_CTRL_SOFT_RST 0x1
165#define MASTER_CTRL_TEST_MODE_MASK 0x3
166#define MASTER_CTRL_TEST_MODE_SHIFT 2
167#define MASTER_CTRL_BERT_START 0x10
168#define MASTER_CTRL_MTIMER_EN 0x100
169#define MASTER_CTRL_MANUAL_INT 0x200
170#define MASTER_CTRL_TX_ITIMER_EN 0x400
171#define MASTER_CTRL_RX_ITIMER_EN 0x800
172#define MASTER_CTRL_CLK_SEL_DIS 0x1000
173#define MASTER_CTRL_CLK_SWH_MODE 0x2000
174#define MASTER_CTRL_INT_RDCLR 0x4000
175#define MASTER_CTRL_REV_NUM_SHIFT 16
176#define MASTER_CTRL_REV_NUM_MASK 0xff
177#define MASTER_CTRL_DEV_ID_SHIFT 24
178#define MASTER_CTRL_DEV_ID_MASK 0x7f
179#define MASTER_CTRL_OTP_SEL 0x80000000
180
181/* Timer Initial Value Register */
182#define REG_MANUAL_TIMER_INIT 0x1404
183
184/* IRQ ModeratorTimer Initial Value Register */
185#define REG_IRQ_MODRT_TIMER_INIT 0x1408
186#define IRQ_MODRT_TIMER_MASK 0xffff
187#define IRQ_MODRT_TX_TIMER_SHIFT 0
188#define IRQ_MODRT_RX_TIMER_SHIFT 16
189
190#define REG_GPHY_CTRL 0x140C
191#define GPHY_CTRL_EXT_RESET 0x1
192#define GPHY_CTRL_RTL_MODE 0x2
193#define GPHY_CTRL_LED_MODE 0x4
194#define GPHY_CTRL_ANEG_NOW 0x8
195#define GPHY_CTRL_REV_ANEG 0x10
196#define GPHY_CTRL_GATE_25M_EN 0x20
197#define GPHY_CTRL_LPW_EXIT 0x40
198#define GPHY_CTRL_PHY_IDDQ 0x80
199#define GPHY_CTRL_PHY_IDDQ_DIS 0x100
200#define GPHY_CTRL_GIGA_DIS 0x200
201#define GPHY_CTRL_HIB_EN 0x400
202#define GPHY_CTRL_HIB_PULSE 0x800
203#define GPHY_CTRL_SEL_ANA_RST 0x1000
204#define GPHY_CTRL_PHY_PLL_ON 0x2000
205#define GPHY_CTRL_PWDOWN_HW 0x4000
206#define GPHY_CTRL_PHY_PLL_BYPASS 0x8000
207
208#define GPHY_CTRL_DEFAULT ( \
209 GPHY_CTRL_SEL_ANA_RST |\
210 GPHY_CTRL_HIB_PULSE |\
211 GPHY_CTRL_HIB_EN)
212
213#define GPHY_CTRL_PW_WOL_DIS ( \
214 GPHY_CTRL_SEL_ANA_RST |\
215 GPHY_CTRL_HIB_PULSE |\
216 GPHY_CTRL_HIB_EN |\
217 GPHY_CTRL_PWDOWN_HW |\
218 GPHY_CTRL_PHY_IDDQ)
219
220/* Block IDLE Status Register */
221#define REG_IDLE_STATUS 0x1410
222#define IDLE_STATUS_MASK 0x00FF
223#define IDLE_STATUS_RXMAC_NO_IDLE 0x1
224#define IDLE_STATUS_TXMAC_NO_IDLE 0x2
225#define IDLE_STATUS_RXQ_NO_IDLE 0x4
226#define IDLE_STATUS_TXQ_NO_IDLE 0x8
227#define IDLE_STATUS_DMAR_NO_IDLE 0x10
228#define IDLE_STATUS_DMAW_NO_IDLE 0x20
229#define IDLE_STATUS_SMB_NO_IDLE 0x40
230#define IDLE_STATUS_CMB_NO_IDLE 0x80
231
232/* MDIO Control Register */
233#define REG_MDIO_CTRL 0x1414
234#define MDIO_DATA_MASK 0xffff /* On MDIO write, the 16-bit
235 * control data to write to PHY
236 * MII management register */
237#define MDIO_DATA_SHIFT 0 /* On MDIO read, the 16-bit
238 * status data that was read
239 * from the PHY MII management register */
240#define MDIO_REG_ADDR_MASK 0x1f /* MDIO register address */
241#define MDIO_REG_ADDR_SHIFT 16
242#define MDIO_RW 0x200000 /* 1: read, 0: write */
243#define MDIO_SUP_PREAMBLE 0x400000 /* Suppress preamble */
244#define MDIO_START 0x800000 /* Write 1 to initiate the MDIO
245 * master. And this bit is self
246 * cleared after one cycle */
247#define MDIO_CLK_SEL_SHIFT 24
248#define MDIO_CLK_25_4 0
249#define MDIO_CLK_25_6 2
250#define MDIO_CLK_25_8 3
251#define MDIO_CLK_25_10 4
252#define MDIO_CLK_25_14 5
253#define MDIO_CLK_25_20 6
254#define MDIO_CLK_25_28 7
255#define MDIO_BUSY 0x8000000
256#define MDIO_AP_EN 0x10000000
257#define MDIO_WAIT_TIMES 10
258
259/* MII PHY Status Register */
260#define REG_PHY_STATUS 0x1418
261#define PHY_GENERAL_STATUS_MASK 0xFFFF
262#define PHY_STATUS_RECV_ENABLE 0x0001
263#define PHY_OE_PWSP_STATUS_MASK 0x07FF
264#define PHY_OE_PWSP_STATUS_SHIFT 16
265#define PHY_STATUS_LPW_STATE 0x80000000
266/* BIST Control and Status Register0 (for the Packet Memory) */
267#define REG_BIST0_CTRL 0x141c
268#define BIST0_NOW 0x1
269#define BIST0_SRAM_FAIL 0x2 /* 1: The SRAM failure is
270 * un-repairable because
271 * it has address decoder
272 * failure or more than 1 cell
273 * stuck-to-x failure */
274#define BIST0_FUSE_FLAG 0x4
275
276/* BIST Control and Status Register1(for the retry buffer of PCI Express) */
277#define REG_BIST1_CTRL 0x1420
278#define BIST1_NOW 0x1
279#define BIST1_SRAM_FAIL 0x2
280#define BIST1_FUSE_FLAG 0x4
281
282/* SerDes Lock Detect Control and Status Register */
283#define REG_SERDES_LOCK 0x1424
284#define SERDES_LOCK_DETECT 0x1 /* SerDes lock detected. This signal
285 * comes from Analog SerDes */
286#define SERDES_LOCK_DETECT_EN 0x2 /* 1: Enable SerDes Lock detect function */
287
288/* MAC Control Register */
289#define REG_MAC_CTRL 0x1480
290#define MAC_CTRL_TX_EN 0x1
291#define MAC_CTRL_RX_EN 0x2
292#define MAC_CTRL_TX_FLOW 0x4
293#define MAC_CTRL_RX_FLOW 0x8
294#define MAC_CTRL_LOOPBACK 0x10
295#define MAC_CTRL_DUPLX 0x20
296#define MAC_CTRL_ADD_CRC 0x40
297#define MAC_CTRL_PAD 0x80
298#define MAC_CTRL_LENCHK 0x100
299#define MAC_CTRL_HUGE_EN 0x200
300#define MAC_CTRL_PRMLEN_SHIFT 10
301#define MAC_CTRL_PRMLEN_MASK 0xf
302#define MAC_CTRL_RMV_VLAN 0x4000
303#define MAC_CTRL_PROMIS_EN 0x8000
304#define MAC_CTRL_TX_PAUSE 0x10000
305#define MAC_CTRL_SCNT 0x20000
306#define MAC_CTRL_SRST_TX 0x40000
307#define MAC_CTRL_TX_SIMURST 0x80000
308#define MAC_CTRL_SPEED_SHIFT 20
309#define MAC_CTRL_SPEED_MASK 0x3
310#define MAC_CTRL_DBG_TX_BKPRESURE 0x400000
311#define MAC_CTRL_TX_HUGE 0x800000
312#define MAC_CTRL_RX_CHKSUM_EN 0x1000000
313#define MAC_CTRL_MC_ALL_EN 0x2000000
314#define MAC_CTRL_BC_EN 0x4000000
315#define MAC_CTRL_DBG 0x8000000
316#define MAC_CTRL_SINGLE_PAUSE_EN 0x10000000
317
318/* MAC IPG/IFG Control Register */
319#define REG_MAC_IPG_IFG 0x1484
320#define MAC_IPG_IFG_IPGT_SHIFT 0 /* Desired back to back
321 * inter-packet gap. The
322 * default is 96-bit time */
323#define MAC_IPG_IFG_IPGT_MASK 0x7f
324#define MAC_IPG_IFG_MIFG_SHIFT 8 /* Minimum number of IFG to
325 * enforce in between RX frames */
326#define MAC_IPG_IFG_MIFG_MASK          	0xff  /* Frames with a gap below this IFG are dropped */
327#define MAC_IPG_IFG_IPGR1_SHIFT 16 /* 64bit Carrier-Sense window */
328#define MAC_IPG_IFG_IPGR1_MASK 0x7f
329#define MAC_IPG_IFG_IPGR2_SHIFT 24 /* 96-bit IPG window */
330#define MAC_IPG_IFG_IPGR2_MASK 0x7f
331
332/* MAC STATION ADDRESS */
333#define REG_MAC_STA_ADDR 0x1488
334
335/* Hash table for multicast address */
336#define REG_RX_HASH_TABLE 0x1490
337
338/* MAC Half-Duplex Control Register */
339#define REG_MAC_HALF_DUPLX_CTRL 0x1498
340#define MAC_HALF_DUPLX_CTRL_LCOL_SHIFT 0 /* Collision Window */
341#define MAC_HALF_DUPLX_CTRL_LCOL_MASK 0x3ff
342#define MAC_HALF_DUPLX_CTRL_RETRY_SHIFT 12
343#define MAC_HALF_DUPLX_CTRL_RETRY_MASK 0xf
344#define MAC_HALF_DUPLX_CTRL_EXC_DEF_EN 0x10000
345#define MAC_HALF_DUPLX_CTRL_NO_BACK_C 0x20000
346#define MAC_HALF_DUPLX_CTRL_NO_BACK_P 0x40000 /* No back-off on backpressure,
347 * immediately start the
348 * transmission after back pressure */
349#define MAC_HALF_DUPLX_CTRL_ABEBE 0x80000 /* 1: Alternative Binary Exponential Back-off Enabled */
350#define MAC_HALF_DUPLX_CTRL_ABEBT_SHIFT 20 /* Maximum binary exponential number */
351#define MAC_HALF_DUPLX_CTRL_ABEBT_MASK 0xf
352#define MAC_HALF_DUPLX_CTRL_JAMIPG_SHIFT   24  /* IPG to start JAM for collision-based flow control in half-duplex mode */
353#define MAC_HALF_DUPLX_CTRL_JAMIPG_MASK   0xf  /* In units of 8-bit time */
354
355/* Maximum Frame Length Control Register */
356#define REG_MTU 0x149c
357
358/* Wake-on-LAN control register */
359#define REG_WOL_CTRL 0x14a0
360#define WOL_PATTERN_EN 0x00000001
361#define WOL_PATTERN_PME_EN 0x00000002
362#define WOL_MAGIC_EN 0x00000004
363#define WOL_MAGIC_PME_EN 0x00000008
364#define WOL_LINK_CHG_EN 0x00000010
365#define WOL_LINK_CHG_PME_EN 0x00000020
366#define WOL_PATTERN_ST 0x00000100
367#define WOL_MAGIC_ST 0x00000200
368#define WOL_LINKCHG_ST 0x00000400
369#define WOL_CLK_SWITCH_EN 0x00008000
370#define WOL_PT0_EN 0x00010000
371#define WOL_PT1_EN 0x00020000
372#define WOL_PT2_EN 0x00040000
373#define WOL_PT3_EN 0x00080000
374#define WOL_PT4_EN 0x00100000
375#define WOL_PT5_EN 0x00200000
376#define WOL_PT6_EN 0x00400000
377
378/* WOL Length ( 2 DWORD ) */
379#define REG_WOL_PATTERN_LEN 0x14a4
380#define WOL_PT_LEN_MASK 0x7f
381#define WOL_PT0_LEN_SHIFT 0
382#define WOL_PT1_LEN_SHIFT 8
383#define WOL_PT2_LEN_SHIFT 16
384#define WOL_PT3_LEN_SHIFT 24
385#define WOL_PT4_LEN_SHIFT 0
386#define WOL_PT5_LEN_SHIFT 8
387#define WOL_PT6_LEN_SHIFT 16
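/*
 * Illustrative sketch (not part of the original header): the seven wake
 * pattern lengths span two consecutive 32-bit registers, patterns 0-3 in
 * the first DWORD and patterns 4-6 in the second, which is why the PT4-PT6
 * shifts repeat 0/8/16. The helper only shows the packing; the two-register
 * layout is an assumption based on the "2 DWORD" comment above.
 */
static void wol_pattern_len_sketch(const unsigned int len[7], unsigned int dword[2])
{
	int i;

	dword[0] = dword[1] = 0;
	for (i = 0; i < 7; i++)
		dword[i / 4] |= (len[i] & WOL_PT_LEN_MASK) << (8 * (i % 4));
}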
388
389/* Internal SRAM Partition Register */
390#define RFDX_HEAD_ADDR_MASK 0x03FF
391#define RFDX_HARD_ADDR_SHIFT 0
392#define RFDX_TAIL_ADDR_MASK 0x03FF
393#define RFDX_TAIL_ADDR_SHIFT 16
394
395#define REG_SRAM_RFD0_INFO 0x1500
396#define REG_SRAM_RFD1_INFO 0x1504
397#define REG_SRAM_RFD2_INFO 0x1508
398#define REG_SRAM_RFD3_INFO 0x150C
399
400#define REG_RFD_NIC_LEN 0x1510 /* In 8-bytes */
401#define RFD_NIC_LEN_MASK 0x03FF
402
403#define REG_SRAM_TRD_ADDR 0x1518
404#define TPD_HEAD_ADDR_MASK 0x03FF
405#define TPD_HEAD_ADDR_SHIFT 0
406#define TPD_TAIL_ADDR_MASK 0x03FF
407#define TPD_TAIL_ADDR_SHIFT 16
408
409#define REG_SRAM_TRD_LEN 0x151C /* In 8-bytes */
410#define TPD_NIC_LEN_MASK 0x03FF
411
412#define REG_SRAM_RXF_ADDR 0x1520
413#define REG_SRAM_RXF_LEN 0x1524
414#define REG_SRAM_TXF_ADDR 0x1528
415#define REG_SRAM_TXF_LEN 0x152C
416#define REG_SRAM_TCPH_ADDR 0x1530
417#define REG_SRAM_PKTH_ADDR 0x1532
418
419/*
420 * Load Ptr Register
421 * Software sets this bit after the initialization of the head and tail
422 * addresses of all descriptors, as well as the following descriptor
423 * control register, which triggers each function block to load the head
424 * pointer to prepare for the operation. This bit is then self-cleared
425 * after one cycle.
426 */
427#define REG_LOAD_PTR                    0x1534
428
429
430#define REG_RX_BASE_ADDR_HI 0x1540
431#define REG_TX_BASE_ADDR_HI 0x1544
432#define REG_SMB_BASE_ADDR_HI 0x1548
433#define REG_SMB_BASE_ADDR_LO 0x154C
434#define REG_RFD0_HEAD_ADDR_LO 0x1550
435#define REG_RFD1_HEAD_ADDR_LO 0x1554
436#define REG_RFD2_HEAD_ADDR_LO 0x1558
437#define REG_RFD3_HEAD_ADDR_LO 0x155C
438#define REG_RFD_RING_SIZE 0x1560
439#define RFD_RING_SIZE_MASK 0x0FFF
440#define REG_RX_BUF_SIZE 0x1564
441#define RX_BUF_SIZE_MASK 0xFFFF
442#define REG_RRD0_HEAD_ADDR_LO 0x1568
443#define REG_RRD1_HEAD_ADDR_LO 0x156C
444#define REG_RRD2_HEAD_ADDR_LO 0x1570
445#define REG_RRD3_HEAD_ADDR_LO 0x1574
446#define REG_RRD_RING_SIZE 0x1578
447#define RRD_RING_SIZE_MASK 0x0FFF
448#define REG_HTPD_HEAD_ADDR_LO 0x157C
449#define REG_NTPD_HEAD_ADDR_LO 0x1580
450#define REG_TPD_RING_SIZE 0x1584
451#define TPD_RING_SIZE_MASK 0xFFFF
452#define REG_CMB_BASE_ADDR_LO 0x1588
453
454/* RSS registers */
455#define REG_RSS_KEY0 0x14B0
456#define REG_RSS_KEY1 0x14B4
457#define REG_RSS_KEY2 0x14B8
458#define REG_RSS_KEY3 0x14BC
459#define REG_RSS_KEY4 0x14C0
460#define REG_RSS_KEY5 0x14C4
461#define REG_RSS_KEY6 0x14C8
462#define REG_RSS_KEY7 0x14CC
463#define REG_RSS_KEY8 0x14D0
464#define REG_RSS_KEY9 0x14D4
465#define REG_IDT_TABLE0 0x14E0
466#define REG_IDT_TABLE1 0x14E4
467#define REG_IDT_TABLE2 0x14E8
468#define REG_IDT_TABLE3 0x14EC
469#define REG_IDT_TABLE4 0x14F0
470#define REG_IDT_TABLE5 0x14F4
471#define REG_IDT_TABLE6 0x14F8
472#define REG_IDT_TABLE7 0x14FC
473#define REG_IDT_TABLE REG_IDT_TABLE0
474#define REG_RSS_HASH_VALUE 0x15B0
475#define REG_RSS_HASH_FLAG 0x15B4
476#define REG_BASE_CPU_NUMBER 0x15B8
477
478/* TXQ Control Register */
479#define REG_TXQ_CTRL 0x1590
480#define TXQ_NUM_TPD_BURST_MASK 0xF
481#define TXQ_NUM_TPD_BURST_SHIFT 0
482#define TXQ_CTRL_IP_OPTION_EN 0x10
483#define TXQ_CTRL_EN 0x20
484#define TXQ_CTRL_ENH_MODE 0x40
485#define TXQ_CTRL_LS_8023_EN 0x80
486#define TXQ_TXF_BURST_NUM_SHIFT 16
487#define TXQ_TXF_BURST_NUM_MASK 0xFFFF
488
489/* Jumbo packet Threshold for task offload */
490#define REG_TX_TSO_OFFLOAD_THRESH 0x1594 /* In 8-bytes */
491#define TX_TSO_OFFLOAD_THRESH_MASK 0x07FF
492
493#define REG_TXF_WATER_MARK 0x1598 /* In 8-bytes */
494#define TXF_WATER_MARK_MASK 0x0FFF
495#define TXF_LOW_WATER_MARK_SHIFT 0
496#define TXF_HIGH_WATER_MARK_SHIFT 16
497#define TXQ_CTRL_BURST_MODE_EN 0x80000000
498
499#define REG_THRUPUT_MON_CTRL 0x159C
500#define THRUPUT_MON_RATE_MASK 0x3
501#define THRUPUT_MON_RATE_SHIFT 0
502#define THRUPUT_MON_EN 0x80
503
504/* RXQ Control Register */
505#define REG_RXQ_CTRL 0x15A0
506#define ASPM_THRUPUT_LIMIT_MASK 0x3
507#define ASPM_THRUPUT_LIMIT_SHIFT 0
508#define ASPM_THRUPUT_LIMIT_NO 0x00
509#define ASPM_THRUPUT_LIMIT_1M 0x01
510#define ASPM_THRUPUT_LIMIT_10M 0x02
511#define ASPM_THRUPUT_LIMIT_100M 0x04
512#define RXQ1_CTRL_EN 0x10
513#define RXQ2_CTRL_EN 0x20
514#define RXQ3_CTRL_EN 0x40
515#define IPV6_CHKSUM_CTRL_EN 0x80
516#define RSS_HASH_BITS_MASK 0x00FF
517#define RSS_HASH_BITS_SHIFT 8
518#define RSS_HASH_IPV4 0x10000
519#define RSS_HASH_IPV4_TCP 0x20000
520#define RSS_HASH_IPV6 0x40000
521#define RSS_HASH_IPV6_TCP 0x80000
522#define RXQ_RFD_BURST_NUM_MASK 0x003F
523#define RXQ_RFD_BURST_NUM_SHIFT 20
524#define RSS_MODE_MASK 0x0003
525#define RSS_MODE_SHIFT 26
526#define RSS_NIP_QUEUE_SEL_MASK 0x1
527#define RSS_NIP_QUEUE_SEL_SHIFT 28
528#define RRS_HASH_CTRL_EN 0x20000000
529#define RX_CUT_THRU_EN 0x40000000
530#define RXQ_CTRL_EN 0x80000000
531
532#define REG_RFD_FREE_THRESH 0x15A4
533#define RFD_FREE_THRESH_MASK 0x003F
534#define RFD_FREE_HI_THRESH_SHIFT 0
535#define RFD_FREE_LO_THRESH_SHIFT 6
536
537/* RXF flow control register */
538#define REG_RXQ_RXF_PAUSE_THRESH 0x15A8
539#define RXQ_RXF_PAUSE_TH_HI_SHIFT 0
540#define RXQ_RXF_PAUSE_TH_HI_MASK 0x0FFF
541#define RXQ_RXF_PAUSE_TH_LO_SHIFT 16
542#define RXQ_RXF_PAUSE_TH_LO_MASK 0x0FFF
543
544#define REG_RXD_DMA_CTRL 0x15AC
545#define RXD_DMA_THRESH_MASK 0x0FFF /* In 8-bytes */
546#define RXD_DMA_THRESH_SHIFT 0
547#define RXD_DMA_DOWN_TIMER_MASK 0xFFFF
548#define RXD_DMA_DOWN_TIMER_SHIFT 16
549
550/* DMA Engine Control Register */
551#define REG_DMA_CTRL 0x15C0
552#define DMA_CTRL_DMAR_IN_ORDER 0x1
553#define DMA_CTRL_DMAR_ENH_ORDER 0x2
554#define DMA_CTRL_DMAR_OUT_ORDER 0x4
555#define DMA_CTRL_RCB_VALUE 0x8
556#define DMA_CTRL_DMAR_BURST_LEN_MASK 0x0007
557#define DMA_CTRL_DMAR_BURST_LEN_SHIFT 4
558#define DMA_CTRL_DMAW_BURST_LEN_MASK 0x0007
559#define DMA_CTRL_DMAW_BURST_LEN_SHIFT 7
560#define DMA_CTRL_DMAR_REQ_PRI 0x400
561#define DMA_CTRL_DMAR_DLY_CNT_MASK 0x001F
562#define DMA_CTRL_DMAR_DLY_CNT_SHIFT 11
563#define DMA_CTRL_DMAW_DLY_CNT_MASK 0x000F
564#define DMA_CTRL_DMAW_DLY_CNT_SHIFT 16
565#define DMA_CTRL_CMB_EN 0x100000
566#define DMA_CTRL_SMB_EN 0x200000
567#define DMA_CTRL_CMB_NOW 0x400000
568#define MAC_CTRL_SMB_DIS 0x1000000
569#define DMA_CTRL_SMB_NOW 0x80000000
570
571/* CMB/SMB Control Register */
572#define REG_SMB_STAT_TIMER 0x15C4 /* 2us resolution */
573#define SMB_STAT_TIMER_MASK 0xFFFFFF
574#define REG_CMB_TPD_THRESH 0x15C8
575#define CMB_TPD_THRESH_MASK 0xFFFF
576#define REG_CMB_TX_TIMER 0x15CC /* 2us resolution */
577#define CMB_TX_TIMER_MASK 0xFFFF
578
579/* Mail box */
580#define MB_RFDX_PROD_IDX_MASK 0xFFFF
581#define REG_MB_RFD0_PROD_IDX 0x15E0
582#define REG_MB_RFD1_PROD_IDX 0x15E4
583#define REG_MB_RFD2_PROD_IDX 0x15E8
584#define REG_MB_RFD3_PROD_IDX 0x15EC
585
586#define MB_PRIO_PROD_IDX_MASK 0xFFFF
587#define REG_MB_PRIO_PROD_IDX 0x15F0
588#define MB_HTPD_PROD_IDX_SHIFT 0
589#define MB_NTPD_PROD_IDX_SHIFT 16
590
591#define MB_PRIO_CONS_IDX_MASK 0xFFFF
592#define REG_MB_PRIO_CONS_IDX 0x15F4
593#define MB_HTPD_CONS_IDX_SHIFT 0
594#define MB_NTPD_CONS_IDX_SHIFT 16
595
596#define REG_MB_RFD01_CONS_IDX 0x15F8
597#define MB_RFD0_CONS_IDX_MASK 0x0000FFFF
598#define MB_RFD1_CONS_IDX_MASK 0xFFFF0000
599#define REG_MB_RFD23_CONS_IDX 0x15FC
600#define MB_RFD2_CONS_IDX_MASK 0x0000FFFF
601#define MB_RFD3_CONS_IDX_MASK 0xFFFF0000
602
603/* Interrupt Status Register */
604#define REG_ISR 0x1600
605#define ISR_SMB 0x00000001
606#define ISR_TIMER 0x00000002
607/*
608 * Software manual interrupt, for debug. Set when SW_MAN_INT_EN is set
609 * in the Selene Master Control Register (Table 51, offset 0x1400).
610 */
611#define ISR_MANUAL 0x00000004
612#define ISR_HW_RXF_OV 0x00000008 /* RXF overflow interrupt */
613#define ISR_RFD0_UR 0x00000010 /* RFD0 under run */
614#define ISR_RFD1_UR 0x00000020
615#define ISR_RFD2_UR 0x00000040
616#define ISR_RFD3_UR 0x00000080
617#define ISR_TXF_UR 0x00000100
618#define ISR_DMAR_TO_RST 0x00000200
619#define ISR_DMAW_TO_RST 0x00000400
620#define ISR_TX_CREDIT 0x00000800
621#define ISR_GPHY 0x00001000
622/* GPHY low power state interrupt */
623#define ISR_GPHY_LPW 0x00002000
624#define ISR_TXQ_TO_RST 0x00004000
625#define ISR_TX_PKT 0x00008000
626#define ISR_RX_PKT_0 0x00010000
627#define ISR_RX_PKT_1 0x00020000
628#define ISR_RX_PKT_2 0x00040000
629#define ISR_RX_PKT_3 0x00080000
630#define ISR_MAC_RX 0x00100000
631#define ISR_MAC_TX 0x00200000
632#define ISR_UR_DETECTED 0x00400000
633#define ISR_FERR_DETECTED 0x00800000
634#define ISR_NFERR_DETECTED 0x01000000
635#define ISR_CERR_DETECTED 0x02000000
636#define ISR_PHY_LINKDOWN 0x04000000
637#define ISR_DIS_INT 0x80000000
638
639/* Interrupt Mask Register */
640#define REG_IMR 0x1604
641
642#define IMR_NORMAL_MASK (\
643 ISR_MANUAL |\
644 ISR_HW_RXF_OV |\
645 ISR_RFD0_UR |\
646 ISR_TXF_UR |\
647 ISR_DMAR_TO_RST |\
648 ISR_TXQ_TO_RST |\
649 ISR_DMAW_TO_RST |\
650 ISR_GPHY |\
651 ISR_TX_PKT |\
652 ISR_RX_PKT_0 |\
653 ISR_GPHY_LPW |\
654 ISR_PHY_LINKDOWN)
655
656#define ISR_RX_PKT (\
657 ISR_RX_PKT_0 |\
658 ISR_RX_PKT_1 |\
659 ISR_RX_PKT_2 |\
660 ISR_RX_PKT_3)
661
662#define ISR_OVER (\
663 ISR_RFD0_UR |\
664 ISR_RFD1_UR |\
665 ISR_RFD2_UR |\
666 ISR_RFD3_UR |\
667 ISR_HW_RXF_OV |\
668 ISR_TXF_UR)
669
670#define ISR_ERROR (\
671 ISR_DMAR_TO_RST |\
672 ISR_TXQ_TO_RST |\
673 ISR_DMAW_TO_RST |\
674 ISR_PHY_LINKDOWN)
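/*
 * Illustrative sketch (not part of the original header): how the grouped
 * masks above can classify a raw interrupt status word. The action flags
 * and the function itself are hypothetical; the driver's real handling
 * lives in the interrupt service routine in atl1c_main.c below.
 */
enum isr_action_sketch { ACT_NONE = 0, ACT_RESET = 1, ACT_OVERRUN = 2, ACT_RX = 4, ACT_TX = 8 };

static int classify_isr_sketch(unsigned int status)
{
	int action = ACT_NONE;

	if (status & ISR_ERROR)		/* DMA/TXQ timeout or PHY link down */
		action |= ACT_RESET;
	if (status & ISR_OVER)		/* RFDx/TXF under-run or RXF overflow */
		action |= ACT_OVERRUN;
	if (status & ISR_RX_PKT)	/* packets arrived on any of the 4 RX queues */
		action |= ACT_RX;
	if (status & ISR_TX_PKT)	/* transmit completions */
		action |= ACT_TX;
	return action;
}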
675
676#define REG_INT_RETRIG_TIMER 0x1608
677#define INT_RETRIG_TIMER_MASK 0xFFFF
678
679#define REG_HDS_CTRL 0x160C
680#define HDS_CTRL_EN 0x0001
681#define HDS_CTRL_BACKFILLSIZE_SHIFT 8
682#define HDS_CTRL_BACKFILLSIZE_MASK 0x0FFF
683#define HDS_CTRL_MAX_HDRSIZE_SHIFT 20
684#define HDS_CTRL_MAC_HDRSIZE_MASK 0x0FFF
685
686#define REG_MAC_RX_STATUS_BIN 0x1700
687#define REG_MAC_RX_STATUS_END 0x175c
688#define REG_MAC_TX_STATUS_BIN 0x1760
689#define REG_MAC_TX_STATUS_END 0x17c0
690
691/* DEBUG ADDR */
692#define REG_DEBUG_DATA0 0x1900
693#define REG_DEBUG_DATA1 0x1904
694
695/* PHY Control Register */
696#define MII_BMCR 0x00
697#define BMCR_SPEED_SELECT_MSB 0x0040 /* bits 6,13: 10=1000, 01=100, 00=10 */
698#define BMCR_COLL_TEST_ENABLE 0x0080 /* Collision test enable */
699#define BMCR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */
700#define BMCR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */
701#define BMCR_ISOLATE 0x0400 /* Isolate PHY from MII */
702#define BMCR_POWER_DOWN 0x0800 /* Power down */
703#define BMCR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */
704#define BMCR_SPEED_SELECT_LSB 0x2000 /* bits 6,13: 10=1000, 01=100, 00=10 */
705#define BMCR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */
706#define BMCR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */
707#define BMCR_SPEED_MASK 0x2040
708#define BMCR_SPEED_1000 0x0040
709#define BMCR_SPEED_100 0x2000
710#define BMCR_SPEED_10 0x0000
711
712/* PHY Status Register */
713#define MII_BMSR 0x01
714#define BMMSR_EXTENDED_CAPS 0x0001 /* Extended register capabilities */
715#define BMSR_JABBER_DETECT 0x0002 /* Jabber Detected */
716#define BMSR_LINK_STATUS 0x0004 /* Link Status 1 = link */
717#define BMSR_AUTONEG_CAPS 0x0008 /* Auto Neg Capable */
718#define BMSR_REMOTE_FAULT 0x0010 /* Remote Fault Detect */
719#define BMSR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */
720#define BMSR_PREAMBLE_SUPPRESS 0x0040 /* Preamble may be suppressed */
721#define BMSR_EXTENDED_STATUS 0x0100 /* Ext. status info in Reg 0x0F */
722#define BMSR_100T2_HD_CAPS 0x0200 /* 100T2 Half Duplex Capable */
723#define BMSR_100T2_FD_CAPS 0x0400 /* 100T2 Full Duplex Capable */
724#define BMSR_10T_HD_CAPS 0x0800 /* 10T Half Duplex Capable */
725#define BMSR_10T_FD_CAPS 0x1000 /* 10T Full Duplex Capable */
726#define BMSR_100X_HD_CAPS 0x2000 /* 100X Half Duplex Capable */
727#define BMMII_SR_100X_FD_CAPS 0x4000 /* 100X Full Duplex Capable */
728#define BMMII_SR_100T4_CAPS 0x8000 /* 100T4 Capable */
729
730#define MII_PHYSID1 0x02
731#define MII_PHYSID2 0x03
732
733/* Autoneg Advertisement Register */
734#define MII_ADVERTISE 0x04
735#define ADVERTISE_SPEED_MASK 0x01E0
736#define ADVERTISE_DEFAULT_CAP 0x0DE0
737
738/* 1000BASE-T Control Register */
739#define MII_GIGA_CR 0x09
740#define GIGA_CR_1000T_REPEATER_DTE 0x0400 /* 1=Repeater/switch device port 0=DTE device */
741
742#define GIGA_CR_1000T_MS_VALUE 0x0800 /* 1=Configure PHY as Master 0=Configure PHY as Slave */
743#define GIGA_CR_1000T_MS_ENABLE        0x1000  /* 1=Manual Master/Slave config 0=Automatic Master/Slave config */
744#define GIGA_CR_1000T_TEST_MODE_NORMAL 0x0000 /* Normal Operation */
745#define GIGA_CR_1000T_TEST_MODE_1 0x2000 /* Transmit Waveform test */
746#define GIGA_CR_1000T_TEST_MODE_2 0x4000 /* Master Transmit Jitter test */
747#define GIGA_CR_1000T_TEST_MODE_3 0x6000 /* Slave Transmit Jitter test */
748#define GIGA_CR_1000T_TEST_MODE_4 0x8000 /* Transmitter Distortion test */
749#define GIGA_CR_1000T_SPEED_MASK 0x0300
750#define GIGA_CR_1000T_DEFAULT_CAP 0x0300
751
752/* PHY Specific Status Register */
753#define MII_GIGA_PSSR 0x11
754#define GIGA_PSSR_SPD_DPLX_RESOLVED 0x0800 /* 1=Speed & Duplex resolved */
755#define GIGA_PSSR_DPLX                 0x2000  /* 1=Full Duplex 0=Half Duplex */
756#define GIGA_PSSR_SPEED 0xC000 /* Speed, bits 14:15 */
757#define GIGA_PSSR_10MBS                0x0000  /* 00=10 Mbps */
758#define GIGA_PSSR_100MBS               0x4000  /* 01=100 Mbps */
759#define GIGA_PSSR_1000MBS              0x8000  /* 10=1000 Mbps */
760
761/* PHY Interrupt Enable Register */
762#define MII_IER 0x12
763#define IER_LINK_UP 0x0400
764#define IER_LINK_DOWN 0x0800
765
766/* PHY Interrupt Status Register */
767#define MII_ISR 0x13
768#define ISR_LINK_UP 0x0400
769#define ISR_LINK_DOWN 0x0800
770
771/* Cable-Detect-Test Control Register */
772#define MII_CDTC 0x16
773#define CDTC_EN_OFF                     0  /* self-clearing */
774#define CDTC_EN_BITS 1
775#define CDTC_PAIR_OFF 8
776#define CDTC_PAIR_BIT 2
777
778/* Cable-Detect-Test Status Register */
779#define MII_CDTS 0x1C
780#define CDTS_STATUS_OFF 8
781#define CDTS_STATUS_BITS 2
782#define CDTS_STATUS_NORMAL 0
783#define CDTS_STATUS_SHORT 1
784#define CDTS_STATUS_OPEN 2
785#define CDTS_STATUS_INVALID 3
786
787#define MII_DBG_ADDR 0x1D
788#define MII_DBG_DATA 0x1E
789
790#define MII_ANA_CTRL_0 0x0
791#define ANA_RESTART_CAL 0x0001
792#define ANA_MANUL_SWICH_ON_SHIFT 0x1
793#define ANA_MANUL_SWICH_ON_MASK 0xF
794#define ANA_MAN_ENABLE 0x0020
795#define ANA_SEL_HSP 0x0040
796#define ANA_EN_HB 0x0080
797#define ANA_EN_HBIAS 0x0100
798#define ANA_OEN_125M 0x0200
799#define ANA_EN_LCKDT 0x0400
800#define ANA_LCKDT_PHY 0x0800
801#define ANA_AFE_MODE 0x1000
802#define ANA_VCO_SLOW 0x2000
803#define ANA_VCO_FAST 0x4000
804#define ANA_SEL_CLK125M_DSP 0x8000
805
806#define MII_ANA_CTRL_4 0x4
807#define ANA_IECHO_ADJ_MASK 0xF
808#define ANA_IECHO_ADJ_3_SHIFT 0
809#define ANA_IECHO_ADJ_2_SHIFT 4
810#define ANA_IECHO_ADJ_1_SHIFT 8
811#define ANA_IECHO_ADJ_0_SHIFT 12
812
813#define MII_ANA_CTRL_5 0x5
814#define ANA_SERDES_CDR_BW_SHIFT 0
815#define ANA_SERDES_CDR_BW_MASK 0x3
816#define ANA_MS_PAD_DBG 0x0004
817#define ANA_SPEEDUP_DBG 0x0008
818#define ANA_SERDES_TH_LOS_SHIFT 4
819#define ANA_SERDES_TH_LOS_MASK 0x3
820#define ANA_SERDES_EN_DEEM 0x0040
821#define ANA_SERDES_TXELECIDLE 0x0080
822#define ANA_SERDES_BEACON 0x0100
823#define ANA_SERDES_HALFTXDR 0x0200
824#define ANA_SERDES_SEL_HSP 0x0400
825#define ANA_SERDES_EN_PLL 0x0800
826#define ANA_SERDES_EN 0x1000
827#define ANA_SERDES_EN_LCKDT 0x2000
828
829#define MII_ANA_CTRL_11 0xB
830#define ANA_PS_HIB_EN 0x8000
831
832#define MII_ANA_CTRL_18 0x12
833#define ANA_TEST_MODE_10BT_01SHIFT 0
834#define ANA_TEST_MODE_10BT_01MASK 0x3
835#define ANA_LOOP_SEL_10BT 0x0004
836#define ANA_RGMII_MODE_SW 0x0008
837#define ANA_EN_LONGECABLE 0x0010
838#define ANA_TEST_MODE_10BT_2 0x0020
839#define ANA_EN_10BT_IDLE 0x0400
840#define ANA_EN_MASK_TB 0x0800
841#define ANA_TRIGGER_SEL_TIMER_SHIFT 12
842#define ANA_TRIGGER_SEL_TIMER_MASK 0x3
843#define ANA_INTERVAL_SEL_TIMER_SHIFT 14
844#define ANA_INTERVAL_SEL_TIMER_MASK 0x3
845
846#define MII_ANA_CTRL_41 0x29
847#define ANA_TOP_PS_EN 0x8000
848
849#define MII_ANA_CTRL_54 0x36
850#define ANA_LONG_CABLE_TH_100_SHIFT 0
851#define ANA_LONG_CABLE_TH_100_MASK 0x3F
852#define ANA_DESERVED 0x0040
853#define ANA_EN_LIT_CH 0x0080
854#define ANA_SHORT_CABLE_TH_100_SHIFT 8
855#define ANA_SHORT_CABLE_TH_100_MASK 0x3F
856#define ANA_BP_BAD_LINK_ACCUM 0x4000
857#define ANA_BP_SMALL_BW 0x8000
858
859#endif /*_ATL1C_HW_H_*/
diff --git a/drivers/net/atl1c/atl1c_main.c b/drivers/net/atl1c/atl1c_main.c
new file mode 100644
index 000000000000..deb7b53167ee
--- /dev/null
+++ b/drivers/net/atl1c/atl1c_main.c
@@ -0,0 +1,2797 @@
1/*
2 * Copyright(c) 2008 - 2009 Atheros Corporation. All rights reserved.
3 *
4 * Derived from Intel e1000 driver
5 * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License as published by the Free
9 * Software Foundation; either version 2 of the License, or (at your option)
10 * any later version.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc., 59
19 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
20 */
21
22#include "atl1c.h"
23
24#define ATL1C_DRV_VERSION "1.0.0.1-NAPI"
25char atl1c_driver_name[] = "atl1c";
26char atl1c_driver_version[] = ATL1C_DRV_VERSION;
27#define PCI_DEVICE_ID_ATTANSIC_L2C 0x1062
28#define PCI_DEVICE_ID_ATTANSIC_L1C 0x1063
29/*
30 * atl1c_pci_tbl - PCI Device ID Table
31 *
32 * Wildcard entries (PCI_ANY_ID) should come last
33 * Last entry must be all 0s
34 *
35 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
36 * Class, Class Mask, private data (not used) }
37 */
38static struct pci_device_id atl1c_pci_tbl[] = {
39 {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L1C)},
40 {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L2C)},
41 /* required last entry */
42 { 0 }
43};
44MODULE_DEVICE_TABLE(pci, atl1c_pci_tbl);
45
46MODULE_AUTHOR("Jie Yang <jie.yang@atheros.com>");
47MODULE_DESCRIPTION("Atheros 1000M Ethernet Network Driver");
48MODULE_LICENSE("GPL");
49MODULE_VERSION(ATL1C_DRV_VERSION);
50
51static int atl1c_stop_mac(struct atl1c_hw *hw);
52static void atl1c_enable_rx_ctrl(struct atl1c_hw *hw);
53static void atl1c_enable_tx_ctrl(struct atl1c_hw *hw);
54static void atl1c_disable_l0s_l1(struct atl1c_hw *hw);
55static void atl1c_set_aspm(struct atl1c_hw *hw, bool linkup);
56static void atl1c_setup_mac_ctrl(struct atl1c_adapter *adapter);
57static void atl1c_clean_rx_irq(struct atl1c_adapter *adapter, u8 que,
58 int *work_done, int work_to_do);
59
60static const u16 atl1c_pay_load_size[] = {
61 128, 256, 512, 1024, 2048, 4096,
62};
63
64static const u16 atl1c_rfd_prod_idx_regs[AT_MAX_RECEIVE_QUEUE] =
65{
66 REG_MB_RFD0_PROD_IDX,
67 REG_MB_RFD1_PROD_IDX,
68 REG_MB_RFD2_PROD_IDX,
69 REG_MB_RFD3_PROD_IDX
70};
71
72static const u16 atl1c_rfd_addr_lo_regs[AT_MAX_RECEIVE_QUEUE] =
73{
74 REG_RFD0_HEAD_ADDR_LO,
75 REG_RFD1_HEAD_ADDR_LO,
76 REG_RFD2_HEAD_ADDR_LO,
77 REG_RFD3_HEAD_ADDR_LO
78};
79
80static const u16 atl1c_rrd_addr_lo_regs[AT_MAX_RECEIVE_QUEUE] =
81{
82 REG_RRD0_HEAD_ADDR_LO,
83 REG_RRD1_HEAD_ADDR_LO,
84 REG_RRD2_HEAD_ADDR_LO,
85 REG_RRD3_HEAD_ADDR_LO
86};
87
88static const u32 atl1c_default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE |
89 NETIF_MSG_LINK | NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP;
90
91/*
92 * atl1c_reset_pcie - reset the PCIe module
93 */
94static void atl1c_reset_pcie(struct atl1c_hw *hw, u32 flag)
95{
96 u32 data;
97 u32 pci_cmd;
98 struct pci_dev *pdev = hw->adapter->pdev;
99
100 AT_READ_REG(hw, PCI_COMMAND, &pci_cmd);
101 pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
102 pci_cmd |= (PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER |
103 PCI_COMMAND_IO);
104 AT_WRITE_REG(hw, PCI_COMMAND, pci_cmd);
105
106 /*
107	 * Clear any power-saving settings
108 */
109 pci_enable_wake(pdev, PCI_D3hot, 0);
110 pci_enable_wake(pdev, PCI_D3cold, 0);
111
112 /*
113	 * Mask some PCIe error bits
114 */
115 AT_READ_REG(hw, REG_PCIE_UC_SEVERITY, &data);
116 data &= ~PCIE_UC_SERVRITY_DLP;
117 data &= ~PCIE_UC_SERVRITY_FCP;
118 AT_WRITE_REG(hw, REG_PCIE_UC_SEVERITY, data);
119
120 if (flag & ATL1C_PCIE_L0S_L1_DISABLE)
121 atl1c_disable_l0s_l1(hw);
122 if (flag & ATL1C_PCIE_PHY_RESET)
123 AT_WRITE_REG(hw, REG_GPHY_CTRL, GPHY_CTRL_DEFAULT);
124 else
125 AT_WRITE_REG(hw, REG_GPHY_CTRL,
126 GPHY_CTRL_DEFAULT | GPHY_CTRL_EXT_RESET);
127
128 msleep(1);
129}
130
131/*
132 * atl1c_irq_enable - Enable default interrupt generation settings
133 * @adapter: board private structure
134 */
135static inline void atl1c_irq_enable(struct atl1c_adapter *adapter)
136{
137 if (likely(atomic_dec_and_test(&adapter->irq_sem))) {
138 AT_WRITE_REG(&adapter->hw, REG_ISR, 0x7FFFFFFF);
139 AT_WRITE_REG(&adapter->hw, REG_IMR, adapter->hw.intr_mask);
140 AT_WRITE_FLUSH(&adapter->hw);
141 }
142}
143
144/*
145 * atl1c_irq_disable - Mask off interrupt generation on the NIC
146 * @adapter: board private structure
147 */
148static inline void atl1c_irq_disable(struct atl1c_adapter *adapter)
149{
150 atomic_inc(&adapter->irq_sem);
151 AT_WRITE_REG(&adapter->hw, REG_IMR, 0);
152 AT_WRITE_FLUSH(&adapter->hw);
153 synchronize_irq(adapter->pdev->irq);
154}
155
156/*
157 * atl1c_irq_reset - reset the interrupt configuration on the NIC
158 * @adapter: board private structure
159 */
160static inline void atl1c_irq_reset(struct atl1c_adapter *adapter)
161{
162 atomic_set(&adapter->irq_sem, 1);
163 atl1c_irq_enable(adapter);
164}
165
166/*
167 * atl1c_phy_config - Timer Call-back
168 * @data: pointer to netdev cast into an unsigned long
169 */
170static void atl1c_phy_config(unsigned long data)
171{
172 struct atl1c_adapter *adapter = (struct atl1c_adapter *) data;
173 struct atl1c_hw *hw = &adapter->hw;
174 unsigned long flags;
175
176 spin_lock_irqsave(&adapter->mdio_lock, flags);
177 atl1c_restart_autoneg(hw);
178 spin_unlock_irqrestore(&adapter->mdio_lock, flags);
179}
180
181void atl1c_reinit_locked(struct atl1c_adapter *adapter)
182{
183
184 WARN_ON(in_interrupt());
185 atl1c_down(adapter);
186 atl1c_up(adapter);
187 clear_bit(__AT_RESETTING, &adapter->flags);
188}
189
190static void atl1c_reset_task(struct work_struct *work)
191{
192 struct atl1c_adapter *adapter;
193 struct net_device *netdev;
194
195 adapter = container_of(work, struct atl1c_adapter, reset_task);
196 netdev = adapter->netdev;
197
198 netif_device_detach(netdev);
199 atl1c_down(adapter);
200 atl1c_up(adapter);
201 netif_device_attach(netdev);
202}
203
204static void atl1c_check_link_status(struct atl1c_adapter *adapter)
205{
206 struct atl1c_hw *hw = &adapter->hw;
207 struct net_device *netdev = adapter->netdev;
208 struct pci_dev *pdev = adapter->pdev;
209 int err;
210 unsigned long flags;
211 u16 speed, duplex, phy_data;
212
213 spin_lock_irqsave(&adapter->mdio_lock, flags);
214	/* MII_BMSR must be read twice */
215 atl1c_read_phy_reg(hw, MII_BMSR, &phy_data);
216 atl1c_read_phy_reg(hw, MII_BMSR, &phy_data);
217 spin_unlock_irqrestore(&adapter->mdio_lock, flags);
218
219 if ((phy_data & BMSR_LSTATUS) == 0) {
220 /* link down */
221 if (netif_carrier_ok(netdev)) {
222 hw->hibernate = true;
223 atl1c_set_aspm(hw, false);
224 if (atl1c_stop_mac(hw) != 0)
225 if (netif_msg_hw(adapter))
226 dev_warn(&pdev->dev,
227 "stop mac failed\n");
228 }
229 netif_carrier_off(netdev);
230 } else {
231 /* Link Up */
232 hw->hibernate = false;
233 spin_lock_irqsave(&adapter->mdio_lock, flags);
234 err = atl1c_get_speed_and_duplex(hw, &speed, &duplex);
235 spin_unlock_irqrestore(&adapter->mdio_lock, flags);
236 if (unlikely(err))
237 return;
238		/* update our settings if the resolved link parameters changed */
239 if (adapter->link_speed != speed ||
240 adapter->link_duplex != duplex) {
241 adapter->link_speed = speed;
242 adapter->link_duplex = duplex;
243 atl1c_enable_tx_ctrl(hw);
244 atl1c_enable_rx_ctrl(hw);
245 atl1c_setup_mac_ctrl(adapter);
246 atl1c_set_aspm(hw, true);
247 if (netif_msg_link(adapter))
248 dev_info(&pdev->dev,
249 "%s: %s NIC Link is Up<%d Mbps %s>\n",
250 atl1c_driver_name, netdev->name,
251 adapter->link_speed,
252 adapter->link_duplex == FULL_DUPLEX ?
253 "Full Duplex" : "Half Duplex");
254 }
255 if (!netif_carrier_ok(netdev))
256 netif_carrier_on(netdev);
257 }
258}
259
260/*
261 * atl1c_link_chg_task - deal with link change event Out of interrupt context
262 * @netdev: network interface device structure
263 */
264static void atl1c_link_chg_task(struct work_struct *work)
265{
266 struct atl1c_adapter *adapter;
267
268 adapter = container_of(work, struct atl1c_adapter, link_chg_task);
269 atl1c_check_link_status(adapter);
270}
271
272static void atl1c_link_chg_event(struct atl1c_adapter *adapter)
273{
274 struct net_device *netdev = adapter->netdev;
275 struct pci_dev *pdev = adapter->pdev;
276 u16 phy_data;
277 u16 link_up;
278
279 spin_lock(&adapter->mdio_lock);
280 atl1c_read_phy_reg(&adapter->hw, MII_BMSR, &phy_data);
281 atl1c_read_phy_reg(&adapter->hw, MII_BMSR, &phy_data);
282 spin_unlock(&adapter->mdio_lock);
283 link_up = phy_data & BMSR_LSTATUS;
284 /* notify upper layer link down ASAP */
285 if (!link_up) {
286 if (netif_carrier_ok(netdev)) {
287 /* old link state: Up */
288 netif_carrier_off(netdev);
289 if (netif_msg_link(adapter))
290 dev_info(&pdev->dev,
291 "%s: %s NIC Link is Down\n",
292 atl1c_driver_name, netdev->name);
293 adapter->link_speed = SPEED_0;
294 }
295 }
296 schedule_work(&adapter->link_chg_task);
297}
298
299static void atl1c_del_timer(struct atl1c_adapter *adapter)
300{
301 del_timer_sync(&adapter->phy_config_timer);
302}
303
304static void atl1c_cancel_work(struct atl1c_adapter *adapter)
305{
306 cancel_work_sync(&adapter->reset_task);
307 cancel_work_sync(&adapter->link_chg_task);
308}
309
310/*
311 * atl1c_tx_timeout - Respond to a Tx Hang
312 * @netdev: network interface device structure
313 */
314static void atl1c_tx_timeout(struct net_device *netdev)
315{
316 struct atl1c_adapter *adapter = netdev_priv(netdev);
317
318 /* Do the reset outside of interrupt context */
319 schedule_work(&adapter->reset_task);
320}
321
322/*
323 * atl1c_set_multi - Multicast and Promiscuous mode set
324 * @netdev: network interface device structure
325 *
326 * The set_multi entry point is called whenever the multicast address
327 * list or the network interface flags are updated. This routine is
328 * responsible for configuring the hardware for proper multicast,
329 * promiscuous mode, and all-multi behavior.
330 */
331static void atl1c_set_multi(struct net_device *netdev)
332{
333 struct atl1c_adapter *adapter = netdev_priv(netdev);
334 struct atl1c_hw *hw = &adapter->hw;
335 struct dev_mc_list *mc_ptr;
336 u32 mac_ctrl_data;
337 u32 hash_value;
338
339 /* Check for Promiscuous and All Multicast modes */
340 AT_READ_REG(hw, REG_MAC_CTRL, &mac_ctrl_data);
341
342 if (netdev->flags & IFF_PROMISC) {
343 mac_ctrl_data |= MAC_CTRL_PROMIS_EN;
344 } else if (netdev->flags & IFF_ALLMULTI) {
345 mac_ctrl_data |= MAC_CTRL_MC_ALL_EN;
346 mac_ctrl_data &= ~MAC_CTRL_PROMIS_EN;
347 } else {
348 mac_ctrl_data &= ~(MAC_CTRL_PROMIS_EN | MAC_CTRL_MC_ALL_EN);
349 }
350
351 AT_WRITE_REG(hw, REG_MAC_CTRL, mac_ctrl_data);
352
353 /* clear the old settings from the multicast hash table */
354 AT_WRITE_REG(hw, REG_RX_HASH_TABLE, 0);
355 AT_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, 1, 0);
356
357	/* compute each multicast address hash value and set it in the hash table */
358 for (mc_ptr = netdev->mc_list; mc_ptr; mc_ptr = mc_ptr->next) {
359 hash_value = atl1c_hash_mc_addr(hw, mc_ptr->dmi_addr);
360 atl1c_hash_set(hw, hash_value);
361 }
362}
363
364static void atl1c_vlan_rx_register(struct net_device *netdev,
365 struct vlan_group *grp)
366{
367 struct atl1c_adapter *adapter = netdev_priv(netdev);
368 struct pci_dev *pdev = adapter->pdev;
369 u32 mac_ctrl_data = 0;
370
371 if (netif_msg_pktdata(adapter))
372 dev_dbg(&pdev->dev, "atl1c_vlan_rx_register\n");
373
374 atl1c_irq_disable(adapter);
375
376 adapter->vlgrp = grp;
377 AT_READ_REG(&adapter->hw, REG_MAC_CTRL, &mac_ctrl_data);
378
379 if (grp) {
380 /* enable VLAN tag insert/strip */
381 mac_ctrl_data |= MAC_CTRL_RMV_VLAN;
382 } else {
383 /* disable VLAN tag insert/strip */
384 mac_ctrl_data &= ~MAC_CTRL_RMV_VLAN;
385 }
386
387 AT_WRITE_REG(&adapter->hw, REG_MAC_CTRL, mac_ctrl_data);
388 atl1c_irq_enable(adapter);
389}
390
391static void atl1c_restore_vlan(struct atl1c_adapter *adapter)
392{
393 struct pci_dev *pdev = adapter->pdev;
394
395 if (netif_msg_pktdata(adapter))
396		dev_dbg(&pdev->dev, "atl1c_restore_vlan\n");
397 atl1c_vlan_rx_register(adapter->netdev, adapter->vlgrp);
398}
399/*
400 * atl1c_set_mac_addr - Change the Ethernet Address of the NIC
401 * @netdev: network interface device structure
402 * @p: pointer to an address structure
403 *
404 * Returns 0 on success, negative on failure
405 */
406static int atl1c_set_mac_addr(struct net_device *netdev, void *p)
407{
408 struct atl1c_adapter *adapter = netdev_priv(netdev);
409 struct sockaddr *addr = p;
410
411 if (!is_valid_ether_addr(addr->sa_data))
412 return -EADDRNOTAVAIL;
413
414 if (netif_running(netdev))
415 return -EBUSY;
416
417 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
418 memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len);
419
420 atl1c_hw_set_mac_addr(&adapter->hw);
421
422 return 0;
423}
424
425static void atl1c_set_rxbufsize(struct atl1c_adapter *adapter,
426 struct net_device *dev)
427{
428 int mtu = dev->mtu;
429
430 adapter->rx_buffer_len = mtu > AT_RX_BUF_SIZE ?
431 roundup(mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN, 8) : AT_RX_BUF_SIZE;
432}
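/*
 * Illustrative sketch (not part of the original driver): the same sizing
 * rule as atl1c_set_rxbufsize() above, written standalone. For a jumbo MTU
 * of 9000 the frame needs 9000 + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN) +
 * 4 (VLAN_HLEN) = 9022 bytes, rounded up to the next multiple of 8, i.e.
 * 9024; MTUs at or below the default threshold keep the default buffer.
 * "def_size" stands in for AT_RX_BUF_SIZE, which is defined elsewhere.
 */
static unsigned int rx_buf_len_sketch(unsigned int mtu, unsigned int def_size)
{
	unsigned int need = mtu + 14 + 4 + 4;	/* Ethernet header + FCS + VLAN tag */

	if (mtu <= def_size)
		return def_size;		/* standard frames use the default buffer */
	return (need + 7) & ~7u;		/* roundup(need, 8) */
}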
433/*
434 * atl1c_change_mtu - Change the Maximum Transfer Unit
435 * @netdev: network interface device structure
436 * @new_mtu: new value for maximum frame size
437 *
438 * Returns 0 on success, negative on failure
439 */
440static int atl1c_change_mtu(struct net_device *netdev, int new_mtu)
441{
442 struct atl1c_adapter *adapter = netdev_priv(netdev);
443 int old_mtu = netdev->mtu;
444 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
445
446 if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) ||
447 (max_frame > MAX_JUMBO_FRAME_SIZE)) {
448 if (netif_msg_link(adapter))
449 dev_warn(&adapter->pdev->dev, "invalid MTU setting\n");
450 return -EINVAL;
451 }
452 /* set MTU */
453 if (old_mtu != new_mtu && netif_running(netdev)) {
454 while (test_and_set_bit(__AT_RESETTING, &adapter->flags))
455 msleep(1);
456 netdev->mtu = new_mtu;
457 adapter->hw.max_frame_size = new_mtu;
458 atl1c_set_rxbufsize(adapter, netdev);
459 atl1c_down(adapter);
460 atl1c_up(adapter);
461 clear_bit(__AT_RESETTING, &adapter->flags);
462 if (adapter->hw.ctrl_flags & ATL1C_FPGA_VERSION) {
463 u32 phy_data;
464
465 AT_READ_REG(&adapter->hw, 0x1414, &phy_data);
466 phy_data |= 0x10000000;
467 AT_WRITE_REG(&adapter->hw, 0x1414, phy_data);
468 }
469
470 }
471 return 0;
472}
473
474/*
475 * caller should hold mdio_lock
476 */
477static int atl1c_mdio_read(struct net_device *netdev, int phy_id, int reg_num)
478{
479 struct atl1c_adapter *adapter = netdev_priv(netdev);
480 u16 result;
481
482 atl1c_read_phy_reg(&adapter->hw, reg_num & MDIO_REG_ADDR_MASK, &result);
483 return result;
484}
485
486static void atl1c_mdio_write(struct net_device *netdev, int phy_id,
487 int reg_num, int val)
488{
489 struct atl1c_adapter *adapter = netdev_priv(netdev);
490
491 atl1c_write_phy_reg(&adapter->hw, reg_num & MDIO_REG_ADDR_MASK, val);
492}
493
494/*
495 * atl1c_mii_ioctl - handle MII register ioctls
496 * @netdev: network interface device structure
497 * @ifr: pointer to the ioctl request data
498 * @cmd: ioctl command (SIOCGMIIPHY, SIOCGMIIREG or SIOCSMIIREG)
499 */
500static int atl1c_mii_ioctl(struct net_device *netdev,
501 struct ifreq *ifr, int cmd)
502{
503 struct atl1c_adapter *adapter = netdev_priv(netdev);
504 struct pci_dev *pdev = adapter->pdev;
505 struct mii_ioctl_data *data = if_mii(ifr);
506 unsigned long flags;
507 int retval = 0;
508
509 if (!netif_running(netdev))
510 return -EINVAL;
511
512 spin_lock_irqsave(&adapter->mdio_lock, flags);
513 switch (cmd) {
514 case SIOCGMIIPHY:
515 data->phy_id = 0;
516 break;
517
518 case SIOCGMIIREG:
519 if (!capable(CAP_NET_ADMIN)) {
520 retval = -EPERM;
521 goto out;
522 }
523 if (atl1c_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
524 &data->val_out)) {
525 retval = -EIO;
526 goto out;
527 }
528 break;
529
530 case SIOCSMIIREG:
531 if (!capable(CAP_NET_ADMIN)) {
532 retval = -EPERM;
533 goto out;
534 }
535 if (data->reg_num & ~(0x1F)) {
536 retval = -EFAULT;
537 goto out;
538 }
539
540		dev_dbg(&pdev->dev, "<atl1c_mii_ioctl> write %x %x\n",
541 data->reg_num, data->val_in);
542 if (atl1c_write_phy_reg(&adapter->hw,
543 data->reg_num, data->val_in)) {
544 retval = -EIO;
545 goto out;
546 }
547 break;
548
549 default:
550 retval = -EOPNOTSUPP;
551 break;
552 }
553out:
554 spin_unlock_irqrestore(&adapter->mdio_lock, flags);
555 return retval;
556}
557
558/*
559 * atl1c_ioctl - dispatch supported MII ioctl commands
560 * @netdev: network interface device structure
561 * @ifr: pointer to the ioctl request data
562 * @cmd: ioctl command
563 */
564static int atl1c_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
565{
566 switch (cmd) {
567 case SIOCGMIIPHY:
568 case SIOCGMIIREG:
569 case SIOCSMIIREG:
570 return atl1c_mii_ioctl(netdev, ifr, cmd);
571 default:
572 return -EOPNOTSUPP;
573 }
574}
575
576/*
577 * atl1c_alloc_queues - Allocate memory for all rings
578 * @adapter: board private structure to initialize
579 *
580 */
581static int __devinit atl1c_alloc_queues(struct atl1c_adapter *adapter)
582{
583 return 0;
584}
585
586static void atl1c_set_mac_type(struct atl1c_hw *hw)
587{
588 switch (hw->device_id) {
589 case PCI_DEVICE_ID_ATTANSIC_L2C:
590 hw->nic_type = athr_l2c;
591 break;
592
593 case PCI_DEVICE_ID_ATTANSIC_L1C:
594 hw->nic_type = athr_l1c;
595 break;
596
597 default:
598 break;
599 }
600}
601
602static int atl1c_setup_mac_funcs(struct atl1c_hw *hw)
603{
604 u32 phy_status_data;
605 u32 link_ctrl_data;
606
607 atl1c_set_mac_type(hw);
608 AT_READ_REG(hw, REG_PHY_STATUS, &phy_status_data);
609 AT_READ_REG(hw, REG_LINK_CTRL, &link_ctrl_data);
610
611 hw->ctrl_flags = ATL1C_INTR_CLEAR_ON_READ |
612 ATL1C_INTR_MODRT_ENABLE |
613 ATL1C_RX_IPV6_CHKSUM |
614 ATL1C_TXQ_MODE_ENHANCE;
615 if (link_ctrl_data & LINK_CTRL_L0S_EN)
616 hw->ctrl_flags |= ATL1C_ASPM_L0S_SUPPORT;
617 if (link_ctrl_data & LINK_CTRL_L1_EN)
618 hw->ctrl_flags |= ATL1C_ASPM_L1_SUPPORT;
619
620 if (hw->nic_type == athr_l1c) {
621 hw->ctrl_flags |= ATL1C_ASPM_CTRL_MON;
622 hw->ctrl_flags |= ATL1C_LINK_CAP_1000M;
623 }
624 return 0;
625}
626/*
627 * atl1c_sw_init - Initialize general software structures (struct atl1c_adapter)
628 * @adapter: board private structure to initialize
629 *
630 * atl1c_sw_init initializes the Adapter private data structure.
631 * Fields are initialized based on PCI device information and
632 * OS network device settings (MTU size).
633 */
634static int __devinit atl1c_sw_init(struct atl1c_adapter *adapter)
635{
636 struct atl1c_hw *hw = &adapter->hw;
637 struct pci_dev *pdev = adapter->pdev;
638
639 adapter->wol = 0;
640 adapter->link_speed = SPEED_0;
641 adapter->link_duplex = FULL_DUPLEX;
642 adapter->num_rx_queues = AT_DEF_RECEIVE_QUEUE;
643 adapter->tpd_ring[0].count = 1024;
644 adapter->rfd_ring[0].count = 512;
645
646 hw->vendor_id = pdev->vendor;
647 hw->device_id = pdev->device;
648 hw->subsystem_vendor_id = pdev->subsystem_vendor;
649 hw->subsystem_id = pdev->subsystem_device;
650
651 /* before link up, we assume hibernate is true */
652 hw->hibernate = true;
653 hw->media_type = MEDIA_TYPE_AUTO_SENSOR;
654 if (atl1c_setup_mac_funcs(hw) != 0) {
655 dev_err(&pdev->dev, "set mac function pointers failed\n");
656 return -1;
657 }
658 hw->intr_mask = IMR_NORMAL_MASK;
659 hw->phy_configured = false;
660 hw->preamble_len = 7;
661 hw->max_frame_size = adapter->netdev->mtu;
662 if (adapter->num_rx_queues < 2) {
663 hw->rss_type = atl1c_rss_disable;
664 hw->rss_mode = atl1c_rss_mode_disable;
665 } else {
666 hw->rss_type = atl1c_rss_ipv4;
667 hw->rss_mode = atl1c_rss_mul_que_mul_int;
668 hw->rss_hash_bits = 16;
669 }
670 hw->autoneg_advertised = ADVERTISED_Autoneg;
671 hw->indirect_tab = 0xE4E4E4E4;
672 hw->base_cpu = 0;
673
674 hw->ict = 50000; /* 100ms */
675 hw->smb_timer = 200000; /* 400ms */
676 hw->cmb_tpd = 4;
677 hw->cmb_tx_timer = 1; /* 2 us */
678 hw->rx_imt = 200;
679 hw->tx_imt = 1000;
680
681 hw->tpd_burst = 5;
682 hw->rfd_burst = 8;
683 hw->dma_order = atl1c_dma_ord_out;
684 hw->dmar_block = atl1c_dma_req_1024;
685 hw->dmaw_block = atl1c_dma_req_1024;
686 hw->dmar_dly_cnt = 15;
687 hw->dmaw_dly_cnt = 4;
688
689 if (atl1c_alloc_queues(adapter)) {
690 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
691 return -ENOMEM;
692 }
693 /* TODO */
694 atl1c_set_rxbufsize(adapter, adapter->netdev);
695 atomic_set(&adapter->irq_sem, 1);
696 spin_lock_init(&adapter->mdio_lock);
697 spin_lock_init(&adapter->tx_lock);
698 set_bit(__AT_DOWN, &adapter->flags);
699
700 return 0;
701}
702
703/*
704 * atl1c_clean_tx_ring - Free Tx-skb
705 * @adapter: board private structure
706 */
707static void atl1c_clean_tx_ring(struct atl1c_adapter *adapter,
708 enum atl1c_trans_queue type)
709{
710 struct atl1c_tpd_ring *tpd_ring = &adapter->tpd_ring[type];
711 struct atl1c_buffer *buffer_info;
712 struct pci_dev *pdev = adapter->pdev;
713 u16 index, ring_count;
714
715 ring_count = tpd_ring->count;
716 for (index = 0; index < ring_count; index++) {
717 buffer_info = &tpd_ring->buffer_info[index];
718 if (buffer_info->state == ATL1_BUFFER_FREE)
719 continue;
720 if (buffer_info->dma)
721 pci_unmap_single(pdev, buffer_info->dma,
722 buffer_info->length,
723 PCI_DMA_TODEVICE);
724 if (buffer_info->skb)
725 dev_kfree_skb(buffer_info->skb);
726 buffer_info->dma = 0;
727 buffer_info->skb = NULL;
728 buffer_info->state = ATL1_BUFFER_FREE;
729 }
730
731 /* Zero out Tx-buffers */
732 memset(tpd_ring->desc, 0, sizeof(struct atl1c_tpd_desc) *
733 ring_count);
734 atomic_set(&tpd_ring->next_to_clean, 0);
735 tpd_ring->next_to_use = 0;
736}
737
738/*
739 * atl1c_clean_rx_ring - Free rx-reservation skbs
740 * @adapter: board private structure
741 */
742static void atl1c_clean_rx_ring(struct atl1c_adapter *adapter)
743{
744 struct atl1c_rfd_ring *rfd_ring = adapter->rfd_ring;
745 struct atl1c_rrd_ring *rrd_ring = adapter->rrd_ring;
746 struct atl1c_buffer *buffer_info;
747 struct pci_dev *pdev = adapter->pdev;
748 int i, j;
749
750 for (i = 0; i < adapter->num_rx_queues; i++) {
751 for (j = 0; j < rfd_ring[i].count; j++) {
752 buffer_info = &rfd_ring[i].buffer_info[j];
753 if (buffer_info->state == ATL1_BUFFER_FREE)
754 continue;
755 if (buffer_info->dma)
756 pci_unmap_single(pdev, buffer_info->dma,
757 buffer_info->length,
758 PCI_DMA_FROMDEVICE);
759 if (buffer_info->skb)
760 dev_kfree_skb(buffer_info->skb);
761 buffer_info->state = ATL1_BUFFER_FREE;
762 buffer_info->skb = NULL;
763 }
764 /* zero out the descriptor ring */
765 memset(rfd_ring[i].desc, 0, rfd_ring[i].size);
766 rfd_ring[i].next_to_clean = 0;
767 rfd_ring[i].next_to_use = 0;
768 rrd_ring[i].next_to_use = 0;
769 rrd_ring[i].next_to_clean = 0;
770 }
771}
772
773/*
774 * Initialize the descriptor ring read / write pointers
775 */
776static void atl1c_init_ring_ptrs(struct atl1c_adapter *adapter)
777{
778 struct atl1c_tpd_ring *tpd_ring = adapter->tpd_ring;
779 struct atl1c_rfd_ring *rfd_ring = adapter->rfd_ring;
780 struct atl1c_rrd_ring *rrd_ring = adapter->rrd_ring;
781 struct atl1c_buffer *buffer_info;
782 int i, j;
783
784 for (i = 0; i < AT_MAX_TRANSMIT_QUEUE; i++) {
785 tpd_ring[i].next_to_use = 0;
786 atomic_set(&tpd_ring[i].next_to_clean, 0);
787 buffer_info = tpd_ring[i].buffer_info;
788 for (j = 0; j < tpd_ring->count; j++)
789			buffer_info[j].state = ATL1_BUFFER_FREE;
790 }
791 for (i = 0; i < adapter->num_rx_queues; i++) {
792 rfd_ring[i].next_to_use = 0;
793 rfd_ring[i].next_to_clean = 0;
794 rrd_ring[i].next_to_use = 0;
795 rrd_ring[i].next_to_clean = 0;
796 for (j = 0; j < rfd_ring[i].count; j++) {
797 buffer_info = &rfd_ring[i].buffer_info[j];
798 buffer_info->state = ATL1_BUFFER_FREE;
799 }
800 }
801}
802
803/*
804 * atl1c_free_ring_resources - Free Tx / RX descriptor Resources
805 * @adapter: board private structure
806 *
807 * Free all transmit software resources
808 */
809static void atl1c_free_ring_resources(struct atl1c_adapter *adapter)
810{
811 struct pci_dev *pdev = adapter->pdev;
812
813 pci_free_consistent(pdev, adapter->ring_header.size,
814 adapter->ring_header.desc,
815 adapter->ring_header.dma);
816 adapter->ring_header.desc = NULL;
817
818	/* Note: just free tpd_ring.buffer_info;
819	 * it contains rfd_ring.buffer_info, do not double free */
820 if (adapter->tpd_ring[0].buffer_info) {
821 kfree(adapter->tpd_ring[0].buffer_info);
822 adapter->tpd_ring[0].buffer_info = NULL;
823 }
824}
825
826/*
827 * atl1c_setup_ring_resources - allocate Tx / Rx descriptor resources
828 * @adapter: board private structure
829 *
830 * Return 0 on success, negative on failure
831 */
832static int atl1c_setup_ring_resources(struct atl1c_adapter *adapter)
833{
834 struct pci_dev *pdev = adapter->pdev;
835 struct atl1c_tpd_ring *tpd_ring = adapter->tpd_ring;
836 struct atl1c_rfd_ring *rfd_ring = adapter->rfd_ring;
837 struct atl1c_rrd_ring *rrd_ring = adapter->rrd_ring;
838 struct atl1c_ring_header *ring_header = &adapter->ring_header;
839 int num_rx_queues = adapter->num_rx_queues;
840 int size;
841 int i;
842 int count = 0;
843 int rx_desc_count = 0;
844 u32 offset = 0;
845
846 rrd_ring[0].count = rfd_ring[0].count;
847 for (i = 1; i < AT_MAX_TRANSMIT_QUEUE; i++)
848 tpd_ring[i].count = tpd_ring[0].count;
849
850 for (i = 1; i < adapter->num_rx_queues; i++)
851 rfd_ring[i].count = rrd_ring[i].count = rfd_ring[0].count;
852
853	/* 2 TPD queues: one high priority queue,
854	 * one normal priority queue */
855 size = sizeof(struct atl1c_buffer) * (tpd_ring->count * 2 +
856 rfd_ring->count * num_rx_queues);
857 tpd_ring->buffer_info = kzalloc(size, GFP_KERNEL);
858 if (unlikely(!tpd_ring->buffer_info)) {
859 dev_err(&pdev->dev, "kzalloc failed, size = %d\n",
860 size);
861 goto err_nomem;
862 }
863 for (i = 0; i < AT_MAX_TRANSMIT_QUEUE; i++) {
864 tpd_ring[i].buffer_info =
865 (struct atl1c_buffer *) (tpd_ring->buffer_info + count);
866 count += tpd_ring[i].count;
867 }
868
869 for (i = 0; i < num_rx_queues; i++) {
870 rfd_ring[i].buffer_info =
871 (struct atl1c_buffer *) (tpd_ring->buffer_info + count);
872 count += rfd_ring[i].count;
873 rx_desc_count += rfd_ring[i].count;
874 }
875 /*
876 * real ring DMA buffer
877 * each ring/block may need up to 8 bytes for alignment, hence the
878 * additional bytes tacked onto the end.
879 */
880 ring_header->size = size =
881 sizeof(struct atl1c_tpd_desc) * tpd_ring->count * 2 +
882 sizeof(struct atl1c_rx_free_desc) * rx_desc_count +
883 sizeof(struct atl1c_recv_ret_status) * rx_desc_count +
884 sizeof(struct atl1c_hw_stats) +
885 8 * 4 + 8 * 2 * num_rx_queues;
886
887 ring_header->desc = pci_alloc_consistent(pdev, ring_header->size,
888 &ring_header->dma);
889 if (unlikely(!ring_header->desc)) {
890		dev_err(&pdev->dev, "pci_alloc_consistent failed\n");
891 goto err_nomem;
892 }
893 memset(ring_header->desc, 0, ring_header->size);
894 /* init TPD ring */
895
896 tpd_ring[0].dma = roundup(ring_header->dma, 8);
897 offset = tpd_ring[0].dma - ring_header->dma;
898 for (i = 0; i < AT_MAX_TRANSMIT_QUEUE; i++) {
899 tpd_ring[i].dma = ring_header->dma + offset;
900 tpd_ring[i].desc = (u8 *) ring_header->desc + offset;
901 tpd_ring[i].size =
902 sizeof(struct atl1c_tpd_desc) * tpd_ring[i].count;
903 offset += roundup(tpd_ring[i].size, 8);
904 }
905 /* init RFD ring */
906 for (i = 0; i < num_rx_queues; i++) {
907 rfd_ring[i].dma = ring_header->dma + offset;
908 rfd_ring[i].desc = (u8 *) ring_header->desc + offset;
909 rfd_ring[i].size = sizeof(struct atl1c_rx_free_desc) *
910 rfd_ring[i].count;
911 offset += roundup(rfd_ring[i].size, 8);
912 }
913
914 /* init RRD ring */
915 for (i = 0; i < num_rx_queues; i++) {
916 rrd_ring[i].dma = ring_header->dma + offset;
917 rrd_ring[i].desc = (u8 *) ring_header->desc + offset;
918 rrd_ring[i].size = sizeof(struct atl1c_recv_ret_status) *
919 rrd_ring[i].count;
920 offset += roundup(rrd_ring[i].size, 8);
921 }
922
923 adapter->smb.dma = ring_header->dma + offset;
924 adapter->smb.smb = (u8 *)ring_header->desc + offset;
925 return 0;
926
927err_nomem:
928 kfree(tpd_ring->buffer_info);
929 return -ENOMEM;
930}
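/*
 * Illustrative sketch (not part of the original driver): the carving
 * pattern atl1c_setup_ring_resources() uses above. A single physically
 * contiguous allocation is split into per-ring slices, each slice starting
 * on an 8-byte boundary, which is why ring_header->size reserves a few
 * extra bytes per ring. The struct and helper names are hypothetical.
 */
struct ring_slice_sketch {
	unsigned long dma;	/* bus address of this slice */
	unsigned char *virt;	/* CPU address of this slice */
};

static unsigned long carve_slice_sketch(struct ring_slice_sketch *slice,
					unsigned long base_dma,
					unsigned char *base_virt,
					unsigned long offset,
					unsigned long bytes)
{
	slice->dma = base_dma + offset;
	slice->virt = base_virt + offset;
	return offset + ((bytes + 7) & ~7ul);	/* next slice stays 8-byte aligned */
}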
931
932static void atl1c_configure_des_ring(struct atl1c_adapter *adapter)
933{
934 struct atl1c_hw *hw = &adapter->hw;
935 struct atl1c_rfd_ring *rfd_ring = (struct atl1c_rfd_ring *)
936 adapter->rfd_ring;
937 struct atl1c_rrd_ring *rrd_ring = (struct atl1c_rrd_ring *)
938 adapter->rrd_ring;
939 struct atl1c_tpd_ring *tpd_ring = (struct atl1c_tpd_ring *)
940 adapter->tpd_ring;
941 struct atl1c_cmb *cmb = (struct atl1c_cmb *) &adapter->cmb;
942 struct atl1c_smb *smb = (struct atl1c_smb *) &adapter->smb;
943 int i;
944
945 /* TPD */
946 AT_WRITE_REG(hw, REG_TX_BASE_ADDR_HI,
947 (u32)((tpd_ring[atl1c_trans_normal].dma &
948 AT_DMA_HI_ADDR_MASK) >> 32));
949 /* just enable normal priority TX queue */
950 AT_WRITE_REG(hw, REG_NTPD_HEAD_ADDR_LO,
951 (u32)(tpd_ring[atl1c_trans_normal].dma &
952 AT_DMA_LO_ADDR_MASK));
953 AT_WRITE_REG(hw, REG_HTPD_HEAD_ADDR_LO,
954 (u32)(tpd_ring[atl1c_trans_high].dma &
955 AT_DMA_LO_ADDR_MASK));
956 AT_WRITE_REG(hw, REG_TPD_RING_SIZE,
957 (u32)(tpd_ring[0].count & TPD_RING_SIZE_MASK));
958
959
960 /* RFD */
961 AT_WRITE_REG(hw, REG_RX_BASE_ADDR_HI,
962 (u32)((rfd_ring[0].dma & AT_DMA_HI_ADDR_MASK) >> 32));
963 for (i = 0; i < adapter->num_rx_queues; i++)
964 AT_WRITE_REG(hw, atl1c_rfd_addr_lo_regs[i],
965 (u32)(rfd_ring[i].dma & AT_DMA_LO_ADDR_MASK));
966
967 AT_WRITE_REG(hw, REG_RFD_RING_SIZE,
968 rfd_ring[0].count & RFD_RING_SIZE_MASK);
969 AT_WRITE_REG(hw, REG_RX_BUF_SIZE,
970 adapter->rx_buffer_len & RX_BUF_SIZE_MASK);
971
972 /* RRD */
973 for (i = 0; i < adapter->num_rx_queues; i++)
974 AT_WRITE_REG(hw, atl1c_rrd_addr_lo_regs[i],
975 (u32)(rrd_ring[i].dma & AT_DMA_LO_ADDR_MASK));
976 AT_WRITE_REG(hw, REG_RRD_RING_SIZE,
977 (rrd_ring[0].count & RRD_RING_SIZE_MASK));
978
979 /* CMB */
980 AT_WRITE_REG(hw, REG_CMB_BASE_ADDR_LO, cmb->dma & AT_DMA_LO_ADDR_MASK);
981
982 /* SMB */
983 AT_WRITE_REG(hw, REG_SMB_BASE_ADDR_HI,
984 (u32)((smb->dma & AT_DMA_HI_ADDR_MASK) >> 32));
985 AT_WRITE_REG(hw, REG_SMB_BASE_ADDR_LO,
986 (u32)(smb->dma & AT_DMA_LO_ADDR_MASK));
987 /* Load all of base address above */
988 AT_WRITE_REG(hw, REG_LOAD_PTR, 1);
989}
990
991static void atl1c_configure_tx(struct atl1c_adapter *adapter)
992{
993 struct atl1c_hw *hw = &adapter->hw;
994 u32 dev_ctrl_data;
995 u32 max_pay_load;
996 u16 tx_offload_thresh;
997 u32 txq_ctrl_data;
998 u32 extra_size = 0; /* Jumbo frame threshold in QWORD unit */
999
1000 extra_size = ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN;
1001 tx_offload_thresh = MAX_TX_OFFLOAD_THRESH;
1002 AT_WRITE_REG(hw, REG_TX_TSO_OFFLOAD_THRESH,
1003 (tx_offload_thresh >> 3) & TX_TSO_OFFLOAD_THRESH_MASK);
1004 AT_READ_REG(hw, REG_DEVICE_CTRL, &dev_ctrl_data);
1005 max_pay_load = (dev_ctrl_data >> DEVICE_CTRL_MAX_PAYLOAD_SHIFT) &
1006 DEVICE_CTRL_MAX_PAYLOAD_MASK;
1007 hw->dmaw_block = min(max_pay_load, hw->dmaw_block);
1008 max_pay_load = (dev_ctrl_data >> DEVICE_CTRL_MAX_RREQ_SZ_SHIFT) &
1009 DEVICE_CTRL_MAX_RREQ_SZ_MASK;
1010 hw->dmar_block = min(max_pay_load, hw->dmar_block);
1011
1012 txq_ctrl_data = (hw->tpd_burst & TXQ_NUM_TPD_BURST_MASK) <<
1013 TXQ_NUM_TPD_BURST_SHIFT;
1014 if (hw->ctrl_flags & ATL1C_TXQ_MODE_ENHANCE)
1015 txq_ctrl_data |= TXQ_CTRL_ENH_MODE;
1016 txq_ctrl_data |= (atl1c_pay_load_size[hw->dmar_block] &
1017 TXQ_TXF_BURST_NUM_MASK) << TXQ_TXF_BURST_NUM_SHIFT;
1018
1019 AT_WRITE_REG(hw, REG_TXQ_CTRL, txq_ctrl_data);
1020}
1021
1022static void atl1c_configure_rx(struct atl1c_adapter *adapter)
1023{
1024 struct atl1c_hw *hw = &adapter->hw;
1025 u32 rxq_ctrl_data;
1026
1027 rxq_ctrl_data = (hw->rfd_burst & RXQ_RFD_BURST_NUM_MASK) <<
1028 RXQ_RFD_BURST_NUM_SHIFT;
1029
1030 if (hw->ctrl_flags & ATL1C_RX_IPV6_CHKSUM)
1031 rxq_ctrl_data |= IPV6_CHKSUM_CTRL_EN;
1032 if (hw->rss_type == atl1c_rss_ipv4)
1033 rxq_ctrl_data |= RSS_HASH_IPV4;
1034 if (hw->rss_type == atl1c_rss_ipv4_tcp)
1035 rxq_ctrl_data |= RSS_HASH_IPV4_TCP;
1036 if (hw->rss_type == atl1c_rss_ipv6)
1037 rxq_ctrl_data |= RSS_HASH_IPV6;
1038 if (hw->rss_type == atl1c_rss_ipv6_tcp)
1039 rxq_ctrl_data |= RSS_HASH_IPV6_TCP;
1040 if (hw->rss_type != atl1c_rss_disable)
1041 rxq_ctrl_data |= RRS_HASH_CTRL_EN;
1042
1043 rxq_ctrl_data |= (hw->rss_mode & RSS_MODE_MASK) <<
1044 RSS_MODE_SHIFT;
1045 rxq_ctrl_data |= (hw->rss_hash_bits & RSS_HASH_BITS_MASK) <<
1046 RSS_HASH_BITS_SHIFT;
1047 if (hw->ctrl_flags & ATL1C_ASPM_CTRL_MON)
1048 rxq_ctrl_data |= (ASPM_THRUPUT_LIMIT_100M &
1049 ASPM_THRUPUT_LIMIT_MASK) << ASPM_THRUPUT_LIMIT_SHIFT;
1050
1051 AT_WRITE_REG(hw, REG_RXQ_CTRL, rxq_ctrl_data);
1052}
1053
1054static void atl1c_configure_rss(struct atl1c_adapter *adapter)
1055{
1056 struct atl1c_hw *hw = &adapter->hw;
1057
1058 AT_WRITE_REG(hw, REG_IDT_TABLE, hw->indirect_tab);
1059 AT_WRITE_REG(hw, REG_BASE_CPU_NUMBER, hw->base_cpu);
1060}
1061
1062static void atl1c_configure_dma(struct atl1c_adapter *adapter)
1063{
1064 struct atl1c_hw *hw = &adapter->hw;
1065 u32 dma_ctrl_data;
1066
1067 dma_ctrl_data = DMA_CTRL_DMAR_REQ_PRI;
1068 if (hw->ctrl_flags & ATL1C_CMB_ENABLE)
1069 dma_ctrl_data |= DMA_CTRL_CMB_EN;
1070 if (hw->ctrl_flags & ATL1C_SMB_ENABLE)
1071 dma_ctrl_data |= DMA_CTRL_SMB_EN;
1072 else
1073 dma_ctrl_data |= MAC_CTRL_SMB_DIS;
1074
1075 switch (hw->dma_order) {
1076 case atl1c_dma_ord_in:
1077 dma_ctrl_data |= DMA_CTRL_DMAR_IN_ORDER;
1078 break;
1079 case atl1c_dma_ord_enh:
1080 dma_ctrl_data |= DMA_CTRL_DMAR_ENH_ORDER;
1081 break;
1082 case atl1c_dma_ord_out:
1083 dma_ctrl_data |= DMA_CTRL_DMAR_OUT_ORDER;
1084 break;
1085 default:
1086 break;
1087 }
1088
1089 dma_ctrl_data |= (((u32)hw->dmar_block) & DMA_CTRL_DMAR_BURST_LEN_MASK)
1090 << DMA_CTRL_DMAR_BURST_LEN_SHIFT;
1091 dma_ctrl_data |= (((u32)hw->dmaw_block) & DMA_CTRL_DMAW_BURST_LEN_MASK)
1092 << DMA_CTRL_DMAW_BURST_LEN_SHIFT;
1093 dma_ctrl_data |= (((u32)hw->dmar_dly_cnt) & DMA_CTRL_DMAR_DLY_CNT_MASK)
1094 << DMA_CTRL_DMAR_DLY_CNT_SHIFT;
1095 dma_ctrl_data |= (((u32)hw->dmaw_dly_cnt) & DMA_CTRL_DMAW_DLY_CNT_MASK)
1096 << DMA_CTRL_DMAW_DLY_CNT_SHIFT;
1097
1098 AT_WRITE_REG(hw, REG_DMA_CTRL, dma_ctrl_data);
1099}
1100
1101/*
1102 * Stop the mac, transmit and receive units
1103 * hw - Struct containing variables accessed by shared code
1104 * return : 0 or idle status (if error)
1105 */
1106static int atl1c_stop_mac(struct atl1c_hw *hw)
1107{
1108 u32 data;
1109 int timeout;
1110
1111 AT_READ_REG(hw, REG_RXQ_CTRL, &data);
1112 data &= ~(RXQ1_CTRL_EN | RXQ2_CTRL_EN |
1113 RXQ3_CTRL_EN | RXQ_CTRL_EN);
1114 AT_WRITE_REG(hw, REG_RXQ_CTRL, data);
1115
1116 AT_READ_REG(hw, REG_TXQ_CTRL, &data);
1117 data &= ~TXQ_CTRL_EN;
1118	AT_WRITE_REG(hw, REG_TXQ_CTRL, data);
1119
1120 for (timeout = 0; timeout < AT_HW_MAX_IDLE_DELAY; timeout++) {
1121 AT_READ_REG(hw, REG_IDLE_STATUS, &data);
1122 if ((data & (IDLE_STATUS_RXQ_NO_IDLE |
1123 IDLE_STATUS_TXQ_NO_IDLE)) == 0)
1124 break;
1125 msleep(1);
1126 }
1127
1128 AT_READ_REG(hw, REG_MAC_CTRL, &data);
1129 data &= ~(MAC_CTRL_TX_EN | MAC_CTRL_RX_EN);
1130 AT_WRITE_REG(hw, REG_MAC_CTRL, data);
1131
1132 for (timeout = 0; timeout < AT_HW_MAX_IDLE_DELAY; timeout++) {
1133 AT_READ_REG(hw, REG_IDLE_STATUS, &data);
1134 if ((data & IDLE_STATUS_MASK) == 0)
1135 return 0;
1136 msleep(1);
1137 }
1138 return data;
1139}
1140
1141static void atl1c_enable_rx_ctrl(struct atl1c_hw *hw)
1142{
1143 u32 data;
1144
1145 AT_READ_REG(hw, REG_RXQ_CTRL, &data);
1146 switch (hw->adapter->num_rx_queues) {
1147 case 4:
1148 data |= (RXQ3_CTRL_EN | RXQ2_CTRL_EN | RXQ1_CTRL_EN);
1149 break;
1150 case 3:
1151 data |= (RXQ2_CTRL_EN | RXQ1_CTRL_EN);
1152 break;
1153 case 2:
1154 data |= RXQ1_CTRL_EN;
1155 break;
1156 default:
1157 break;
1158 }
1159 data |= RXQ_CTRL_EN;
1160 AT_WRITE_REG(hw, REG_RXQ_CTRL, data);
1161}
1162
1163static void atl1c_enable_tx_ctrl(struct atl1c_hw *hw)
1164{
1165 u32 data;
1166
1167 AT_READ_REG(hw, REG_TXQ_CTRL, &data);
1168 data |= TXQ_CTRL_EN;
1169 AT_WRITE_REG(hw, REG_TXQ_CTRL, data);
1170}
1171
1172/*
1173 * Reset the transmit and receive units; mask and clear all interrupts.
1174 * hw - Struct containing variables accessed by shared code
1175 * return : 0 or idle status (if error)
1176 */
1177static int atl1c_reset_mac(struct atl1c_hw *hw)
1178{
1179 struct atl1c_adapter *adapter = (struct atl1c_adapter *)hw->adapter;
1180 struct pci_dev *pdev = adapter->pdev;
1181 u32 idle_status_data = 0;
1182 int timeout = 0;
1183 int ret;
1184
1185 AT_WRITE_REG(hw, REG_IMR, 0);
1186 AT_WRITE_REG(hw, REG_ISR, ISR_DIS_INT);
1187
1188 ret = atl1c_stop_mac(hw);
1189 if (ret)
1190 return ret;
1191 /*
1192 * Issue Soft Reset to the MAC. This will reset the chip's
1193	 * transmit, receive and DMA units. It will not affect
1194 * the current PCI configuration. The global reset bit is self-
1195 * clearing, and should clear within a microsecond.
1196 */
1197 AT_WRITE_REGW(hw, REG_MASTER_CTRL, MASTER_CTRL_SOFT_RST);
1198 AT_WRITE_FLUSH(hw);
1199 msleep(10);
1200	/* Wait at least 10ms for all modules to become idle */
1201 for (timeout = 0; timeout < AT_HW_MAX_IDLE_DELAY; timeout++) {
1202 AT_READ_REG(hw, REG_IDLE_STATUS, &idle_status_data);
1203 if ((idle_status_data & IDLE_STATUS_MASK) == 0)
1204 break;
1205 msleep(1);
1206 }
1207 if (timeout >= AT_HW_MAX_IDLE_DELAY) {
1208 dev_err(&pdev->dev,
1209			"MAC state machine can't become idle within"
1210			" 10ms of being disabled\n");
1211 return -1;
1212 }
1213 return 0;
1214}
1215
1216static void atl1c_disable_l0s_l1(struct atl1c_hw *hw)
1217{
1218 u32 pm_ctrl_data;
1219
1220 AT_READ_REG(hw, REG_PM_CTRL, &pm_ctrl_data);
1221 pm_ctrl_data &= ~(PM_CTRL_L1_ENTRY_TIMER_MASK <<
1222 PM_CTRL_L1_ENTRY_TIMER_SHIFT);
1223 pm_ctrl_data &= ~PM_CTRL_CLK_SWH_L1;
1224 pm_ctrl_data &= ~PM_CTRL_ASPM_L0S_EN;
1225 pm_ctrl_data &= ~PM_CTRL_ASPM_L1_EN;
1226 pm_ctrl_data &= ~PM_CTRL_MAC_ASPM_CHK;
1227 pm_ctrl_data &= ~PM_CTRL_SERDES_PD_EX_L1;
1228
1229 pm_ctrl_data |= PM_CTRL_SERDES_BUDS_RX_L1_EN;
1230 pm_ctrl_data |= PM_CTRL_SERDES_PLL_L1_EN;
1231 pm_ctrl_data |= PM_CTRL_SERDES_L1_EN;
1232 AT_WRITE_REG(hw, REG_PM_CTRL, pm_ctrl_data);
1233}
1234
1235/*
1236 * Set ASPM state.
1237 * Enable/disable L0s/L1 depending on link state.
1238 */
1239static void atl1c_set_aspm(struct atl1c_hw *hw, bool linkup)
1240{
1241 u32 pm_ctrl_data;
1242
1243 AT_READ_REG(hw, REG_PM_CTRL, &pm_ctrl_data);
1244
1245	pm_ctrl_data &= ~PM_CTRL_SERDES_PD_EX_L1;
1246	pm_ctrl_data &= ~PM_CTRL_SERDES_BUDS_RX_L1_EN;
1247	pm_ctrl_data &= ~PM_CTRL_SERDES_L1_EN;
1248 pm_ctrl_data &= ~(PM_CTRL_L1_ENTRY_TIMER_MASK <<
1249 PM_CTRL_L1_ENTRY_TIMER_SHIFT);
1250
1251 pm_ctrl_data |= PM_CTRL_MAC_ASPM_CHK;
1252
1253 if (linkup) {
1254 pm_ctrl_data |= PM_CTRL_SERDES_PLL_L1_EN;
1255 pm_ctrl_data &= ~PM_CTRL_CLK_SWH_L1;
1256
1257 if (hw->ctrl_flags & ATL1C_ASPM_L1_SUPPORT) {
1258 pm_ctrl_data |= AT_ASPM_L1_TIMER <<
1259 PM_CTRL_L1_ENTRY_TIMER_SHIFT;
1260 pm_ctrl_data |= PM_CTRL_ASPM_L1_EN;
1261 } else
1262 pm_ctrl_data &= ~PM_CTRL_ASPM_L1_EN;
1263
1264 if (hw->ctrl_flags & ATL1C_ASPM_L0S_SUPPORT)
1265 pm_ctrl_data |= PM_CTRL_ASPM_L0S_EN;
1266 else
1267 pm_ctrl_data &= ~PM_CTRL_ASPM_L0S_EN;
1268
1269 } else {
1270 pm_ctrl_data &= ~PM_CTRL_ASPM_L0S_EN;
1271 pm_ctrl_data &= ~PM_CTRL_SERDES_PLL_L1_EN;
1272
1273 pm_ctrl_data |= PM_CTRL_CLK_SWH_L1;
1274
1275 if (hw->ctrl_flags & ATL1C_ASPM_L1_SUPPORT)
1276 pm_ctrl_data |= PM_CTRL_ASPM_L1_EN;
1277 else
1278 pm_ctrl_data &= ~PM_CTRL_ASPM_L1_EN;
1279 }
1280
1281 AT_WRITE_REG(hw, REG_PM_CTRL, pm_ctrl_data);
1282}
1283
1284static void atl1c_setup_mac_ctrl(struct atl1c_adapter *adapter)
1285{
1286 struct atl1c_hw *hw = &adapter->hw;
1287 struct net_device *netdev = adapter->netdev;
1288 u32 mac_ctrl_data;
1289
1290 mac_ctrl_data = MAC_CTRL_TX_EN | MAC_CTRL_RX_EN;
1291 mac_ctrl_data |= (MAC_CTRL_TX_FLOW | MAC_CTRL_RX_FLOW);
1292
1293 if (adapter->link_duplex == FULL_DUPLEX) {
1294 hw->mac_duplex = true;
1295 mac_ctrl_data |= MAC_CTRL_DUPLX;
1296 }
1297
1298 if (adapter->link_speed == SPEED_1000)
1299 hw->mac_speed = atl1c_mac_speed_1000;
1300 else
1301 hw->mac_speed = atl1c_mac_speed_10_100;
1302
1303 mac_ctrl_data |= (hw->mac_speed & MAC_CTRL_SPEED_MASK) <<
1304 MAC_CTRL_SPEED_SHIFT;
1305
1306 mac_ctrl_data |= (MAC_CTRL_ADD_CRC | MAC_CTRL_PAD);
1307 mac_ctrl_data |= ((hw->preamble_len & MAC_CTRL_PRMLEN_MASK) <<
1308 MAC_CTRL_PRMLEN_SHIFT);
1309
1310 if (adapter->vlgrp)
1311 mac_ctrl_data |= MAC_CTRL_RMV_VLAN;
1312
1313 mac_ctrl_data |= MAC_CTRL_BC_EN;
1314 if (netdev->flags & IFF_PROMISC)
1315 mac_ctrl_data |= MAC_CTRL_PROMIS_EN;
1316 if (netdev->flags & IFF_ALLMULTI)
1317 mac_ctrl_data |= MAC_CTRL_MC_ALL_EN;
1318
1319 mac_ctrl_data |= MAC_CTRL_SINGLE_PAUSE_EN;
1320 AT_WRITE_REG(hw, REG_MAC_CTRL, mac_ctrl_data);
1321}
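
/*
 * Illustrative sketch (not part of the driver): how multi-bit fields such as
 * speed and preamble length are packed into one control word with the
 * (value & MASK) << SHIFT idiom used above. The mask and shift values here
 * are invented for the demonstration, not the real MAC_CTRL layout.
 */
#include <stdio.h>
#include <stdint.h>

#define SPEED_MASK	0x3u
#define SPEED_SHIFT	20
#define PRMLEN_MASK	0xFu
#define PRMLEN_SHIFT	10

int main(void)
{
	uint32_t ctrl = 0;
	uint32_t speed = 2;	/* e.g. a "gigabit" speed code */
	uint32_t prmlen = 7;	/* preamble length */

	ctrl |= (speed & SPEED_MASK) << SPEED_SHIFT;
	ctrl |= (prmlen & PRMLEN_MASK) << PRMLEN_SHIFT;

	/* the field can be read back with the inverse shift-and-mask */
	printf("ctrl = 0x%08x, speed field = %u\n",
	       (unsigned int)ctrl,
	       (unsigned int)((ctrl >> SPEED_SHIFT) & SPEED_MASK));
	return 0;
}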
1322
1323/*
1324 * atl1c_configure - Configure Transmit&Receive Unit after Reset
1325 * @adapter: board private structure
1326 *
1327 * Configure the Tx/Rx unit of the MAC after a reset.
1328 */
1329static int atl1c_configure(struct atl1c_adapter *adapter)
1330{
1331 struct atl1c_hw *hw = &adapter->hw;
1332 u32 master_ctrl_data = 0;
1333 u32 intr_modrt_data;
1334
1335 /* clear interrupt status */
1336 AT_WRITE_REG(hw, REG_ISR, 0xFFFFFFFF);
1337 /* Clear any WOL status */
1338 AT_WRITE_REG(hw, REG_WOL_CTRL, 0);
1339	/* set Interrupt Clear Timer
1340	 * HW re-asserts the interrupt to the system if software has not
1341	 * acknowledged it within this time.
1342	 */
1343 AT_WRITE_REG(hw, REG_INT_RETRIG_TIMER,
1344 hw->ict & INT_RETRIG_TIMER_MASK);
1345
1346 atl1c_configure_des_ring(adapter);
1347
1348 if (hw->ctrl_flags & ATL1C_INTR_MODRT_ENABLE) {
1349 intr_modrt_data = (hw->tx_imt & IRQ_MODRT_TIMER_MASK) <<
1350 IRQ_MODRT_TX_TIMER_SHIFT;
1351 intr_modrt_data |= (hw->rx_imt & IRQ_MODRT_TIMER_MASK) <<
1352 IRQ_MODRT_RX_TIMER_SHIFT;
1353 AT_WRITE_REG(hw, REG_IRQ_MODRT_TIMER_INIT, intr_modrt_data);
1354 master_ctrl_data |=
1355 MASTER_CTRL_TX_ITIMER_EN | MASTER_CTRL_RX_ITIMER_EN;
1356 }
1357
1358 if (hw->ctrl_flags & ATL1C_INTR_CLEAR_ON_READ)
1359 master_ctrl_data |= MASTER_CTRL_INT_RDCLR;
1360
1361 AT_WRITE_REG(hw, REG_MASTER_CTRL, master_ctrl_data);
1362
1363 if (hw->ctrl_flags & ATL1C_CMB_ENABLE) {
1364 AT_WRITE_REG(hw, REG_CMB_TPD_THRESH,
1365 hw->cmb_tpd & CMB_TPD_THRESH_MASK);
1366 AT_WRITE_REG(hw, REG_CMB_TX_TIMER,
1367 hw->cmb_tx_timer & CMB_TX_TIMER_MASK);
1368 }
1369
1370 if (hw->ctrl_flags & ATL1C_SMB_ENABLE)
1371 AT_WRITE_REG(hw, REG_SMB_STAT_TIMER,
1372 hw->smb_timer & SMB_STAT_TIMER_MASK);
1373 /* set MTU */
1374 AT_WRITE_REG(hw, REG_MTU, hw->max_frame_size + ETH_HLEN +
1375 VLAN_HLEN + ETH_FCS_LEN);
1376 /* HDS, disable */
1377 AT_WRITE_REG(hw, REG_HDS_CTRL, 0);
1378
1379 atl1c_configure_tx(adapter);
1380 atl1c_configure_rx(adapter);
1381 atl1c_configure_rss(adapter);
1382 atl1c_configure_dma(adapter);
1383
1384 return 0;
1385}
1386
1387static void atl1c_update_hw_stats(struct atl1c_adapter *adapter)
1388{
1389 u16 hw_reg_addr = 0;
1390 unsigned long *stats_item = NULL;
1391 u32 data;
1392
1393 /* update rx status */
1394 hw_reg_addr = REG_MAC_RX_STATUS_BIN;
1395 stats_item = &adapter->hw_stats.rx_ok;
1396 while (hw_reg_addr <= REG_MAC_RX_STATUS_END) {
1397 AT_READ_REG(&adapter->hw, hw_reg_addr, &data);
1398 *stats_item += data;
1399 stats_item++;
1400 hw_reg_addr += 4;
1401 }
1402	/* update tx status */
1403 hw_reg_addr = REG_MAC_TX_STATUS_BIN;
1404 stats_item = &adapter->hw_stats.tx_ok;
1405 while (hw_reg_addr <= REG_MAC_TX_STATUS_END) {
1406 AT_READ_REG(&adapter->hw, hw_reg_addr, &data);
1407 *stats_item += data;
1408 stats_item++;
1409 hw_reg_addr += 4;
1410 }
1411}
1412
1413/*
1414 * atl1c_get_stats - Get System Network Statistics
1415 * @netdev: network interface device structure
1416 *
1417 * Returns the address of the device statistics structure.
1418 * The statistics are actually updated from the timer callback.
1419 */
1420static struct net_device_stats *atl1c_get_stats(struct net_device *netdev)
1421{
1422 struct atl1c_adapter *adapter = netdev_priv(netdev);
1423 struct atl1c_hw_stats *hw_stats = &adapter->hw_stats;
1424 struct net_device_stats *net_stats = &adapter->net_stats;
1425
1426 atl1c_update_hw_stats(adapter);
1427 net_stats->rx_packets = hw_stats->rx_ok;
1428 net_stats->tx_packets = hw_stats->tx_ok;
1429 net_stats->rx_bytes = hw_stats->rx_byte_cnt;
1430 net_stats->tx_bytes = hw_stats->tx_byte_cnt;
1431 net_stats->multicast = hw_stats->rx_mcast;
1432 net_stats->collisions = hw_stats->tx_1_col +
1433 hw_stats->tx_2_col * 2 +
1434 hw_stats->tx_late_col + hw_stats->tx_abort_col;
1435 net_stats->rx_errors = hw_stats->rx_frag + hw_stats->rx_fcs_err +
1436 hw_stats->rx_len_err + hw_stats->rx_sz_ov +
1437 hw_stats->rx_rrd_ov + hw_stats->rx_align_err;
1438 net_stats->rx_fifo_errors = hw_stats->rx_rxf_ov;
1439 net_stats->rx_length_errors = hw_stats->rx_len_err;
1440 net_stats->rx_crc_errors = hw_stats->rx_fcs_err;
1441 net_stats->rx_frame_errors = hw_stats->rx_align_err;
1442 net_stats->rx_over_errors = hw_stats->rx_rrd_ov + hw_stats->rx_rxf_ov;
1443
1444 net_stats->rx_missed_errors = hw_stats->rx_rrd_ov + hw_stats->rx_rxf_ov;
1445
1446 net_stats->tx_errors = hw_stats->tx_late_col + hw_stats->tx_abort_col +
1447 hw_stats->tx_underrun + hw_stats->tx_trunc;
1448 net_stats->tx_fifo_errors = hw_stats->tx_underrun;
1449 net_stats->tx_aborted_errors = hw_stats->tx_abort_col;
1450 net_stats->tx_window_errors = hw_stats->tx_late_col;
1451
1452 return &adapter->net_stats;
1453}
1454
1455static inline void atl1c_clear_phy_int(struct atl1c_adapter *adapter)
1456{
1457 u16 phy_data;
1458
1459 spin_lock(&adapter->mdio_lock);
1460 atl1c_read_phy_reg(&adapter->hw, MII_ISR, &phy_data);
1461 spin_unlock(&adapter->mdio_lock);
1462}
1463
1464static bool atl1c_clean_tx_irq(struct atl1c_adapter *adapter,
1465 enum atl1c_trans_queue type)
1466{
1467 struct atl1c_tpd_ring *tpd_ring = (struct atl1c_tpd_ring *)
1468 &adapter->tpd_ring[type];
1469 struct atl1c_buffer *buffer_info;
1470 u16 next_to_clean = atomic_read(&tpd_ring->next_to_clean);
1471 u16 hw_next_to_clean;
1472 u16 shift;
1473 u32 data;
1474
1475 if (type == atl1c_trans_high)
1476 shift = MB_HTPD_CONS_IDX_SHIFT;
1477 else
1478 shift = MB_NTPD_CONS_IDX_SHIFT;
1479
1480 AT_READ_REG(&adapter->hw, REG_MB_PRIO_CONS_IDX, &data);
1481 hw_next_to_clean = (data >> shift) & MB_PRIO_PROD_IDX_MASK;
1482
1483 while (next_to_clean != hw_next_to_clean) {
1484 buffer_info = &tpd_ring->buffer_info[next_to_clean];
1485 if (buffer_info->state == ATL1_BUFFER_BUSY) {
1486 pci_unmap_page(adapter->pdev, buffer_info->dma,
1487 buffer_info->length, PCI_DMA_TODEVICE);
1488 buffer_info->dma = 0;
1489 if (buffer_info->skb) {
1490 dev_kfree_skb_irq(buffer_info->skb);
1491 buffer_info->skb = NULL;
1492 }
1493 buffer_info->state = ATL1_BUFFER_FREE;
1494 }
1495 if (++next_to_clean == tpd_ring->count)
1496 next_to_clean = 0;
1497 atomic_set(&tpd_ring->next_to_clean, next_to_clean);
1498 }
1499
1500 if (netif_queue_stopped(adapter->netdev) &&
1501 netif_carrier_ok(adapter->netdev)) {
1502 netif_wake_queue(adapter->netdev);
1503 }
1504
1505 return true;
1506}
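
/*
 * Illustrative sketch (not part of the driver): walking a transmit ring from
 * the software clean index up to the hardware-reported consumer index, with
 * wrap-around, as the loop above does. Ring size and indices are made up.
 */
#include <stdio.h>

#define RING_SIZE 8

int main(void)
{
	unsigned int next_to_clean = 6;	/* software position */
	unsigned int hw_consumer = 2;	/* hardware has finished up to here */
	unsigned int freed = 0;

	while (next_to_clean != hw_consumer) {
		/* release buffer_info[next_to_clean] here */
		freed++;
		if (++next_to_clean == RING_SIZE)
			next_to_clean = 0;
	}
	printf("freed %u descriptors, clean index now %u\n",
	       freed, next_to_clean);
	return 0;
}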
1507
1508/*
1509 * atl1c_intr - Interrupt Handler
1510 * @irq: interrupt number
1511 * @data: pointer to a network interface device structure
1513 */
1514static irqreturn_t atl1c_intr(int irq, void *data)
1515{
1516 struct net_device *netdev = data;
1517 struct atl1c_adapter *adapter = netdev_priv(netdev);
1518 struct pci_dev *pdev = adapter->pdev;
1519 struct atl1c_hw *hw = &adapter->hw;
1520 int max_ints = AT_MAX_INT_WORK;
1521 int handled = IRQ_NONE;
1522 u32 status;
1523 u32 reg_data;
1524
1525 do {
1526 AT_READ_REG(hw, REG_ISR, &reg_data);
1527 status = reg_data & hw->intr_mask;
1528
1529 if (status == 0 || (status & ISR_DIS_INT) != 0) {
1530 if (max_ints != AT_MAX_INT_WORK)
1531 handled = IRQ_HANDLED;
1532 break;
1533 }
1534 /* link event */
1535 if (status & ISR_GPHY)
1536 atl1c_clear_phy_int(adapter);
1537 /* Ack ISR */
1538 AT_WRITE_REG(hw, REG_ISR, status | ISR_DIS_INT);
1539 if (status & ISR_RX_PKT) {
1540 if (likely(napi_schedule_prep(&adapter->napi))) {
1541 hw->intr_mask &= ~ISR_RX_PKT;
1542 AT_WRITE_REG(hw, REG_IMR, hw->intr_mask);
1543 __napi_schedule(&adapter->napi);
1544 }
1545 }
1546 if (status & ISR_TX_PKT)
1547 atl1c_clean_tx_irq(adapter, atl1c_trans_normal);
1548
1549 handled = IRQ_HANDLED;
1550 /* check if PCIE PHY Link down */
1551 if (status & ISR_ERROR) {
1552 if (netif_msg_hw(adapter))
1553 dev_err(&pdev->dev,
1554 "atl1c hardware error (status = 0x%x)\n",
1555 status & ISR_ERROR);
1556 /* reset MAC */
1557 hw->intr_mask &= ~ISR_ERROR;
1558 AT_WRITE_REG(hw, REG_IMR, hw->intr_mask);
1559 schedule_work(&adapter->reset_task);
1560 break;
1561 }
1562
1563 if (status & ISR_OVER)
1564 if (netif_msg_intr(adapter))
1565 dev_warn(&pdev->dev,
1566					"TX/RX overflow (status = 0x%x)\n",
1567 status & ISR_OVER);
1568
1569 /* link event */
1570 if (status & (ISR_GPHY | ISR_MANUAL)) {
1571 adapter->net_stats.tx_carrier_errors++;
1572 atl1c_link_chg_event(adapter);
1573 break;
1574 }
1575
1576 } while (--max_ints > 0);
1577 /* re-enable Interrupt*/
1578 AT_WRITE_REG(&adapter->hw, REG_ISR, 0);
1579 return handled;
1580}
1581
1582static inline void atl1c_rx_checksum(struct atl1c_adapter *adapter,
1583 struct sk_buff *skb, struct atl1c_recv_ret_status *prrs)
1584{
1585	/*
1586	 * The pid field in the RRS is not always correct, so we
1587	 * cannot tell whether the packet is fragmented or not;
1588	 * report CHECKSUM_NONE to the kernel instead.
1589	 */
1590 skb->ip_summed = CHECKSUM_NONE;
1591}
1592
1593static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter, const int ringid)
1594{
1595 struct atl1c_rfd_ring *rfd_ring = &adapter->rfd_ring[ringid];
1596 struct pci_dev *pdev = adapter->pdev;
1597 struct atl1c_buffer *buffer_info, *next_info;
1598 struct sk_buff *skb;
1599 void *vir_addr = NULL;
1600 u16 num_alloc = 0;
1601 u16 rfd_next_to_use, next_next;
1602 struct atl1c_rx_free_desc *rfd_desc;
1603
1604 next_next = rfd_next_to_use = rfd_ring->next_to_use;
1605 if (++next_next == rfd_ring->count)
1606 next_next = 0;
1607 buffer_info = &rfd_ring->buffer_info[rfd_next_to_use];
1608 next_info = &rfd_ring->buffer_info[next_next];
1609
1610 while (next_info->state == ATL1_BUFFER_FREE) {
1611 rfd_desc = ATL1C_RFD_DESC(rfd_ring, rfd_next_to_use);
1612
1613 skb = dev_alloc_skb(adapter->rx_buffer_len);
1614 if (unlikely(!skb)) {
1615 if (netif_msg_rx_err(adapter))
1616 dev_warn(&pdev->dev, "alloc rx buffer failed\n");
1617 break;
1618 }
1619
1620 /*
1621 * Make buffer alignment 2 beyond a 16 byte boundary
1622 * this will result in a 16 byte aligned IP header after
1623 * the 14 byte MAC header is removed
1624 */
1625 vir_addr = skb->data;
1626 buffer_info->state = ATL1_BUFFER_BUSY;
1627 buffer_info->skb = skb;
1628 buffer_info->length = adapter->rx_buffer_len;
1629 buffer_info->dma = pci_map_single(pdev, vir_addr,
1630 buffer_info->length,
1631 PCI_DMA_FROMDEVICE);
1632 rfd_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
1633 rfd_next_to_use = next_next;
1634 if (++next_next == rfd_ring->count)
1635 next_next = 0;
1636 buffer_info = &rfd_ring->buffer_info[rfd_next_to_use];
1637 next_info = &rfd_ring->buffer_info[next_next];
1638 num_alloc++;
1639 }
1640
1641 if (num_alloc) {
1642 /* TODO: update mailbox here */
1643 wmb();
1644 rfd_ring->next_to_use = rfd_next_to_use;
1645 AT_WRITE_REG(&adapter->hw, atl1c_rfd_prod_idx_regs[ringid],
1646 rfd_ring->next_to_use & MB_RFDX_PROD_IDX_MASK);
1647 }
1648
1649 return num_alloc;
1650}
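
/*
 * Illustrative sketch (not part of the driver): the alignment arithmetic the
 * comment above refers to. Reserving 2 bytes in front of a 14-byte Ethernet
 * header leaves the IP header on a 16-byte boundary. Plain user-space C with
 * invented addresses; no skb is involved.
 */
#include <stdio.h>

int main(void)
{
	unsigned long buf = 0x1000;	/* assume the buffer is 16-byte aligned */
	unsigned int pad = 2;		/* like NET_IP_ALIGN */
	unsigned int eth_hlen = 14;	/* MAC header length */

	unsigned long ip_hdr_addr = buf + pad + eth_hlen;

	printf("IP header at 0x%lx, 16-byte aligned: %s\n",
	       ip_hdr_addr, (ip_hdr_addr % 16 == 0) ? "yes" : "no");
	return 0;
}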
1651
1652static void atl1c_clean_rrd(struct atl1c_rrd_ring *rrd_ring,
1653 struct atl1c_recv_ret_status *rrs, u16 num)
1654{
1655 u16 i;
1656	/* the relationship between rrd and rfd is one-to-one */
1657 for (i = 0; i < num; i++, rrs = ATL1C_RRD_DESC(rrd_ring,
1658 rrd_ring->next_to_clean)) {
1659 rrs->word3 &= ~RRS_RXD_UPDATED;
1660 if (++rrd_ring->next_to_clean == rrd_ring->count)
1661 rrd_ring->next_to_clean = 0;
1662 }
1663}
1664
1665static void atl1c_clean_rfd(struct atl1c_rfd_ring *rfd_ring,
1666 struct atl1c_recv_ret_status *rrs, u16 num)
1667{
1668 u16 i;
1669 u16 rfd_index;
1670 struct atl1c_buffer *buffer_info = rfd_ring->buffer_info;
1671
1672 rfd_index = (rrs->word0 >> RRS_RX_RFD_INDEX_SHIFT) &
1673 RRS_RX_RFD_INDEX_MASK;
1674 for (i = 0; i < num; i++) {
1675 buffer_info[rfd_index].skb = NULL;
1676 buffer_info[rfd_index].state = ATL1_BUFFER_FREE;
1677 if (++rfd_index == rfd_ring->count)
1678 rfd_index = 0;
1679 }
1680 rfd_ring->next_to_clean = rfd_index;
1681}
1682
1683static void atl1c_clean_rx_irq(struct atl1c_adapter *adapter, u8 que,
1684 int *work_done, int work_to_do)
1685{
1686 u16 rfd_num, rfd_index;
1687 u16 count = 0;
1688 u16 length;
1689 struct pci_dev *pdev = adapter->pdev;
1690 struct net_device *netdev = adapter->netdev;
1691 struct atl1c_rfd_ring *rfd_ring = &adapter->rfd_ring[que];
1692 struct atl1c_rrd_ring *rrd_ring = &adapter->rrd_ring[que];
1693 struct sk_buff *skb;
1694 struct atl1c_recv_ret_status *rrs;
1695 struct atl1c_buffer *buffer_info;
1696
1697 while (1) {
1698 if (*work_done >= work_to_do)
1699 break;
1700 rrs = ATL1C_RRD_DESC(rrd_ring, rrd_ring->next_to_clean);
1701 if (likely(RRS_RXD_IS_VALID(rrs->word3))) {
1702 rfd_num = (rrs->word0 >> RRS_RX_RFD_CNT_SHIFT) &
1703 RRS_RX_RFD_CNT_MASK;
1704			if (unlikely(rfd_num != 1))
1705				/* TODO: support multiple RFDs */
1706				if (netif_msg_rx_err(adapter))
1707					dev_warn(&pdev->dev,
1708						"multiple RFDs not supported yet!\n");
1709 goto rrs_checked;
1710 } else {
1711 break;
1712 }
1713rrs_checked:
1714 atl1c_clean_rrd(rrd_ring, rrs, rfd_num);
1715 if (rrs->word3 & (RRS_RX_ERR_SUM | RRS_802_3_LEN_ERR)) {
1716 atl1c_clean_rfd(rfd_ring, rrs, rfd_num);
1717 if (netif_msg_rx_err(adapter))
1718 dev_warn(&pdev->dev,
1719 "wrong packet! rrs word3 is %x\n",
1720 rrs->word3);
1721 continue;
1722 }
1723
1724 length = le16_to_cpu((rrs->word3 >> RRS_PKT_SIZE_SHIFT) &
1725 RRS_PKT_SIZE_MASK);
1726 /* Good Receive */
1727 if (likely(rfd_num == 1)) {
1728 rfd_index = (rrs->word0 >> RRS_RX_RFD_INDEX_SHIFT) &
1729 RRS_RX_RFD_INDEX_MASK;
1730 buffer_info = &rfd_ring->buffer_info[rfd_index];
1731 pci_unmap_single(pdev, buffer_info->dma,
1732 buffer_info->length, PCI_DMA_FROMDEVICE);
1733 skb = buffer_info->skb;
1734 } else {
1735 /* TODO */
1736 if (netif_msg_rx_err(adapter))
1737 dev_warn(&pdev->dev,
1738					"multiple RFDs not supported yet!\n");
1739 break;
1740 }
1741 atl1c_clean_rfd(rfd_ring, rrs, rfd_num);
1742 skb_put(skb, length - ETH_FCS_LEN);
1743 skb->protocol = eth_type_trans(skb, netdev);
1744 skb->dev = netdev;
1745 atl1c_rx_checksum(adapter, skb, rrs);
1746 if (unlikely(adapter->vlgrp) && rrs->word3 & RRS_VLAN_INS) {
1747 u16 vlan;
1748
1749 AT_TAG_TO_VLAN(rrs->vlan_tag, vlan);
1750 vlan = le16_to_cpu(vlan);
1751 vlan_hwaccel_receive_skb(skb, adapter->vlgrp, vlan);
1752 } else
1753 netif_receive_skb(skb);
1754
1755 netdev->last_rx = jiffies;
1756 (*work_done)++;
1757 count++;
1758 }
1759 if (count)
1760 atl1c_alloc_rx_buffer(adapter, que);
1761}
1762
1763/*
1764 * atl1c_clean - NAPI Rx polling callback
1765 * @napi: napi context embedded in the board private structure
1766 */
1767static int atl1c_clean(struct napi_struct *napi, int budget)
1768{
1769 struct atl1c_adapter *adapter =
1770 container_of(napi, struct atl1c_adapter, napi);
1771 int work_done = 0;
1772
1773 /* Keep link state information with original netdev */
1774 if (!netif_carrier_ok(adapter->netdev))
1775 goto quit_polling;
1776 /* just enable one RXQ */
1777 atl1c_clean_rx_irq(adapter, 0, &work_done, budget);
1778
1779 if (work_done < budget) {
1780quit_polling:
1781 napi_complete(napi);
1782 adapter->hw.intr_mask |= ISR_RX_PKT;
1783 AT_WRITE_REG(&adapter->hw, REG_IMR, adapter->hw.intr_mask);
1784 }
1785 return work_done;
1786}
1787
1788#ifdef CONFIG_NET_POLL_CONTROLLER
1789
1790/*
1791 * Polling 'interrupt' - used by things like netconsole to send skbs
1792 * without having to re-enable interrupts. It's not called while
1793 * the interrupt routine is executing.
1794 */
1795static void atl1c_netpoll(struct net_device *netdev)
1796{
1797 struct atl1c_adapter *adapter = netdev_priv(netdev);
1798
1799 disable_irq(adapter->pdev->irq);
1800 atl1c_intr(adapter->pdev->irq, netdev);
1801 enable_irq(adapter->pdev->irq);
1802}
1803#endif
1804
1805static inline u16 atl1c_tpd_avail(struct atl1c_adapter *adapter, enum atl1c_trans_queue type)
1806{
1807 struct atl1c_tpd_ring *tpd_ring = &adapter->tpd_ring[type];
1808 u16 next_to_use = 0;
1809 u16 next_to_clean = 0;
1810
1811 next_to_clean = atomic_read(&tpd_ring->next_to_clean);
1812 next_to_use = tpd_ring->next_to_use;
1813
1814	return (u16)((next_to_clean > next_to_use) ?
1815		(next_to_clean - next_to_use - 1) :
1816		(tpd_ring->count + next_to_clean - next_to_use - 1));
1817}
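
/*
 * Illustrative sketch (not part of the driver): the circular-ring "free
 * descriptors" formula used above, with one slot always kept unused so that
 * a full ring can be told apart from an empty one. Sample numbers only.
 */
#include <stdio.h>

static unsigned int ring_avail(unsigned int count, unsigned int next_to_use,
			       unsigned int next_to_clean)
{
	return (next_to_clean > next_to_use) ?
		(next_to_clean - next_to_use - 1) :
		(count + next_to_clean - next_to_use - 1);
}

int main(void)
{
	printf("%u\n", ring_avail(8, 5, 2));	/* 8 + 2 - 5 - 1 = 4 free */
	printf("%u\n", ring_avail(8, 2, 5));	/* 5 - 2 - 1 = 2 free */
	return 0;
}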
1818
1819/*
1820 * get next usable tpd
1821 * Note: the caller should use atl1c_tpd_avail to make sure
1822 * there are enough tpds available
1823 */
1824static struct atl1c_tpd_desc *atl1c_get_tpd(struct atl1c_adapter *adapter,
1825 enum atl1c_trans_queue type)
1826{
1827 struct atl1c_tpd_ring *tpd_ring = &adapter->tpd_ring[type];
1828 struct atl1c_tpd_desc *tpd_desc;
1829 u16 next_to_use = 0;
1830
1831 next_to_use = tpd_ring->next_to_use;
1832 if (++tpd_ring->next_to_use == tpd_ring->count)
1833 tpd_ring->next_to_use = 0;
1834 tpd_desc = ATL1C_TPD_DESC(tpd_ring, next_to_use);
1835 memset(tpd_desc, 0, sizeof(struct atl1c_tpd_desc));
1836 return tpd_desc;
1837}
1838
1839static struct atl1c_buffer *
1840atl1c_get_tx_buffer(struct atl1c_adapter *adapter, struct atl1c_tpd_desc *tpd)
1841{
1842 struct atl1c_tpd_ring *tpd_ring = adapter->tpd_ring;
1843
1844 return &tpd_ring->buffer_info[tpd -
1845 (struct atl1c_tpd_desc *)tpd_ring->desc];
1846}
1847
1848/* Calculate the number of transmit packet descriptors (TPDs) needed */
1849static u16 atl1c_cal_tpd_req(const struct sk_buff *skb)
1850{
1851 u16 tpd_req;
1852 u16 proto_hdr_len = 0;
1853
1854 tpd_req = skb_shinfo(skb)->nr_frags + 1;
1855
1856 if (skb_is_gso(skb)) {
1857 proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
1858 if (proto_hdr_len < skb_headlen(skb))
1859 tpd_req++;
1860 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
1861 tpd_req++;
1862 }
1863 return tpd_req;
1864}
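
/*
 * Illustrative sketch (not part of the driver): counting descriptors the way
 * atl1c_cal_tpd_req() does above -- one per fragment plus one for the linear
 * part, one extra when a TSO header is mapped on its own, and one extra
 * extended descriptor for IPv6 TSO. The inputs are plain numbers, not a real
 * skb, and the helper name is invented.
 */
#include <stdio.h>
#include <stdbool.h>

static unsigned int count_tx_descs(unsigned int nr_frags, bool is_gso,
				   bool header_split, bool is_tso_v6)
{
	unsigned int n = nr_frags + 1;

	if (is_gso) {
		if (header_split)
			n++;		/* header mapped separately */
		if (is_tso_v6)
			n++;		/* extra extended descriptor */
	}
	return n;
}

int main(void)
{
	printf("%u\n", count_tx_descs(3, true, true, false));	/* 5 */
	printf("%u\n", count_tx_descs(0, false, false, false));	/* 1 */
	return 0;
}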
1865
1866static int atl1c_tso_csum(struct atl1c_adapter *adapter,
1867 struct sk_buff *skb,
1868 struct atl1c_tpd_desc **tpd,
1869 enum atl1c_trans_queue type)
1870{
1871 struct pci_dev *pdev = adapter->pdev;
1872 u8 hdr_len;
1873 u32 real_len;
1874 unsigned short offload_type;
1875 int err;
1876
1877 if (skb_is_gso(skb)) {
1878 if (skb_header_cloned(skb)) {
1879 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
1880 if (unlikely(err))
1881 return -1;
1882 }
1883 offload_type = skb_shinfo(skb)->gso_type;
1884
1885 if (offload_type & SKB_GSO_TCPV4) {
1886 real_len = (((unsigned char *)ip_hdr(skb) - skb->data)
1887 + ntohs(ip_hdr(skb)->tot_len));
1888
1889 if (real_len < skb->len)
1890 pskb_trim(skb, real_len);
1891
1892 hdr_len = (skb_transport_offset(skb) + tcp_hdrlen(skb));
1893 if (unlikely(skb->len == hdr_len)) {
1894				/* only checksum offload is needed */
1895 if (netif_msg_tx_queued(adapter))
1896 dev_warn(&pdev->dev,
1897 "IPV4 tso with zero data??\n");
1898 goto check_sum;
1899 } else {
1900 ip_hdr(skb)->check = 0;
1901 tcp_hdr(skb)->check = ~csum_tcpudp_magic(
1902 ip_hdr(skb)->saddr,
1903 ip_hdr(skb)->daddr,
1904 0, IPPROTO_TCP, 0);
1905 (*tpd)->word1 |= 1 << TPD_IPV4_PACKET_SHIFT;
1906 }
1907 }
1908
1909 if (offload_type & SKB_GSO_TCPV6) {
1910 struct atl1c_tpd_ext_desc *etpd =
1911 *(struct atl1c_tpd_ext_desc **)(tpd);
1912
1913 memset(etpd, 0, sizeof(struct atl1c_tpd_ext_desc));
1914 *tpd = atl1c_get_tpd(adapter, type);
1915 ipv6_hdr(skb)->payload_len = 0;
1916		/* is the payload zero bytes? */
1917 hdr_len = (skb_transport_offset(skb) + tcp_hdrlen(skb));
1918 if (unlikely(skb->len == hdr_len)) {
1919				/* only checksum offload is needed */
1920 if (netif_msg_tx_queued(adapter))
1921 dev_warn(&pdev->dev,
1922 "IPV6 tso with zero data??\n");
1923 goto check_sum;
1924 } else
1925 tcp_hdr(skb)->check = ~csum_ipv6_magic(
1926 &ipv6_hdr(skb)->saddr,
1927 &ipv6_hdr(skb)->daddr,
1928 0, IPPROTO_TCP, 0);
1929 etpd->word1 |= 1 << TPD_LSO_EN_SHIFT;
1930 etpd->word1 |= 1 << TPD_LSO_VER_SHIFT;
1931 etpd->pkt_len = cpu_to_le32(skb->len);
1932 (*tpd)->word1 |= 1 << TPD_LSO_VER_SHIFT;
1933 }
1934
1935 (*tpd)->word1 |= 1 << TPD_LSO_EN_SHIFT;
1936 (*tpd)->word1 |= (skb_transport_offset(skb) & TPD_TCPHDR_OFFSET_MASK) <<
1937 TPD_TCPHDR_OFFSET_SHIFT;
1938 (*tpd)->word1 |= (skb_shinfo(skb)->gso_size & TPD_MSS_MASK) <<
1939 TPD_MSS_SHIFT;
1940 return 0;
1941 }
1942
1943check_sum:
1944 if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
1945 u8 css, cso;
1946 cso = skb_transport_offset(skb);
1947
1948 if (unlikely(cso & 0x1)) {
1949 if (netif_msg_tx_err(adapter))
1950 dev_err(&adapter->pdev->dev,
1951				"payload offset must not be an odd number\n");
1952 return -1;
1953 } else {
1954 css = cso + skb->csum_offset;
1955
1956 (*tpd)->word1 |= ((cso >> 1) & TPD_PLOADOFFSET_MASK) <<
1957 TPD_PLOADOFFSET_SHIFT;
1958 (*tpd)->word1 |= ((css >> 1) & TPD_CCSUM_OFFSET_MASK) <<
1959 TPD_CCSUM_OFFSET_SHIFT;
1960 (*tpd)->word1 |= 1 << TPD_CCSUM_EN_SHIFT;
1961 }
1962 }
1963 return 0;
1964}
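
/*
 * Illustrative sketch (not part of the driver): how a checksum start (cso)
 * and checksum offset (css) can be folded into one descriptor word, as done
 * above. Both values must be even because the field stores them divided by
 * two; the mask and shift values here are examples, not the real TPD layout.
 */
#include <stdio.h>
#include <stdint.h>

#define PLOAD_MASK	0xFFu
#define PLOAD_SHIFT	16
#define CSUM_MASK	0xFFu
#define CSUM_SHIFT	24

int main(void)
{
	uint32_t word1 = 0;
	uint32_t cso = 34;		/* e.g. start of the TCP header */
	uint32_t css = cso + 16;	/* where the checksum field sits */

	if (cso & 0x1) {
		fprintf(stderr, "checksum start must be even\n");
		return 1;
	}
	word1 |= ((cso >> 1) & PLOAD_MASK) << PLOAD_SHIFT;
	word1 |= ((css >> 1) & CSUM_MASK) << CSUM_SHIFT;
	printf("word1 = 0x%08x\n", (unsigned int)word1);
	return 0;
}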
1965
1966static void atl1c_tx_map(struct atl1c_adapter *adapter,
1967 struct sk_buff *skb, struct atl1c_tpd_desc *tpd,
1968 enum atl1c_trans_queue type)
1969{
1970 struct atl1c_tpd_desc *use_tpd = NULL;
1971 struct atl1c_buffer *buffer_info = NULL;
1972 u16 buf_len = skb_headlen(skb);
1973 u16 map_len = 0;
1974 u16 mapped_len = 0;
1975 u16 hdr_len = 0;
1976 u16 nr_frags;
1977 u16 f;
1978 int tso;
1979
1980 nr_frags = skb_shinfo(skb)->nr_frags;
1981 tso = (tpd->word1 >> TPD_LSO_EN_SHIFT) & TPD_LSO_EN_MASK;
1982 if (tso) {
1983 /* TSO */
1984 map_len = hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
1985 use_tpd = tpd;
1986
1987 buffer_info = atl1c_get_tx_buffer(adapter, use_tpd);
1988 buffer_info->length = map_len;
1989 buffer_info->dma = pci_map_single(adapter->pdev,
1990 skb->data, hdr_len, PCI_DMA_TODEVICE);
1991 buffer_info->state = ATL1_BUFFER_BUSY;
1992 mapped_len += map_len;
1993 use_tpd->buffer_addr = cpu_to_le64(buffer_info->dma);
1994 use_tpd->buffer_len = cpu_to_le16(buffer_info->length);
1995 }
1996
1997 if (mapped_len < buf_len) {
1998		/* mapped_len == 0 means we should use the first tpd,
1999		   which is given by the caller */
2000		if (mapped_len == 0)
2001			use_tpd = tpd;
2002		else {
2003			use_tpd = atl1c_get_tpd(adapter, type);
2004			memcpy(use_tpd, tpd, sizeof(struct atl1c_tpd_desc));
2005		}
2008 buffer_info = atl1c_get_tx_buffer(adapter, use_tpd);
2009 buffer_info->length = buf_len - mapped_len;
2010 buffer_info->dma =
2011 pci_map_single(adapter->pdev, skb->data + mapped_len,
2012 buffer_info->length, PCI_DMA_TODEVICE);
2013 buffer_info->state = ATL1_BUFFER_BUSY;
2014
2015 use_tpd->buffer_addr = cpu_to_le64(buffer_info->dma);
2016 use_tpd->buffer_len = cpu_to_le16(buffer_info->length);
2017 }
2018
2019 for (f = 0; f < nr_frags; f++) {
2020 struct skb_frag_struct *frag;
2021
2022 frag = &skb_shinfo(skb)->frags[f];
2023
2024 use_tpd = atl1c_get_tpd(adapter, type);
2025 memcpy(use_tpd, tpd, sizeof(struct atl1c_tpd_desc));
2026
2027 buffer_info = atl1c_get_tx_buffer(adapter, use_tpd);
2028 buffer_info->length = frag->size;
2029 buffer_info->dma =
2030 pci_map_page(adapter->pdev, frag->page,
2031 frag->page_offset,
2032 buffer_info->length,
2033 PCI_DMA_TODEVICE);
2034 buffer_info->state = ATL1_BUFFER_BUSY;
2035
2036 use_tpd->buffer_addr = cpu_to_le64(buffer_info->dma);
2037 use_tpd->buffer_len = cpu_to_le16(buffer_info->length);
2038 }
2039
2040 /* The last tpd */
2041 use_tpd->word1 |= 1 << TPD_EOP_SHIFT;
2042	/* The last buffer_info contains the skb address,
2043	   so it will be freed after unmapping */
2044 buffer_info->skb = skb;
2045}
2046
2047static void atl1c_tx_queue(struct atl1c_adapter *adapter, struct sk_buff *skb,
2048 struct atl1c_tpd_desc *tpd, enum atl1c_trans_queue type)
2049{
2050 struct atl1c_tpd_ring *tpd_ring = &adapter->tpd_ring[type];
2051 u32 prod_data;
2052
2053 AT_READ_REG(&adapter->hw, REG_MB_PRIO_PROD_IDX, &prod_data);
2054 switch (type) {
2055 case atl1c_trans_high:
2056 prod_data &= 0xFFFF0000;
2057 prod_data |= tpd_ring->next_to_use & 0xFFFF;
2058 break;
2059 case atl1c_trans_normal:
2060 prod_data &= 0x0000FFFF;
2061 prod_data |= (tpd_ring->next_to_use & 0xFFFF) << 16;
2062 break;
2063 default:
2064 break;
2065 }
2066 wmb();
2067 AT_WRITE_REG(&adapter->hw, REG_MB_PRIO_PROD_IDX, prod_data);
2068}
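
/*
 * Illustrative sketch (not part of the driver): updating one half of a shared
 * 32-bit mailbox that holds two 16-bit producer indices, as the read-modify-
 * write above does for the high- and normal-priority queues. The values and
 * helper names are invented for the demonstration.
 */
#include <stdio.h>
#include <stdint.h>

static uint32_t set_low16(uint32_t mbox, uint16_t idx)
{
	return (mbox & 0xFFFF0000u) | idx;
}

static uint32_t set_high16(uint32_t mbox, uint16_t idx)
{
	return (mbox & 0x0000FFFFu) | ((uint32_t)idx << 16);
}

int main(void)
{
	/* normal-queue index 0x20 in the high half, high-prio 0x10 in the low half */
	uint32_t mbox = 0x00200010;

	mbox = set_high16(mbox, 0x21);	/* advance the normal-queue producer */
	mbox = set_low16(mbox, 0x11);	/* advance the high-prio producer */
	printf("mailbox = 0x%08x\n", (unsigned int)mbox);
	return 0;
}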
2069
2070static int atl1c_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2071{
2072 struct atl1c_adapter *adapter = netdev_priv(netdev);
2073 unsigned long flags;
2074 u16 tpd_req = 1;
2075 struct atl1c_tpd_desc *tpd;
2076 enum atl1c_trans_queue type = atl1c_trans_normal;
2077
2078 if (test_bit(__AT_DOWN, &adapter->flags)) {
2079 dev_kfree_skb_any(skb);
2080 return NETDEV_TX_OK;
2081 }
2082
2083 tpd_req = atl1c_cal_tpd_req(skb);
2084 if (!spin_trylock_irqsave(&adapter->tx_lock, flags)) {
2085 if (netif_msg_pktdata(adapter))
2086 dev_info(&adapter->pdev->dev, "tx locked\n");
2087 return NETDEV_TX_LOCKED;
2088 }
2089 if (skb->mark == 0x01)
2090 type = atl1c_trans_high;
2091 else
2092 type = atl1c_trans_normal;
2093
2094 if (atl1c_tpd_avail(adapter, type) < tpd_req) {
2095		/* not enough descriptors, just stop the queue */
2096 netif_stop_queue(netdev);
2097 spin_unlock_irqrestore(&adapter->tx_lock, flags);
2098 return NETDEV_TX_BUSY;
2099 }
2100
2101 tpd = atl1c_get_tpd(adapter, type);
2102
2103 /* do TSO and check sum */
2104 if (atl1c_tso_csum(adapter, skb, &tpd, type) != 0) {
2105 spin_unlock_irqrestore(&adapter->tx_lock, flags);
2106 dev_kfree_skb_any(skb);
2107 return NETDEV_TX_OK;
2108 }
2109
2110 if (unlikely(adapter->vlgrp && vlan_tx_tag_present(skb))) {
2111 u16 vlan = vlan_tx_tag_get(skb);
2112 __le16 tag;
2113
2114 vlan = cpu_to_le16(vlan);
2115 AT_VLAN_TO_TAG(vlan, tag);
2116 tpd->word1 |= 1 << TPD_INS_VTAG_SHIFT;
2117 tpd->vlan_tag = tag;
2118 }
2119
2120 if (skb_network_offset(skb) != ETH_HLEN)
2121 tpd->word1 |= 1 << TPD_ETH_TYPE_SHIFT; /* Ethernet frame */
2122
2123 atl1c_tx_map(adapter, skb, tpd, type);
2124 atl1c_tx_queue(adapter, skb, tpd, type);
2125
2126 netdev->trans_start = jiffies;
2127 spin_unlock_irqrestore(&adapter->tx_lock, flags);
2128 return NETDEV_TX_OK;
2129}
2130
2131static void atl1c_free_irq(struct atl1c_adapter *adapter)
2132{
2133 struct net_device *netdev = adapter->netdev;
2134
2135 free_irq(adapter->pdev->irq, netdev);
2136
2137 if (adapter->have_msi)
2138 pci_disable_msi(adapter->pdev);
2139}
2140
2141static int atl1c_request_irq(struct atl1c_adapter *adapter)
2142{
2143 struct pci_dev *pdev = adapter->pdev;
2144 struct net_device *netdev = adapter->netdev;
2145 int flags = 0;
2146 int err = 0;
2147
2148 adapter->have_msi = true;
2149 err = pci_enable_msi(adapter->pdev);
2150 if (err) {
2151 if (netif_msg_ifup(adapter))
2152 dev_err(&pdev->dev,
2153				"Unable to allocate MSI interrupt, error: %d\n",
2154 err);
2155 adapter->have_msi = false;
2156 } else
2157 netdev->irq = pdev->irq;
2158
2159 if (!adapter->have_msi)
2160 flags |= IRQF_SHARED;
2161 err = request_irq(adapter->pdev->irq, &atl1c_intr, flags,
2162 netdev->name, netdev);
2163 if (err) {
2164 if (netif_msg_ifup(adapter))
2165 dev_err(&pdev->dev,
2166				"Unable to allocate interrupt, error: %d\n",
2167 err);
2168 if (adapter->have_msi)
2169 pci_disable_msi(adapter->pdev);
2170 return err;
2171 }
2172 if (netif_msg_ifup(adapter))
2173 dev_dbg(&pdev->dev, "atl1c_request_irq OK\n");
2174 return err;
2175}
2176
2177int atl1c_up(struct atl1c_adapter *adapter)
2178{
2179 struct net_device *netdev = adapter->netdev;
2180 int num;
2181 int err;
2182 int i;
2183
2184 netif_carrier_off(netdev);
2185 atl1c_init_ring_ptrs(adapter);
2186 atl1c_set_multi(netdev);
2187 atl1c_restore_vlan(adapter);
2188
2189 for (i = 0; i < adapter->num_rx_queues; i++) {
2190 num = atl1c_alloc_rx_buffer(adapter, i);
2191 if (unlikely(num == 0)) {
2192 err = -ENOMEM;
2193 goto err_alloc_rx;
2194 }
2195 }
2196
2197 if (atl1c_configure(adapter)) {
2198 err = -EIO;
2199 goto err_up;
2200 }
2201
2202 err = atl1c_request_irq(adapter);
2203 if (unlikely(err))
2204 goto err_up;
2205
2206 clear_bit(__AT_DOWN, &adapter->flags);
2207 napi_enable(&adapter->napi);
2208 atl1c_irq_enable(adapter);
2209 atl1c_check_link_status(adapter);
2210 netif_start_queue(netdev);
2211 return err;
2212
2213err_up:
2214err_alloc_rx:
2215 atl1c_clean_rx_ring(adapter);
2216 return err;
2217}
2218
2219void atl1c_down(struct atl1c_adapter *adapter)
2220{
2221 struct net_device *netdev = adapter->netdev;
2222
2223 atl1c_del_timer(adapter);
2224 atl1c_cancel_work(adapter);
2225
2226 /* signal that we're down so the interrupt handler does not
2227 * reschedule our watchdog timer */
2228 set_bit(__AT_DOWN, &adapter->flags);
2229 netif_carrier_off(netdev);
2230 napi_disable(&adapter->napi);
2231 atl1c_irq_disable(adapter);
2232 atl1c_free_irq(adapter);
2233 AT_WRITE_REG(&adapter->hw, REG_ISR, ISR_DIS_INT);
2234 /* reset MAC to disable all RX/TX */
2235 atl1c_reset_mac(&adapter->hw);
2236 msleep(1);
2237
2238 adapter->link_speed = SPEED_0;
2239 adapter->link_duplex = -1;
2240 atl1c_clean_tx_ring(adapter, atl1c_trans_normal);
2241 atl1c_clean_tx_ring(adapter, atl1c_trans_high);
2242 atl1c_clean_rx_ring(adapter);
2243}
2244
2245/*
2246 * atl1c_open - Called when a network interface is made active
2247 * @netdev: network interface device structure
2248 *
2249 * Returns 0 on success, negative value on failure
2250 *
2251 * The open entry point is called when a network interface is made
2252 * active by the system (IFF_UP). At this point all resources needed
2253 * for transmit and receive operations are allocated, the interrupt
2254 * handler is registered with the OS, the watchdog timer is started,
2255 * and the stack is notified that the interface is ready.
2256 */
2257static int atl1c_open(struct net_device *netdev)
2258{
2259 struct atl1c_adapter *adapter = netdev_priv(netdev);
2260 int err;
2261
2262 /* disallow open during test */
2263 if (test_bit(__AT_TESTING, &adapter->flags))
2264 return -EBUSY;
2265
2266 /* allocate rx/tx dma buffer & descriptors */
2267 err = atl1c_setup_ring_resources(adapter);
2268 if (unlikely(err))
2269 return err;
2270
2271 err = atl1c_up(adapter);
2272 if (unlikely(err))
2273 goto err_up;
2274
2275 if (adapter->hw.ctrl_flags & ATL1C_FPGA_VERSION) {
2276 u32 phy_data;
2277
2278 AT_READ_REG(&adapter->hw, REG_MDIO_CTRL, &phy_data);
2279 phy_data |= MDIO_AP_EN;
2280 AT_WRITE_REG(&adapter->hw, REG_MDIO_CTRL, phy_data);
2281 }
2282 return 0;
2283
2284err_up:
2285 atl1c_free_irq(adapter);
2286 atl1c_free_ring_resources(adapter);
2287 atl1c_reset_mac(&adapter->hw);
2288 return err;
2289}
2290
2291/*
2292 * atl1c_close - Disables a network interface
2293 * @netdev: network interface device structure
2294 *
2295 * Returns 0, this is not allowed to fail
2296 *
2297 * The close entry point is called when an interface is de-activated
2298 * by the OS. The hardware is still under the drivers control, but
2299 * needs to be disabled. A global MAC reset is issued to stop the
2300 * hardware, and all transmit and receive resources are freed.
2301 */
2302static int atl1c_close(struct net_device *netdev)
2303{
2304 struct atl1c_adapter *adapter = netdev_priv(netdev);
2305
2306 WARN_ON(test_bit(__AT_RESETTING, &adapter->flags));
2307 atl1c_down(adapter);
2308 atl1c_free_ring_resources(adapter);
2309 return 0;
2310}
2311
2312static int atl1c_suspend(struct pci_dev *pdev, pm_message_t state)
2313{
2314 struct net_device *netdev = pci_get_drvdata(pdev);
2315 struct atl1c_adapter *adapter = netdev_priv(netdev);
2316 struct atl1c_hw *hw = &adapter->hw;
2317 u32 ctrl;
2318 u32 mac_ctrl_data;
2319 u32 master_ctrl_data;
2320 u32 wol_ctrl_data;
2321 u16 mii_bmsr_data;
2322 u16 save_autoneg_advertised;
2323 u16 mii_intr_status_data;
2324 u32 wufc = adapter->wol;
2325 u32 i;
2326 int retval = 0;
2327
2328 if (netif_running(netdev)) {
2329 WARN_ON(test_bit(__AT_RESETTING, &adapter->flags));
2330 atl1c_down(adapter);
2331 }
2332 netif_device_detach(netdev);
2333 atl1c_disable_l0s_l1(hw);
2334 retval = pci_save_state(pdev);
2335 if (retval)
2336 return retval;
2337 if (wufc) {
2338 AT_READ_REG(hw, REG_MASTER_CTRL, &master_ctrl_data);
2339 master_ctrl_data &= ~MASTER_CTRL_CLK_SEL_DIS;
2340
2341 /* get link status */
2342 atl1c_read_phy_reg(hw, MII_BMSR, (u16 *)&mii_bmsr_data);
2343 atl1c_read_phy_reg(hw, MII_BMSR, (u16 *)&mii_bmsr_data);
2344 save_autoneg_advertised = hw->autoneg_advertised;
2345 hw->autoneg_advertised = ADVERTISED_10baseT_Half;
2346 if (atl1c_restart_autoneg(hw) != 0)
2347 if (netif_msg_link(adapter))
2348 dev_warn(&pdev->dev, "phy autoneg failed\n");
2349 hw->phy_configured = false; /* re-init PHY when resume */
2350 hw->autoneg_advertised = save_autoneg_advertised;
2351 /* turn on magic packet wol */
2352 if (wufc & AT_WUFC_MAG)
2353 wol_ctrl_data = WOL_MAGIC_EN | WOL_MAGIC_PME_EN;
2354
2355 if (wufc & AT_WUFC_LNKC) {
2356 for (i = 0; i < AT_SUSPEND_LINK_TIMEOUT; i++) {
2357 msleep(100);
2358 atl1c_read_phy_reg(hw, MII_BMSR,
2359 (u16 *)&mii_bmsr_data);
2360 if (mii_bmsr_data & BMSR_LSTATUS)
2361 break;
2362 }
2363 if ((mii_bmsr_data & BMSR_LSTATUS) == 0)
2364 if (netif_msg_link(adapter))
2365 dev_warn(&pdev->dev,
2366						"%s: link may change"
2367						" when suspended\n",
2368 atl1c_driver_name);
2369 wol_ctrl_data |= WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN;
2370 /* only link up can wake up */
2371 if (atl1c_write_phy_reg(hw, MII_IER, IER_LINK_UP) != 0) {
2372 if (netif_msg_link(adapter))
2373 dev_err(&pdev->dev,
2374						"%s: write to phy "
2375						"register failed.\n",
2376 atl1c_driver_name);
2377 goto wol_dis;
2378 }
2379 }
2380 /* clear phy interrupt */
2381 atl1c_read_phy_reg(hw, MII_ISR, &mii_intr_status_data);
2382 /* Config MAC Ctrl register */
2383 mac_ctrl_data = MAC_CTRL_RX_EN;
2384		/* set to 10/100M half duplex */
2385 mac_ctrl_data |= atl1c_mac_speed_10_100 << MAC_CTRL_SPEED_SHIFT;
2386 mac_ctrl_data |= (((u32)adapter->hw.preamble_len &
2387 MAC_CTRL_PRMLEN_MASK) <<
2388 MAC_CTRL_PRMLEN_SHIFT);
2389
2390 if (adapter->vlgrp)
2391 mac_ctrl_data |= MAC_CTRL_RMV_VLAN;
2392
2393		/* a magic packet may be a broadcast, multicast or unicast frame */
2394 if (wufc & AT_WUFC_MAG)
2395 mac_ctrl_data |= MAC_CTRL_BC_EN;
2396
2397 if (netif_msg_hw(adapter))
2398 dev_dbg(&pdev->dev,
2399 "%s: suspend MAC=0x%x\n",
2400 atl1c_driver_name, mac_ctrl_data);
2401 AT_WRITE_REG(hw, REG_MASTER_CTRL, master_ctrl_data);
2402 AT_WRITE_REG(hw, REG_WOL_CTRL, wol_ctrl_data);
2403 AT_WRITE_REG(hw, REG_MAC_CTRL, mac_ctrl_data);
2404
2405 /* pcie patch */
2406 AT_READ_REG(hw, REG_PCIE_PHYMISC, &ctrl);
2407 ctrl |= PCIE_PHYMISC_FORCE_RCV_DET;
2408 AT_WRITE_REG(hw, REG_PCIE_PHYMISC, ctrl);
2409
2410 pci_enable_wake(pdev, pci_choose_state(pdev, state), 1);
2411 goto suspend_exit;
2412 }
2413wol_dis:
2414
2415 /* WOL disabled */
2416 AT_WRITE_REG(hw, REG_WOL_CTRL, 0);
2417
2418 /* pcie patch */
2419 AT_READ_REG(hw, REG_PCIE_PHYMISC, &ctrl);
2420 ctrl |= PCIE_PHYMISC_FORCE_RCV_DET;
2421 AT_WRITE_REG(hw, REG_PCIE_PHYMISC, ctrl);
2422
2423 atl1c_phy_disable(hw);
2424 hw->phy_configured = false; /* re-init PHY when resume */
2425
2426 pci_enable_wake(pdev, pci_choose_state(pdev, state), 0);
2427suspend_exit:
2428
2429 pci_disable_device(pdev);
2430 pci_set_power_state(pdev, pci_choose_state(pdev, state));
2431
2432 return 0;
2433}
2434
2435static int atl1c_resume(struct pci_dev *pdev)
2436{
2437 struct net_device *netdev = pci_get_drvdata(pdev);
2438 struct atl1c_adapter *adapter = netdev_priv(netdev);
2439
2440 pci_set_power_state(pdev, PCI_D0);
2441 pci_restore_state(pdev);
2442 pci_enable_wake(pdev, PCI_D3hot, 0);
2443 pci_enable_wake(pdev, PCI_D3cold, 0);
2444
2445 AT_WRITE_REG(&adapter->hw, REG_WOL_CTRL, 0);
2446
2447 atl1c_phy_reset(&adapter->hw);
2448 atl1c_reset_mac(&adapter->hw);
2449 netif_device_attach(netdev);
2450 if (netif_running(netdev))
2451 atl1c_up(adapter);
2452
2453 return 0;
2454}
2455
2456static void atl1c_shutdown(struct pci_dev *pdev)
2457{
2458 atl1c_suspend(pdev, PMSG_SUSPEND);
2459}
2460
2461static const struct net_device_ops atl1c_netdev_ops = {
2462 .ndo_open = atl1c_open,
2463 .ndo_stop = atl1c_close,
2464 .ndo_validate_addr = eth_validate_addr,
2465 .ndo_start_xmit = atl1c_xmit_frame,
2466 .ndo_set_mac_address = atl1c_set_mac_addr,
2467 .ndo_set_multicast_list = atl1c_set_multi,
2468 .ndo_change_mtu = atl1c_change_mtu,
2469 .ndo_do_ioctl = atl1c_ioctl,
2470 .ndo_tx_timeout = atl1c_tx_timeout,
2471 .ndo_get_stats = atl1c_get_stats,
2472 .ndo_vlan_rx_register = atl1c_vlan_rx_register,
2473#ifdef CONFIG_NET_POLL_CONTROLLER
2474 .ndo_poll_controller = atl1c_netpoll,
2475#endif
2476};
2477
2478static int atl1c_init_netdev(struct net_device *netdev, struct pci_dev *pdev)
2479{
2480 SET_NETDEV_DEV(netdev, &pdev->dev);
2481 pci_set_drvdata(pdev, netdev);
2482
2483 netdev->irq = pdev->irq;
2484 netdev->netdev_ops = &atl1c_netdev_ops;
2485 netdev->watchdog_timeo = AT_TX_WATCHDOG;
2486 atl1c_set_ethtool_ops(netdev);
2487
2488 /* TODO: add when ready */
2489 netdev->features = NETIF_F_SG |
2490 NETIF_F_HW_CSUM |
2491 NETIF_F_HW_VLAN_TX |
2492 NETIF_F_HW_VLAN_RX |
2493 NETIF_F_TSO |
2494 NETIF_F_TSO6;
2495 return 0;
2496}
2497
2498/*
2499 * atl1c_probe - Device Initialization Routine
2500 * @pdev: PCI device information struct
2501 * @ent: entry in atl1c_pci_tbl
2502 *
2503 * Returns 0 on success, negative on failure
2504 *
2505 * atl1c_probe initializes an adapter identified by a pci_dev structure.
2506 * The OS initialization, configuring of the adapter private structure,
2507 * and a hardware reset occur.
2508 */
2509static int __devinit atl1c_probe(struct pci_dev *pdev,
2510 const struct pci_device_id *ent)
2511{
2512 struct net_device *netdev;
2513 struct atl1c_adapter *adapter;
2514 static int cards_found;
2515
2516 int err = 0;
2517
2518 /* enable device (incl. PCI PM wakeup and hotplug setup) */
2519 err = pci_enable_device_mem(pdev);
2520 if (err) {
2521 dev_err(&pdev->dev, "cannot enable PCI device\n");
2522 return err;
2523 }
2524
2525 /*
2526 * The atl1c chip can DMA to 64-bit addresses, but it uses a single
2527 * shared register for the high 32 bits, so only a single, aligned,
2528 * 4 GB physical address range can be used at a time.
2529 *
2530 * Supporting 64-bit DMA on this hardware is more trouble than it's
2531 * worth. It is far easier to limit to 32-bit DMA than update
2532 * various kernel subsystems to support the mechanics required by a
2533 * fixed-high-32-bit system.
2534 */
2535 if ((pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) ||
2536 (pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK) != 0)) {
2537		dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
2538 goto err_dma;
2539 }
2540
2541 err = pci_request_regions(pdev, atl1c_driver_name);
2542 if (err) {
2543 dev_err(&pdev->dev, "cannot obtain PCI resources\n");
2544 goto err_pci_reg;
2545 }
2546
2547 pci_set_master(pdev);
2548
2549 netdev = alloc_etherdev(sizeof(struct atl1c_adapter));
2550 if (netdev == NULL) {
2551 err = -ENOMEM;
2552 dev_err(&pdev->dev, "etherdev alloc failed\n");
2553 goto err_alloc_etherdev;
2554 }
2555
2556 err = atl1c_init_netdev(netdev, pdev);
2557 if (err) {
2558 dev_err(&pdev->dev, "init netdevice failed\n");
2559 goto err_init_netdev;
2560 }
2561 adapter = netdev_priv(netdev);
2562 adapter->bd_number = cards_found;
2563 adapter->netdev = netdev;
2564 adapter->pdev = pdev;
2565 adapter->hw.adapter = adapter;
2566 adapter->msg_enable = netif_msg_init(-1, atl1c_default_msg);
2567 adapter->hw.hw_addr = ioremap(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));
2568 if (!adapter->hw.hw_addr) {
2569 err = -EIO;
2570 dev_err(&pdev->dev, "cannot map device registers\n");
2571 goto err_ioremap;
2572 }
2573 netdev->base_addr = (unsigned long)adapter->hw.hw_addr;
2574
2575 /* init mii data */
2576 adapter->mii.dev = netdev;
2577 adapter->mii.mdio_read = atl1c_mdio_read;
2578 adapter->mii.mdio_write = atl1c_mdio_write;
2579 adapter->mii.phy_id_mask = 0x1f;
2580 adapter->mii.reg_num_mask = MDIO_REG_ADDR_MASK;
2581 netif_napi_add(netdev, &adapter->napi, atl1c_clean, 64);
2582 setup_timer(&adapter->phy_config_timer, atl1c_phy_config,
2583 (unsigned long)adapter);
2584 /* setup the private structure */
2585 err = atl1c_sw_init(adapter);
2586 if (err) {
2587 dev_err(&pdev->dev, "net device private data init failed\n");
2588 goto err_sw_init;
2589 }
2590 atl1c_reset_pcie(&adapter->hw, ATL1C_PCIE_L0S_L1_DISABLE |
2591 ATL1C_PCIE_PHY_RESET);
2592
2593 /* Init GPHY as early as possible due to power saving issue */
2594 atl1c_phy_reset(&adapter->hw);
2595
2596 err = atl1c_reset_mac(&adapter->hw);
2597 if (err) {
2598 err = -EIO;
2599 goto err_reset;
2600 }
2601
2602 device_init_wakeup(&pdev->dev, 1);
2603 /* reset the controller to
2604 * put the device in a known good starting state */
2605 err = atl1c_phy_init(&adapter->hw);
2606 if (err) {
2607 err = -EIO;
2608 goto err_reset;
2609 }
2610 if (atl1c_read_mac_addr(&adapter->hw) != 0) {
2611 err = -EIO;
2612 dev_err(&pdev->dev, "get mac address failed\n");
2613 goto err_eeprom;
2614 }
2615 memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len);
2616 memcpy(netdev->perm_addr, adapter->hw.mac_addr, netdev->addr_len);
2617 if (netif_msg_probe(adapter))
2618 dev_dbg(&pdev->dev,
2619 "mac address : %02x-%02x-%02x-%02x-%02x-%02x\n",
2620 adapter->hw.mac_addr[0], adapter->hw.mac_addr[1],
2621 adapter->hw.mac_addr[2], adapter->hw.mac_addr[3],
2622 adapter->hw.mac_addr[4], adapter->hw.mac_addr[5]);
2623
2624 atl1c_hw_set_mac_addr(&adapter->hw);
2625 INIT_WORK(&adapter->reset_task, atl1c_reset_task);
2626 INIT_WORK(&adapter->link_chg_task, atl1c_link_chg_task);
2627 err = register_netdev(netdev);
2628 if (err) {
2629 dev_err(&pdev->dev, "register netdevice failed\n");
2630 goto err_register;
2631 }
2632
2633 if (netif_msg_probe(adapter))
2634 dev_info(&pdev->dev, "version %s\n", ATL1C_DRV_VERSION);
2635 cards_found++;
2636 return 0;
2637
2638err_reset:
2639err_register:
2640err_sw_init:
2641err_eeprom:
2642 iounmap(adapter->hw.hw_addr);
2643err_init_netdev:
2644err_ioremap:
2645 free_netdev(netdev);
2646err_alloc_etherdev:
2647 pci_release_regions(pdev);
2648err_pci_reg:
2649err_dma:
2650 pci_disable_device(pdev);
2651 return err;
2652}
2653
2654/*
2655 * atl1c_remove - Device Removal Routine
2656 * @pdev: PCI device information struct
2657 *
2658 * atl1c_remove is called by the PCI subsystem to alert the driver
2659 * that it should release a PCI device. This could be caused by a
2660 * Hot-Plug event, or because the driver is going to be removed from
2661 * memory.
2662 */
2663static void __devexit atl1c_remove(struct pci_dev *pdev)
2664{
2665 struct net_device *netdev = pci_get_drvdata(pdev);
2666 struct atl1c_adapter *adapter = netdev_priv(netdev);
2667
2668 unregister_netdev(netdev);
2669 atl1c_phy_disable(&adapter->hw);
2670
2671 iounmap(adapter->hw.hw_addr);
2672
2673 pci_release_regions(pdev);
2674 pci_disable_device(pdev);
2675 free_netdev(netdev);
2676}
2677
2678/*
2679 * atl1c_io_error_detected - called when PCI error is detected
2680 * @pdev: Pointer to PCI device
2681 * @state: The current pci connection state
2682 *
2683 * This function is called after a PCI bus error affecting
2684 * this device has been detected.
2685 */
2686static pci_ers_result_t atl1c_io_error_detected(struct pci_dev *pdev,
2687 pci_channel_state_t state)
2688{
2689 struct net_device *netdev = pci_get_drvdata(pdev);
2690 struct atl1c_adapter *adapter = netdev_priv(netdev);
2691
2692 netif_device_detach(netdev);
2693
2694 if (netif_running(netdev))
2695 atl1c_down(adapter);
2696
2697 pci_disable_device(pdev);
2698
2699	/* Request a slot reset. */
2700 return PCI_ERS_RESULT_NEED_RESET;
2701}
2702
2703/*
2704 * atl1c_io_slot_reset - called after the pci bus has been reset.
2705 * @pdev: Pointer to PCI device
2706 *
2707 * Restart the card from scratch, as if from a cold-boot. Implementation
2708 * resembles the first-half of the e1000_resume routine.
2709 */
2710static pci_ers_result_t atl1c_io_slot_reset(struct pci_dev *pdev)
2711{
2712 struct net_device *netdev = pci_get_drvdata(pdev);
2713 struct atl1c_adapter *adapter = netdev_priv(netdev);
2714
2715 if (pci_enable_device(pdev)) {
2716 if (netif_msg_hw(adapter))
2717 dev_err(&pdev->dev,
2718 "Cannot re-enable PCI device after reset\n");
2719 return PCI_ERS_RESULT_DISCONNECT;
2720 }
2721 pci_set_master(pdev);
2722
2723 pci_enable_wake(pdev, PCI_D3hot, 0);
2724 pci_enable_wake(pdev, PCI_D3cold, 0);
2725
2726 atl1c_reset_mac(&adapter->hw);
2727
2728 return PCI_ERS_RESULT_RECOVERED;
2729}
2730
2731/*
2732 * atl1c_io_resume - called when traffic can start flowing again.
2733 * @pdev: Pointer to PCI device
2734 *
2735 * This callback is called when the error recovery driver tells us that
2736 * it's OK to resume normal operation. Implementation resembles the
2737 * second-half of the atl1c_resume routine.
2738 */
2739static void atl1c_io_resume(struct pci_dev *pdev)
2740{
2741 struct net_device *netdev = pci_get_drvdata(pdev);
2742 struct atl1c_adapter *adapter = netdev_priv(netdev);
2743
2744 if (netif_running(netdev)) {
2745 if (atl1c_up(adapter)) {
2746 if (netif_msg_hw(adapter))
2747 dev_err(&pdev->dev,
2748 "Cannot bring device back up after reset\n");
2749 return;
2750 }
2751 }
2752
2753 netif_device_attach(netdev);
2754}
2755
2756static struct pci_error_handlers atl1c_err_handler = {
2757 .error_detected = atl1c_io_error_detected,
2758 .slot_reset = atl1c_io_slot_reset,
2759 .resume = atl1c_io_resume,
2760};
2761
2762static struct pci_driver atl1c_driver = {
2763 .name = atl1c_driver_name,
2764 .id_table = atl1c_pci_tbl,
2765 .probe = atl1c_probe,
2766 .remove = __devexit_p(atl1c_remove),
2767	/* Power Management Hooks */
2768 .suspend = atl1c_suspend,
2769 .resume = atl1c_resume,
2770 .shutdown = atl1c_shutdown,
2771 .err_handler = &atl1c_err_handler
2772};
2773
2774/*
2775 * atl1c_init_module - Driver Registration Routine
2776 *
2777 * atl1c_init_module is the first routine called when the driver is
2778 * loaded. All it does is register with the PCI subsystem.
2779 */
2780static int __init atl1c_init_module(void)
2781{
2782 return pci_register_driver(&atl1c_driver);
2783}
2784
2785/*
2786 * atl1c_exit_module - Driver Exit Cleanup Routine
2787 *
2788 * atl1c_exit_module is called just before the driver is removed
2789 * from memory.
2790 */
2791static void __exit atl1c_exit_module(void)
2792{
2793 pci_unregister_driver(&atl1c_driver);
2794}
2795
2796module_init(atl1c_init_module);
2797module_exit(atl1c_exit_module);
diff --git a/drivers/net/b44.c b/drivers/net/b44.c
index c38512ebcea6..dc5f051005fa 100644
--- a/drivers/net/b44.c
+++ b/drivers/net/b44.c
@@ -1264,8 +1264,14 @@ static void b44_clear_stats(struct b44 *bp)
 static void b44_chip_reset(struct b44 *bp, int reset_kind)
 {
 	struct ssb_device *sdev = bp->sdev;
+	bool was_enabled;
 
-	if (ssb_device_is_enabled(bp->sdev)) {
+	was_enabled = ssb_device_is_enabled(bp->sdev);
+
+	ssb_device_enable(bp->sdev, 0);
+	ssb_pcicore_dev_irqvecs_enable(&sdev->bus->pcicore, sdev);
+
+	if (was_enabled) {
 		bw32(bp, B44_RCV_LAZY, 0);
 		bw32(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE);
 		b44_wait_bit(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE, 200, 1);
@@ -1277,10 +1283,8 @@ static void b44_chip_reset(struct b44 *bp, int reset_kind)
 		}
 		bw32(bp, B44_DMARX_CTRL, 0);
 		bp->rx_prod = bp->rx_cons = 0;
-	} else
-		ssb_pcicore_dev_irqvecs_enable(&sdev->bus->pcicore, sdev);
+	}
 
-	ssb_device_enable(bp->sdev, 0);
 	b44_clear_stats(bp);
 
 	/*
@@ -2236,6 +2240,7 @@ static void __devexit b44_remove_one(struct ssb_device *sdev)
 	struct net_device *dev = ssb_get_drvdata(sdev);
 
 	unregister_netdev(dev);
+	ssb_device_disable(sdev, 0);
 	ssb_bus_may_powerdown(sdev->bus);
 	free_netdev(dev);
 	ssb_pcihost_set_power_state(sdev, PCI_D3hot);
diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
index 0089746b8d02..bab8a934c33d 100644
--- a/drivers/net/cxgb3/cxgb3_main.c
+++ b/drivers/net/cxgb3/cxgb3_main.c
@@ -90,6 +90,7 @@ static const struct pci_device_id cxgb3_pci_tbl[] = {
 	CH_DEVICE(0x30, 2),	/* T3B10 */
 	CH_DEVICE(0x31, 3),	/* T3B20 */
 	CH_DEVICE(0x32, 1),	/* T3B02 */
+	CH_DEVICE(0x35, 6),	/* T3C20-derived T3C10 */
 	{0,}
 };
 
diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c
index 2d1433077a8e..ac2a974dfe37 100644
--- a/drivers/net/cxgb3/t3_hw.c
+++ b/drivers/net/cxgb3/t3_hw.c
@@ -512,6 +512,13 @@ static const struct adapter_info t3_adap_info[] = {
 	 F_GPIO5_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
 	 { S_GPIO9, S_GPIO3 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
 	 &mi1_mdio_ext_ops, "Chelsio T320"},
+	{},
+	{},
+	{1, 0,
+	 F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO6_OEN | F_GPIO7_OEN |
+	 F_GPIO10_OEN | F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
+	 { S_GPIO9 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
+	 &mi1_mdio_ext_ops, "Chelsio T310" },
 };
 
 /*
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 5b910cf63740..b8251e827059 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -6011,9 +6011,20 @@ static void nv_shutdown(struct pci_dev *pdev)
 	if (netif_running(dev))
 		nv_close(dev);
 
-	nv_restore_mac_addr(pdev);
+	/*
+	 * Restore the MAC so a kernel started by kexec won't get confused.
+	 * If we really go for poweroff, we must not restore the MAC,
+	 * otherwise the MAC for WOL will be reversed at least on some boards.
+	 */
+	if (system_state != SYSTEM_POWER_OFF) {
+		nv_restore_mac_addr(pdev);
+	}
 
 	pci_disable_device(pdev);
+	/*
+	 * Apparently it is not possible to reinitialise from D3 hot,
+	 * only put the device into D3 if we really go for poweroff.
+	 */
 	if (system_state == SYSTEM_POWER_OFF) {
 		if (pci_enable_wake(pdev, PCI_D3cold, np->wolenabled))
 			pci_enable_wake(pdev, PCI_D3hot, np->wolenabled);
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index 9b12a13a640f..9831b3f408aa 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -1284,7 +1284,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	spin_lock_irqsave(&priv->txlock, flags);
 
 	/* check if there is space to queue this packet */
-	if (nr_frags > priv->num_txbdfree) {
+	if ((nr_frags+1) > priv->num_txbdfree) {
 		/* no space, stop the queue */
 		netif_stop_queue(dev);
 		dev->stats.tx_fifo_errors++;
diff --git a/drivers/net/hp-plus.c b/drivers/net/hp-plus.c
index 5e070f446635..0486cbe01adb 100644
--- a/drivers/net/hp-plus.c
+++ b/drivers/net/hp-plus.c
@@ -467,7 +467,7 @@ init_module(void)
 			if (this_dev != 0) break; /* only autoprobe 1st one */
 			printk(KERN_NOTICE "hp-plus.c: Presently autoprobing (not recommended) for a single card.\n");
 		}
-		dev = alloc_ei_netdev();
+		dev = alloc_eip_netdev();
 		if (!dev)
 			break;
 		dev->irq = irq[this_dev];
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index 5f31bbb614af..13f11f402a99 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -1175,7 +1175,7 @@ static void mib_counters_update(struct mv643xx_eth_private *mp)
 {
 	struct mib_counters *p = &mp->mib_counters;
 
-	spin_lock(&mp->mib_counters_lock);
+	spin_lock_bh(&mp->mib_counters_lock);
 	p->good_octets_received += mib_read(mp, 0x00);
 	p->good_octets_received += (u64)mib_read(mp, 0x04) << 32;
 	p->bad_octets_received += mib_read(mp, 0x08);
@@ -1208,7 +1208,7 @@ static void mib_counters_update(struct mv643xx_eth_private *mp)
 	p->bad_crc_event += mib_read(mp, 0x74);
 	p->collision += mib_read(mp, 0x78);
 	p->late_collision += mib_read(mp, 0x7c);
-	spin_unlock(&mp->mib_counters_lock);
+	spin_unlock_bh(&mp->mib_counters_lock);
 
 	mod_timer(&mp->mib_counters_timer, jiffies + 30 * HZ);
 }
@@ -1575,7 +1575,7 @@ oom:
 		return;
 	}
 
-	mc_spec = kmalloc(0x200, GFP_KERNEL);
+	mc_spec = kmalloc(0x200, GFP_ATOMIC);
 	if (mc_spec == NULL)
 		goto oom;
 	mc_other = mc_spec + (0x100 >> 2);
@@ -2216,8 +2216,6 @@ static int mv643xx_eth_stop(struct net_device *dev)
 	wrlp(mp, INT_MASK, 0x00000000);
 	rdlp(mp, INT_MASK);
 
-	del_timer_sync(&mp->mib_counters_timer);
-
 	napi_disable(&mp->napi);
 
 	del_timer_sync(&mp->rx_oom);
@@ -2229,6 +2227,7 @@ static int mv643xx_eth_stop(struct net_device *dev)
 	port_reset(mp);
 	mv643xx_eth_get_stats(dev);
 	mib_counters_update(mp);
+	del_timer_sync(&mp->mib_counters_timer);
 
 	skb_queue_purge(&mp->rx_recycle);
 
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index 9f33e442f403..13087782ac40 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -588,7 +588,12 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
588 adapter->pci_mem_read = netxen_nic_pci_mem_read_2M; 588 adapter->pci_mem_read = netxen_nic_pci_mem_read_2M;
589 adapter->pci_mem_write = netxen_nic_pci_mem_write_2M; 589 adapter->pci_mem_write = netxen_nic_pci_mem_write_2M;
590 590
591 mem_ptr0 = ioremap(mem_base, mem_len); 591 mem_ptr0 = pci_ioremap_bar(pdev, 0);
592 if (mem_ptr0 == NULL) {
593 dev_err(&pdev->dev, "failed to map PCI bar 0\n");
594 return -EIO;
595 }
596
592 pci_len0 = mem_len; 597 pci_len0 = mem_len;
593 first_page_group_start = 0; 598 first_page_group_start = 0;
594 first_page_group_end = 0; 599 first_page_group_end = 0;
@@ -795,9 +800,12 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
795 * See if the firmware gave us a virtual-physical port mapping. 800 * See if the firmware gave us a virtual-physical port mapping.
796 */ 801 */
797 adapter->physical_port = adapter->portnum; 802 adapter->physical_port = adapter->portnum;
798 i = adapter->pci_read_normalize(adapter, CRB_V2P(adapter->portnum)); 803 if (adapter->fw_major < 4) {
799 if (i != 0x55555555) 804 i = adapter->pci_read_normalize(adapter,
800 adapter->physical_port = i; 805 CRB_V2P(adapter->portnum));
806 if (i != 0x55555555)
807 adapter->physical_port = i;
808 }
801 809
802 adapter->flags &= ~(NETXEN_NIC_MSI_ENABLED | NETXEN_NIC_MSIX_ENABLED); 810 adapter->flags &= ~(NETXEN_NIC_MSI_ENABLED | NETXEN_NIC_MSIX_ENABLED);
803 811
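pci_ioremap_bar(), which replaces the raw ioremap() in the netxen probe above, looks up the BAR's bus address and length itself and refuses to map I/O-port BARs. Roughly what it does, as a sketch rather than the kernel's exact implementation:

#include <linux/pci.h>
#include <linux/io.h>

/* sketch of pci_ioremap_bar(pdev, bar) for a memory BAR */
static void __iomem *ioremap_whole_bar(struct pci_dev *pdev, int bar)
{
	if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM))
		return NULL;	/* only MMIO BARs can be ioremap()ed */

	return ioremap(pci_resource_start(pdev, bar),
		       pci_resource_len(pdev, bar));
}

The caller then only has to check the returned pointer, as the added error path above does.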
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 0771eb6fc6eb..b3473401c83a 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -81,9 +81,9 @@ static const int multicast_filter_limit = 32;
81#define RTL8169_TX_TIMEOUT (6*HZ) 81#define RTL8169_TX_TIMEOUT (6*HZ)
82#define RTL8169_PHY_TIMEOUT (10*HZ) 82#define RTL8169_PHY_TIMEOUT (10*HZ)
83 83
84#define RTL_EEPROM_SIG cpu_to_le32(0x8129) 84#define RTL_EEPROM_SIG 0x8129
85#define RTL_EEPROM_SIG_MASK cpu_to_le32(0xffff)
86#define RTL_EEPROM_SIG_ADDR 0x0000 85#define RTL_EEPROM_SIG_ADDR 0x0000
86#define RTL_EEPROM_MAC_ADDR 0x0007
87 87
88/* write/read MMIO register */ 88/* write/read MMIO register */
89#define RTL_W8(reg, val8) writeb ((val8), ioaddr + (reg)) 89#define RTL_W8(reg, val8) writeb ((val8), ioaddr + (reg))
@@ -293,6 +293,11 @@ enum rtl_register_content {
293 /* Cfg9346Bits */ 293 /* Cfg9346Bits */
294 Cfg9346_Lock = 0x00, 294 Cfg9346_Lock = 0x00,
295 Cfg9346_Unlock = 0xc0, 295 Cfg9346_Unlock = 0xc0,
296 Cfg9346_Program = 0x80, /* Programming mode */
297 Cfg9346_EECS = 0x08, /* Chip select */
298 Cfg9346_EESK = 0x04, /* Serial data clock */
299 Cfg9346_EEDI = 0x02, /* Data input */
300 Cfg9346_EEDO = 0x01, /* Data output */
296 301
297 /* rx_mode_bits */ 302 /* rx_mode_bits */
298 AcceptErr = 0x20, 303 AcceptErr = 0x20,
@@ -305,6 +310,7 @@ enum rtl_register_content {
305 /* RxConfigBits */ 310 /* RxConfigBits */
306 RxCfgFIFOShift = 13, 311 RxCfgFIFOShift = 13,
307 RxCfgDMAShift = 8, 312 RxCfgDMAShift = 8,
313 RxCfg9356SEL = 6, /* EEPROM type: 0 = 9346, 1 = 9356 */
308 314
309 /* TxConfigBits */ 315 /* TxConfigBits */
310 TxInterFrameGapShift = 24, 316 TxInterFrameGapShift = 24,
@@ -1963,6 +1969,108 @@ static const struct net_device_ops rtl8169_netdev_ops = {
1963 1969
1964}; 1970};
1965 1971
1972/* Delay between EEPROM clock transitions. Force out buffered PCI writes. */
1973#define RTL_EEPROM_DELAY() RTL_R8(Cfg9346)
1974#define RTL_EEPROM_READ_CMD 6
1975
1976/* read 16bit word stored in EEPROM. EEPROM is addressed by words. */
1977static u16 rtl_eeprom_read(void __iomem *ioaddr, int addr)
1978{
1979 u16 result = 0;
1980 int cmd, cmd_len, i;
1981
1982 /* check for EEPROM address size (in bits) */
1983 if (RTL_R32(RxConfig) & (1 << RxCfg9356SEL)) {
1984 /* EEPROM is 93C56 */
1985 cmd_len = 3 + 8; /* 3 bits for command id and 8 for address */
1986 cmd = (RTL_EEPROM_READ_CMD << 8) | (addr & 0xff);
1987 } else {
1988 /* EEPROM is 93C46 */
1989 cmd_len = 3 + 6; /* 3 bits for command id and 6 for address */
1990 cmd = (RTL_EEPROM_READ_CMD << 6) | (addr & 0x3f);
1991 }
1992
1993 /* enter programming mode */
1994 RTL_W8(Cfg9346, Cfg9346_Program | Cfg9346_EECS);
1995 RTL_EEPROM_DELAY();
1996
1997 /* write command and requested address */
1998 while (cmd_len--) {
1999 u8 x = Cfg9346_Program | Cfg9346_EECS;
2000
2001 x |= (cmd & (1 << cmd_len)) ? Cfg9346_EEDI : 0;
2002
2003 /* write a bit */
2004 RTL_W8(Cfg9346, x);
2005 RTL_EEPROM_DELAY();
2006
2007 /* raise clock */
2008 RTL_W8(Cfg9346, x | Cfg9346_EESK);
2009 RTL_EEPROM_DELAY();
2010 }
2011
2012 /* lower clock */
2013 RTL_W8(Cfg9346, Cfg9346_Program | Cfg9346_EECS);
2014 RTL_EEPROM_DELAY();
2015
2016 /* read back 16bit value */
2017 for (i = 16; i > 0; i--) {
2018 /* raise clock */
2019 RTL_W8(Cfg9346, Cfg9346_Program | Cfg9346_EECS | Cfg9346_EESK);
2020 RTL_EEPROM_DELAY();
2021
2022 result <<= 1;
2023 result |= (RTL_R8(Cfg9346) & Cfg9346_EEDO) ? 1 : 0;
2024
2025 /* lower clock */
2026 RTL_W8(Cfg9346, Cfg9346_Program | Cfg9346_EECS);
2027 RTL_EEPROM_DELAY();
2028 }
2029
2030 RTL_W8(Cfg9346, Cfg9346_Program);
2031 /* leave programming mode */
2032 RTL_W8(Cfg9346, Cfg9346_Lock);
2033
2034 return result;
2035}
2036
2037static void rtl_init_mac_address(struct rtl8169_private *tp,
2038 void __iomem *ioaddr)
2039{
2040 struct pci_dev *pdev = tp->pci_dev;
2041 u16 x;
2042 u8 mac[8];
2043
2044 /* read EEPROM signature */
2045 x = rtl_eeprom_read(ioaddr, RTL_EEPROM_SIG_ADDR);
2046
2047 if (x != RTL_EEPROM_SIG) {
2048 dev_info(&pdev->dev, "Missing EEPROM signature: %04x\n", x);
2049 return;
2050 }
2051
2052 /* read MAC address */
2053 x = rtl_eeprom_read(ioaddr, RTL_EEPROM_MAC_ADDR);
2054 mac[0] = x & 0xff;
2055 mac[1] = x >> 8;
2056 x = rtl_eeprom_read(ioaddr, RTL_EEPROM_MAC_ADDR + 1);
2057 mac[2] = x & 0xff;
2058 mac[3] = x >> 8;
2059 x = rtl_eeprom_read(ioaddr, RTL_EEPROM_MAC_ADDR + 2);
2060 mac[4] = x & 0xff;
2061 mac[5] = x >> 8;
2062
2063 if (netif_msg_probe(tp)) {
2064 DECLARE_MAC_BUF(buf);
2065
2066 dev_info(&pdev->dev, "MAC address found in EEPROM: %s\n",
2067 print_mac(buf, mac));
2068 }
2069
2070 if (is_valid_ether_addr(mac))
2071 rtl_rar_set(tp, mac);
2072}
2073
1966static int __devinit 2074static int __devinit
1967rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) 2075rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1968{ 2076{
@@ -2141,6 +2249,8 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2141 2249
2142 tp->mmio_addr = ioaddr; 2250 tp->mmio_addr = ioaddr;
2143 2251
2252 rtl_init_mac_address(tp, ioaddr);
2253
2144 /* Get MAC address */ 2254 /* Get MAC address */
2145 for (i = 0; i < MAC_ADDR_LEN; i++) 2255 for (i = 0; i < MAC_ADDR_LEN; i++)
2146 dev->dev_addr[i] = RTL_R8(MAC0 + i); 2256 dev->dev_addr[i] = RTL_R8(MAC0 + i);
diff --git a/drivers/net/smsc911x.c b/drivers/net/smsc911x.c
index 783c1a7b869e..9a78daec2fe9 100644
--- a/drivers/net/smsc911x.c
+++ b/drivers/net/smsc911x.c
@@ -1624,7 +1624,7 @@ static int smsc911x_eeprom_send_cmd(struct smsc911x_data *pdata, u32 op)
1624 do { 1624 do {
1625 msleep(1); 1625 msleep(1);
1626 e2cmd = smsc911x_reg_read(pdata, E2P_CMD); 1626 e2cmd = smsc911x_reg_read(pdata, E2P_CMD);
1627 } while ((e2cmd & E2P_CMD_EPC_BUSY_) && (timeout--)); 1627 } while ((e2cmd & E2P_CMD_EPC_BUSY_) && (--timeout));
1628 1628
1629 if (!timeout) { 1629 if (!timeout) {
1630 SMSC_TRACE(DRV, "TIMED OUT"); 1630 SMSC_TRACE(DRV, "TIMED OUT");
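The smsc911x change above, and the identical smsc9420 change below, fix a post- versus pre-decrement busy-wait bug: with timeout-- the counter is left at -1 once it expires, so the following if (!timeout) never reports the timeout; with --timeout it ends at 0 and the check fires. A standalone illustration (plain user-space C, not driver code):

#include <stdio.h>

int main(void)
{
	int busy = 1;			/* pretend the hardware never goes idle */
	int timeout = 3;

	while (busy && timeout--)	/* post-decrement: exits with timeout == -1 */
		;
	printf("post-decrement: timeout=%d detected=%d\n", timeout, !timeout);

	busy = 1;
	timeout = 3;
	while (busy && --timeout)	/* pre-decrement: exits with timeout == 0 */
		;
	printf("pre-decrement:  timeout=%d detected=%d\n", timeout, !timeout);
	return 0;
}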
diff --git a/drivers/net/smsc9420.c b/drivers/net/smsc9420.c
index a1e4b3895b33..4e15ae068b3f 100644
--- a/drivers/net/smsc9420.c
+++ b/drivers/net/smsc9420.c
@@ -341,7 +341,7 @@ static int smsc9420_eeprom_send_cmd(struct smsc9420_pdata *pd, u32 op)
341 do { 341 do {
342 msleep(1); 342 msleep(1);
343 e2cmd = smsc9420_reg_read(pd, E2P_CMD); 343 e2cmd = smsc9420_reg_read(pd, E2P_CMD);
344 } while ((e2cmd & E2P_CMD_EPC_BUSY_) && (timeout--)); 344 } while ((e2cmd & E2P_CMD_EPC_BUSY_) && (--timeout));
345 345
346 if (!timeout) { 346 if (!timeout) {
347 smsc_info(HW, "TIMED OUT"); 347 smsc_info(HW, "TIMED OUT");
@@ -413,6 +413,7 @@ static int smsc9420_ethtool_get_eeprom(struct net_device *dev,
413 } 413 }
414 414
415 memcpy(data, &eeprom_data[eeprom->offset], len); 415 memcpy(data, &eeprom_data[eeprom->offset], len);
416 eeprom->magic = SMSC9420_EEPROM_MAGIC;
416 eeprom->len = len; 417 eeprom->len = len;
417 return 0; 418 return 0;
418} 419}
@@ -423,6 +424,9 @@ static int smsc9420_ethtool_set_eeprom(struct net_device *dev,
423 struct smsc9420_pdata *pd = netdev_priv(dev); 424 struct smsc9420_pdata *pd = netdev_priv(dev);
424 int ret; 425 int ret;
425 426
427 if (eeprom->magic != SMSC9420_EEPROM_MAGIC)
428 return -EINVAL;
429
426 smsc9420_eeprom_enable_access(pd); 430 smsc9420_eeprom_enable_access(pd);
427 smsc9420_eeprom_send_cmd(pd, E2P_CMD_EPC_CMD_EWEN_); 431 smsc9420_eeprom_send_cmd(pd, E2P_CMD_EPC_CMD_EWEN_);
428 ret = smsc9420_eeprom_write_location(pd, eeprom->offset, *data); 432 ret = smsc9420_eeprom_write_location(pd, eeprom->offset, *data);
diff --git a/drivers/net/smsc9420.h b/drivers/net/smsc9420.h
index 69c351f93f86..e441402f77a2 100644
--- a/drivers/net/smsc9420.h
+++ b/drivers/net/smsc9420.h
@@ -44,6 +44,7 @@
44#define LAN_REGISTER_EXTENT (0x400) 44#define LAN_REGISTER_EXTENT (0x400)
45 45
46#define SMSC9420_EEPROM_SIZE ((u32)11) 46#define SMSC9420_EEPROM_SIZE ((u32)11)
47#define SMSC9420_EEPROM_MAGIC (0x9420)
47 48
48#define PKT_BUF_SZ (VLAN_ETH_FRAME_LEN + NET_IP_ALIGN + 4) 49#define PKT_BUF_SZ (VLAN_ETH_FRAME_LEN + NET_IP_ALIGN + 4)
49 50
diff --git a/drivers/net/sundance.c b/drivers/net/sundance.c
index feaf0e0577d7..43695b76606f 100644
--- a/drivers/net/sundance.c
+++ b/drivers/net/sundance.c
@@ -909,7 +909,7 @@ static void check_duplex(struct net_device *dev)
909 printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d " 909 printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d "
910 "negotiated capability %4.4x.\n", dev->name, 910 "negotiated capability %4.4x.\n", dev->name,
911 duplex ? "full" : "half", np->phys[0], negotiated); 911 duplex ? "full" : "half", np->phys[0], negotiated);
912 iowrite16(ioread16(ioaddr + MACCtrl0) | duplex ? 0x20 : 0, ioaddr + MACCtrl0); 912 iowrite16(ioread16(ioaddr + MACCtrl0) | (duplex ? 0x20 : 0), ioaddr + MACCtrl0);
913 } 913 }
914} 914}
915 915
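The one-character sundance.c fix above is an operator-precedence bug: '|' binds tighter than '?:', so the old expression wrote either 0x20 or 0 to MACCtrl0 instead of OR-ing the duplex bit into the value read back. The same shape in isolation (made-up register value):

#include <stdio.h>

int main(void)
{
	unsigned int reg = 0x1200, duplex = 1;

	/* buggy: parses as (reg | duplex) ? 0x20 : 0, discarding reg */
	unsigned int bad = reg | duplex ? 0x20 : 0;

	/* fixed: OR the duplex flag bit into the register value */
	unsigned int good = reg | (duplex ? 0x20 : 0);

	printf("bad=0x%x good=0x%x\n", bad, good);	/* bad=0x20 good=0x1220 */
	return 0;
}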
diff --git a/drivers/net/sungem.c b/drivers/net/sungem.c
index 491876341068..8d64b1da0465 100644
--- a/drivers/net/sungem.c
+++ b/drivers/net/sungem.c
@@ -1157,7 +1157,7 @@ static void gem_pcs_reset(struct gem *gp)
1157 if (limit-- <= 0) 1157 if (limit-- <= 0)
1158 break; 1158 break;
1159 } 1159 }
1160 if (limit <= 0) 1160 if (limit < 0)
1161 printk(KERN_WARNING "%s: PCS reset bit would not clear.\n", 1161 printk(KERN_WARNING "%s: PCS reset bit would not clear.\n",
1162 gp->dev->name); 1162 gp->dev->name);
1163} 1163}
diff --git a/drivers/net/sunlance.c b/drivers/net/sunlance.c
index 281373281756..16c528db7251 100644
--- a/drivers/net/sunlance.c
+++ b/drivers/net/sunlance.c
@@ -343,7 +343,7 @@ static void lance_init_ring_dvma(struct net_device *dev)
343 ib->phys_addr [5] = dev->dev_addr [4]; 343 ib->phys_addr [5] = dev->dev_addr [4];
344 344
345 /* Setup the Tx ring entries */ 345 /* Setup the Tx ring entries */
346 for (i = 0; i <= TX_RING_SIZE; i++) { 346 for (i = 0; i < TX_RING_SIZE; i++) {
347 leptr = LANCE_ADDR(aib + libbuff_offset(tx_buf, i)); 347 leptr = LANCE_ADDR(aib + libbuff_offset(tx_buf, i));
348 ib->btx_ring [i].tmd0 = leptr; 348 ib->btx_ring [i].tmd0 = leptr;
349 ib->btx_ring [i].tmd1_hadr = leptr >> 16; 349 ib->btx_ring [i].tmd1_hadr = leptr >> 16;
@@ -399,7 +399,7 @@ static void lance_init_ring_pio(struct net_device *dev)
399 sbus_writeb(dev->dev_addr[4], &ib->phys_addr[5]); 399 sbus_writeb(dev->dev_addr[4], &ib->phys_addr[5]);
400 400
401 /* Setup the Tx ring entries */ 401 /* Setup the Tx ring entries */
402 for (i = 0; i <= TX_RING_SIZE; i++) { 402 for (i = 0; i < TX_RING_SIZE; i++) {
403 leptr = libbuff_offset(tx_buf, i); 403 leptr = libbuff_offset(tx_buf, i);
404 sbus_writew(leptr, &ib->btx_ring [i].tmd0); 404 sbus_writew(leptr, &ib->btx_ring [i].tmd0);
405 sbus_writeb(leptr >> 16,&ib->btx_ring [i].tmd1_hadr); 405 sbus_writeb(leptr >> 16,&ib->btx_ring [i].tmd1_hadr);
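Both sunlance.c loops above had the usual ring off-by-one: an array of TX_RING_SIZE descriptors has valid indices 0 through TX_RING_SIZE - 1, so "i <= TX_RING_SIZE" initializes one element past the end of the array. Schematically (hypothetical ring, not the driver's structures):

#define RING_SIZE 16

struct desc { unsigned int addr; };
static struct desc ring[RING_SIZE];

static void init_ring(void)
{
	int i;

	/* correct: touches ring[0] .. ring[RING_SIZE - 1] only */
	for (i = 0; i < RING_SIZE; i++)
		ring[i].addr = 0;

	/* "i <= RING_SIZE" would also write ring[RING_SIZE], one element
	 * past the array - exactly the overrun removed above. */
}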
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 4595962fb8e1..b080f9493d83 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -2237,8 +2237,8 @@ static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
2237 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask; 2237 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
2238 if (phyid != TG3_PHY_ID_BCMAC131) { 2238 if (phyid != TG3_PHY_ID_BCMAC131) {
2239 phyid &= TG3_PHY_OUI_MASK; 2239 phyid &= TG3_PHY_OUI_MASK;
2240 if (phyid == TG3_PHY_OUI_1 && 2240 if (phyid == TG3_PHY_OUI_1 ||
2241 phyid == TG3_PHY_OUI_2 && 2241 phyid == TG3_PHY_OUI_2 ||
2242 phyid == TG3_PHY_OUI_3) 2242 phyid == TG3_PHY_OUI_3)
2243 do_low_power = true; 2243 do_low_power = true;
2244 } 2244 }
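The tg3 condition above could never be true, since one phyid cannot equal three different OUI constants at once; with '&&' the chain always evaluated false and do_low_power was never set, while '||' matches any one of the three. Reduced to its essentials (the OUI values here are made up, not the driver's):

#include <stdio.h>

#define OUI_1 0x00100000
#define OUI_2 0x00200000
#define OUI_3 0x00300000

int main(void)
{
	unsigned int phyid = OUI_2;

	/* always 0: phyid would have to equal all three values at once */
	int broken = (phyid == OUI_1 && phyid == OUI_2 && phyid == OUI_3);

	/* true when phyid matches any one of the three OUIs */
	int fixed = (phyid == OUI_1 || phyid == OUI_2 || phyid == OUI_3);

	printf("broken=%d fixed=%d\n", broken, fixed);	/* broken=0 fixed=1 */
	return 0;
}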
diff --git a/drivers/net/usb/asix.c b/drivers/net/usb/asix.c
index e009481c606c..396f821b5ff0 100644
--- a/drivers/net/usb/asix.c
+++ b/drivers/net/usb/asix.c
@@ -1451,6 +1451,14 @@ static const struct usb_device_id products [] = {
1451 // Cables-to-Go USB Ethernet Adapter 1451 // Cables-to-Go USB Ethernet Adapter
1452 USB_DEVICE(0x0b95, 0x772a), 1452 USB_DEVICE(0x0b95, 0x772a),
1453 .driver_info = (unsigned long) &ax88772_info, 1453 .driver_info = (unsigned long) &ax88772_info,
1454}, {
1455 // ABOCOM for pci
1456 USB_DEVICE(0x14ea, 0xab11),
1457 .driver_info = (unsigned long) &ax88178_info,
1458}, {
1459 // ASIX 88772a
1460 USB_DEVICE(0x0db0, 0xa877),
1461 .driver_info = (unsigned long) &ax88772_info,
1454}, 1462},
1455 { }, // END 1463 { }, // END
1456}; 1464};
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 0e061dfea78d..55e8ecc3a9e5 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -559,6 +559,11 @@ static const struct usb_device_id products [] = {
559 USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET, 559 USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET,
560 USB_CDC_PROTO_NONE), 560 USB_CDC_PROTO_NONE),
561 .driver_info = (unsigned long) &cdc_info, 561 .driver_info = (unsigned long) &cdc_info,
562}, {
563 /* Ericsson F3507g */
564 USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1900, USB_CLASS_COMM,
565 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
566 .driver_info = (unsigned long) &cdc_info,
562}, 567},
563 { }, // END 568 { }, // END
564}; 569};
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index aa3149078888..c32284ff3f54 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -723,8 +723,8 @@ u32 usbnet_get_link (struct net_device *net)
723 if (dev->mii.mdio_read) 723 if (dev->mii.mdio_read)
724 return mii_link_ok(&dev->mii); 724 return mii_link_ok(&dev->mii);
725 725
726 /* Otherwise, say we're up (to avoid breaking scripts) */ 726 /* Otherwise, dtrt for drivers calling netif_carrier_{on,off} */
727 return 1; 727 return ethtool_op_get_link(net);
728} 728}
729EXPORT_SYMBOL_GPL(usbnet_get_link); 729EXPORT_SYMBOL_GPL(usbnet_get_link);
730 730
diff --git a/drivers/net/usb/zaurus.c b/drivers/net/usb/zaurus.c
index e24f7b3ace4b..04882c8f9bf1 100644
--- a/drivers/net/usb/zaurus.c
+++ b/drivers/net/usb/zaurus.c
@@ -341,6 +341,11 @@ static const struct usb_device_id products [] = {
341 USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_MDLM, 341 USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_MDLM,
342 USB_CDC_PROTO_NONE), 342 USB_CDC_PROTO_NONE),
343 .driver_info = (unsigned long) &bogus_mdlm_info, 343 .driver_info = (unsigned long) &bogus_mdlm_info,
344}, {
345 /* Motorola MOTOMAGX phones */
346 USB_DEVICE_AND_INTERFACE_INFO(0x22b8, 0x6425, USB_CLASS_COMM,
347 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
348 .driver_info = (unsigned long) &bogus_mdlm_info,
344}, 349},
345 350
346/* Olympus has some models with a Zaurus-compatible option. 351/* Olympus has some models with a Zaurus-compatible option.
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 852d0e7c4e62..124fe75b8a8a 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -239,6 +239,16 @@ static int veth_open(struct net_device *dev)
239 return 0; 239 return 0;
240} 240}
241 241
242static int veth_close(struct net_device *dev)
243{
244 struct veth_priv *priv = netdev_priv(dev);
245
246 netif_carrier_off(dev);
247 netif_carrier_off(priv->peer);
248
249 return 0;
250}
251
242static int veth_dev_init(struct net_device *dev) 252static int veth_dev_init(struct net_device *dev)
243{ 253{
244 struct veth_net_stats *stats; 254 struct veth_net_stats *stats;
@@ -263,10 +273,12 @@ static void veth_dev_free(struct net_device *dev)
263} 273}
264 274
265static const struct net_device_ops veth_netdev_ops = { 275static const struct net_device_ops veth_netdev_ops = {
266 .ndo_init = veth_dev_init, 276 .ndo_init = veth_dev_init,
267 .ndo_open = veth_open, 277 .ndo_open = veth_open,
268 .ndo_start_xmit = veth_xmit, 278 .ndo_stop = veth_close,
269 .ndo_get_stats = veth_get_stats, 279 .ndo_start_xmit = veth_xmit,
280 .ndo_get_stats = veth_get_stats,
281 .ndo_set_mac_address = eth_mac_addr,
270}; 282};
271 283
272static void veth_setup(struct net_device *dev) 284static void veth_setup(struct net_device *dev)
@@ -279,44 +291,6 @@ static void veth_setup(struct net_device *dev)
279 dev->destructor = veth_dev_free; 291 dev->destructor = veth_dev_free;
280} 292}
281 293
282static void veth_change_state(struct net_device *dev)
283{
284 struct net_device *peer;
285 struct veth_priv *priv;
286
287 priv = netdev_priv(dev);
288 peer = priv->peer;
289
290 if (netif_carrier_ok(peer)) {
291 if (!netif_carrier_ok(dev))
292 netif_carrier_on(dev);
293 } else {
294 if (netif_carrier_ok(dev))
295 netif_carrier_off(dev);
296 }
297}
298
299static int veth_device_event(struct notifier_block *unused,
300 unsigned long event, void *ptr)
301{
302 struct net_device *dev = ptr;
303
304 if (dev->netdev_ops->ndo_open != veth_open)
305 goto out;
306
307 switch (event) {
308 case NETDEV_CHANGE:
309 veth_change_state(dev);
310 break;
311 }
312out:
313 return NOTIFY_DONE;
314}
315
316static struct notifier_block veth_notifier_block __read_mostly = {
317 .notifier_call = veth_device_event,
318};
319
320/* 294/*
321 * netlink interface 295 * netlink interface
322 */ 296 */
@@ -467,14 +441,12 @@ static struct rtnl_link_ops veth_link_ops = {
467 441
468static __init int veth_init(void) 442static __init int veth_init(void)
469{ 443{
470 register_netdevice_notifier(&veth_notifier_block);
471 return rtnl_link_register(&veth_link_ops); 444 return rtnl_link_register(&veth_link_ops);
472} 445}
473 446
474static __exit void veth_exit(void) 447static __exit void veth_exit(void)
475{ 448{
476 rtnl_link_unregister(&veth_link_ops); 449 rtnl_link_unregister(&veth_link_ops);
477 unregister_netdevice_notifier(&veth_notifier_block);
478} 450}
479 451
480module_init(veth_init); 452module_init(veth_init);
diff --git a/drivers/net/wimax/i2400m/i2400m.h b/drivers/net/wimax/i2400m/i2400m.h
index 067c871cc226..3b9d27ea2950 100644
--- a/drivers/net/wimax/i2400m/i2400m.h
+++ b/drivers/net/wimax/i2400m/i2400m.h
@@ -157,7 +157,7 @@ enum {
157 157
158 158
159/* Firmware version we request when pulling the fw image file */ 159/* Firmware version we request when pulling the fw image file */
160#define I2400M_FW_VERSION "1.3" 160#define I2400M_FW_VERSION "1.4"
161 161
162 162
163/** 163/**
diff --git a/drivers/net/wireless/ath9k/main.c b/drivers/net/wireless/ath9k/main.c
index 727f067aca4f..0e80990d8e84 100644
--- a/drivers/net/wireless/ath9k/main.c
+++ b/drivers/net/wireless/ath9k/main.c
@@ -1538,6 +1538,7 @@ bad2:
1538bad: 1538bad:
1539 if (ah) 1539 if (ah)
1540 ath9k_hw_detach(ah); 1540 ath9k_hw_detach(ah);
1541 ath9k_exit_debug(sc);
1541 1542
1542 return error; 1543 return error;
1543} 1544}
@@ -1545,7 +1546,7 @@ bad:
1545static int ath_attach(u16 devid, struct ath_softc *sc) 1546static int ath_attach(u16 devid, struct ath_softc *sc)
1546{ 1547{
1547 struct ieee80211_hw *hw = sc->hw; 1548 struct ieee80211_hw *hw = sc->hw;
1548 int error = 0; 1549 int error = 0, i;
1549 1550
1550 DPRINTF(sc, ATH_DBG_CONFIG, "Attach ATH hw\n"); 1551 DPRINTF(sc, ATH_DBG_CONFIG, "Attach ATH hw\n");
1551 1552
@@ -1589,11 +1590,11 @@ static int ath_attach(u16 devid, struct ath_softc *sc)
1589 /* initialize tx/rx engine */ 1590 /* initialize tx/rx engine */
1590 error = ath_tx_init(sc, ATH_TXBUF); 1591 error = ath_tx_init(sc, ATH_TXBUF);
1591 if (error != 0) 1592 if (error != 0)
1592 goto detach; 1593 goto error_attach;
1593 1594
1594 error = ath_rx_init(sc, ATH_RXBUF); 1595 error = ath_rx_init(sc, ATH_RXBUF);
1595 if (error != 0) 1596 if (error != 0)
1596 goto detach; 1597 goto error_attach;
1597 1598
1598#if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE) 1599#if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
1599 /* Initialze h/w Rfkill */ 1600 /* Initialze h/w Rfkill */
@@ -1601,8 +1602,9 @@ static int ath_attach(u16 devid, struct ath_softc *sc)
1601 INIT_DELAYED_WORK(&sc->rf_kill.rfkill_poll, ath_rfkill_poll); 1602 INIT_DELAYED_WORK(&sc->rf_kill.rfkill_poll, ath_rfkill_poll);
1602 1603
1603 /* Initialize s/w rfkill */ 1604 /* Initialize s/w rfkill */
1604 if (ath_init_sw_rfkill(sc)) 1605 error = ath_init_sw_rfkill(sc);
1605 goto detach; 1606 if (error)
1607 goto error_attach;
1606#endif 1608#endif
1607 1609
1608 error = ieee80211_register_hw(hw); 1610 error = ieee80211_register_hw(hw);
@@ -1611,8 +1613,16 @@ static int ath_attach(u16 devid, struct ath_softc *sc)
1611 ath_init_leds(sc); 1613 ath_init_leds(sc);
1612 1614
1613 return 0; 1615 return 0;
1614detach: 1616
1615 ath_detach(sc); 1617error_attach:
1618 /* cleanup tx queues */
1619 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
1620 if (ATH_TXQ_SETUP(sc, i))
1621 ath_tx_cleanupq(sc, &sc->tx.txq[i]);
1622
1623 ath9k_hw_detach(sc->sc_ah);
1624 ath9k_exit_debug(sc);
1625
1616 return error; 1626 return error;
1617} 1627}
1618 1628
diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c
index b0ee86c62685..ab13ff22a8c0 100644
--- a/drivers/net/wireless/iwlwifi/iwl-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-tx.c
@@ -148,7 +148,7 @@ static void iwl_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq)
148 pci_unmap_single(dev, 148 pci_unmap_single(dev,
149 pci_unmap_addr(&txq->cmd[index]->meta, mapping), 149 pci_unmap_addr(&txq->cmd[index]->meta, mapping),
150 pci_unmap_len(&txq->cmd[index]->meta, len), 150 pci_unmap_len(&txq->cmd[index]->meta, len),
151 PCI_DMA_TODEVICE); 151 PCI_DMA_BIDIRECTIONAL);
152 152
153 /* Unmap chunks, if any. */ 153 /* Unmap chunks, if any. */
154 for (i = 1; i < num_tbs; i++) { 154 for (i = 1; i < num_tbs; i++) {
@@ -964,7 +964,7 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
964 * within command buffer array. */ 964 * within command buffer array. */
965 txcmd_phys = pci_map_single(priv->pci_dev, 965 txcmd_phys = pci_map_single(priv->pci_dev,
966 out_cmd, sizeof(struct iwl_cmd), 966 out_cmd, sizeof(struct iwl_cmd),
967 PCI_DMA_TODEVICE); 967 PCI_DMA_BIDIRECTIONAL);
968 pci_unmap_addr_set(&out_cmd->meta, mapping, txcmd_phys); 968 pci_unmap_addr_set(&out_cmd->meta, mapping, txcmd_phys);
969 pci_unmap_len_set(&out_cmd->meta, len, sizeof(struct iwl_cmd)); 969 pci_unmap_len_set(&out_cmd->meta, len, sizeof(struct iwl_cmd));
970 /* Add buffer containing Tx command and MAC(!) header to TFD's 970 /* Add buffer containing Tx command and MAC(!) header to TFD's
@@ -1115,7 +1115,7 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
1115 IWL_MAX_SCAN_SIZE : sizeof(struct iwl_cmd); 1115 IWL_MAX_SCAN_SIZE : sizeof(struct iwl_cmd);
1116 1116
1117 phys_addr = pci_map_single(priv->pci_dev, out_cmd, 1117 phys_addr = pci_map_single(priv->pci_dev, out_cmd,
1118 len, PCI_DMA_TODEVICE); 1118 len, PCI_DMA_BIDIRECTIONAL);
1119 pci_unmap_addr_set(&out_cmd->meta, mapping, phys_addr); 1119 pci_unmap_addr_set(&out_cmd->meta, mapping, phys_addr);
1120 pci_unmap_len_set(&out_cmd->meta, len, len); 1120 pci_unmap_len_set(&out_cmd->meta, len, len);
1121 phys_addr += offsetof(struct iwl_cmd, hdr); 1121 phys_addr += offsetof(struct iwl_cmd, hdr);
@@ -1212,7 +1212,7 @@ static void iwl_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id,
1212 pci_unmap_single(priv->pci_dev, 1212 pci_unmap_single(priv->pci_dev,
1213 pci_unmap_addr(&txq->cmd[cmd_idx]->meta, mapping), 1213 pci_unmap_addr(&txq->cmd[cmd_idx]->meta, mapping),
1214 pci_unmap_len(&txq->cmd[cmd_idx]->meta, len), 1214 pci_unmap_len(&txq->cmd[cmd_idx]->meta, len),
1215 PCI_DMA_TODEVICE); 1215 PCI_DMA_BIDIRECTIONAL);
1216 1216
1217 for (idx = iwl_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx; 1217 for (idx = iwl_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
1218 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) { 1218 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
diff --git a/drivers/net/wireless/libertas/ethtool.c b/drivers/net/wireless/libertas/ethtool.c
index 61d2f50470c8..b118a35ec605 100644
--- a/drivers/net/wireless/libertas/ethtool.c
+++ b/drivers/net/wireless/libertas/ethtool.c
@@ -23,7 +23,7 @@ static const char * mesh_stat_strings[]= {
23static void lbs_ethtool_get_drvinfo(struct net_device *dev, 23static void lbs_ethtool_get_drvinfo(struct net_device *dev,
24 struct ethtool_drvinfo *info) 24 struct ethtool_drvinfo *info)
25{ 25{
26 struct lbs_private *priv = netdev_priv(dev); 26 struct lbs_private *priv = dev->ml_priv;
27 27
28 snprintf(info->fw_version, 32, "%u.%u.%u.p%u", 28 snprintf(info->fw_version, 32, "%u.%u.%u.p%u",
29 priv->fwrelease >> 24 & 0xff, 29 priv->fwrelease >> 24 & 0xff,
@@ -47,7 +47,7 @@ static int lbs_ethtool_get_eeprom_len(struct net_device *dev)
47static int lbs_ethtool_get_eeprom(struct net_device *dev, 47static int lbs_ethtool_get_eeprom(struct net_device *dev,
48 struct ethtool_eeprom *eeprom, u8 * bytes) 48 struct ethtool_eeprom *eeprom, u8 * bytes)
49{ 49{
50 struct lbs_private *priv = netdev_priv(dev); 50 struct lbs_private *priv = dev->ml_priv;
51 struct cmd_ds_802_11_eeprom_access cmd; 51 struct cmd_ds_802_11_eeprom_access cmd;
52 int ret; 52 int ret;
53 53
@@ -76,7 +76,7 @@ out:
76static void lbs_ethtool_get_stats(struct net_device *dev, 76static void lbs_ethtool_get_stats(struct net_device *dev,
77 struct ethtool_stats *stats, uint64_t *data) 77 struct ethtool_stats *stats, uint64_t *data)
78{ 78{
79 struct lbs_private *priv = netdev_priv(dev); 79 struct lbs_private *priv = dev->ml_priv;
80 struct cmd_ds_mesh_access mesh_access; 80 struct cmd_ds_mesh_access mesh_access;
81 int ret; 81 int ret;
82 82
@@ -113,7 +113,7 @@ static void lbs_ethtool_get_stats(struct net_device *dev,
113 113
114static int lbs_ethtool_get_sset_count(struct net_device *dev, int sset) 114static int lbs_ethtool_get_sset_count(struct net_device *dev, int sset)
115{ 115{
116 struct lbs_private *priv = netdev_priv(dev); 116 struct lbs_private *priv = dev->ml_priv;
117 117
118 if (sset == ETH_SS_STATS && dev == priv->mesh_dev) 118 if (sset == ETH_SS_STATS && dev == priv->mesh_dev)
119 return MESH_STATS_NUM; 119 return MESH_STATS_NUM;
@@ -143,7 +143,7 @@ static void lbs_ethtool_get_strings(struct net_device *dev,
143static void lbs_ethtool_get_wol(struct net_device *dev, 143static void lbs_ethtool_get_wol(struct net_device *dev,
144 struct ethtool_wolinfo *wol) 144 struct ethtool_wolinfo *wol)
145{ 145{
146 struct lbs_private *priv = netdev_priv(dev); 146 struct lbs_private *priv = dev->ml_priv;
147 147
148 if (priv->wol_criteria == 0xffffffff) { 148 if (priv->wol_criteria == 0xffffffff) {
149 /* Interface driver didn't configure wake */ 149 /* Interface driver didn't configure wake */
@@ -166,7 +166,7 @@ static void lbs_ethtool_get_wol(struct net_device *dev,
166static int lbs_ethtool_set_wol(struct net_device *dev, 166static int lbs_ethtool_set_wol(struct net_device *dev,
167 struct ethtool_wolinfo *wol) 167 struct ethtool_wolinfo *wol)
168{ 168{
169 struct lbs_private *priv = netdev_priv(dev); 169 struct lbs_private *priv = dev->ml_priv;
170 uint32_t criteria = 0; 170 uint32_t criteria = 0;
171 171
172 if (priv->wol_criteria == 0xffffffff && wol->wolopts) 172 if (priv->wol_criteria == 0xffffffff && wol->wolopts)
diff --git a/drivers/net/wireless/libertas/if_usb.c b/drivers/net/wireless/libertas/if_usb.c
index 2fc637ad85c7..ea3dc038be76 100644
--- a/drivers/net/wireless/libertas/if_usb.c
+++ b/drivers/net/wireless/libertas/if_usb.c
@@ -59,7 +59,7 @@ static int if_usb_reset_device(struct if_usb_card *cardp);
59static ssize_t if_usb_firmware_set(struct device *dev, 59static ssize_t if_usb_firmware_set(struct device *dev,
60 struct device_attribute *attr, const char *buf, size_t count) 60 struct device_attribute *attr, const char *buf, size_t count)
61{ 61{
62 struct lbs_private *priv = netdev_priv(to_net_dev(dev)); 62 struct lbs_private *priv = to_net_dev(dev)->ml_priv;
63 struct if_usb_card *cardp = priv->card; 63 struct if_usb_card *cardp = priv->card;
64 char fwname[FIRMWARE_NAME_MAX]; 64 char fwname[FIRMWARE_NAME_MAX];
65 int ret; 65 int ret;
@@ -86,7 +86,7 @@ static DEVICE_ATTR(lbs_flash_fw, 0200, NULL, if_usb_firmware_set);
86static ssize_t if_usb_boot2_set(struct device *dev, 86static ssize_t if_usb_boot2_set(struct device *dev,
87 struct device_attribute *attr, const char *buf, size_t count) 87 struct device_attribute *attr, const char *buf, size_t count)
88{ 88{
89 struct lbs_private *priv = netdev_priv(to_net_dev(dev)); 89 struct lbs_private *priv = to_net_dev(dev)->ml_priv;
90 struct if_usb_card *cardp = priv->card; 90 struct if_usb_card *cardp = priv->card;
91 char fwname[FIRMWARE_NAME_MAX]; 91 char fwname[FIRMWARE_NAME_MAX];
92 int ret; 92 int ret;
diff --git a/drivers/net/wireless/libertas/main.c b/drivers/net/wireless/libertas/main.c
index 4e0007d20030..f76623e0ff9a 100644
--- a/drivers/net/wireless/libertas/main.c
+++ b/drivers/net/wireless/libertas/main.c
@@ -222,7 +222,7 @@ u8 lbs_data_rate_to_fw_index(u32 rate)
222static ssize_t lbs_anycast_get(struct device *dev, 222static ssize_t lbs_anycast_get(struct device *dev,
223 struct device_attribute *attr, char * buf) 223 struct device_attribute *attr, char * buf)
224{ 224{
225 struct lbs_private *priv = netdev_priv(to_net_dev(dev)); 225 struct lbs_private *priv = to_net_dev(dev)->ml_priv;
226 struct cmd_ds_mesh_access mesh_access; 226 struct cmd_ds_mesh_access mesh_access;
227 int ret; 227 int ret;
228 228
@@ -241,7 +241,7 @@ static ssize_t lbs_anycast_get(struct device *dev,
241static ssize_t lbs_anycast_set(struct device *dev, 241static ssize_t lbs_anycast_set(struct device *dev,
242 struct device_attribute *attr, const char * buf, size_t count) 242 struct device_attribute *attr, const char * buf, size_t count)
243{ 243{
244 struct lbs_private *priv = netdev_priv(to_net_dev(dev)); 244 struct lbs_private *priv = to_net_dev(dev)->ml_priv;
245 struct cmd_ds_mesh_access mesh_access; 245 struct cmd_ds_mesh_access mesh_access;
246 uint32_t datum; 246 uint32_t datum;
247 int ret; 247 int ret;
@@ -263,7 +263,7 @@ static ssize_t lbs_anycast_set(struct device *dev,
263static ssize_t lbs_prb_rsp_limit_get(struct device *dev, 263static ssize_t lbs_prb_rsp_limit_get(struct device *dev,
264 struct device_attribute *attr, char *buf) 264 struct device_attribute *attr, char *buf)
265{ 265{
266 struct lbs_private *priv = netdev_priv(to_net_dev(dev)); 266 struct lbs_private *priv = to_net_dev(dev)->ml_priv;
267 struct cmd_ds_mesh_access mesh_access; 267 struct cmd_ds_mesh_access mesh_access;
268 int ret; 268 int ret;
269 u32 retry_limit; 269 u32 retry_limit;
@@ -286,7 +286,7 @@ static ssize_t lbs_prb_rsp_limit_get(struct device *dev,
286static ssize_t lbs_prb_rsp_limit_set(struct device *dev, 286static ssize_t lbs_prb_rsp_limit_set(struct device *dev,
287 struct device_attribute *attr, const char *buf, size_t count) 287 struct device_attribute *attr, const char *buf, size_t count)
288{ 288{
289 struct lbs_private *priv = netdev_priv(to_net_dev(dev)); 289 struct lbs_private *priv = to_net_dev(dev)->ml_priv;
290 struct cmd_ds_mesh_access mesh_access; 290 struct cmd_ds_mesh_access mesh_access;
291 int ret; 291 int ret;
292 unsigned long retry_limit; 292 unsigned long retry_limit;
@@ -321,7 +321,7 @@ static void lbs_remove_mesh(struct lbs_private *priv);
321static ssize_t lbs_rtap_get(struct device *dev, 321static ssize_t lbs_rtap_get(struct device *dev,
322 struct device_attribute *attr, char * buf) 322 struct device_attribute *attr, char * buf)
323{ 323{
324 struct lbs_private *priv = netdev_priv(to_net_dev(dev)); 324 struct lbs_private *priv = to_net_dev(dev)->ml_priv;
325 return snprintf(buf, 5, "0x%X\n", priv->monitormode); 325 return snprintf(buf, 5, "0x%X\n", priv->monitormode);
326} 326}
327 327
@@ -332,7 +332,7 @@ static ssize_t lbs_rtap_set(struct device *dev,
332 struct device_attribute *attr, const char * buf, size_t count) 332 struct device_attribute *attr, const char * buf, size_t count)
333{ 333{
334 int monitor_mode; 334 int monitor_mode;
335 struct lbs_private *priv = netdev_priv(to_net_dev(dev)); 335 struct lbs_private *priv = to_net_dev(dev)->ml_priv;
336 336
337 sscanf(buf, "%x", &monitor_mode); 337 sscanf(buf, "%x", &monitor_mode);
338 if (monitor_mode) { 338 if (monitor_mode) {
@@ -383,7 +383,7 @@ static DEVICE_ATTR(lbs_rtap, 0644, lbs_rtap_get, lbs_rtap_set );
383static ssize_t lbs_mesh_get(struct device *dev, 383static ssize_t lbs_mesh_get(struct device *dev,
384 struct device_attribute *attr, char * buf) 384 struct device_attribute *attr, char * buf)
385{ 385{
386 struct lbs_private *priv = netdev_priv(to_net_dev(dev)); 386 struct lbs_private *priv = to_net_dev(dev)->ml_priv;
387 return snprintf(buf, 5, "0x%X\n", !!priv->mesh_dev); 387 return snprintf(buf, 5, "0x%X\n", !!priv->mesh_dev);
388} 388}
389 389
@@ -393,7 +393,7 @@ static ssize_t lbs_mesh_get(struct device *dev,
393static ssize_t lbs_mesh_set(struct device *dev, 393static ssize_t lbs_mesh_set(struct device *dev,
394 struct device_attribute *attr, const char * buf, size_t count) 394 struct device_attribute *attr, const char * buf, size_t count)
395{ 395{
396 struct lbs_private *priv = netdev_priv(to_net_dev(dev)); 396 struct lbs_private *priv = to_net_dev(dev)->ml_priv;
397 int enable; 397 int enable;
398 int ret, action = CMD_ACT_MESH_CONFIG_STOP; 398 int ret, action = CMD_ACT_MESH_CONFIG_STOP;
399 399
@@ -452,7 +452,7 @@ static struct attribute_group lbs_mesh_attr_group = {
452 */ 452 */
453static int lbs_dev_open(struct net_device *dev) 453static int lbs_dev_open(struct net_device *dev)
454{ 454{
455 struct lbs_private *priv = netdev_priv(dev) ; 455 struct lbs_private *priv = dev->ml_priv;
456 int ret = 0; 456 int ret = 0;
457 457
458 lbs_deb_enter(LBS_DEB_NET); 458 lbs_deb_enter(LBS_DEB_NET);
@@ -521,7 +521,7 @@ static int lbs_mesh_stop(struct net_device *dev)
521 */ 521 */
522static int lbs_eth_stop(struct net_device *dev) 522static int lbs_eth_stop(struct net_device *dev)
523{ 523{
524 struct lbs_private *priv = netdev_priv(dev); 524 struct lbs_private *priv = dev->ml_priv;
525 525
526 lbs_deb_enter(LBS_DEB_NET); 526 lbs_deb_enter(LBS_DEB_NET);
527 527
@@ -538,7 +538,7 @@ static int lbs_eth_stop(struct net_device *dev)
538 538
539static void lbs_tx_timeout(struct net_device *dev) 539static void lbs_tx_timeout(struct net_device *dev)
540{ 540{
541 struct lbs_private *priv = netdev_priv(dev); 541 struct lbs_private *priv = dev->ml_priv;
542 542
543 lbs_deb_enter(LBS_DEB_TX); 543 lbs_deb_enter(LBS_DEB_TX);
544 544
@@ -590,7 +590,7 @@ EXPORT_SYMBOL_GPL(lbs_host_to_card_done);
590 */ 590 */
591static struct net_device_stats *lbs_get_stats(struct net_device *dev) 591static struct net_device_stats *lbs_get_stats(struct net_device *dev)
592{ 592{
593 struct lbs_private *priv = netdev_priv(dev); 593 struct lbs_private *priv = dev->ml_priv;
594 594
595 lbs_deb_enter(LBS_DEB_NET); 595 lbs_deb_enter(LBS_DEB_NET);
596 return &priv->stats; 596 return &priv->stats;
@@ -599,7 +599,7 @@ static struct net_device_stats *lbs_get_stats(struct net_device *dev)
599static int lbs_set_mac_address(struct net_device *dev, void *addr) 599static int lbs_set_mac_address(struct net_device *dev, void *addr)
600{ 600{
601 int ret = 0; 601 int ret = 0;
602 struct lbs_private *priv = netdev_priv(dev); 602 struct lbs_private *priv = dev->ml_priv;
603 struct sockaddr *phwaddr = addr; 603 struct sockaddr *phwaddr = addr;
604 struct cmd_ds_802_11_mac_address cmd; 604 struct cmd_ds_802_11_mac_address cmd;
605 605
@@ -732,7 +732,7 @@ static void lbs_set_mcast_worker(struct work_struct *work)
732 732
733static void lbs_set_multicast_list(struct net_device *dev) 733static void lbs_set_multicast_list(struct net_device *dev)
734{ 734{
735 struct lbs_private *priv = netdev_priv(dev); 735 struct lbs_private *priv = dev->ml_priv;
736 736
737 schedule_work(&priv->mcast_work); 737 schedule_work(&priv->mcast_work);
738} 738}
@@ -748,7 +748,7 @@ static void lbs_set_multicast_list(struct net_device *dev)
748static int lbs_thread(void *data) 748static int lbs_thread(void *data)
749{ 749{
750 struct net_device *dev = data; 750 struct net_device *dev = data;
751 struct lbs_private *priv = netdev_priv(dev); 751 struct lbs_private *priv = dev->ml_priv;
752 wait_queue_t wait; 752 wait_queue_t wait;
753 753
754 lbs_deb_enter(LBS_DEB_THREAD); 754 lbs_deb_enter(LBS_DEB_THREAD);
@@ -1184,6 +1184,7 @@ struct lbs_private *lbs_add_card(void *card, struct device *dmdev)
1184 goto done; 1184 goto done;
1185 } 1185 }
1186 priv = netdev_priv(dev); 1186 priv = netdev_priv(dev);
1187 dev->ml_priv = priv;
1187 1188
1188 if (lbs_init_adapter(priv)) { 1189 if (lbs_init_adapter(priv)) {
1189 lbs_pr_err("failed to initialize adapter structure.\n"); 1190 lbs_pr_err("failed to initialize adapter structure.\n");
diff --git a/drivers/net/wireless/libertas/persistcfg.c b/drivers/net/wireless/libertas/persistcfg.c
index d42b7a5a1b3f..18fe29faf99b 100644
--- a/drivers/net/wireless/libertas/persistcfg.c
+++ b/drivers/net/wireless/libertas/persistcfg.c
@@ -18,7 +18,7 @@
18static int mesh_get_default_parameters(struct device *dev, 18static int mesh_get_default_parameters(struct device *dev,
19 struct mrvl_mesh_defaults *defs) 19 struct mrvl_mesh_defaults *defs)
20{ 20{
21 struct lbs_private *priv = netdev_priv(to_net_dev(dev)); 21 struct lbs_private *priv = to_net_dev(dev)->ml_priv;
22 struct cmd_ds_mesh_config cmd; 22 struct cmd_ds_mesh_config cmd;
23 int ret; 23 int ret;
24 24
@@ -57,7 +57,7 @@ static ssize_t bootflag_get(struct device *dev,
57static ssize_t bootflag_set(struct device *dev, struct device_attribute *attr, 57static ssize_t bootflag_set(struct device *dev, struct device_attribute *attr,
58 const char *buf, size_t count) 58 const char *buf, size_t count)
59{ 59{
60 struct lbs_private *priv = netdev_priv(to_net_dev(dev)); 60 struct lbs_private *priv = to_net_dev(dev)->ml_priv;
61 struct cmd_ds_mesh_config cmd; 61 struct cmd_ds_mesh_config cmd;
62 uint32_t datum; 62 uint32_t datum;
63 int ret; 63 int ret;
@@ -100,7 +100,7 @@ static ssize_t boottime_get(struct device *dev,
100static ssize_t boottime_set(struct device *dev, 100static ssize_t boottime_set(struct device *dev,
101 struct device_attribute *attr, const char *buf, size_t count) 101 struct device_attribute *attr, const char *buf, size_t count)
102{ 102{
103 struct lbs_private *priv = netdev_priv(to_net_dev(dev)); 103 struct lbs_private *priv = to_net_dev(dev)->ml_priv;
104 struct cmd_ds_mesh_config cmd; 104 struct cmd_ds_mesh_config cmd;
105 uint32_t datum; 105 uint32_t datum;
106 int ret; 106 int ret;
@@ -152,7 +152,7 @@ static ssize_t channel_get(struct device *dev,
152static ssize_t channel_set(struct device *dev, struct device_attribute *attr, 152static ssize_t channel_set(struct device *dev, struct device_attribute *attr,
153 const char *buf, size_t count) 153 const char *buf, size_t count)
154{ 154{
155 struct lbs_private *priv = netdev_priv(to_net_dev(dev)); 155 struct lbs_private *priv = to_net_dev(dev)->ml_priv;
156 struct cmd_ds_mesh_config cmd; 156 struct cmd_ds_mesh_config cmd;
157 uint32_t datum; 157 uint32_t datum;
158 int ret; 158 int ret;
@@ -210,7 +210,7 @@ static ssize_t mesh_id_set(struct device *dev, struct device_attribute *attr,
210 struct cmd_ds_mesh_config cmd; 210 struct cmd_ds_mesh_config cmd;
211 struct mrvl_mesh_defaults defs; 211 struct mrvl_mesh_defaults defs;
212 struct mrvl_meshie *ie; 212 struct mrvl_meshie *ie;
213 struct lbs_private *priv = netdev_priv(to_net_dev(dev)); 213 struct lbs_private *priv = to_net_dev(dev)->ml_priv;
214 int len; 214 int len;
215 int ret; 215 int ret;
216 216
@@ -269,7 +269,7 @@ static ssize_t protocol_id_set(struct device *dev,
269 struct cmd_ds_mesh_config cmd; 269 struct cmd_ds_mesh_config cmd;
270 struct mrvl_mesh_defaults defs; 270 struct mrvl_mesh_defaults defs;
271 struct mrvl_meshie *ie; 271 struct mrvl_meshie *ie;
272 struct lbs_private *priv = netdev_priv(to_net_dev(dev)); 272 struct lbs_private *priv = to_net_dev(dev)->ml_priv;
273 uint32_t datum; 273 uint32_t datum;
274 int ret; 274 int ret;
275 275
@@ -323,7 +323,7 @@ static ssize_t metric_id_set(struct device *dev, struct device_attribute *attr,
323 struct cmd_ds_mesh_config cmd; 323 struct cmd_ds_mesh_config cmd;
324 struct mrvl_mesh_defaults defs; 324 struct mrvl_mesh_defaults defs;
325 struct mrvl_meshie *ie; 325 struct mrvl_meshie *ie;
326 struct lbs_private *priv = netdev_priv(to_net_dev(dev)); 326 struct lbs_private *priv = to_net_dev(dev)->ml_priv;
327 uint32_t datum; 327 uint32_t datum;
328 int ret; 328 int ret;
329 329
@@ -377,7 +377,7 @@ static ssize_t capability_set(struct device *dev, struct device_attribute *attr,
377 struct cmd_ds_mesh_config cmd; 377 struct cmd_ds_mesh_config cmd;
378 struct mrvl_mesh_defaults defs; 378 struct mrvl_mesh_defaults defs;
379 struct mrvl_meshie *ie; 379 struct mrvl_meshie *ie;
380 struct lbs_private *priv = netdev_priv(to_net_dev(dev)); 380 struct lbs_private *priv = to_net_dev(dev)->ml_priv;
381 uint32_t datum; 381 uint32_t datum;
382 int ret; 382 int ret;
383 383
diff --git a/drivers/net/wireless/libertas/scan.c b/drivers/net/wireless/libertas/scan.c
index 57f6c12cda20..9014950f4328 100644
--- a/drivers/net/wireless/libertas/scan.c
+++ b/drivers/net/wireless/libertas/scan.c
@@ -945,7 +945,7 @@ int lbs_set_scan(struct net_device *dev, struct iw_request_info *info,
945 union iwreq_data *wrqu, char *extra) 945 union iwreq_data *wrqu, char *extra)
946{ 946{
947 DECLARE_SSID_BUF(ssid); 947 DECLARE_SSID_BUF(ssid);
948 struct lbs_private *priv = netdev_priv(dev); 948 struct lbs_private *priv = dev->ml_priv;
949 int ret = 0; 949 int ret = 0;
950 950
951 lbs_deb_enter(LBS_DEB_WEXT); 951 lbs_deb_enter(LBS_DEB_WEXT);
@@ -1008,7 +1008,7 @@ int lbs_get_scan(struct net_device *dev, struct iw_request_info *info,
1008 struct iw_point *dwrq, char *extra) 1008 struct iw_point *dwrq, char *extra)
1009{ 1009{
1010#define SCAN_ITEM_SIZE 128 1010#define SCAN_ITEM_SIZE 128
1011 struct lbs_private *priv = netdev_priv(dev); 1011 struct lbs_private *priv = dev->ml_priv;
1012 int err = 0; 1012 int err = 0;
1013 char *ev = extra; 1013 char *ev = extra;
1014 char *stop = ev + dwrq->length; 1014 char *stop = ev + dwrq->length;
diff --git a/drivers/net/wireless/libertas/tx.c b/drivers/net/wireless/libertas/tx.c
index dac462641170..68bec31ae03b 100644
--- a/drivers/net/wireless/libertas/tx.c
+++ b/drivers/net/wireless/libertas/tx.c
@@ -60,7 +60,7 @@ static u32 convert_radiotap_rate_to_mv(u8 rate)
60int lbs_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) 60int lbs_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
61{ 61{
62 unsigned long flags; 62 unsigned long flags;
63 struct lbs_private *priv = netdev_priv(dev); 63 struct lbs_private *priv = dev->ml_priv;
64 struct txpd *txpd; 64 struct txpd *txpd;
65 char *p802x_hdr; 65 char *p802x_hdr;
66 uint16_t pkt_len; 66 uint16_t pkt_len;
diff --git a/drivers/net/wireless/libertas/wext.c b/drivers/net/wireless/libertas/wext.c
index c6102e08179e..f16d136ab4bb 100644
--- a/drivers/net/wireless/libertas/wext.c
+++ b/drivers/net/wireless/libertas/wext.c
@@ -163,7 +163,7 @@ static int lbs_get_name(struct net_device *dev, struct iw_request_info *info,
163static int lbs_get_freq(struct net_device *dev, struct iw_request_info *info, 163static int lbs_get_freq(struct net_device *dev, struct iw_request_info *info,
164 struct iw_freq *fwrq, char *extra) 164 struct iw_freq *fwrq, char *extra)
165{ 165{
166 struct lbs_private *priv = netdev_priv(dev); 166 struct lbs_private *priv = dev->ml_priv;
167 struct chan_freq_power *cfp; 167 struct chan_freq_power *cfp;
168 168
169 lbs_deb_enter(LBS_DEB_WEXT); 169 lbs_deb_enter(LBS_DEB_WEXT);
@@ -189,7 +189,7 @@ static int lbs_get_freq(struct net_device *dev, struct iw_request_info *info,
189static int lbs_get_wap(struct net_device *dev, struct iw_request_info *info, 189static int lbs_get_wap(struct net_device *dev, struct iw_request_info *info,
190 struct sockaddr *awrq, char *extra) 190 struct sockaddr *awrq, char *extra)
191{ 191{
192 struct lbs_private *priv = netdev_priv(dev); 192 struct lbs_private *priv = dev->ml_priv;
193 193
194 lbs_deb_enter(LBS_DEB_WEXT); 194 lbs_deb_enter(LBS_DEB_WEXT);
195 195
@@ -207,7 +207,7 @@ static int lbs_get_wap(struct net_device *dev, struct iw_request_info *info,
207static int lbs_set_nick(struct net_device *dev, struct iw_request_info *info, 207static int lbs_set_nick(struct net_device *dev, struct iw_request_info *info,
208 struct iw_point *dwrq, char *extra) 208 struct iw_point *dwrq, char *extra)
209{ 209{
210 struct lbs_private *priv = netdev_priv(dev); 210 struct lbs_private *priv = dev->ml_priv;
211 211
212 lbs_deb_enter(LBS_DEB_WEXT); 212 lbs_deb_enter(LBS_DEB_WEXT);
213 213
@@ -231,7 +231,7 @@ static int lbs_set_nick(struct net_device *dev, struct iw_request_info *info,
231static int lbs_get_nick(struct net_device *dev, struct iw_request_info *info, 231static int lbs_get_nick(struct net_device *dev, struct iw_request_info *info,
232 struct iw_point *dwrq, char *extra) 232 struct iw_point *dwrq, char *extra)
233{ 233{
234 struct lbs_private *priv = netdev_priv(dev); 234 struct lbs_private *priv = dev->ml_priv;
235 235
236 lbs_deb_enter(LBS_DEB_WEXT); 236 lbs_deb_enter(LBS_DEB_WEXT);
237 237
@@ -248,7 +248,7 @@ static int lbs_get_nick(struct net_device *dev, struct iw_request_info *info,
248static int mesh_get_nick(struct net_device *dev, struct iw_request_info *info, 248static int mesh_get_nick(struct net_device *dev, struct iw_request_info *info,
249 struct iw_point *dwrq, char *extra) 249 struct iw_point *dwrq, char *extra)
250{ 250{
251 struct lbs_private *priv = netdev_priv(dev); 251 struct lbs_private *priv = dev->ml_priv;
252 252
253 lbs_deb_enter(LBS_DEB_WEXT); 253 lbs_deb_enter(LBS_DEB_WEXT);
254 254
@@ -273,7 +273,7 @@ static int lbs_set_rts(struct net_device *dev, struct iw_request_info *info,
273 struct iw_param *vwrq, char *extra) 273 struct iw_param *vwrq, char *extra)
274{ 274{
275 int ret = 0; 275 int ret = 0;
276 struct lbs_private *priv = netdev_priv(dev); 276 struct lbs_private *priv = dev->ml_priv;
277 u32 val = vwrq->value; 277 u32 val = vwrq->value;
278 278
279 lbs_deb_enter(LBS_DEB_WEXT); 279 lbs_deb_enter(LBS_DEB_WEXT);
@@ -293,7 +293,7 @@ static int lbs_set_rts(struct net_device *dev, struct iw_request_info *info,
293static int lbs_get_rts(struct net_device *dev, struct iw_request_info *info, 293static int lbs_get_rts(struct net_device *dev, struct iw_request_info *info,
294 struct iw_param *vwrq, char *extra) 294 struct iw_param *vwrq, char *extra)
295{ 295{
296 struct lbs_private *priv = netdev_priv(dev); 296 struct lbs_private *priv = dev->ml_priv;
297 int ret = 0; 297 int ret = 0;
298 u16 val = 0; 298 u16 val = 0;
299 299
@@ -315,7 +315,7 @@ out:
315static int lbs_set_frag(struct net_device *dev, struct iw_request_info *info, 315static int lbs_set_frag(struct net_device *dev, struct iw_request_info *info,
316 struct iw_param *vwrq, char *extra) 316 struct iw_param *vwrq, char *extra)
317{ 317{
318 struct lbs_private *priv = netdev_priv(dev); 318 struct lbs_private *priv = dev->ml_priv;
319 int ret = 0; 319 int ret = 0;
320 u32 val = vwrq->value; 320 u32 val = vwrq->value;
321 321
@@ -336,7 +336,7 @@ static int lbs_set_frag(struct net_device *dev, struct iw_request_info *info,
336static int lbs_get_frag(struct net_device *dev, struct iw_request_info *info, 336static int lbs_get_frag(struct net_device *dev, struct iw_request_info *info,
337 struct iw_param *vwrq, char *extra) 337 struct iw_param *vwrq, char *extra)
338{ 338{
339 struct lbs_private *priv = netdev_priv(dev); 339 struct lbs_private *priv = dev->ml_priv;
340 int ret = 0; 340 int ret = 0;
341 u16 val = 0; 341 u16 val = 0;
342 342
@@ -359,7 +359,7 @@ out:
359static int lbs_get_mode(struct net_device *dev, 359static int lbs_get_mode(struct net_device *dev,
360 struct iw_request_info *info, u32 * uwrq, char *extra) 360 struct iw_request_info *info, u32 * uwrq, char *extra)
361{ 361{
362 struct lbs_private *priv = netdev_priv(dev); 362 struct lbs_private *priv = dev->ml_priv;
363 363
364 lbs_deb_enter(LBS_DEB_WEXT); 364 lbs_deb_enter(LBS_DEB_WEXT);
365 365
@@ -385,7 +385,7 @@ static int lbs_get_txpow(struct net_device *dev,
385 struct iw_request_info *info, 385 struct iw_request_info *info,
386 struct iw_param *vwrq, char *extra) 386 struct iw_param *vwrq, char *extra)
387{ 387{
388 struct lbs_private *priv = netdev_priv(dev); 388 struct lbs_private *priv = dev->ml_priv;
389 s16 curlevel = 0; 389 s16 curlevel = 0;
390 int ret = 0; 390 int ret = 0;
391 391
@@ -418,7 +418,7 @@ out:
418static int lbs_set_retry(struct net_device *dev, struct iw_request_info *info, 418static int lbs_set_retry(struct net_device *dev, struct iw_request_info *info,
419 struct iw_param *vwrq, char *extra) 419 struct iw_param *vwrq, char *extra)
420{ 420{
421 struct lbs_private *priv = netdev_priv(dev); 421 struct lbs_private *priv = dev->ml_priv;
422 int ret = 0; 422 int ret = 0;
423 u16 slimit = 0, llimit = 0; 423 u16 slimit = 0, llimit = 0;
424 424
@@ -466,7 +466,7 @@ out:
466static int lbs_get_retry(struct net_device *dev, struct iw_request_info *info, 466static int lbs_get_retry(struct net_device *dev, struct iw_request_info *info,
467 struct iw_param *vwrq, char *extra) 467 struct iw_param *vwrq, char *extra)
468{ 468{
469 struct lbs_private *priv = netdev_priv(dev); 469 struct lbs_private *priv = dev->ml_priv;
470 int ret = 0; 470 int ret = 0;
471 u16 val = 0; 471 u16 val = 0;
472 472
@@ -542,7 +542,7 @@ static int lbs_get_range(struct net_device *dev, struct iw_request_info *info,
542 struct iw_point *dwrq, char *extra) 542 struct iw_point *dwrq, char *extra)
543{ 543{
544 int i, j; 544 int i, j;
545 struct lbs_private *priv = netdev_priv(dev); 545 struct lbs_private *priv = dev->ml_priv;
546 struct iw_range *range = (struct iw_range *)extra; 546 struct iw_range *range = (struct iw_range *)extra;
547 struct chan_freq_power *cfp; 547 struct chan_freq_power *cfp;
548 u8 rates[MAX_RATES + 1]; 548 u8 rates[MAX_RATES + 1];
@@ -708,7 +708,7 @@ out:
708static int lbs_set_power(struct net_device *dev, struct iw_request_info *info, 708static int lbs_set_power(struct net_device *dev, struct iw_request_info *info,
709 struct iw_param *vwrq, char *extra) 709 struct iw_param *vwrq, char *extra)
710{ 710{
711 struct lbs_private *priv = netdev_priv(dev); 711 struct lbs_private *priv = dev->ml_priv;
712 712
713 lbs_deb_enter(LBS_DEB_WEXT); 713 lbs_deb_enter(LBS_DEB_WEXT);
714 714
@@ -758,7 +758,7 @@ static int lbs_set_power(struct net_device *dev, struct iw_request_info *info,
758static int lbs_get_power(struct net_device *dev, struct iw_request_info *info, 758static int lbs_get_power(struct net_device *dev, struct iw_request_info *info,
759 struct iw_param *vwrq, char *extra) 759 struct iw_param *vwrq, char *extra)
760{ 760{
761 struct lbs_private *priv = netdev_priv(dev); 761 struct lbs_private *priv = dev->ml_priv;
762 762
763 lbs_deb_enter(LBS_DEB_WEXT); 763 lbs_deb_enter(LBS_DEB_WEXT);
764 764
@@ -781,7 +781,7 @@ static struct iw_statistics *lbs_get_wireless_stats(struct net_device *dev)
781 EXCELLENT = 95, 781 EXCELLENT = 95,
782 PERFECT = 100 782 PERFECT = 100
783 }; 783 };
784 struct lbs_private *priv = netdev_priv(dev); 784 struct lbs_private *priv = dev->ml_priv;
785 u32 rssi_qual; 785 u32 rssi_qual;
786 u32 tx_qual; 786 u32 tx_qual;
787 u32 quality = 0; 787 u32 quality = 0;
@@ -886,7 +886,7 @@ static int lbs_set_freq(struct net_device *dev, struct iw_request_info *info,
886 struct iw_freq *fwrq, char *extra) 886 struct iw_freq *fwrq, char *extra)
887{ 887{
888 int ret = -EINVAL; 888 int ret = -EINVAL;
889 struct lbs_private *priv = netdev_priv(dev); 889 struct lbs_private *priv = dev->ml_priv;
890 struct chan_freq_power *cfp; 890 struct chan_freq_power *cfp;
891 struct assoc_request * assoc_req; 891 struct assoc_request * assoc_req;
892 892
@@ -943,7 +943,7 @@ static int lbs_mesh_set_freq(struct net_device *dev,
943 struct iw_request_info *info, 943 struct iw_request_info *info,
944 struct iw_freq *fwrq, char *extra) 944 struct iw_freq *fwrq, char *extra)
945{ 945{
946 struct lbs_private *priv = netdev_priv(dev); 946 struct lbs_private *priv = dev->ml_priv;
947 struct chan_freq_power *cfp; 947 struct chan_freq_power *cfp;
948 int ret = -EINVAL; 948 int ret = -EINVAL;
949 949
@@ -994,7 +994,7 @@ out:
994static int lbs_set_rate(struct net_device *dev, struct iw_request_info *info, 994static int lbs_set_rate(struct net_device *dev, struct iw_request_info *info,
995 struct iw_param *vwrq, char *extra) 995 struct iw_param *vwrq, char *extra)
996{ 996{
997 struct lbs_private *priv = netdev_priv(dev); 997 struct lbs_private *priv = dev->ml_priv;
998 u8 new_rate = 0; 998 u8 new_rate = 0;
999 int ret = -EINVAL; 999 int ret = -EINVAL;
1000 u8 rates[MAX_RATES + 1]; 1000 u8 rates[MAX_RATES + 1];
@@ -1054,7 +1054,7 @@ out:
1054static int lbs_get_rate(struct net_device *dev, struct iw_request_info *info, 1054static int lbs_get_rate(struct net_device *dev, struct iw_request_info *info,
1055 struct iw_param *vwrq, char *extra) 1055 struct iw_param *vwrq, char *extra)
1056{ 1056{
1057 struct lbs_private *priv = netdev_priv(dev); 1057 struct lbs_private *priv = dev->ml_priv;
1058 1058
1059 lbs_deb_enter(LBS_DEB_WEXT); 1059 lbs_deb_enter(LBS_DEB_WEXT);
1060 1060
@@ -1079,7 +1079,7 @@ static int lbs_set_mode(struct net_device *dev,
1079 struct iw_request_info *info, u32 * uwrq, char *extra) 1079 struct iw_request_info *info, u32 * uwrq, char *extra)
1080{ 1080{
1081 int ret = 0; 1081 int ret = 0;
1082 struct lbs_private *priv = netdev_priv(dev); 1082 struct lbs_private *priv = dev->ml_priv;
1083 struct assoc_request * assoc_req; 1083 struct assoc_request * assoc_req;
1084 1084
1085 lbs_deb_enter(LBS_DEB_WEXT); 1085 lbs_deb_enter(LBS_DEB_WEXT);
@@ -1124,7 +1124,7 @@ static int lbs_get_encode(struct net_device *dev,
1124 struct iw_request_info *info, 1124 struct iw_request_info *info,
1125 struct iw_point *dwrq, u8 * extra) 1125 struct iw_point *dwrq, u8 * extra)
1126{ 1126{
1127 struct lbs_private *priv = netdev_priv(dev); 1127 struct lbs_private *priv = dev->ml_priv;
1128 int index = (dwrq->flags & IW_ENCODE_INDEX) - 1; 1128 int index = (dwrq->flags & IW_ENCODE_INDEX) - 1;
1129 1129
1130 lbs_deb_enter(LBS_DEB_WEXT); 1130 lbs_deb_enter(LBS_DEB_WEXT);
@@ -1319,7 +1319,7 @@ static int lbs_set_encode(struct net_device *dev,
1319 struct iw_point *dwrq, char *extra) 1319 struct iw_point *dwrq, char *extra)
1320{ 1320{
1321 int ret = 0; 1321 int ret = 0;
1322 struct lbs_private *priv = netdev_priv(dev); 1322 struct lbs_private *priv = dev->ml_priv;
1323 struct assoc_request * assoc_req; 1323 struct assoc_request * assoc_req;
1324 u16 is_default = 0, index = 0, set_tx_key = 0; 1324 u16 is_default = 0, index = 0, set_tx_key = 0;
1325 1325
@@ -1395,7 +1395,7 @@ static int lbs_get_encodeext(struct net_device *dev,
1395 char *extra) 1395 char *extra)
1396{ 1396{
1397 int ret = -EINVAL; 1397 int ret = -EINVAL;
1398 struct lbs_private *priv = netdev_priv(dev); 1398 struct lbs_private *priv = dev->ml_priv;
1399 struct iw_encode_ext *ext = (struct iw_encode_ext *)extra; 1399 struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
1400 int index, max_key_len; 1400 int index, max_key_len;
1401 1401
@@ -1501,7 +1501,7 @@ static int lbs_set_encodeext(struct net_device *dev,
1501 char *extra) 1501 char *extra)
1502{ 1502{
1503 int ret = 0; 1503 int ret = 0;
1504 struct lbs_private *priv = netdev_priv(dev); 1504 struct lbs_private *priv = dev->ml_priv;
1505 struct iw_encode_ext *ext = (struct iw_encode_ext *)extra; 1505 struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
1506 int alg = ext->alg; 1506 int alg = ext->alg;
1507 struct assoc_request * assoc_req; 1507 struct assoc_request * assoc_req;
@@ -1639,7 +1639,7 @@ static int lbs_set_genie(struct net_device *dev,
1639 struct iw_point *dwrq, 1639 struct iw_point *dwrq,
1640 char *extra) 1640 char *extra)
1641{ 1641{
1642 struct lbs_private *priv = netdev_priv(dev); 1642 struct lbs_private *priv = dev->ml_priv;
1643 int ret = 0; 1643 int ret = 0;
1644 struct assoc_request * assoc_req; 1644 struct assoc_request * assoc_req;
1645 1645
@@ -1685,7 +1685,7 @@ static int lbs_get_genie(struct net_device *dev,
1685 char *extra) 1685 char *extra)
1686{ 1686{
1687 int ret = 0; 1687 int ret = 0;
1688 struct lbs_private *priv = netdev_priv(dev); 1688 struct lbs_private *priv = dev->ml_priv;
1689 1689
1690 lbs_deb_enter(LBS_DEB_WEXT); 1690 lbs_deb_enter(LBS_DEB_WEXT);
1691 1691
@@ -1713,7 +1713,7 @@ static int lbs_set_auth(struct net_device *dev,
1713 struct iw_param *dwrq, 1713 struct iw_param *dwrq,
1714 char *extra) 1714 char *extra)
1715{ 1715{
1716 struct lbs_private *priv = netdev_priv(dev); 1716 struct lbs_private *priv = dev->ml_priv;
1717 struct assoc_request * assoc_req; 1717 struct assoc_request * assoc_req;
1718 int ret = 0; 1718 int ret = 0;
1719 int updated = 0; 1719 int updated = 0;
@@ -1816,7 +1816,7 @@ static int lbs_get_auth(struct net_device *dev,
1816 char *extra) 1816 char *extra)
1817{ 1817{
1818 int ret = 0; 1818 int ret = 0;
1819 struct lbs_private *priv = netdev_priv(dev); 1819 struct lbs_private *priv = dev->ml_priv;
1820 1820
1821 lbs_deb_enter(LBS_DEB_WEXT); 1821 lbs_deb_enter(LBS_DEB_WEXT);
1822 1822
@@ -1857,7 +1857,7 @@ static int lbs_set_txpow(struct net_device *dev, struct iw_request_info *info,
1857 struct iw_param *vwrq, char *extra) 1857 struct iw_param *vwrq, char *extra)
1858{ 1858{
1859 int ret = 0; 1859 int ret = 0;
1860 struct lbs_private *priv = netdev_priv(dev); 1860 struct lbs_private *priv = dev->ml_priv;
1861 s16 dbm = (s16) vwrq->value; 1861 s16 dbm = (s16) vwrq->value;
1862 1862
1863 lbs_deb_enter(LBS_DEB_WEXT); 1863 lbs_deb_enter(LBS_DEB_WEXT);
@@ -1936,7 +1936,7 @@ out:
1936static int lbs_get_essid(struct net_device *dev, struct iw_request_info *info, 1936static int lbs_get_essid(struct net_device *dev, struct iw_request_info *info,
1937 struct iw_point *dwrq, char *extra) 1937 struct iw_point *dwrq, char *extra)
1938{ 1938{
1939 struct lbs_private *priv = netdev_priv(dev); 1939 struct lbs_private *priv = dev->ml_priv;
1940 1940
1941 lbs_deb_enter(LBS_DEB_WEXT); 1941 lbs_deb_enter(LBS_DEB_WEXT);
1942 1942
@@ -1971,7 +1971,7 @@ static int lbs_get_essid(struct net_device *dev, struct iw_request_info *info,
1971static int lbs_set_essid(struct net_device *dev, struct iw_request_info *info, 1971static int lbs_set_essid(struct net_device *dev, struct iw_request_info *info,
1972 struct iw_point *dwrq, char *extra) 1972 struct iw_point *dwrq, char *extra)
1973{ 1973{
1974 struct lbs_private *priv = netdev_priv(dev); 1974 struct lbs_private *priv = dev->ml_priv;
1975 int ret = 0; 1975 int ret = 0;
1976 u8 ssid[IW_ESSID_MAX_SIZE]; 1976 u8 ssid[IW_ESSID_MAX_SIZE];
1977 u8 ssid_len = 0; 1977 u8 ssid_len = 0;
@@ -2040,7 +2040,7 @@ static int lbs_mesh_get_essid(struct net_device *dev,
2040 struct iw_request_info *info, 2040 struct iw_request_info *info,
2041 struct iw_point *dwrq, char *extra) 2041 struct iw_point *dwrq, char *extra)
2042{ 2042{
2043 struct lbs_private *priv = netdev_priv(dev); 2043 struct lbs_private *priv = dev->ml_priv;
2044 2044
2045 lbs_deb_enter(LBS_DEB_WEXT); 2045 lbs_deb_enter(LBS_DEB_WEXT);
2046 2046
@@ -2058,7 +2058,7 @@ static int lbs_mesh_set_essid(struct net_device *dev,
2058 struct iw_request_info *info, 2058 struct iw_request_info *info,
2059 struct iw_point *dwrq, char *extra) 2059 struct iw_point *dwrq, char *extra)
2060{ 2060{
2061 struct lbs_private *priv = netdev_priv(dev); 2061 struct lbs_private *priv = dev->ml_priv;
2062 int ret = 0; 2062 int ret = 0;
2063 2063
2064 lbs_deb_enter(LBS_DEB_WEXT); 2064 lbs_deb_enter(LBS_DEB_WEXT);
@@ -2102,7 +2102,7 @@ static int lbs_mesh_set_essid(struct net_device *dev,
2102static int lbs_set_wap(struct net_device *dev, struct iw_request_info *info, 2102static int lbs_set_wap(struct net_device *dev, struct iw_request_info *info,
2103 struct sockaddr *awrq, char *extra) 2103 struct sockaddr *awrq, char *extra)
2104{ 2104{
2105 struct lbs_private *priv = netdev_priv(dev); 2105 struct lbs_private *priv = dev->ml_priv;
2106 struct assoc_request * assoc_req; 2106 struct assoc_request * assoc_req;
2107 int ret = 0; 2107 int ret = 0;
2108 2108
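The libertas hunks above replace netdev_priv(dev) with dev->ml_priv in every wext handler. The point of ml_priv is that it is only a pointer, so several net_devices (for instance a main and a mesh interface) can aim it at one shared private structure, whereas netdev_priv() always resolves to storage embedded in that particular net_device. A minimal sketch of the distinction, with illustrative names (example_priv, example_create_pair) that are not from the driver, and with registration and teardown omitted:

/* Minimal sketch, illustrative names only: sharing one private struct
 * between two net_devices via ml_priv. Not complete driver code. */
#include <linux/netdevice.h>
#include <linux/etherdevice.h>

struct example_priv {
	int state;
};

static struct net_device *example_create_pair(void)
{
	struct net_device *dev, *mesh_dev;
	struct example_priv *priv;

	dev = alloc_etherdev(sizeof(*priv));
	if (!dev)
		return NULL;
	priv = netdev_priv(dev);	/* private area embedded in 'dev' */
	dev->ml_priv = priv;		/* handlers read dev->ml_priv instead */

	mesh_dev = alloc_etherdev(0);	/* second interface, no private area */
	if (mesh_dev)
		mesh_dev->ml_priv = priv;	/* both point at the same struct */

	return dev;			/* mesh_dev registration/cleanup omitted */
}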
diff --git a/drivers/net/wireless/orinoco/orinoco.c b/drivers/net/wireless/orinoco/orinoco.c
index 45a04faa7818..067d1a9c728b 100644
--- a/drivers/net/wireless/orinoco/orinoco.c
+++ b/drivers/net/wireless/orinoco/orinoco.c
@@ -3157,8 +3157,20 @@ static int orinoco_pm_notifier(struct notifier_block *notifier,
3157 3157
3158 return NOTIFY_DONE; 3158 return NOTIFY_DONE;
3159} 3159}
3160
3161static void orinoco_register_pm_notifier(struct orinoco_private *priv)
3162{
3163 priv->pm_notifier.notifier_call = orinoco_pm_notifier;
3164 register_pm_notifier(&priv->pm_notifier);
3165}
3166
3167static void orinoco_unregister_pm_notifier(struct orinoco_private *priv)
3168{
3169 unregister_pm_notifier(&priv->pm_notifier);
3170}
3160#else /* !PM_SLEEP || HERMES_CACHE_FW_ON_INIT */ 3171#else /* !PM_SLEEP || HERMES_CACHE_FW_ON_INIT */
3161#define orinoco_pm_notifier NULL 3172#define orinoco_register_pm_notifier(priv) do { } while(0)
3173#define orinoco_unregister_pm_notifier(priv) do { } while(0)
3162#endif 3174#endif
3163 3175
3164/********************************************************************/ 3176/********************************************************************/
@@ -3648,8 +3660,7 @@ struct net_device
3648 priv->cached_fw = NULL; 3660 priv->cached_fw = NULL;
3649 3661
3650 /* Register PM notifiers */ 3662 /* Register PM notifiers */
3651 priv->pm_notifier.notifier_call = orinoco_pm_notifier; 3663 orinoco_register_pm_notifier(priv);
3652 register_pm_notifier(&priv->pm_notifier);
3653 3664
3654 return dev; 3665 return dev;
3655} 3666}
@@ -3673,7 +3684,7 @@ void free_orinocodev(struct net_device *dev)
3673 kfree(rx_data); 3684 kfree(rx_data);
3674 } 3685 }
3675 3686
3676 unregister_pm_notifier(&priv->pm_notifier); 3687 orinoco_unregister_pm_notifier(priv);
3677 orinoco_uncache_fw(priv); 3688 orinoco_uncache_fw(priv);
3678 3689
3679 priv->wpa_ie_len = 0; 3690 priv->wpa_ie_len = 0;
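The orinoco hunks above fold the PM notifier registration into helpers that exist only when the PM/firmware-caching configuration is active and collapse to no-ops otherwise, so the call sites in the allocation and free paths need no #ifdefs. A hedged sketch of that pattern with illustrative foo_* names (the real driver also keys on HERMES_CACHE_FW_ON_INIT and caches firmware in its notifier):

/* Sketch of the conditional-helper pattern; foo_* names are illustrative. */
#include <linux/notifier.h>
#include <linux/suspend.h>

struct foo_priv {
	struct notifier_block pm_notifier;
};

#ifdef CONFIG_PM_SLEEP
static int foo_pm_notifier(struct notifier_block *nb,
			   unsigned long event, void *unused)
{
	return NOTIFY_DONE;	/* the real notifier caches firmware around hibernation */
}

static void foo_register_pm_notifier(struct foo_priv *priv)
{
	priv->pm_notifier.notifier_call = foo_pm_notifier;
	register_pm_notifier(&priv->pm_notifier);
}

static void foo_unregister_pm_notifier(struct foo_priv *priv)
{
	unregister_pm_notifier(&priv->pm_notifier);
}
#else
#define foo_register_pm_notifier(priv)   do { } while (0)
#define foo_unregister_pm_notifier(priv) do { } while (0)
#endif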
diff --git a/drivers/net/wireless/rtl818x/rtl8187_dev.c b/drivers/net/wireless/rtl818x/rtl8187_dev.c
index 22bc07ef2f37..f4747a1134ba 100644
--- a/drivers/net/wireless/rtl818x/rtl8187_dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8187_dev.c
@@ -48,6 +48,10 @@ static struct usb_device_id rtl8187_table[] __devinitdata = {
48 {USB_DEVICE(0x0bda, 0x8189), .driver_info = DEVICE_RTL8187B}, 48 {USB_DEVICE(0x0bda, 0x8189), .driver_info = DEVICE_RTL8187B},
49 {USB_DEVICE(0x0bda, 0x8197), .driver_info = DEVICE_RTL8187B}, 49 {USB_DEVICE(0x0bda, 0x8197), .driver_info = DEVICE_RTL8187B},
50 {USB_DEVICE(0x0bda, 0x8198), .driver_info = DEVICE_RTL8187B}, 50 {USB_DEVICE(0x0bda, 0x8198), .driver_info = DEVICE_RTL8187B},
51 /* Surecom */
52 {USB_DEVICE(0x0769, 0x11F2), .driver_info = DEVICE_RTL8187},
53 /* Logitech */
54 {USB_DEVICE(0x0789, 0x010C), .driver_info = DEVICE_RTL8187},
51 /* Netgear */ 55 /* Netgear */
52 {USB_DEVICE(0x0846, 0x6100), .driver_info = DEVICE_RTL8187}, 56 {USB_DEVICE(0x0846, 0x6100), .driver_info = DEVICE_RTL8187},
53 {USB_DEVICE(0x0846, 0x6a00), .driver_info = DEVICE_RTL8187}, 57 {USB_DEVICE(0x0846, 0x6a00), .driver_info = DEVICE_RTL8187},
@@ -57,8 +61,16 @@ static struct usb_device_id rtl8187_table[] __devinitdata = {
57 /* Sitecom */ 61 /* Sitecom */
58 {USB_DEVICE(0x0df6, 0x000d), .driver_info = DEVICE_RTL8187}, 62 {USB_DEVICE(0x0df6, 0x000d), .driver_info = DEVICE_RTL8187},
59 {USB_DEVICE(0x0df6, 0x0028), .driver_info = DEVICE_RTL8187B}, 63 {USB_DEVICE(0x0df6, 0x0028), .driver_info = DEVICE_RTL8187B},
64 /* Sphairon Access Systems GmbH */
65 {USB_DEVICE(0x114B, 0x0150), .driver_info = DEVICE_RTL8187},
66 /* Dick Smith Electronics */
67 {USB_DEVICE(0x1371, 0x9401), .driver_info = DEVICE_RTL8187},
60 /* Abocom */ 68 /* Abocom */
61 {USB_DEVICE(0x13d1, 0xabe6), .driver_info = DEVICE_RTL8187}, 69 {USB_DEVICE(0x13d1, 0xabe6), .driver_info = DEVICE_RTL8187},
70 /* Qcom */
71 {USB_DEVICE(0x18E8, 0x6232), .driver_info = DEVICE_RTL8187},
72 /* AirLive */
73 {USB_DEVICE(0x1b75, 0x8187), .driver_info = DEVICE_RTL8187},
62 {} 74 {}
63}; 75};
64 76
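The rtl8187 hunk above only adds entries to the USB id match table; each new line is one vendor/product pair tagged with the chip variant in driver_info. For reference, a stripped-down sketch of how such a table is declared and announced to the module loader (example_table and DEVICE_VARIANT_A are illustrative names, not the driver's):

/* Stripped-down sketch of a USB id table; not the full rtl8187 table. */
#include <linux/module.h>
#include <linux/usb.h>

#define DEVICE_VARIANT_A 0	/* stands in for DEVICE_RTL8187 etc. */

static struct usb_device_id example_table[] = {
	/* one entry per supported vendor:product pair */
	{ USB_DEVICE(0x0bda, 0x8187), .driver_info = DEVICE_VARIANT_A },
	{ }	/* all-zero terminator required by the USB core */
};
MODULE_DEVICE_TABLE(usb, example_table);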
diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c
index 519f5f91e765..5f333403c2ea 100644
--- a/drivers/pci/dmar.c
+++ b/drivers/pci/dmar.c
@@ -332,6 +332,14 @@ parse_dmar_table(void)
332 entry_header = (struct acpi_dmar_header *)(dmar + 1); 332 entry_header = (struct acpi_dmar_header *)(dmar + 1);
333 while (((unsigned long)entry_header) < 333 while (((unsigned long)entry_header) <
334 (((unsigned long)dmar) + dmar_tbl->length)) { 334 (((unsigned long)dmar) + dmar_tbl->length)) {
335 /* Avoid looping forever on bad ACPI tables */
336 if (entry_header->length == 0) {
337 printk(KERN_WARNING PREFIX
338 "Invalid 0-length structure\n");
339 ret = -EINVAL;
340 break;
341 }
342
335 dmar_table_print_dmar_entry(entry_header); 343 dmar_table_print_dmar_entry(entry_header);
336 344
337 switch (entry_header->type) { 345 switch (entry_header->type) {
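The guard added above keeps parse_dmar_table() from spinning forever when a remapping structure advertises length 0, since the loop advances by entry_header->length on every pass. A standalone worked sketch of the same defensive walk over variable-length subtables (simplified structure layout, not the ACPI definitions):

/* Plain C sketch of a guarded walk over variable-length entries. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct entry_hdr {		/* simplified stand-in for acpi_dmar_header */
	uint16_t type;
	uint16_t length;	/* length of this entry, including the header */
};

static int walk_entries(const uint8_t *buf, size_t total)
{
	size_t off = 0;

	while (off + sizeof(struct entry_hdr) <= total) {
		struct entry_hdr h;

		memcpy(&h, buf + off, sizeof(h));	/* avoid alignment assumptions */
		if (h.length == 0) {
			fprintf(stderr, "invalid 0-length structure\n");
			return -1;		/* bail out instead of looping forever */
		}
		off += h.length;		/* advance by the entry's own length */
	}
	return 0;
}

int main(void)
{
	uint8_t bogus[8];

	memset(bogus, 0, sizeof(bogus));	/* a zeroed header: length == 0 */
	return walk_entries(bogus, sizeof(bogus)) ? 1 : 0;
}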
@@ -494,7 +502,7 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
494 int map_size; 502 int map_size;
495 u32 ver; 503 u32 ver;
496 static int iommu_allocated = 0; 504 static int iommu_allocated = 0;
497 int agaw; 505 int agaw = 0;
498 506
499 iommu = kzalloc(sizeof(*iommu), GFP_KERNEL); 507 iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
500 if (!iommu) 508 if (!iommu)
@@ -510,6 +518,7 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
510 iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG); 518 iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
511 iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG); 519 iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);
512 520
521#ifdef CONFIG_DMAR
513 agaw = iommu_calculate_agaw(iommu); 522 agaw = iommu_calculate_agaw(iommu);
514 if (agaw < 0) { 523 if (agaw < 0) {
515 printk(KERN_ERR 524 printk(KERN_ERR
@@ -517,6 +526,7 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
517 iommu->seq_id); 526 iommu->seq_id);
518 goto error; 527 goto error;
519 } 528 }
529#endif
520 iommu->agaw = agaw; 530 iommu->agaw = agaw;
521 531
522 /* the registers might be more than one page */ 532 /* the registers might be more than one page */
@@ -574,19 +584,49 @@ static inline void reclaim_free_desc(struct q_inval *qi)
574 } 584 }
575} 585}
576 586
587static int qi_check_fault(struct intel_iommu *iommu, int index)
588{
589 u32 fault;
590 int head;
591 struct q_inval *qi = iommu->qi;
592 int wait_index = (index + 1) % QI_LENGTH;
593
594 fault = readl(iommu->reg + DMAR_FSTS_REG);
595
596 /*
597 * If IQE happens, the head points to the descriptor associated
598 * with the error. No new descriptors are fetched until the IQE
599 * is cleared.
600 */
601 if (fault & DMA_FSTS_IQE) {
602 head = readl(iommu->reg + DMAR_IQH_REG);
603 if ((head >> 4) == index) {
604 memcpy(&qi->desc[index], &qi->desc[wait_index],
605 sizeof(struct qi_desc));
606 __iommu_flush_cache(iommu, &qi->desc[index],
607 sizeof(struct qi_desc));
608 writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
609 return -EINVAL;
610 }
611 }
612
613 return 0;
614}
615
577/* 616/*
578 * Submit the queued invalidation descriptor to the remapping 617 * Submit the queued invalidation descriptor to the remapping
579 * hardware unit and wait for its completion. 618 * hardware unit and wait for its completion.
580 */ 619 */
581void qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu) 620int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
582{ 621{
622 int rc = 0;
583 struct q_inval *qi = iommu->qi; 623 struct q_inval *qi = iommu->qi;
584 struct qi_desc *hw, wait_desc; 624 struct qi_desc *hw, wait_desc;
585 int wait_index, index; 625 int wait_index, index;
586 unsigned long flags; 626 unsigned long flags;
587 627
588 if (!qi) 628 if (!qi)
589 return; 629 return 0;
590 630
591 hw = qi->desc; 631 hw = qi->desc;
592 632
@@ -604,7 +644,8 @@ void qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
604 644
605 hw[index] = *desc; 645 hw[index] = *desc;
606 646
607 wait_desc.low = QI_IWD_STATUS_DATA(2) | QI_IWD_STATUS_WRITE | QI_IWD_TYPE; 647 wait_desc.low = QI_IWD_STATUS_DATA(QI_DONE) |
648 QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
608 wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]); 649 wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]);
609 650
610 hw[wait_index] = wait_desc; 651 hw[wait_index] = wait_desc;
@@ -615,13 +656,11 @@ void qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
615 qi->free_head = (qi->free_head + 2) % QI_LENGTH; 656 qi->free_head = (qi->free_head + 2) % QI_LENGTH;
616 qi->free_cnt -= 2; 657 qi->free_cnt -= 2;
617 658
618 spin_lock(&iommu->register_lock);
619 /* 659 /*
620 * update the HW tail register indicating the presence of 660 * update the HW tail register indicating the presence of
621 * new descriptors. 661 * new descriptors.
622 */ 662 */
623 writel(qi->free_head << 4, iommu->reg + DMAR_IQT_REG); 663 writel(qi->free_head << 4, iommu->reg + DMAR_IQT_REG);
624 spin_unlock(&iommu->register_lock);
625 664
626 while (qi->desc_status[wait_index] != QI_DONE) { 665 while (qi->desc_status[wait_index] != QI_DONE) {
627 /* 666 /*
@@ -631,15 +670,21 @@ void qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
631 * a deadlock where the interrupt context can wait indefinitely 670 * a deadlock where the interrupt context can wait indefinitely
632 * for free slots in the queue. 671 * for free slots in the queue.
633 */ 672 */
673 rc = qi_check_fault(iommu, index);
674 if (rc)
675 goto out;
676
634 spin_unlock(&qi->q_lock); 677 spin_unlock(&qi->q_lock);
635 cpu_relax(); 678 cpu_relax();
636 spin_lock(&qi->q_lock); 679 spin_lock(&qi->q_lock);
637 } 680 }
638 681out:
639 qi->desc_status[index] = QI_DONE; 682 qi->desc_status[index] = qi->desc_status[wait_index] = QI_DONE;
640 683
641 reclaim_free_desc(qi); 684 reclaim_free_desc(qi);
642 spin_unlock_irqrestore(&qi->q_lock, flags); 685 spin_unlock_irqrestore(&qi->q_lock, flags);
686
687 return rc;
643} 688}
644 689
645/* 690/*
@@ -652,13 +697,13 @@ void qi_global_iec(struct intel_iommu *iommu)
652 desc.low = QI_IEC_TYPE; 697 desc.low = QI_IEC_TYPE;
653 desc.high = 0; 698 desc.high = 0;
654 699
700 /* should never fail */
655 qi_submit_sync(&desc, iommu); 701 qi_submit_sync(&desc, iommu);
656} 702}
657 703
658int qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm, 704int qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
659 u64 type, int non_present_entry_flush) 705 u64 type, int non_present_entry_flush)
660{ 706{
661
662 struct qi_desc desc; 707 struct qi_desc desc;
663 708
664 if (non_present_entry_flush) { 709 if (non_present_entry_flush) {
@@ -672,10 +717,7 @@ int qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
672 | QI_CC_GRAN(type) | QI_CC_TYPE; 717 | QI_CC_GRAN(type) | QI_CC_TYPE;
673 desc.high = 0; 718 desc.high = 0;
674 719
675 qi_submit_sync(&desc, iommu); 720 return qi_submit_sync(&desc, iommu);
676
677 return 0;
678
679} 721}
680 722
681int qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr, 723int qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
@@ -705,10 +747,7 @@ int qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
705 desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih) 747 desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
706 | QI_IOTLB_AM(size_order); 748 | QI_IOTLB_AM(size_order);
707 749
708 qi_submit_sync(&desc, iommu); 750 return qi_submit_sync(&desc, iommu);
709
710 return 0;
711
712} 751}
713 752
714/* 753/*
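The dmar.c changes above give qi_submit_sync() an int return so a hardware-reported Invalidation Queue Error (qi_check_fault() seeing DMA_FSTS_IQE) can be passed back to the submitter, and qi_flush_context()/qi_flush_iotlb() now simply return that result instead of always claiming success. A hedged sketch of a caller using the new contract; the helper name and the warning text are illustrative, not from the patch:

/* Illustrative caller of the now int-returning flush helper. */
#include <linux/intel-iommu.h>
#include <linux/kernel.h>

static int example_flush_context(struct intel_iommu *iommu, u16 did, u64 type)
{
	int rc;

	/* qi_flush_context() now returns qi_submit_sync()'s result */
	rc = qi_flush_context(iommu, did, 0, 0, type, 0);
	if (rc)
		printk(KERN_WARNING
		       "DMAR: queued context invalidation failed (%d)\n", rc);
	return rc;
}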
diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h
index db85284ffb62..39ae37589fda 100644
--- a/drivers/pci/hotplug/pciehp.h
+++ b/drivers/pci/hotplug/pciehp.h
@@ -111,6 +111,7 @@ struct controller {
111 int cmd_busy; 111 int cmd_busy;
112 unsigned int no_cmd_complete:1; 112 unsigned int no_cmd_complete:1;
113 unsigned int link_active_reporting:1; 113 unsigned int link_active_reporting:1;
114 unsigned int notification_enabled:1;
114}; 115};
115 116
116#define INT_BUTTON_IGNORE 0 117#define INT_BUTTON_IGNORE 0
@@ -170,6 +171,7 @@ extern int pciehp_configure_device(struct slot *p_slot);
170extern int pciehp_unconfigure_device(struct slot *p_slot); 171extern int pciehp_unconfigure_device(struct slot *p_slot);
171extern void pciehp_queue_pushbutton_work(struct work_struct *work); 172extern void pciehp_queue_pushbutton_work(struct work_struct *work);
172struct controller *pcie_init(struct pcie_device *dev); 173struct controller *pcie_init(struct pcie_device *dev);
174int pcie_init_notification(struct controller *ctrl);
173int pciehp_enable_slot(struct slot *p_slot); 175int pciehp_enable_slot(struct slot *p_slot);
174int pciehp_disable_slot(struct slot *p_slot); 176int pciehp_disable_slot(struct slot *p_slot);
175int pcie_enable_notification(struct controller *ctrl); 177int pcie_enable_notification(struct controller *ctrl);
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
index c2485542f543..681e3912b821 100644
--- a/drivers/pci/hotplug/pciehp_core.c
+++ b/drivers/pci/hotplug/pciehp_core.c
@@ -434,6 +434,13 @@ static int pciehp_probe(struct pcie_device *dev, const struct pcie_port_service_
434 goto err_out_release_ctlr; 434 goto err_out_release_ctlr;
435 } 435 }
436 436
437 /* Enable events after we have setup the data structures */
438 rc = pcie_init_notification(ctrl);
439 if (rc) {
440 ctrl_err(ctrl, "Notification initialization failed\n");
441 goto err_out_release_ctlr;
442 }
443
437 /* Check if slot is occupied */ 444 /* Check if slot is occupied */
438 t_slot = pciehp_find_slot(ctrl, ctrl->slot_device_offset); 445 t_slot = pciehp_find_slot(ctrl, ctrl->slot_device_offset);
439 t_slot->hpc_ops->get_adapter_status(t_slot, &value); 446 t_slot->hpc_ops->get_adapter_status(t_slot, &value);
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index 71a8012886b0..7a16c6897bb9 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -934,7 +934,7 @@ static void pcie_disable_notification(struct controller *ctrl)
934 ctrl_warn(ctrl, "Cannot disable software notification\n"); 934 ctrl_warn(ctrl, "Cannot disable software notification\n");
935} 935}
936 936
937static int pcie_init_notification(struct controller *ctrl) 937int pcie_init_notification(struct controller *ctrl)
938{ 938{
939 if (pciehp_request_irq(ctrl)) 939 if (pciehp_request_irq(ctrl))
940 return -1; 940 return -1;
@@ -942,13 +942,17 @@ static int pcie_init_notification(struct controller *ctrl)
942 pciehp_free_irq(ctrl); 942 pciehp_free_irq(ctrl);
943 return -1; 943 return -1;
944 } 944 }
945 ctrl->notification_enabled = 1;
945 return 0; 946 return 0;
946} 947}
947 948
948static void pcie_shutdown_notification(struct controller *ctrl) 949static void pcie_shutdown_notification(struct controller *ctrl)
949{ 950{
950 pcie_disable_notification(ctrl); 951 if (ctrl->notification_enabled) {
951 pciehp_free_irq(ctrl); 952 pcie_disable_notification(ctrl);
953 pciehp_free_irq(ctrl);
954 ctrl->notification_enabled = 0;
955 }
952} 956}
953 957
954static int pcie_init_slot(struct controller *ctrl) 958static int pcie_init_slot(struct controller *ctrl)
@@ -1110,13 +1114,8 @@ struct controller *pcie_init(struct pcie_device *dev)
1110 if (pcie_init_slot(ctrl)) 1114 if (pcie_init_slot(ctrl))
1111 goto abort_ctrl; 1115 goto abort_ctrl;
1112 1116
1113 if (pcie_init_notification(ctrl))
1114 goto abort_slot;
1115
1116 return ctrl; 1117 return ctrl;
1117 1118
1118abort_slot:
1119 pcie_cleanup_slot(ctrl);
1120abort_ctrl: 1119abort_ctrl:
1121 kfree(ctrl); 1120 kfree(ctrl);
1122abort: 1121abort:
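Across the three pciehp files above, pcie_init_notification() stops being called from pcie_init() and becomes an exported step that pciehp_probe() runs only after the controller and slot structures exist, and the new notification_enabled bit lets pcie_shutdown_notification() run safely even if that step never happened. A simplified sketch of the resulting probe ordering (error unwinding trimmed, example_probe is an illustrative name):

/* Simplified probe ordering after the change; unwinding paths trimmed. */
#include "pciehp.h"

static int example_probe(struct pcie_device *dev)
{
	struct controller *ctrl;

	ctrl = pcie_init(dev);			/* allocate ctrl and slots; no IRQ yet */
	if (!ctrl)
		return -ENODEV;

	/* enable the interrupt and events only once the data structures exist */
	if (pcie_init_notification(ctrl))
		return -ENODEV;			/* real code also releases ctrl here */

	/* slot status checks and hotplug registration would follow */
	return 0;
}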
diff --git a/drivers/pci/intr_remapping.c b/drivers/pci/intr_remapping.c
index 5a57753ea9fc..8e44db040db7 100644
--- a/drivers/pci/intr_remapping.c
+++ b/drivers/pci/intr_remapping.c
@@ -208,7 +208,7 @@ int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
208 return index; 208 return index;
209} 209}
210 210
211static void qi_flush_iec(struct intel_iommu *iommu, int index, int mask) 211static int qi_flush_iec(struct intel_iommu *iommu, int index, int mask)
212{ 212{
213 struct qi_desc desc; 213 struct qi_desc desc;
214 214
@@ -216,7 +216,7 @@ static void qi_flush_iec(struct intel_iommu *iommu, int index, int mask)
216 | QI_IEC_SELECTIVE; 216 | QI_IEC_SELECTIVE;
217 desc.high = 0; 217 desc.high = 0;
218 218
219 qi_submit_sync(&desc, iommu); 219 return qi_submit_sync(&desc, iommu);
220} 220}
221 221
222int map_irq_to_irte_handle(int irq, u16 *sub_handle) 222int map_irq_to_irte_handle(int irq, u16 *sub_handle)
@@ -284,6 +284,7 @@ int clear_irte_irq(int irq, struct intel_iommu *iommu, u16 index)
284 284
285int modify_irte(int irq, struct irte *irte_modified) 285int modify_irte(int irq, struct irte *irte_modified)
286{ 286{
287 int rc;
287 int index; 288 int index;
288 struct irte *irte; 289 struct irte *irte;
289 struct intel_iommu *iommu; 290 struct intel_iommu *iommu;
@@ -304,14 +305,15 @@ int modify_irte(int irq, struct irte *irte_modified)
304 set_64bit((unsigned long *)irte, irte_modified->low | (1 << 1)); 305 set_64bit((unsigned long *)irte, irte_modified->low | (1 << 1));
305 __iommu_flush_cache(iommu, irte, sizeof(*irte)); 306 __iommu_flush_cache(iommu, irte, sizeof(*irte));
306 307
307 qi_flush_iec(iommu, index, 0); 308 rc = qi_flush_iec(iommu, index, 0);
308
309 spin_unlock(&irq_2_ir_lock); 309 spin_unlock(&irq_2_ir_lock);
310 return 0; 310
311 return rc;
311} 312}
312 313
313int flush_irte(int irq) 314int flush_irte(int irq)
314{ 315{
316 int rc;
315 int index; 317 int index;
316 struct intel_iommu *iommu; 318 struct intel_iommu *iommu;
317 struct irq_2_iommu *irq_iommu; 319 struct irq_2_iommu *irq_iommu;
@@ -327,10 +329,10 @@ int flush_irte(int irq)
327 329
328 index = irq_iommu->irte_index + irq_iommu->sub_handle; 330 index = irq_iommu->irte_index + irq_iommu->sub_handle;
329 331
330 qi_flush_iec(iommu, index, irq_iommu->irte_mask); 332 rc = qi_flush_iec(iommu, index, irq_iommu->irte_mask);
331 spin_unlock(&irq_2_ir_lock); 333 spin_unlock(&irq_2_ir_lock);
332 334
333 return 0; 335 return rc;
334} 336}
335 337
336struct intel_iommu *map_ioapic_to_ir(int apic) 338struct intel_iommu *map_ioapic_to_ir(int apic)
@@ -356,6 +358,7 @@ struct intel_iommu *map_dev_to_ir(struct pci_dev *dev)
356 358
357int free_irte(int irq) 359int free_irte(int irq)
358{ 360{
361 int rc = 0;
359 int index, i; 362 int index, i;
360 struct irte *irte; 363 struct irte *irte;
361 struct intel_iommu *iommu; 364 struct intel_iommu *iommu;
@@ -376,7 +379,7 @@ int free_irte(int irq)
376 if (!irq_iommu->sub_handle) { 379 if (!irq_iommu->sub_handle) {
377 for (i = 0; i < (1 << irq_iommu->irte_mask); i++) 380 for (i = 0; i < (1 << irq_iommu->irte_mask); i++)
378 set_64bit((unsigned long *)irte, 0); 381 set_64bit((unsigned long *)irte, 0);
379 qi_flush_iec(iommu, index, irq_iommu->irte_mask); 382 rc = qi_flush_iec(iommu, index, irq_iommu->irte_mask);
380 } 383 }
381 384
382 irq_iommu->iommu = NULL; 385 irq_iommu->iommu = NULL;
@@ -386,7 +389,7 @@ int free_irte(int irq)
386 389
387 spin_unlock(&irq_2_ir_lock); 390 spin_unlock(&irq_2_ir_lock);
388 391
389 return 0; 392 return rc;
390} 393}
391 394
392static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode) 395static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode)
diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c
index aac7006949f1..d0c973685868 100644
--- a/drivers/pci/pcie/aer/aerdrv_core.c
+++ b/drivers/pci/pcie/aer/aerdrv_core.c
@@ -108,6 +108,34 @@ int pci_cleanup_aer_correct_error_status(struct pci_dev *dev)
108} 108}
109#endif /* 0 */ 109#endif /* 0 */
110 110
111
112static void set_device_error_reporting(struct pci_dev *dev, void *data)
113{
114 bool enable = *((bool *)data);
115
116 if (dev->pcie_type != PCIE_RC_PORT &&
117 dev->pcie_type != PCIE_SW_UPSTREAM_PORT &&
118 dev->pcie_type != PCIE_SW_DOWNSTREAM_PORT)
119 return;
120
121 if (enable)
122 pci_enable_pcie_error_reporting(dev);
123 else
124 pci_disable_pcie_error_reporting(dev);
125}
126
127/**
128 * set_downstream_devices_error_reporting - enable/disable the error reporting bits on the root port and its downstream ports.
129 * @dev: pointer to root port's pci_dev data structure
130 * @enable: true = enable error reporting, false = disable error reporting.
131 */
132static void set_downstream_devices_error_reporting(struct pci_dev *dev,
133 bool enable)
134{
135 set_device_error_reporting(dev, &enable);
136 pci_walk_bus(dev->subordinate, set_device_error_reporting, &enable);
137}
138
111static int find_device_iter(struct device *device, void *data) 139static int find_device_iter(struct device *device, void *data)
112{ 140{
113 struct pci_dev *dev; 141 struct pci_dev *dev;
@@ -525,15 +553,11 @@ void aer_enable_rootport(struct aer_rpc *rpc)
525 pci_read_config_dword(pdev, aer_pos + PCI_ERR_UNCOR_STATUS, &reg32); 553 pci_read_config_dword(pdev, aer_pos + PCI_ERR_UNCOR_STATUS, &reg32);
526 pci_write_config_dword(pdev, aer_pos + PCI_ERR_UNCOR_STATUS, reg32); 554 pci_write_config_dword(pdev, aer_pos + PCI_ERR_UNCOR_STATUS, reg32);
527 555
528 /* Enable Root Port device reporting error itself */ 556 /*
529 pci_read_config_word(pdev, pos+PCI_EXP_DEVCTL, &reg16); 557 * Enable error reporting for the root port device and downstream port
530 reg16 = reg16 | 558 * devices.
531 PCI_EXP_DEVCTL_CERE | 559 */
532 PCI_EXP_DEVCTL_NFERE | 560 set_downstream_devices_error_reporting(pdev, true);
533 PCI_EXP_DEVCTL_FERE |
534 PCI_EXP_DEVCTL_URRE;
535 pci_write_config_word(pdev, pos+PCI_EXP_DEVCTL,
536 reg16);
537 561
538 /* Enable Root Port's interrupt in response to error messages */ 562 /* Enable Root Port's interrupt in response to error messages */
539 pci_write_config_dword(pdev, 563 pci_write_config_dword(pdev,
@@ -553,6 +577,12 @@ static void disable_root_aer(struct aer_rpc *rpc)
553 u32 reg32; 577 u32 reg32;
554 int pos; 578 int pos;
555 579
580 /*
581 * Disable error reporting for the root port device and downstream port
582 * devices.
583 */
584 set_downstream_devices_error_reporting(pdev, false);
585
556 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR); 586 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
557 /* Disable Root's interrupt in response to error messages */ 587 /* Disable Root's interrupt in response to error messages */
558 pci_write_config_dword(pdev, pos + PCI_ERR_ROOT_COMMAND, 0); 588 pci_write_config_dword(pdev, pos + PCI_ERR_ROOT_COMMAND, 0);
diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c
index f9b874eaeb9f..248b4db91552 100644
--- a/drivers/pci/pcie/portdrv_pci.c
+++ b/drivers/pci/pcie/portdrv_pci.c
@@ -97,8 +97,6 @@ static int __devinit pcie_portdrv_probe (struct pci_dev *dev,
97 97
98 pcie_portdrv_save_config(dev); 98 pcie_portdrv_save_config(dev);
99 99
100 pci_enable_pcie_error_reporting(dev);
101
102 return 0; 100 return 0;
103} 101}
104 102
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index baad093aafe3..f20d55368edb 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -1584,6 +1584,7 @@ DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_
1584 */ 1584 */
1585#define AMD_813X_MISC 0x40 1585#define AMD_813X_MISC 0x40
1586#define AMD_813X_NOIOAMODE (1<<0) 1586#define AMD_813X_NOIOAMODE (1<<0)
1587#define AMD_813X_REV_B2 0x13
1587 1588
1588static void quirk_disable_amd_813x_boot_interrupt(struct pci_dev *dev) 1589static void quirk_disable_amd_813x_boot_interrupt(struct pci_dev *dev)
1589{ 1590{
@@ -1591,6 +1592,8 @@ static void quirk_disable_amd_813x_boot_interrupt(struct pci_dev *dev)
1591 1592
1592 if (noioapicquirk) 1593 if (noioapicquirk)
1593 return; 1594 return;
1595 if (dev->revision == AMD_813X_REV_B2)
1596 return;
1594 1597
1595 pci_read_config_dword(dev, AMD_813X_MISC, &pci_config_dword); 1598 pci_read_config_dword(dev, AMD_813X_MISC, &pci_config_dword);
1596 pci_config_dword &= ~AMD_813X_NOIOAMODE; 1599 pci_config_dword &= ~AMD_813X_NOIOAMODE;
@@ -1981,7 +1984,6 @@ static void __devinit quirk_msi_ht_cap(struct pci_dev *dev)
1981DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT2000_PCIE, 1984DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT2000_PCIE,
1982 quirk_msi_ht_cap); 1985 quirk_msi_ht_cap);
1983 1986
1984
1985/* The nVidia CK804 chipset may have 2 HT MSI mappings. 1987/* The nVidia CK804 chipset may have 2 HT MSI mappings.
1986 * MSI are supported if the MSI capability set in any of these mappings. 1988 * MSI are supported if the MSI capability set in any of these mappings.
1987 */ 1989 */
@@ -2032,6 +2034,9 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SERVERWORKS,
2032 PCI_DEVICE_ID_SERVERWORKS_HT1000_PXB, 2034 PCI_DEVICE_ID_SERVERWORKS_HT1000_PXB,
2033 ht_enable_msi_mapping); 2035 ht_enable_msi_mapping);
2034 2036
2037DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8132_BRIDGE,
2038 ht_enable_msi_mapping);
2039
2035/* The P5N32-SLI Premium motherboard from Asus has a problem with msi 2040/* The P5N32-SLI Premium motherboard from Asus has a problem with msi
2036 * for the MCP55 NIC. It is not yet determined whether the msi problem 2041 * for the MCP55 NIC. It is not yet determined whether the msi problem
2037 * also affects other devices. As for now, turn off msi for this device. 2042 * also affects other devices. As for now, turn off msi for this device.
@@ -2048,10 +2053,100 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA,
2048 PCI_DEVICE_ID_NVIDIA_NVENET_15, 2053 PCI_DEVICE_ID_NVIDIA_NVENET_15,
2049 nvenet_msi_disable); 2054 nvenet_msi_disable);
2050 2055
2051static void __devinit nv_msi_ht_cap_quirk(struct pci_dev *dev) 2056static void __devinit nv_ht_enable_msi_mapping(struct pci_dev *dev)
2052{ 2057{
2053 struct pci_dev *host_bridge; 2058 struct pci_dev *host_bridge;
2059 int pos;
2060 int i, dev_no;
2061 int found = 0;
2062
2063 dev_no = dev->devfn >> 3;
2064 for (i = dev_no; i >= 0; i--) {
2065 host_bridge = pci_get_slot(dev->bus, PCI_DEVFN(i, 0));
2066 if (!host_bridge)
2067 continue;
2068
2069 pos = pci_find_ht_capability(host_bridge, HT_CAPTYPE_SLAVE);
2070 if (pos != 0) {
2071 found = 1;
2072 break;
2073 }
2074 pci_dev_put(host_bridge);
2075 }
2076
2077 if (!found)
2078 return;
2079
 2080 /* the host bridge already enabled it */
2081 if (msi_ht_cap_enabled(host_bridge))
2082 goto out;
2083
2084 ht_enable_msi_mapping(dev);
2085
2086out:
2087 pci_dev_put(host_bridge);
2088}
2089
2090static void __devinit ht_disable_msi_mapping(struct pci_dev *dev)
2091{
2092 int pos, ttl = 48;
2093
2094 pos = pci_find_ht_capability(dev, HT_CAPTYPE_MSI_MAPPING);
2095 while (pos && ttl--) {
2096 u8 flags;
2097
2098 if (pci_read_config_byte(dev, pos + HT_MSI_FLAGS,
2099 &flags) == 0) {
 2100 dev_info(&dev->dev, "Disabling HT MSI Mapping\n");
2101
2102 pci_write_config_byte(dev, pos + HT_MSI_FLAGS,
2103 flags & ~HT_MSI_FLAGS_ENABLE);
2104 }
2105 pos = pci_find_next_ht_capability(dev, pos,
2106 HT_CAPTYPE_MSI_MAPPING);
2107 }
2108}
2109
2110static int __devinit ht_check_msi_mapping(struct pci_dev *dev)
2111{
2054 int pos, ttl = 48; 2112 int pos, ttl = 48;
2113 int found = 0;
2114
 2115 /* check whether this device has an HT MSI capability and whether it is enabled */
2116 pos = pci_find_ht_capability(dev, HT_CAPTYPE_MSI_MAPPING);
2117 while (pos && ttl--) {
2118 u8 flags;
2119
2120 if (found < 1)
2121 found = 1;
2122 if (pci_read_config_byte(dev, pos + HT_MSI_FLAGS,
2123 &flags) == 0) {
2124 if (flags & HT_MSI_FLAGS_ENABLE) {
2125 if (found < 2) {
2126 found = 2;
2127 break;
2128 }
2129 }
2130 }
2131 pos = pci_find_next_ht_capability(dev, pos,
2132 HT_CAPTYPE_MSI_MAPPING);
2133 }
2134
2135 return found;
2136}
2137
2138static void __devinit nv_msi_ht_cap_quirk(struct pci_dev *dev)
2139{
2140 struct pci_dev *host_bridge;
2141 int pos;
2142 int found;
2143
 2144 /* check whether this device has an HT MSI capability and whether it is enabled */
2145 found = ht_check_msi_mapping(dev);
2146
2147 /* no HT MSI CAP */
2148 if (found == 0)
2149 return;
2055 2150
2056 /* 2151 /*
2057 * HT MSI mapping should be disabled on devices that are below 2152 * HT MSI mapping should be disabled on devices that are below
@@ -2067,24 +2162,19 @@ static void __devinit nv_msi_ht_cap_quirk(struct pci_dev *dev)
2067 pos = pci_find_ht_capability(host_bridge, HT_CAPTYPE_SLAVE); 2162 pos = pci_find_ht_capability(host_bridge, HT_CAPTYPE_SLAVE);
2068 if (pos != 0) { 2163 if (pos != 0) {
2069 /* Host bridge is to HT */ 2164 /* Host bridge is to HT */
2070 ht_enable_msi_mapping(dev); 2165 if (found == 1) {
2166 /* it is not enabled, try to enable it */
2167 nv_ht_enable_msi_mapping(dev);
2168 }
2071 return; 2169 return;
2072 } 2170 }
2073 2171
2074 /* Host bridge is not to HT, disable HT MSI mapping on this device */ 2172 /* HT MSI is not enabled */
2075 pos = pci_find_ht_capability(dev, HT_CAPTYPE_MSI_MAPPING); 2173 if (found == 1)
2076 while (pos && ttl--) { 2174 return;
2077 u8 flags;
2078 2175
2079 if (pci_read_config_byte(dev, pos + HT_MSI_FLAGS, 2176 /* Host bridge is not to HT, disable HT MSI mapping on this device */
2080 &flags) == 0) { 2177 ht_disable_msi_mapping(dev);
2081 dev_info(&dev->dev, "Disabling HT MSI mapping");
2082 pci_write_config_byte(dev, pos + HT_MSI_FLAGS,
2083 flags & ~HT_MSI_FLAGS_ENABLE);
2084 }
2085 pos = pci_find_next_ht_capability(dev, pos,
2086 HT_CAPTYPE_MSI_MAPPING);
2087 }
2088} 2178}
2089DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, nv_msi_ht_cap_quirk); 2179DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, nv_msi_ht_cap_quirk);
2090DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_ANY_ID, nv_msi_ht_cap_quirk); 2180DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_ANY_ID, nv_msi_ht_cap_quirk);
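ht_check_msi_mapping() added above reports a tri-state: 0 when the device has no HT MSI mapping capability, 1 when a capability exists but is disabled, and 2 when mapping is already enabled. nv_msi_ht_cap_quirk() combines that with whether the host bridge is an HT slave to choose between enabling, disabling, or leaving the mapping alone. A condensed restatement of that decision as a standalone sketch (plain C, not driver code):

/* Standalone restatement of the quirk's decision table. */
#include <stdio.h>

enum ht_msi_state { HT_MSI_NONE = 0, HT_MSI_OFF = 1, HT_MSI_ON = 2 };

static const char *decide(enum ht_msi_state found, int bridge_is_ht_slave)
{
	if (found == HT_MSI_NONE)
		return "nothing to do";
	if (bridge_is_ht_slave)
		return found == HT_MSI_OFF ? "enable mapping" : "leave enabled";
	return found == HT_MSI_OFF ? "leave disabled" : "disable mapping";
}

int main(void)
{
	/* mapping enabled, but the bridge above is not an HT slave */
	printf("%s\n", decide(HT_MSI_ON, 0));	/* prints "disable mapping" */
	return 0;
}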
diff --git a/drivers/scsi/cxgb3i/cxgb3i.h b/drivers/scsi/cxgb3i/cxgb3i.h
index fde6e4c634e7..a7cf550b9cca 100644
--- a/drivers/scsi/cxgb3i/cxgb3i.h
+++ b/drivers/scsi/cxgb3i/cxgb3i.h
@@ -20,6 +20,7 @@
20#include <linux/list.h> 20#include <linux/list.h>
21#include <linux/netdevice.h> 21#include <linux/netdevice.h>
22#include <linux/scatterlist.h> 22#include <linux/scatterlist.h>
23#include <linux/skbuff.h>
23#include <scsi/libiscsi_tcp.h> 24#include <scsi/libiscsi_tcp.h>
24 25
25/* from cxgb3 LLD */ 26/* from cxgb3 LLD */
@@ -113,6 +114,26 @@ struct cxgb3i_endpoint {
113 struct cxgb3i_conn *cconn; 114 struct cxgb3i_conn *cconn;
114}; 115};
115 116
117/**
118 * struct cxgb3i_task_data - private iscsi task data
119 *
120 * @nr_frags: # of coalesced page frags (from scsi sgl)
121 * @frags: coalesced page frags (from scsi sgl)
122 * @skb: tx pdu skb
123 * @offset: data offset for the next pdu
124 * @count: max. possible pdu payload
125 * @sgoffset: offset to the first sg entry for a given offset
126 */
127#define MAX_PDU_FRAGS ((ULP2_MAX_PDU_PAYLOAD + 512 - 1) / 512)
128struct cxgb3i_task_data {
129 unsigned short nr_frags;
130 skb_frag_t frags[MAX_PDU_FRAGS];
131 struct sk_buff *skb;
132 unsigned int offset;
133 unsigned int count;
134 unsigned int sgoffset;
135};
136
116int cxgb3i_iscsi_init(void); 137int cxgb3i_iscsi_init(void);
117void cxgb3i_iscsi_cleanup(void); 138void cxgb3i_iscsi_cleanup(void);
118 139
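MAX_PDU_FRAGS above sizes the per-task frag array for a full PDU payload coalesced in 512-byte (sector-sized) pieces. Using the constants from cxgb3i_ddp.h further down (ULP2_MAX_PKT_SIZE 16224, ISCSI_PDU_NONPAYLOAD_LEN 312), a quick standalone check of the arithmetic:

/* Worked check of the frag array sizing (plain C, standalone). */
#include <stdio.h>

int main(void)
{
	unsigned int pkt = 16224;		/* ULP2_MAX_PKT_SIZE */
	unsigned int nonpayload = 312;		/* ISCSI_PDU_NONPAYLOAD_LEN */
	unsigned int payload = pkt - nonpayload;	/* ULP2_MAX_PDU_PAYLOAD = 15912 */
	unsigned int frags = (payload + 512 - 1) / 512;	/* MAX_PDU_FRAGS = 32 */

	printf("max payload %u bytes -> %u frag slots\n", payload, frags);
	return 0;
}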
diff --git a/drivers/scsi/cxgb3i/cxgb3i_ddp.c b/drivers/scsi/cxgb3i/cxgb3i_ddp.c
index 08f3a09d9233..a83d36e4926f 100644
--- a/drivers/scsi/cxgb3i/cxgb3i_ddp.c
+++ b/drivers/scsi/cxgb3i/cxgb3i_ddp.c
@@ -639,10 +639,11 @@ static int ddp_init(struct t3cdev *tdev)
639 write_unlock(&cxgb3i_ddp_rwlock); 639 write_unlock(&cxgb3i_ddp_rwlock);
640 640
641 ddp_log_info("nppods %u (0x%x ~ 0x%x), bits %u, mask 0x%x,0x%x " 641 ddp_log_info("nppods %u (0x%x ~ 0x%x), bits %u, mask 0x%x,0x%x "
642 "pkt %u,%u.\n", 642 "pkt %u/%u, %u/%u.\n",
643 ppmax, ddp->llimit, ddp->ulimit, ddp->idx_bits, 643 ppmax, ddp->llimit, ddp->ulimit, ddp->idx_bits,
644 ddp->idx_mask, ddp->rsvd_tag_mask, 644 ddp->idx_mask, ddp->rsvd_tag_mask,
645 ddp->max_txsz, ddp->max_rxsz); 645 ddp->max_txsz, uinfo.max_txsz,
646 ddp->max_rxsz, uinfo.max_rxsz);
646 return 0; 647 return 0;
647 648
648free_ddp_map: 649free_ddp_map:
@@ -654,8 +655,8 @@ free_ddp_map:
654 * cxgb3i_adapter_ddp_init - initialize the adapter's ddp resource 655 * cxgb3i_adapter_ddp_init - initialize the adapter's ddp resource
655 * @tdev: t3cdev adapter 656 * @tdev: t3cdev adapter
656 * @tformat: tag format 657 * @tformat: tag format
657 * @txsz: max tx pkt size, filled in by this func. 658 * @txsz: max tx pdu payload size, filled in by this func.
658 * @rxsz: max rx pkt size, filled in by this func. 659 * @rxsz: max rx pdu payload size, filled in by this func.
659 * initialize the ddp pagepod manager for a given adapter if needed and 660 * initialize the ddp pagepod manager for a given adapter if needed and
660 * setup the tag format for a given iscsi entity 661 * setup the tag format for a given iscsi entity
661 */ 662 */
@@ -685,10 +686,12 @@ int cxgb3i_adapter_ddp_init(struct t3cdev *tdev,
685 tformat->sw_bits, tformat->rsvd_bits, 686 tformat->sw_bits, tformat->rsvd_bits,
686 tformat->rsvd_shift, tformat->rsvd_mask); 687 tformat->rsvd_shift, tformat->rsvd_mask);
687 688
688 *txsz = ddp->max_txsz; 689 *txsz = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD,
689 *rxsz = ddp->max_rxsz; 690 ddp->max_txsz - ISCSI_PDU_NONPAYLOAD_LEN);
690 ddp_log_info("ddp max pkt size: %u, %u.\n", 691 *rxsz = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD,
691 ddp->max_txsz, ddp->max_rxsz); 692 ddp->max_rxsz - ISCSI_PDU_NONPAYLOAD_LEN);
693 ddp_log_info("max payload size: %u/%u, %u/%u.\n",
694 *txsz, ddp->max_txsz, *rxsz, ddp->max_rxsz);
692 return 0; 695 return 0;
693} 696}
694EXPORT_SYMBOL_GPL(cxgb3i_adapter_ddp_init); 697EXPORT_SYMBOL_GPL(cxgb3i_adapter_ddp_init);
diff --git a/drivers/scsi/cxgb3i/cxgb3i_ddp.h b/drivers/scsi/cxgb3i/cxgb3i_ddp.h
index 5c7c4d95c493..3faae7831c83 100644
--- a/drivers/scsi/cxgb3i/cxgb3i_ddp.h
+++ b/drivers/scsi/cxgb3i/cxgb3i_ddp.h
@@ -13,6 +13,8 @@
13#ifndef __CXGB3I_ULP2_DDP_H__ 13#ifndef __CXGB3I_ULP2_DDP_H__
14#define __CXGB3I_ULP2_DDP_H__ 14#define __CXGB3I_ULP2_DDP_H__
15 15
16#include <linux/vmalloc.h>
17
16/** 18/**
17 * struct cxgb3i_tag_format - cxgb3i ulp tag format for an iscsi entity 19 * struct cxgb3i_tag_format - cxgb3i ulp tag format for an iscsi entity
18 * 20 *
@@ -85,8 +87,9 @@ struct cxgb3i_ddp_info {
85 struct sk_buff **gl_skb; 87 struct sk_buff **gl_skb;
86}; 88};
87 89
90#define ISCSI_PDU_NONPAYLOAD_LEN 312 /* bhs(48) + ahs(256) + digest(8) */
88#define ULP2_MAX_PKT_SIZE 16224 91#define ULP2_MAX_PKT_SIZE 16224
89#define ULP2_MAX_PDU_PAYLOAD (ULP2_MAX_PKT_SIZE - ISCSI_PDU_NONPAYLOAD_MAX) 92#define ULP2_MAX_PDU_PAYLOAD (ULP2_MAX_PKT_SIZE - ISCSI_PDU_NONPAYLOAD_LEN)
90#define PPOD_PAGES_MAX 4 93#define PPOD_PAGES_MAX 4
91#define PPOD_PAGES_SHIFT 2 /* 4 pages per pod */ 94#define PPOD_PAGES_SHIFT 2 /* 4 pages per pod */
92 95
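With ISCSI_PDU_NONPAYLOAD_LEN defined above as 312 (BHS 48 + maximum AHS 256 + digests 8), the ddp init code now advertises payload sizes rather than raw packet sizes: the adapter limit minus the 312-byte overhead, capped at ULP2_MAX_PDU_PAYLOAD. A small worked example of that clamp; the 16384-byte adapter limit is chosen purely for illustration:

/* Worked example of the clamping done in cxgb3i_adapter_ddp_init() (plain C). */
#include <stdio.h>

static unsigned int min_u(unsigned int a, unsigned int b) { return a < b ? a : b; }

int main(void)
{
	unsigned int max_txsz = 16384;		/* illustrative adapter tx limit */
	unsigned int nonpayload = 312;		/* ISCSI_PDU_NONPAYLOAD_LEN */
	unsigned int ulp2_max = 16224 - nonpayload;	/* ULP2_MAX_PDU_PAYLOAD = 15912 */
	unsigned int txsz = min_u(ulp2_max, max_txsz - nonpayload);

	printf("tx payload limit: %u bytes\n", txsz);	/* 15912 here */
	return 0;
}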
diff --git a/drivers/scsi/cxgb3i/cxgb3i_init.c b/drivers/scsi/cxgb3i/cxgb3i_init.c
index 091ecb4d9f3d..1ce9f244e46c 100644
--- a/drivers/scsi/cxgb3i/cxgb3i_init.c
+++ b/drivers/scsi/cxgb3i/cxgb3i_init.c
@@ -12,8 +12,8 @@
12#include "cxgb3i.h" 12#include "cxgb3i.h"
13 13
14#define DRV_MODULE_NAME "cxgb3i" 14#define DRV_MODULE_NAME "cxgb3i"
15#define DRV_MODULE_VERSION "1.0.0" 15#define DRV_MODULE_VERSION "1.0.1"
16#define DRV_MODULE_RELDATE "Jun. 1, 2008" 16#define DRV_MODULE_RELDATE "Jan. 2009"
17 17
18static char version[] = 18static char version[] =
19 "Chelsio S3xx iSCSI Driver " DRV_MODULE_NAME 19 "Chelsio S3xx iSCSI Driver " DRV_MODULE_NAME
diff --git a/drivers/scsi/cxgb3i/cxgb3i_iscsi.c b/drivers/scsi/cxgb3i/cxgb3i_iscsi.c
index d83464b9b3f9..fa2a44f37b36 100644
--- a/drivers/scsi/cxgb3i/cxgb3i_iscsi.c
+++ b/drivers/scsi/cxgb3i/cxgb3i_iscsi.c
@@ -364,7 +364,8 @@ cxgb3i_session_create(struct iscsi_endpoint *ep, u16 cmds_max, u16 qdepth,
364 364
365 cls_session = iscsi_session_setup(&cxgb3i_iscsi_transport, shost, 365 cls_session = iscsi_session_setup(&cxgb3i_iscsi_transport, shost,
366 cmds_max, 366 cmds_max,
367 sizeof(struct iscsi_tcp_task), 367 sizeof(struct iscsi_tcp_task) +
368 sizeof(struct cxgb3i_task_data),
368 initial_cmdsn, ISCSI_MAX_TARGET); 369 initial_cmdsn, ISCSI_MAX_TARGET);
369 if (!cls_session) 370 if (!cls_session)
370 return NULL; 371 return NULL;
@@ -402,17 +403,15 @@ static inline int cxgb3i_conn_max_xmit_dlength(struct iscsi_conn *conn)
402{ 403{
403 struct iscsi_tcp_conn *tcp_conn = conn->dd_data; 404 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
404 struct cxgb3i_conn *cconn = tcp_conn->dd_data; 405 struct cxgb3i_conn *cconn = tcp_conn->dd_data;
405 unsigned int max = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD, 406 unsigned int max = max(512 * MAX_SKB_FRAGS, SKB_TX_HEADROOM);
406 cconn->hba->snic->tx_max_size -
407 ISCSI_PDU_NONPAYLOAD_MAX);
408 407
408 max = min(cconn->hba->snic->tx_max_size, max);
409 if (conn->max_xmit_dlength) 409 if (conn->max_xmit_dlength)
410 conn->max_xmit_dlength = min_t(unsigned int, 410 conn->max_xmit_dlength = min(conn->max_xmit_dlength, max);
411 conn->max_xmit_dlength, max);
412 else 411 else
413 conn->max_xmit_dlength = max; 412 conn->max_xmit_dlength = max;
414 align_pdu_size(conn->max_xmit_dlength); 413 align_pdu_size(conn->max_xmit_dlength);
415 cxgb3i_log_info("conn 0x%p, max xmit %u.\n", 414 cxgb3i_api_debug("conn 0x%p, max xmit %u.\n",
416 conn, conn->max_xmit_dlength); 415 conn, conn->max_xmit_dlength);
417 return 0; 416 return 0;
418} 417}
@@ -427,9 +426,7 @@ static inline int cxgb3i_conn_max_recv_dlength(struct iscsi_conn *conn)
427{ 426{
428 struct iscsi_tcp_conn *tcp_conn = conn->dd_data; 427 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
429 struct cxgb3i_conn *cconn = tcp_conn->dd_data; 428 struct cxgb3i_conn *cconn = tcp_conn->dd_data;
430 unsigned int max = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD, 429 unsigned int max = cconn->hba->snic->rx_max_size;
431 cconn->hba->snic->rx_max_size -
432 ISCSI_PDU_NONPAYLOAD_MAX);
433 430
434 align_pdu_size(max); 431 align_pdu_size(max);
435 if (conn->max_recv_dlength) { 432 if (conn->max_recv_dlength) {
@@ -439,8 +436,7 @@ static inline int cxgb3i_conn_max_recv_dlength(struct iscsi_conn *conn)
439 conn->max_recv_dlength, max); 436 conn->max_recv_dlength, max);
440 return -EINVAL; 437 return -EINVAL;
441 } 438 }
442 conn->max_recv_dlength = min_t(unsigned int, 439 conn->max_recv_dlength = min(conn->max_recv_dlength, max);
443 conn->max_recv_dlength, max);
444 align_pdu_size(conn->max_recv_dlength); 440 align_pdu_size(conn->max_recv_dlength);
445 } else 441 } else
446 conn->max_recv_dlength = max; 442 conn->max_recv_dlength = max;
@@ -844,7 +840,7 @@ static struct scsi_host_template cxgb3i_host_template = {
844 .proc_name = "cxgb3i", 840 .proc_name = "cxgb3i",
845 .queuecommand = iscsi_queuecommand, 841 .queuecommand = iscsi_queuecommand,
846 .change_queue_depth = iscsi_change_queue_depth, 842 .change_queue_depth = iscsi_change_queue_depth,
847 .can_queue = 128 * (ISCSI_DEF_XMIT_CMDS_MAX - 1), 843 .can_queue = CXGB3I_SCSI_QDEPTH_DFLT - 1,
848 .sg_tablesize = SG_ALL, 844 .sg_tablesize = SG_ALL,
849 .max_sectors = 0xFFFF, 845 .max_sectors = 0xFFFF,
850 .cmd_per_lun = ISCSI_DEF_CMD_PER_LUN, 846 .cmd_per_lun = ISCSI_DEF_CMD_PER_LUN,
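cxgb3i_conn_max_xmit_dlength() now starts from max(512 * MAX_SKB_FRAGS, SKB_TX_HEADROOM), clamps that to the adapter's tx_max_size, and finally to any already-negotiated value; the align_pdu_size() step is applied afterwards and is skipped here. A worked example with illustrative numbers: 18 is a typical MAX_SKB_FRAGS on 4 KB pages, and the SKB_TX_HEADROOM and adapter values are made up:

/* Worked example of the new tx length clamp (plain C, illustrative values). */
#include <stdio.h>

int main(void)
{
	unsigned int max_skb_frags = 18;	/* typical MAX_SKB_FRAGS on 4K pages */
	unsigned int skb_tx_headroom = 6144;	/* illustrative only */
	unsigned int tx_max_size = 16384;	/* illustrative adapter limit */
	unsigned int negotiated = 8192;		/* illustrative conn->max_xmit_dlength */

	unsigned int max = 512 * max_skb_frags;	/* 9216 */
	if (skb_tx_headroom > max)
		max = skb_tx_headroom;
	if (tx_max_size < max)
		max = tx_max_size;
	if (negotiated && negotiated < max)
		max = negotiated;		/* 8192 wins here */

	printf("max_xmit_dlength = %u\n", max);
	return 0;
}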
diff --git a/drivers/scsi/cxgb3i/cxgb3i_offload.c b/drivers/scsi/cxgb3i/cxgb3i_offload.c
index a865f1fefe8b..de3b3b614cca 100644
--- a/drivers/scsi/cxgb3i/cxgb3i_offload.c
+++ b/drivers/scsi/cxgb3i/cxgb3i_offload.c
@@ -23,19 +23,19 @@
23#include "cxgb3i_ddp.h" 23#include "cxgb3i_ddp.h"
24 24
25#ifdef __DEBUG_C3CN_CONN__ 25#ifdef __DEBUG_C3CN_CONN__
26#define c3cn_conn_debug cxgb3i_log_info 26#define c3cn_conn_debug cxgb3i_log_debug
27#else 27#else
28#define c3cn_conn_debug(fmt...) 28#define c3cn_conn_debug(fmt...)
29#endif 29#endif
30 30
31#ifdef __DEBUG_C3CN_TX__ 31#ifdef __DEBUG_C3CN_TX__
32#define c3cn_tx_debug cxgb3i_log_debug 32#define c3cn_tx_debug cxgb3i_log_debug
33#else 33#else
34#define c3cn_tx_debug(fmt...) 34#define c3cn_tx_debug(fmt...)
35#endif 35#endif
36 36
37#ifdef __DEBUG_C3CN_RX__ 37#ifdef __DEBUG_C3CN_RX__
38#define c3cn_rx_debug cxgb3i_log_debug 38#define c3cn_rx_debug cxgb3i_log_debug
39#else 39#else
40#define c3cn_rx_debug(fmt...) 40#define c3cn_rx_debug(fmt...)
41#endif 41#endif
@@ -47,9 +47,9 @@ static int cxgb3_rcv_win = 256 * 1024;
47module_param(cxgb3_rcv_win, int, 0644); 47module_param(cxgb3_rcv_win, int, 0644);
48MODULE_PARM_DESC(cxgb3_rcv_win, "TCP receive window in bytes (default=256KB)"); 48MODULE_PARM_DESC(cxgb3_rcv_win, "TCP receive window in bytes (default=256KB)");
49 49
50static int cxgb3_snd_win = 64 * 1024; 50static int cxgb3_snd_win = 128 * 1024;
51module_param(cxgb3_snd_win, int, 0644); 51module_param(cxgb3_snd_win, int, 0644);
52MODULE_PARM_DESC(cxgb3_snd_win, "TCP send window in bytes (default=64KB)"); 52MODULE_PARM_DESC(cxgb3_snd_win, "TCP send window in bytes (default=128KB)");
53 53
54static int cxgb3_rx_credit_thres = 10 * 1024; 54static int cxgb3_rx_credit_thres = 10 * 1024;
55module_param(cxgb3_rx_credit_thres, int, 0644); 55module_param(cxgb3_rx_credit_thres, int, 0644);
@@ -301,8 +301,8 @@ static void act_open_req_arp_failure(struct t3cdev *dev, struct sk_buff *skb)
301static void skb_entail(struct s3_conn *c3cn, struct sk_buff *skb, 301static void skb_entail(struct s3_conn *c3cn, struct sk_buff *skb,
302 int flags) 302 int flags)
303{ 303{
304 CXGB3_SKB_CB(skb)->seq = c3cn->write_seq; 304 skb_tcp_seq(skb) = c3cn->write_seq;
305 CXGB3_SKB_CB(skb)->flags = flags; 305 skb_flags(skb) = flags;
306 __skb_queue_tail(&c3cn->write_queue, skb); 306 __skb_queue_tail(&c3cn->write_queue, skb);
307} 307}
308 308
@@ -457,12 +457,9 @@ static unsigned int wrlen __read_mostly;
457 * The number of WRs needed for an skb depends on the number of fragments 457 * The number of WRs needed for an skb depends on the number of fragments
458 * in the skb and whether it has any payload in its main body. This maps the 458 * in the skb and whether it has any payload in its main body. This maps the
459 * length of the gather list represented by an skb into the # of necessary WRs. 459 * length of the gather list represented by an skb into the # of necessary WRs.
460 * 460 * The extra two fragments are for iscsi bhs and payload padding.
461 * The max. length of an skb is controlled by the max pdu size which is ~16K.
462 * Also, assume the min. fragment length is the sector size (512), then add
463 * extra fragment counts for iscsi bhs and payload padding.
464 */ 461 */
465#define SKB_WR_LIST_SIZE (16384/512 + 3) 462#define SKB_WR_LIST_SIZE (MAX_SKB_FRAGS + 2)
466static unsigned int skb_wrs[SKB_WR_LIST_SIZE] __read_mostly; 463static unsigned int skb_wrs[SKB_WR_LIST_SIZE] __read_mostly;
467 464
468static void s3_init_wr_tab(unsigned int wr_len) 465static void s3_init_wr_tab(unsigned int wr_len)
@@ -485,7 +482,7 @@ static void s3_init_wr_tab(unsigned int wr_len)
485 482
486static inline void reset_wr_list(struct s3_conn *c3cn) 483static inline void reset_wr_list(struct s3_conn *c3cn)
487{ 484{
488 c3cn->wr_pending_head = NULL; 485 c3cn->wr_pending_head = c3cn->wr_pending_tail = NULL;
489} 486}
490 487
491/* 488/*
@@ -496,7 +493,7 @@ static inline void reset_wr_list(struct s3_conn *c3cn)
496static inline void enqueue_wr(struct s3_conn *c3cn, 493static inline void enqueue_wr(struct s3_conn *c3cn,
497 struct sk_buff *skb) 494 struct sk_buff *skb)
498{ 495{
499 skb_wr_data(skb) = NULL; 496 skb_tx_wr_next(skb) = NULL;
500 497
501 /* 498 /*
502 * We want to take an extra reference since both us and the driver 499 * We want to take an extra reference since both us and the driver
@@ -509,10 +506,22 @@ static inline void enqueue_wr(struct s3_conn *c3cn,
509 if (!c3cn->wr_pending_head) 506 if (!c3cn->wr_pending_head)
510 c3cn->wr_pending_head = skb; 507 c3cn->wr_pending_head = skb;
511 else 508 else
512 skb_wr_data(skb) = skb; 509 skb_tx_wr_next(c3cn->wr_pending_tail) = skb;
513 c3cn->wr_pending_tail = skb; 510 c3cn->wr_pending_tail = skb;
514} 511}
515 512
513static int count_pending_wrs(struct s3_conn *c3cn)
514{
515 int n = 0;
516 const struct sk_buff *skb = c3cn->wr_pending_head;
517
518 while (skb) {
519 n += skb->csum;
520 skb = skb_tx_wr_next(skb);
521 }
522 return n;
523}
524
516static inline struct sk_buff *peek_wr(const struct s3_conn *c3cn) 525static inline struct sk_buff *peek_wr(const struct s3_conn *c3cn)
517{ 526{
518 return c3cn->wr_pending_head; 527 return c3cn->wr_pending_head;
@@ -529,8 +538,8 @@ static inline struct sk_buff *dequeue_wr(struct s3_conn *c3cn)
529 538
530 if (likely(skb)) { 539 if (likely(skb)) {
531 /* Don't bother clearing the tail */ 540 /* Don't bother clearing the tail */
532 c3cn->wr_pending_head = skb_wr_data(skb); 541 c3cn->wr_pending_head = skb_tx_wr_next(skb);
533 skb_wr_data(skb) = NULL; 542 skb_tx_wr_next(skb) = NULL;
534 } 543 }
535 return skb; 544 return skb;
536} 545}
@@ -543,13 +552,14 @@ static void purge_wr_queue(struct s3_conn *c3cn)
543} 552}
544 553
545static inline void make_tx_data_wr(struct s3_conn *c3cn, struct sk_buff *skb, 554static inline void make_tx_data_wr(struct s3_conn *c3cn, struct sk_buff *skb,
546 int len) 555 int len, int req_completion)
547{ 556{
548 struct tx_data_wr *req; 557 struct tx_data_wr *req;
549 558
550 skb_reset_transport_header(skb); 559 skb_reset_transport_header(skb);
551 req = (struct tx_data_wr *)__skb_push(skb, sizeof(*req)); 560 req = (struct tx_data_wr *)__skb_push(skb, sizeof(*req));
552 req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA)); 561 req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA) |
562 (req_completion ? F_WR_COMPL : 0));
553 req->wr_lo = htonl(V_WR_TID(c3cn->tid)); 563 req->wr_lo = htonl(V_WR_TID(c3cn->tid));
554 req->sndseq = htonl(c3cn->snd_nxt); 564 req->sndseq = htonl(c3cn->snd_nxt);
555 /* len includes the length of any HW ULP additions */ 565 /* len includes the length of any HW ULP additions */
@@ -592,7 +602,7 @@ static int c3cn_push_tx_frames(struct s3_conn *c3cn, int req_completion)
592 602
593 if (unlikely(c3cn->state == C3CN_STATE_CONNECTING || 603 if (unlikely(c3cn->state == C3CN_STATE_CONNECTING ||
594 c3cn->state == C3CN_STATE_CLOSE_WAIT_1 || 604 c3cn->state == C3CN_STATE_CLOSE_WAIT_1 ||
595 c3cn->state == C3CN_STATE_ABORTING)) { 605 c3cn->state >= C3CN_STATE_ABORTING)) {
596 c3cn_tx_debug("c3cn 0x%p, in closing state %u.\n", 606 c3cn_tx_debug("c3cn 0x%p, in closing state %u.\n",
597 c3cn, c3cn->state); 607 c3cn, c3cn->state);
598 return 0; 608 return 0;
@@ -615,7 +625,7 @@ static int c3cn_push_tx_frames(struct s3_conn *c3cn, int req_completion)
615 if (c3cn->wr_avail < wrs_needed) { 625 if (c3cn->wr_avail < wrs_needed) {
616 c3cn_tx_debug("c3cn 0x%p, skb len %u/%u, frag %u, " 626 c3cn_tx_debug("c3cn 0x%p, skb len %u/%u, frag %u, "
617 "wr %d < %u.\n", 627 "wr %d < %u.\n",
618 c3cn, skb->len, skb->datalen, frags, 628 c3cn, skb->len, skb->data_len, frags,
619 wrs_needed, c3cn->wr_avail); 629 wrs_needed, c3cn->wr_avail);
620 break; 630 break;
621 } 631 }
@@ -627,20 +637,24 @@ static int c3cn_push_tx_frames(struct s3_conn *c3cn, int req_completion)
627 c3cn->wr_unacked += wrs_needed; 637 c3cn->wr_unacked += wrs_needed;
628 enqueue_wr(c3cn, skb); 638 enqueue_wr(c3cn, skb);
629 639
630 if (likely(CXGB3_SKB_CB(skb)->flags & C3CB_FLAG_NEED_HDR)) { 640 c3cn_tx_debug("c3cn 0x%p, enqueue, skb len %u/%u, frag %u, "
631 len += ulp_extra_len(skb); 641 "wr %d, left %u, unack %u.\n",
632 make_tx_data_wr(c3cn, skb, len); 642 c3cn, skb->len, skb->data_len, frags,
633 c3cn->snd_nxt += len; 643 wrs_needed, c3cn->wr_avail, c3cn->wr_unacked);
634 if ((req_completion 644
635 && c3cn->wr_unacked == wrs_needed)
636 || (CXGB3_SKB_CB(skb)->flags & C3CB_FLAG_COMPL)
637 || c3cn->wr_unacked >= c3cn->wr_max / 2) {
638 struct work_request_hdr *wr = cplhdr(skb);
639 645
640 wr->wr_hi |= htonl(F_WR_COMPL); 646 if (likely(skb_flags(skb) & C3CB_FLAG_NEED_HDR)) {
647 if ((req_completion &&
648 c3cn->wr_unacked == wrs_needed) ||
649 (skb_flags(skb) & C3CB_FLAG_COMPL) ||
650 c3cn->wr_unacked >= c3cn->wr_max / 2) {
651 req_completion = 1;
641 c3cn->wr_unacked = 0; 652 c3cn->wr_unacked = 0;
642 } 653 }
643 CXGB3_SKB_CB(skb)->flags &= ~C3CB_FLAG_NEED_HDR; 654 len += ulp_extra_len(skb);
655 make_tx_data_wr(c3cn, skb, len, req_completion);
656 c3cn->snd_nxt += len;
657 skb_flags(skb) &= ~C3CB_FLAG_NEED_HDR;
644 } 658 }
645 659
646 total_size += skb->truesize; 660 total_size += skb->truesize;
@@ -735,8 +749,11 @@ static void process_act_establish(struct s3_conn *c3cn, struct sk_buff *skb)
735 if (unlikely(c3cn_flag(c3cn, C3CN_ACTIVE_CLOSE_NEEDED))) 749 if (unlikely(c3cn_flag(c3cn, C3CN_ACTIVE_CLOSE_NEEDED)))
736 /* upper layer has requested closing */ 750 /* upper layer has requested closing */
737 send_abort_req(c3cn); 751 send_abort_req(c3cn);
738 else if (c3cn_push_tx_frames(c3cn, 1)) 752 else {
753 if (skb_queue_len(&c3cn->write_queue))
754 c3cn_push_tx_frames(c3cn, 1);
739 cxgb3i_conn_tx_open(c3cn); 755 cxgb3i_conn_tx_open(c3cn);
756 }
740} 757}
741 758
742static int do_act_establish(struct t3cdev *cdev, struct sk_buff *skb, 759static int do_act_establish(struct t3cdev *cdev, struct sk_buff *skb,
@@ -1082,8 +1099,8 @@ static void process_rx_iscsi_hdr(struct s3_conn *c3cn, struct sk_buff *skb)
1082 return; 1099 return;
1083 } 1100 }
1084 1101
1085 CXGB3_SKB_CB(skb)->seq = ntohl(hdr_cpl->seq); 1102 skb_tcp_seq(skb) = ntohl(hdr_cpl->seq);
1086 CXGB3_SKB_CB(skb)->flags = 0; 1103 skb_flags(skb) = 0;
1087 1104
1088 skb_reset_transport_header(skb); 1105 skb_reset_transport_header(skb);
1089 __skb_pull(skb, sizeof(struct cpl_iscsi_hdr)); 1106 __skb_pull(skb, sizeof(struct cpl_iscsi_hdr));
@@ -1103,12 +1120,12 @@ static void process_rx_iscsi_hdr(struct s3_conn *c3cn, struct sk_buff *skb)
1103 goto abort_conn; 1120 goto abort_conn;
1104 1121
1105 skb_ulp_mode(skb) = ULP2_FLAG_DATA_READY; 1122 skb_ulp_mode(skb) = ULP2_FLAG_DATA_READY;
1106 skb_ulp_pdulen(skb) = ntohs(ddp_cpl.len); 1123 skb_rx_pdulen(skb) = ntohs(ddp_cpl.len);
1107 skb_ulp_ddigest(skb) = ntohl(ddp_cpl.ulp_crc); 1124 skb_rx_ddigest(skb) = ntohl(ddp_cpl.ulp_crc);
1108 status = ntohl(ddp_cpl.ddp_status); 1125 status = ntohl(ddp_cpl.ddp_status);
1109 1126
1110 c3cn_rx_debug("rx skb 0x%p, len %u, pdulen %u, ddp status 0x%x.\n", 1127 c3cn_rx_debug("rx skb 0x%p, len %u, pdulen %u, ddp status 0x%x.\n",
1111 skb, skb->len, skb_ulp_pdulen(skb), status); 1128 skb, skb->len, skb_rx_pdulen(skb), status);
1112 1129
1113 if (status & (1 << RX_DDP_STATUS_HCRC_SHIFT)) 1130 if (status & (1 << RX_DDP_STATUS_HCRC_SHIFT))
1114 skb_ulp_mode(skb) |= ULP2_FLAG_HCRC_ERROR; 1131 skb_ulp_mode(skb) |= ULP2_FLAG_HCRC_ERROR;
@@ -1126,7 +1143,7 @@ static void process_rx_iscsi_hdr(struct s3_conn *c3cn, struct sk_buff *skb)
1126 } else if (status & (1 << RX_DDP_STATUS_DDP_SHIFT)) 1143 } else if (status & (1 << RX_DDP_STATUS_DDP_SHIFT))
1127 skb_ulp_mode(skb) |= ULP2_FLAG_DATA_DDPED; 1144 skb_ulp_mode(skb) |= ULP2_FLAG_DATA_DDPED;
1128 1145
1129 c3cn->rcv_nxt = ntohl(ddp_cpl.seq) + skb_ulp_pdulen(skb); 1146 c3cn->rcv_nxt = ntohl(ddp_cpl.seq) + skb_rx_pdulen(skb);
1130 __pskb_trim(skb, len); 1147 __pskb_trim(skb, len);
1131 __skb_queue_tail(&c3cn->receive_queue, skb); 1148 __skb_queue_tail(&c3cn->receive_queue, skb);
1132 cxgb3i_conn_pdu_ready(c3cn); 1149 cxgb3i_conn_pdu_ready(c3cn);
@@ -1151,12 +1168,27 @@ static int do_iscsi_hdr(struct t3cdev *t3dev, struct sk_buff *skb, void *ctx)
1151 * Process an acknowledgment of WR completion. Advance snd_una and send the 1168 * Process an acknowledgment of WR completion. Advance snd_una and send the
1152 * next batch of work requests from the write queue. 1169 * next batch of work requests from the write queue.
1153 */ 1170 */
1171static void check_wr_invariants(struct s3_conn *c3cn)
1172{
1173 int pending = count_pending_wrs(c3cn);
1174
1175 if (unlikely(c3cn->wr_avail + pending != c3cn->wr_max))
1176 cxgb3i_log_error("TID %u: credit imbalance: avail %u, "
1177 "pending %u, total should be %u\n",
1178 c3cn->tid, c3cn->wr_avail, pending,
1179 c3cn->wr_max);
1180}
1181
1154static void process_wr_ack(struct s3_conn *c3cn, struct sk_buff *skb) 1182static void process_wr_ack(struct s3_conn *c3cn, struct sk_buff *skb)
1155{ 1183{
1156 struct cpl_wr_ack *hdr = cplhdr(skb); 1184 struct cpl_wr_ack *hdr = cplhdr(skb);
1157 unsigned int credits = ntohs(hdr->credits); 1185 unsigned int credits = ntohs(hdr->credits);
1158 u32 snd_una = ntohl(hdr->snd_una); 1186 u32 snd_una = ntohl(hdr->snd_una);
1159 1187
1188 c3cn_tx_debug("%u WR credits, avail %u, unack %u, TID %u, state %u.\n",
1189 credits, c3cn->wr_avail, c3cn->wr_unacked,
1190 c3cn->tid, c3cn->state);
1191
1160 c3cn->wr_avail += credits; 1192 c3cn->wr_avail += credits;
1161 if (c3cn->wr_unacked > c3cn->wr_max - c3cn->wr_avail) 1193 if (c3cn->wr_unacked > c3cn->wr_max - c3cn->wr_avail)
1162 c3cn->wr_unacked = c3cn->wr_max - c3cn->wr_avail; 1194 c3cn->wr_unacked = c3cn->wr_max - c3cn->wr_avail;
@@ -1171,6 +1203,17 @@ static void process_wr_ack(struct s3_conn *c3cn, struct sk_buff *skb)
1171 break; 1203 break;
1172 } 1204 }
1173 if (unlikely(credits < p->csum)) { 1205 if (unlikely(credits < p->csum)) {
1206 struct tx_data_wr *w = cplhdr(p);
1207 cxgb3i_log_error("TID %u got %u WR credits need %u, "
1208 "len %u, main body %u, frags %u, "
1209 "seq # %u, ACK una %u, ACK nxt %u, "
1210 "WR_AVAIL %u, WRs pending %u\n",
1211 c3cn->tid, credits, p->csum, p->len,
1212 p->len - p->data_len,
1213 skb_shinfo(p)->nr_frags,
1214 ntohl(w->sndseq), snd_una,
1215 ntohl(hdr->snd_nxt), c3cn->wr_avail,
1216 count_pending_wrs(c3cn) - credits);
1174 p->csum -= credits; 1217 p->csum -= credits;
1175 break; 1218 break;
1176 } else { 1219 } else {
@@ -1180,15 +1223,24 @@ static void process_wr_ack(struct s3_conn *c3cn, struct sk_buff *skb)
1180 } 1223 }
1181 } 1224 }
1182 1225
1183 if (unlikely(before(snd_una, c3cn->snd_una))) 1226 check_wr_invariants(c3cn);
1227
1228 if (unlikely(before(snd_una, c3cn->snd_una))) {
1229 cxgb3i_log_error("TID %u, unexpected sequence # %u in WR_ACK "
1230 "snd_una %u\n",
1231 c3cn->tid, snd_una, c3cn->snd_una);
1184 goto out_free; 1232 goto out_free;
1233 }
1185 1234
1186 if (c3cn->snd_una != snd_una) { 1235 if (c3cn->snd_una != snd_una) {
1187 c3cn->snd_una = snd_una; 1236 c3cn->snd_una = snd_una;
1188 dst_confirm(c3cn->dst_cache); 1237 dst_confirm(c3cn->dst_cache);
1189 } 1238 }
1190 1239
1191 if (skb_queue_len(&c3cn->write_queue) && c3cn_push_tx_frames(c3cn, 0)) 1240 if (skb_queue_len(&c3cn->write_queue)) {
1241 if (c3cn_push_tx_frames(c3cn, 0))
1242 cxgb3i_conn_tx_open(c3cn);
1243 } else
1192 cxgb3i_conn_tx_open(c3cn); 1244 cxgb3i_conn_tx_open(c3cn);
1193out_free: 1245out_free:
1194 __kfree_skb(skb); 1246 __kfree_skb(skb);
@@ -1452,7 +1504,7 @@ static void init_offload_conn(struct s3_conn *c3cn,
1452 struct dst_entry *dst) 1504 struct dst_entry *dst)
1453{ 1505{
1454 BUG_ON(c3cn->cdev != cdev); 1506 BUG_ON(c3cn->cdev != cdev);
1455 c3cn->wr_max = c3cn->wr_avail = T3C_DATA(cdev)->max_wrs; 1507 c3cn->wr_max = c3cn->wr_avail = T3C_DATA(cdev)->max_wrs - 1;
1456 c3cn->wr_unacked = 0; 1508 c3cn->wr_unacked = 0;
1457 c3cn->mss_idx = select_mss(c3cn, dst_mtu(dst)); 1509 c3cn->mss_idx = select_mss(c3cn, dst_mtu(dst));
1458 1510
@@ -1671,9 +1723,17 @@ int cxgb3i_c3cn_send_pdus(struct s3_conn *c3cn, struct sk_buff *skb)
1671 goto out_err; 1723 goto out_err;
1672 } 1724 }
1673 1725
1674 err = -EPIPE;
1675 if (c3cn->err) { 1726 if (c3cn->err) {
1676 c3cn_tx_debug("c3cn 0x%p, err %d.\n", c3cn, c3cn->err); 1727 c3cn_tx_debug("c3cn 0x%p, err %d.\n", c3cn, c3cn->err);
1728 err = -EPIPE;
1729 goto out_err;
1730 }
1731
1732 if (c3cn->write_seq - c3cn->snd_una >= cxgb3_snd_win) {
1733 c3cn_tx_debug("c3cn 0x%p, snd %u - %u > %u.\n",
1734 c3cn, c3cn->write_seq, c3cn->snd_una,
1735 cxgb3_snd_win);
1736 err = -EAGAIN;
1677 goto out_err; 1737 goto out_err;
1678 } 1738 }
1679 1739
diff --git a/drivers/scsi/cxgb3i/cxgb3i_offload.h b/drivers/scsi/cxgb3i/cxgb3i_offload.h
index d23156907ffd..6344b9eb2589 100644
--- a/drivers/scsi/cxgb3i/cxgb3i_offload.h
+++ b/drivers/scsi/cxgb3i/cxgb3i_offload.h
@@ -178,25 +178,33 @@ void cxgb3i_c3cn_release(struct s3_conn *);
178 * @flag: see C3CB_FLAG_* below 178 * @flag: see C3CB_FLAG_* below
179 * @ulp_mode: ULP mode/submode of sk_buff 179 * @ulp_mode: ULP mode/submode of sk_buff
180 * @seq: tcp sequence number 180 * @seq: tcp sequence number
181 * @ddigest: pdu data digest
182 * @pdulen: recovered pdu length
183 * @wr_data: scratch area for tx wr
184 */ 181 */
182struct cxgb3_skb_rx_cb {
183 __u32 ddigest; /* data digest */
184 __u32 pdulen; /* recovered pdu length */
185};
186
187struct cxgb3_skb_tx_cb {
188 struct sk_buff *wr_next; /* next wr */
189};
190
185struct cxgb3_skb_cb { 191struct cxgb3_skb_cb {
186 __u8 flags; 192 __u8 flags;
187 __u8 ulp_mode; 193 __u8 ulp_mode;
188 __u32 seq; 194 __u32 seq;
189 __u32 ddigest; 195 union {
190 __u32 pdulen; 196 struct cxgb3_skb_rx_cb rx;
191 struct sk_buff *wr_data; 197 struct cxgb3_skb_tx_cb tx;
198 };
192}; 199};
193 200
194#define CXGB3_SKB_CB(skb) ((struct cxgb3_skb_cb *)&((skb)->cb[0])) 201#define CXGB3_SKB_CB(skb) ((struct cxgb3_skb_cb *)&((skb)->cb[0]))
195 202#define skb_flags(skb) (CXGB3_SKB_CB(skb)->flags)
196#define skb_ulp_mode(skb) (CXGB3_SKB_CB(skb)->ulp_mode) 203#define skb_ulp_mode(skb) (CXGB3_SKB_CB(skb)->ulp_mode)
197#define skb_ulp_ddigest(skb) (CXGB3_SKB_CB(skb)->ddigest) 204#define skb_tcp_seq(skb) (CXGB3_SKB_CB(skb)->seq)
198#define skb_ulp_pdulen(skb) (CXGB3_SKB_CB(skb)->pdulen) 205#define skb_rx_ddigest(skb) (CXGB3_SKB_CB(skb)->rx.ddigest)
199#define skb_wr_data(skb) (CXGB3_SKB_CB(skb)->wr_data) 206#define skb_rx_pdulen(skb) (CXGB3_SKB_CB(skb)->rx.pdulen)
207#define skb_tx_wr_next(skb) (CXGB3_SKB_CB(skb)->tx.wr_next)
200 208
201enum c3cb_flags { 209enum c3cb_flags {
202 C3CB_FLAG_NEED_HDR = 1 << 0, /* packet needs a TX_DATA_WR header */ 210 C3CB_FLAG_NEED_HDR = 1 << 0, /* packet needs a TX_DATA_WR header */
@@ -217,6 +225,7 @@ struct sge_opaque_hdr {
217/* for TX: a skb must have a headroom of at least TX_HEADER_LEN bytes */ 225/* for TX: a skb must have a headroom of at least TX_HEADER_LEN bytes */
218#define TX_HEADER_LEN \ 226#define TX_HEADER_LEN \
219 (sizeof(struct tx_data_wr) + sizeof(struct sge_opaque_hdr)) 227 (sizeof(struct tx_data_wr) + sizeof(struct sge_opaque_hdr))
228#define SKB_TX_HEADROOM SKB_MAX_HEAD(TX_HEADER_LEN)
220 229
221/* 230/*
222 * get and set private ip for iscsi traffic 231 * get and set private ip for iscsi traffic
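
The header change above splits the per-skb control block into shared fields plus an rx/tx union, since a buffer only ever travels one of the two paths, and exposes the fields through accessor macros. Below is a minimal, self-contained model of that layout; the demo_* names and the 48-byte cb array are invented for the sketch and are not the driver's definitions.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct cb_rx { uint32_t ddigest; uint32_t pdulen; };
struct cb_tx { void *wr_next; };

/* Shared fields first, then an rx/tx union: a buffer is only ever on
 * one of the two paths, so the union keeps the control block small. */
struct demo_cb {
        uint8_t  flags;
        uint8_t  ulp_mode;
        uint32_t seq;
        union {
                struct cb_rx rx;
                struct cb_tx tx;
        };
};

struct demo_skb { char cb[48]; };       /* stand-in for sk_buff::cb */

#define DEMO_CB(skb)            ((struct demo_cb *)&((skb)->cb[0]))
#define demo_rx_pdulen(skb)     (DEMO_CB(skb)->rx.pdulen)
#define demo_tx_next(skb)       (DEMO_CB(skb)->tx.wr_next)

int main(void)
{
        struct demo_skb skb;

        memset(&skb, 0, sizeof(skb));
        demo_rx_pdulen(&skb) = 512;
        printf("pdulen %u, cb uses %u of %u bytes\n",
               (unsigned)demo_rx_pdulen(&skb),
               (unsigned)sizeof(struct demo_cb), (unsigned)sizeof(skb.cb));
        return 0;
}
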
diff --git a/drivers/scsi/cxgb3i/cxgb3i_pdu.c b/drivers/scsi/cxgb3i/cxgb3i_pdu.c
index ce7ce8c6094c..17115c230d65 100644
--- a/drivers/scsi/cxgb3i/cxgb3i_pdu.c
+++ b/drivers/scsi/cxgb3i/cxgb3i_pdu.c
@@ -32,6 +32,10 @@
32#define cxgb3i_tx_debug(fmt...) 32#define cxgb3i_tx_debug(fmt...)
33#endif 33#endif
34 34
35/* always allocate rooms for AHS */
36#define SKB_TX_PDU_HEADER_LEN \
37 (sizeof(struct iscsi_hdr) + ISCSI_MAX_AHS_SIZE)
38static unsigned int skb_extra_headroom;
35static struct page *pad_page; 39static struct page *pad_page;
36 40
37/* 41/*
@@ -146,12 +150,13 @@ static inline void tx_skb_setmode(struct sk_buff *skb, int hcrc, int dcrc)
146 150
147void cxgb3i_conn_cleanup_task(struct iscsi_task *task) 151void cxgb3i_conn_cleanup_task(struct iscsi_task *task)
148{ 152{
149 struct iscsi_tcp_task *tcp_task = task->dd_data; 153 struct cxgb3i_task_data *tdata = task->dd_data +
154 sizeof(struct iscsi_tcp_task);
150 155
151 /* never reached the xmit task callout */ 156 /* never reached the xmit task callout */
152 if (tcp_task->dd_data) 157 if (tdata->skb)
153 kfree_skb(tcp_task->dd_data); 158 __kfree_skb(tdata->skb);
154 tcp_task->dd_data = NULL; 159 memset(tdata, 0, sizeof(struct cxgb3i_task_data));
155 160
156 /* MNC - Do we need a check in case this is called but 161 /* MNC - Do we need a check in case this is called but
157 * cxgb3i_conn_alloc_pdu has never been called on the task */ 162 * cxgb3i_conn_alloc_pdu has never been called on the task */
@@ -159,28 +164,102 @@ void cxgb3i_conn_cleanup_task(struct iscsi_task *task)
159 iscsi_tcp_cleanup_task(task); 164 iscsi_tcp_cleanup_task(task);
160} 165}
161 166
162/* 167static int sgl_seek_offset(struct scatterlist *sgl, unsigned int sgcnt,
163 * We do not support ahs yet 168 unsigned int offset, unsigned int *off,
164 */ 169 struct scatterlist **sgp)
170{
171 int i;
172 struct scatterlist *sg;
173
174 for_each_sg(sgl, sg, sgcnt, i) {
175 if (offset < sg->length) {
176 *off = offset;
177 *sgp = sg;
178 return 0;
179 }
180 offset -= sg->length;
181 }
182 return -EFAULT;
183}
184
185static int sgl_read_to_frags(struct scatterlist *sg, unsigned int sgoffset,
186 unsigned int dlen, skb_frag_t *frags,
187 int frag_max)
188{
189 unsigned int datalen = dlen;
190 unsigned int sglen = sg->length - sgoffset;
191 struct page *page = sg_page(sg);
192 int i;
193
194 i = 0;
195 do {
196 unsigned int copy;
197
198 if (!sglen) {
199 sg = sg_next(sg);
200 if (!sg) {
201 cxgb3i_log_error("%s, sg NULL, len %u/%u.\n",
202 __func__, datalen, dlen);
203 return -EINVAL;
204 }
205 sgoffset = 0;
206 sglen = sg->length;
207 page = sg_page(sg);
208
209 }
210 copy = min(datalen, sglen);
211 if (i && page == frags[i - 1].page &&
212 sgoffset + sg->offset ==
213 frags[i - 1].page_offset + frags[i - 1].size) {
214 frags[i - 1].size += copy;
215 } else {
216 if (i >= frag_max) {
217 cxgb3i_log_error("%s, too many pages %u, "
218 "dlen %u.\n", __func__,
219 frag_max, dlen);
220 return -EINVAL;
221 }
222
223 frags[i].page = page;
224 frags[i].page_offset = sg->offset + sgoffset;
225 frags[i].size = copy;
226 i++;
227 }
228 datalen -= copy;
229 sgoffset += copy;
230 sglen -= copy;
231 } while (datalen);
232
233 return i;
234}
235
165int cxgb3i_conn_alloc_pdu(struct iscsi_task *task, u8 opcode) 236int cxgb3i_conn_alloc_pdu(struct iscsi_task *task, u8 opcode)
166{ 237{
238 struct iscsi_conn *conn = task->conn;
167 struct iscsi_tcp_task *tcp_task = task->dd_data; 239 struct iscsi_tcp_task *tcp_task = task->dd_data;
168 struct sk_buff *skb; 240 struct cxgb3i_task_data *tdata = task->dd_data + sizeof(*tcp_task);
241 struct scsi_cmnd *sc = task->sc;
242 int headroom = SKB_TX_PDU_HEADER_LEN;
169 243
244 tcp_task->dd_data = tdata;
170 task->hdr = NULL; 245 task->hdr = NULL;
171 /* always allocate rooms for AHS */ 246
172 skb = alloc_skb(sizeof(struct iscsi_hdr) + ISCSI_MAX_AHS_SIZE + 247 /* write command, need to send data pdus */
173 TX_HEADER_LEN, GFP_ATOMIC); 248 if (skb_extra_headroom && (opcode == ISCSI_OP_SCSI_DATA_OUT ||
174 if (!skb) 249 (opcode == ISCSI_OP_SCSI_CMD &&
250 (scsi_bidi_cmnd(sc) || sc->sc_data_direction == DMA_TO_DEVICE))))
251 headroom += min(skb_extra_headroom, conn->max_xmit_dlength);
252
253 tdata->skb = alloc_skb(TX_HEADER_LEN + headroom, GFP_ATOMIC);
254 if (!tdata->skb)
175 return -ENOMEM; 255 return -ENOMEM;
256 skb_reserve(tdata->skb, TX_HEADER_LEN);
176 257
177 cxgb3i_tx_debug("task 0x%p, opcode 0x%x, skb 0x%p.\n", 258 cxgb3i_tx_debug("task 0x%p, opcode 0x%x, skb 0x%p.\n",
178 task, opcode, skb); 259 task, opcode, tdata->skb);
179 260
180 tcp_task->dd_data = skb; 261 task->hdr = (struct iscsi_hdr *)tdata->skb->data;
181 skb_reserve(skb, TX_HEADER_LEN); 262 task->hdr_max = SKB_TX_PDU_HEADER_LEN;
182 task->hdr = (struct iscsi_hdr *)skb->data;
183 task->hdr_max = sizeof(struct iscsi_hdr);
184 263
185 /* data_out uses scsi_cmd's itt */ 264 /* data_out uses scsi_cmd's itt */
186 if (opcode != ISCSI_OP_SCSI_DATA_OUT) 265 if (opcode != ISCSI_OP_SCSI_DATA_OUT)
@@ -192,13 +271,13 @@ int cxgb3i_conn_alloc_pdu(struct iscsi_task *task, u8 opcode)
192int cxgb3i_conn_init_pdu(struct iscsi_task *task, unsigned int offset, 271int cxgb3i_conn_init_pdu(struct iscsi_task *task, unsigned int offset,
193 unsigned int count) 272 unsigned int count)
194{ 273{
195 struct iscsi_tcp_task *tcp_task = task->dd_data;
196 struct sk_buff *skb = tcp_task->dd_data;
197 struct iscsi_conn *conn = task->conn; 274 struct iscsi_conn *conn = task->conn;
198 struct page *pg; 275 struct iscsi_tcp_task *tcp_task = task->dd_data;
276 struct cxgb3i_task_data *tdata = tcp_task->dd_data;
277 struct sk_buff *skb = tdata->skb;
199 unsigned int datalen = count; 278 unsigned int datalen = count;
200 int i, padlen = iscsi_padding(count); 279 int i, padlen = iscsi_padding(count);
201 skb_frag_t *frag; 280 struct page *pg;
202 281
203 cxgb3i_tx_debug("task 0x%p,0x%p, offset %u, count %u, skb 0x%p.\n", 282 cxgb3i_tx_debug("task 0x%p,0x%p, offset %u, count %u, skb 0x%p.\n",
204 task, task->sc, offset, count, skb); 283 task, task->sc, offset, count, skb);
@@ -209,90 +288,94 @@ int cxgb3i_conn_init_pdu(struct iscsi_task *task, unsigned int offset,
209 return 0; 288 return 0;
210 289
211 if (task->sc) { 290 if (task->sc) {
212 struct scatterlist *sg; 291 struct scsi_data_buffer *sdb = scsi_out(task->sc);
213 struct scsi_data_buffer *sdb; 292 struct scatterlist *sg = NULL;
214 unsigned int sgoffset = offset; 293 int err;
215 struct page *sgpg; 294
216 unsigned int sglen; 295 tdata->offset = offset;
217 296 tdata->count = count;
218 sdb = scsi_out(task->sc); 297 err = sgl_seek_offset(sdb->table.sgl, sdb->table.nents,
219 sg = sdb->table.sgl; 298 tdata->offset, &tdata->sgoffset, &sg);
220 299 if (err < 0) {
221 for_each_sg(sdb->table.sgl, sg, sdb->table.nents, i) { 300 cxgb3i_log_warn("tpdu, sgl %u, bad offset %u/%u.\n",
222 cxgb3i_tx_debug("sg %d, page 0x%p, len %u offset %u\n", 301 sdb->table.nents, tdata->offset,
223 i, sg_page(sg), sg->length, sg->offset); 302 sdb->length);
224 303 return err;
225 if (sgoffset < sg->length)
226 break;
227 sgoffset -= sg->length;
228 } 304 }
229 sgpg = sg_page(sg); 305 err = sgl_read_to_frags(sg, tdata->sgoffset, tdata->count,
230 sglen = sg->length - sgoffset; 306 tdata->frags, MAX_PDU_FRAGS);
231 307 if (err < 0) {
232 do { 308 cxgb3i_log_warn("tpdu, sgl %u, bad offset %u + %u.\n",
233 int j = skb_shinfo(skb)->nr_frags; 309 sdb->table.nents, tdata->offset,
234 unsigned int copy; 310 tdata->count);
235 311 return err;
236 if (!sglen) { 312 }
237 sg = sg_next(sg); 313 tdata->nr_frags = err;
238 sgpg = sg_page(sg); 314
239 sgoffset = 0; 315 if (tdata->nr_frags > MAX_SKB_FRAGS ||
240 sglen = sg->length; 316 (padlen && tdata->nr_frags == MAX_SKB_FRAGS)) {
241 ++i; 317 char *dst = skb->data + task->hdr_len;
318 skb_frag_t *frag = tdata->frags;
319
320 /* data fits in the skb's headroom */
321 for (i = 0; i < tdata->nr_frags; i++, frag++) {
322 char *src = kmap_atomic(frag->page,
323 KM_SOFTIRQ0);
324
325 memcpy(dst, src+frag->page_offset, frag->size);
326 dst += frag->size;
327 kunmap_atomic(src, KM_SOFTIRQ0);
242 } 328 }
243 copy = min(sglen, datalen); 329 if (padlen) {
244 if (j && skb_can_coalesce(skb, j, sgpg, 330 memset(dst, 0, padlen);
245 sg->offset + sgoffset)) { 331 padlen = 0;
246 skb_shinfo(skb)->frags[j - 1].size += copy;
247 } else {
248 get_page(sgpg);
249 skb_fill_page_desc(skb, j, sgpg,
250 sg->offset + sgoffset, copy);
251 } 332 }
252 sgoffset += copy; 333 skb_put(skb, count + padlen);
253 sglen -= copy; 334 } else {
254 datalen -= copy; 335 /* data fit into frag_list */
255 } while (datalen); 336 for (i = 0; i < tdata->nr_frags; i++)
337 get_page(tdata->frags[i].page);
338
339 memcpy(skb_shinfo(skb)->frags, tdata->frags,
340 sizeof(skb_frag_t) * tdata->nr_frags);
341 skb_shinfo(skb)->nr_frags = tdata->nr_frags;
342 skb->len += count;
343 skb->data_len += count;
344 skb->truesize += count;
345 }
346
256 } else { 347 } else {
257 pg = virt_to_page(task->data); 348 pg = virt_to_page(task->data);
258 349
259 while (datalen) { 350 get_page(pg);
260 i = skb_shinfo(skb)->nr_frags; 351 skb_fill_page_desc(skb, 0, pg, offset_in_page(task->data),
261 frag = &skb_shinfo(skb)->frags[i]; 352 count);
262 353 skb->len += count;
263 get_page(pg); 354 skb->data_len += count;
264 frag->page = pg; 355 skb->truesize += count;
265 frag->page_offset = 0;
266 frag->size = min((unsigned int)PAGE_SIZE, datalen);
267
268 skb_shinfo(skb)->nr_frags++;
269 datalen -= frag->size;
270 pg++;
271 }
272 } 356 }
273 357
274 if (padlen) { 358 if (padlen) {
275 i = skb_shinfo(skb)->nr_frags; 359 i = skb_shinfo(skb)->nr_frags;
276 frag = &skb_shinfo(skb)->frags[i]; 360 get_page(pad_page);
277 frag->page = pad_page; 361 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, pad_page, 0,
278 frag->page_offset = 0; 362 padlen);
279 frag->size = padlen; 363
280 skb_shinfo(skb)->nr_frags++; 364 skb->data_len += padlen;
365 skb->truesize += padlen;
366 skb->len += padlen;
281 } 367 }
282 368
283 datalen = count + padlen;
284 skb->data_len += datalen;
285 skb->truesize += datalen;
286 skb->len += datalen;
287 return 0; 369 return 0;
288} 370}
289 371
290int cxgb3i_conn_xmit_pdu(struct iscsi_task *task) 372int cxgb3i_conn_xmit_pdu(struct iscsi_task *task)
291{ 373{
292 struct iscsi_tcp_task *tcp_task = task->dd_data;
293 struct sk_buff *skb = tcp_task->dd_data;
294 struct iscsi_tcp_conn *tcp_conn = task->conn->dd_data; 374 struct iscsi_tcp_conn *tcp_conn = task->conn->dd_data;
295 struct cxgb3i_conn *cconn = tcp_conn->dd_data; 375 struct cxgb3i_conn *cconn = tcp_conn->dd_data;
376 struct iscsi_tcp_task *tcp_task = task->dd_data;
377 struct cxgb3i_task_data *tdata = tcp_task->dd_data;
378 struct sk_buff *skb = tdata->skb;
296 unsigned int datalen; 379 unsigned int datalen;
297 int err; 380 int err;
298 381
@@ -300,13 +383,14 @@ int cxgb3i_conn_xmit_pdu(struct iscsi_task *task)
300 return 0; 383 return 0;
301 384
302 datalen = skb->data_len; 385 datalen = skb->data_len;
303 tcp_task->dd_data = NULL; 386 tdata->skb = NULL;
304 err = cxgb3i_c3cn_send_pdus(cconn->cep->c3cn, skb); 387 err = cxgb3i_c3cn_send_pdus(cconn->cep->c3cn, skb);
305 cxgb3i_tx_debug("task 0x%p, skb 0x%p, len %u/%u, rv %d.\n",
306 task, skb, skb->len, skb->data_len, err);
307 if (err > 0) { 388 if (err > 0) {
308 int pdulen = err; 389 int pdulen = err;
309 390
391 cxgb3i_tx_debug("task 0x%p, skb 0x%p, len %u/%u, rv %d.\n",
392 task, skb, skb->len, skb->data_len, err);
393
310 if (task->conn->hdrdgst_en) 394 if (task->conn->hdrdgst_en)
311 pdulen += ISCSI_DIGEST_SIZE; 395 pdulen += ISCSI_DIGEST_SIZE;
312 if (datalen && task->conn->datadgst_en) 396 if (datalen && task->conn->datadgst_en)
@@ -325,12 +409,14 @@ int cxgb3i_conn_xmit_pdu(struct iscsi_task *task)
325 return err; 409 return err;
326 } 410 }
327 /* reset skb to send when we are called again */ 411 /* reset skb to send when we are called again */
328 tcp_task->dd_data = skb; 412 tdata->skb = skb;
329 return -EAGAIN; 413 return -EAGAIN;
330} 414}
331 415
332int cxgb3i_pdu_init(void) 416int cxgb3i_pdu_init(void)
333{ 417{
418 if (SKB_TX_HEADROOM > (512 * MAX_SKB_FRAGS))
419 skb_extra_headroom = SKB_TX_HEADROOM;
334 pad_page = alloc_page(GFP_KERNEL); 420 pad_page = alloc_page(GFP_KERNEL);
335 if (!pad_page) 421 if (!pad_page)
336 return -ENOMEM; 422 return -ENOMEM;
@@ -366,7 +452,9 @@ void cxgb3i_conn_pdu_ready(struct s3_conn *c3cn)
366 skb = skb_peek(&c3cn->receive_queue); 452 skb = skb_peek(&c3cn->receive_queue);
367 while (!err && skb) { 453 while (!err && skb) {
368 __skb_unlink(skb, &c3cn->receive_queue); 454 __skb_unlink(skb, &c3cn->receive_queue);
369 read += skb_ulp_pdulen(skb); 455 read += skb_rx_pdulen(skb);
456 cxgb3i_rx_debug("conn 0x%p, cn 0x%p, rx skb 0x%p, pdulen %u.\n",
457 conn, c3cn, skb, skb_rx_pdulen(skb));
370 err = cxgb3i_conn_read_pdu_skb(conn, skb); 458 err = cxgb3i_conn_read_pdu_skb(conn, skb);
371 __kfree_skb(skb); 459 __kfree_skb(skb);
372 skb = skb_peek(&c3cn->receive_queue); 460 skb = skb_peek(&c3cn->receive_queue);
@@ -377,6 +465,11 @@ void cxgb3i_conn_pdu_ready(struct s3_conn *c3cn)
377 cxgb3i_c3cn_rx_credits(c3cn, read); 465 cxgb3i_c3cn_rx_credits(c3cn, read);
378 } 466 }
379 conn->rxdata_octets += read; 467 conn->rxdata_octets += read;
468
469 if (err) {
470 cxgb3i_log_info("conn 0x%p rx failed err %d.\n", conn, err);
471 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
472 }
380} 473}
381 474
382void cxgb3i_conn_tx_open(struct s3_conn *c3cn) 475void cxgb3i_conn_tx_open(struct s3_conn *c3cn)
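
Much of the cxgb3i_pdu.c rework above revolves around walking the command's scatterlist: sgl_seek_offset() finds the entry containing a given byte offset, and sgl_read_to_frags() turns the following range into page fragments. The sketch below models just the seek step, with a plain array of segment lengths standing in for the scatterlist; names and sizes are illustrative only.

#include <stdio.h>

/* Walk an array of segment lengths until the requested byte offset falls
 * inside one; report that segment's index and the residual offset within it. */
static int seek_offset(const unsigned int *seg_len, int nsegs,
                       unsigned int offset, unsigned int *off, int *idx)
{
        int i;

        for (i = 0; i < nsegs; i++) {
                if (offset < seg_len[i]) {
                        *off = offset;
                        *idx = i;
                        return 0;
                }
                offset -= seg_len[i];
        }
        return -1;                      /* offset lies past the end */
}

int main(void)
{
        unsigned int segs[] = { 4096, 4096, 2048 };
        unsigned int off;
        int idx;

        if (!seek_offset(segs, 3, 5000, &off, &idx))
                printf("offset 5000 -> segment %d, offset %u\n", idx, off);
        return 0;
}
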
diff --git a/drivers/scsi/cxgb3i/cxgb3i_pdu.h b/drivers/scsi/cxgb3i/cxgb3i_pdu.h
index a3f685cc2362..0770b23d90da 100644
--- a/drivers/scsi/cxgb3i/cxgb3i_pdu.h
+++ b/drivers/scsi/cxgb3i/cxgb3i_pdu.h
@@ -53,7 +53,7 @@ struct cpl_rx_data_ddp_norss {
53#define ULP2_FLAG_DCRC_ERROR 0x20 53#define ULP2_FLAG_DCRC_ERROR 0x20
54#define ULP2_FLAG_PAD_ERROR 0x40 54#define ULP2_FLAG_PAD_ERROR 0x40
55 55
56void cxgb3i_conn_closing(struct s3_conn *); 56void cxgb3i_conn_closing(struct s3_conn *c3cn);
57void cxgb3i_conn_pdu_ready(struct s3_conn *c3cn); 57void cxgb3i_conn_pdu_ready(struct s3_conn *c3cn);
58void cxgb3i_conn_tx_open(struct s3_conn *c3cn); 58void cxgb3i_conn_tx_open(struct s3_conn *c3cn);
59#endif 59#endif
diff --git a/drivers/scsi/hptiop.c b/drivers/scsi/hptiop.c
index a48e4990fe12..34be88d7afa5 100644
--- a/drivers/scsi/hptiop.c
+++ b/drivers/scsi/hptiop.c
@@ -1251,6 +1251,7 @@ static struct pci_device_id hptiop_id_table[] = {
1251 { PCI_VDEVICE(TTI, 0x3530), (kernel_ulong_t)&hptiop_itl_ops }, 1251 { PCI_VDEVICE(TTI, 0x3530), (kernel_ulong_t)&hptiop_itl_ops },
1252 { PCI_VDEVICE(TTI, 0x3560), (kernel_ulong_t)&hptiop_itl_ops }, 1252 { PCI_VDEVICE(TTI, 0x3560), (kernel_ulong_t)&hptiop_itl_ops },
1253 { PCI_VDEVICE(TTI, 0x4322), (kernel_ulong_t)&hptiop_itl_ops }, 1253 { PCI_VDEVICE(TTI, 0x4322), (kernel_ulong_t)&hptiop_itl_ops },
1254 { PCI_VDEVICE(TTI, 0x4321), (kernel_ulong_t)&hptiop_itl_ops },
1254 { PCI_VDEVICE(TTI, 0x4210), (kernel_ulong_t)&hptiop_itl_ops }, 1255 { PCI_VDEVICE(TTI, 0x4210), (kernel_ulong_t)&hptiop_itl_ops },
1255 { PCI_VDEVICE(TTI, 0x4211), (kernel_ulong_t)&hptiop_itl_ops }, 1256 { PCI_VDEVICE(TTI, 0x4211), (kernel_ulong_t)&hptiop_itl_ops },
1256 { PCI_VDEVICE(TTI, 0x4310), (kernel_ulong_t)&hptiop_itl_ops }, 1257 { PCI_VDEVICE(TTI, 0x4310), (kernel_ulong_t)&hptiop_itl_ops },
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 940dc32ff0dc..b82ffd90632e 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1040,12 +1040,11 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
1040 action = ACTION_FAIL; 1040 action = ACTION_FAIL;
1041 break; 1041 break;
1042 case ABORTED_COMMAND: 1042 case ABORTED_COMMAND:
1043 action = ACTION_FAIL;
1043 if (sshdr.asc == 0x10) { /* DIF */ 1044 if (sshdr.asc == 0x10) { /* DIF */
1044 description = "Target Data Integrity Failure"; 1045 description = "Target Data Integrity Failure";
1045 action = ACTION_FAIL;
1046 error = -EILSEQ; 1046 error = -EILSEQ;
1047 } else 1047 }
1048 action = ACTION_RETRY;
1049 break; 1048 break;
1050 case NOT_READY: 1049 case NOT_READY:
1051 /* If the device is in the process of becoming 1050 /* If the device is in the process of becoming
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index d57566b8be0a..55310dbc10a6 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -107,6 +107,7 @@ static void scsi_disk_release(struct device *cdev);
107static void sd_print_sense_hdr(struct scsi_disk *, struct scsi_sense_hdr *); 107static void sd_print_sense_hdr(struct scsi_disk *, struct scsi_sense_hdr *);
108static void sd_print_result(struct scsi_disk *, int); 108static void sd_print_result(struct scsi_disk *, int);
109 109
110static DEFINE_SPINLOCK(sd_index_lock);
110static DEFINE_IDA(sd_index_ida); 111static DEFINE_IDA(sd_index_ida);
111 112
112/* This semaphore is used to mediate the 0->1 reference get in the 113/* This semaphore is used to mediate the 0->1 reference get in the
@@ -1914,7 +1915,9 @@ static int sd_probe(struct device *dev)
1914 if (!ida_pre_get(&sd_index_ida, GFP_KERNEL)) 1915 if (!ida_pre_get(&sd_index_ida, GFP_KERNEL))
1915 goto out_put; 1916 goto out_put;
1916 1917
1918 spin_lock(&sd_index_lock);
1917 error = ida_get_new(&sd_index_ida, &index); 1919 error = ida_get_new(&sd_index_ida, &index);
1920 spin_unlock(&sd_index_lock);
1918 } while (error == -EAGAIN); 1921 } while (error == -EAGAIN);
1919 1922
1920 if (error) 1923 if (error)
@@ -1936,7 +1939,9 @@ static int sd_probe(struct device *dev)
1936 return 0; 1939 return 0;
1937 1940
1938 out_free_index: 1941 out_free_index:
1942 spin_lock(&sd_index_lock);
1939 ida_remove(&sd_index_ida, index); 1943 ida_remove(&sd_index_ida, index);
1944 spin_unlock(&sd_index_lock);
1940 out_put: 1945 out_put:
1941 put_disk(gd); 1946 put_disk(gd);
1942 out_free: 1947 out_free:
@@ -1986,7 +1991,9 @@ static void scsi_disk_release(struct device *dev)
1986 struct scsi_disk *sdkp = to_scsi_disk(dev); 1991 struct scsi_disk *sdkp = to_scsi_disk(dev);
1987 struct gendisk *disk = sdkp->disk; 1992 struct gendisk *disk = sdkp->disk;
1988 1993
1994 spin_lock(&sd_index_lock);
1989 ida_remove(&sd_index_ida, sdkp->index); 1995 ida_remove(&sd_index_ida, sdkp->index);
1996 spin_unlock(&sd_index_lock);
1990 1997
1991 disk->private_data = NULL; 1998 disk->private_data = NULL;
1992 put_disk(disk); 1999 put_disk(disk);
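
The sd.c hunks above wrap every ida_get_new()/ida_remove() call in a new sd_index_lock, because the ida allocator does no internal locking and concurrent probe/release paths could otherwise corrupt it. A rough userspace analogy of that pattern is sketched below, with a pthread mutex standing in for the spinlock and a small bitmap standing in for the ida; both are stand-ins, not the kernel API. Build with -pthread.

#include <pthread.h>
#include <stdio.h>

#define MAX_IDS 32

static pthread_mutex_t id_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int id_bitmap;          /* bit i set => id i in use */

/* Every touch of the shared allocator state happens under the lock,
 * mirroring how sd_probe()/scsi_disk_release() now bracket the ida calls. */
static int id_alloc(void)
{
        int i, id = -1;

        pthread_mutex_lock(&id_lock);
        for (i = 0; i < MAX_IDS; i++) {
                if (!(id_bitmap & (1u << i))) {
                        id_bitmap |= 1u << i;
                        id = i;
                        break;
                }
        }
        pthread_mutex_unlock(&id_lock);
        return id;                      /* -1 when the space is exhausted */
}

static void id_free(int id)
{
        pthread_mutex_lock(&id_lock);
        id_bitmap &= ~(1u << id);
        pthread_mutex_unlock(&id_lock);
}

int main(void)
{
        int a = id_alloc(), b = id_alloc();

        printf("allocated %d and %d\n", a, b);
        id_free(a);
        id_free(b);
        return 0;
}
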
diff --git a/drivers/serial/sh-sci.h b/drivers/serial/sh-sci.h
index 3599828b9766..022e89ffec1d 100644
--- a/drivers/serial/sh-sci.h
+++ b/drivers/serial/sh-sci.h
@@ -133,7 +133,7 @@
133# define SCSPTR3 0xffed0024 /* 16 bit SCIF */ 133# define SCSPTR3 0xffed0024 /* 16 bit SCIF */
134# define SCSPTR4 0xffee0024 /* 16 bit SCIF */ 134# define SCSPTR4 0xffee0024 /* 16 bit SCIF */
135# define SCSPTR5 0xffef0024 /* 16 bit SCIF */ 135# define SCSPTR5 0xffef0024 /* 16 bit SCIF */
136# define SCIF_OPER 0x0001 /* Overrun error bit */ 136# define SCIF_ORER 0x0001 /* Overrun error bit */
137# define SCSCR_INIT(port) 0x3a /* TIE=0,RIE=0,TE=1,RE=1,REIE=1 */ 137# define SCSCR_INIT(port) 0x3a /* TIE=0,RIE=0,TE=1,RE=1,REIE=1 */
138#elif defined(CONFIG_CPU_SUBTYPE_SH7201) || \ 138#elif defined(CONFIG_CPU_SUBTYPE_SH7201) || \
139 defined(CONFIG_CPU_SUBTYPE_SH7203) || \ 139 defined(CONFIG_CPU_SUBTYPE_SH7203) || \
diff --git a/drivers/staging/panel/panel.c b/drivers/staging/panel/panel.c
index ab69c1bf36a8..c2747bc88c6f 100644
--- a/drivers/staging/panel/panel.c
+++ b/drivers/staging/panel/panel.c
@@ -2164,19 +2164,20 @@ static void __exit panel_cleanup_module(void)
2164 if (scan_timer.function != NULL) 2164 if (scan_timer.function != NULL)
2165 del_timer(&scan_timer); 2165 del_timer(&scan_timer);
2166 2166
2167 if (keypad_enabled) 2167 if (pprt != NULL) {
2168 misc_deregister(&keypad_dev); 2168 if (keypad_enabled)
2169 misc_deregister(&keypad_dev);
2170
2171 if (lcd_enabled) {
2172 panel_lcd_print("\x0cLCD driver " PANEL_VERSION
2173 "\nunloaded.\x1b[Lc\x1b[Lb\x1b[L-");
2174 misc_deregister(&lcd_dev);
2175 }
2169 2176
2170 if (lcd_enabled) { 2177 /* TODO: free all input signals */
2171 panel_lcd_print("\x0cLCD driver " PANEL_VERSION 2178 parport_release(pprt);
2172 "\nunloaded.\x1b[Lc\x1b[Lb\x1b[L-"); 2179 parport_unregister_device(pprt);
2173 misc_deregister(&lcd_dev);
2174 } 2180 }
2175
2176 /* TODO: free all input signals */
2177
2178 parport_release(pprt);
2179 parport_unregister_device(pprt);
2180 parport_unregister_driver(&panel_driver); 2181 parport_unregister_driver(&panel_driver);
2181} 2182}
2182 2183
diff --git a/drivers/staging/rtl8187se/Kconfig b/drivers/staging/rtl8187se/Kconfig
index 79c225acd1ad..f636296b54bc 100644
--- a/drivers/staging/rtl8187se/Kconfig
+++ b/drivers/staging/rtl8187se/Kconfig
@@ -1,5 +1,6 @@
1config RTL8187SE 1config RTL8187SE
2 tristate "RealTek RTL8187SE Wireless LAN NIC driver" 2 tristate "RealTek RTL8187SE Wireless LAN NIC driver"
3 depends on PCI 3 depends on PCI
4 depends on WIRELESS_EXT && COMPAT_NET_DEV_OPS
4 default N 5 default N
5 ---help--- 6 ---help---
diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211_crypt.c b/drivers/staging/rtl8187se/ieee80211/ieee80211_crypt.c
index af64cfbe16db..7370296225e1 100644
--- a/drivers/staging/rtl8187se/ieee80211/ieee80211_crypt.c
+++ b/drivers/staging/rtl8187se/ieee80211/ieee80211_crypt.c
@@ -234,20 +234,21 @@ out:
234void ieee80211_crypto_deinit(void) 234void ieee80211_crypto_deinit(void)
235{ 235{
236 struct list_head *ptr, *n; 236 struct list_head *ptr, *n;
237 struct ieee80211_crypto_alg *alg = NULL;
237 238
238 if (hcrypt == NULL) 239 if (hcrypt == NULL)
239 return; 240 return;
240 241
241 for (ptr = hcrypt->algs.next, n = ptr->next; ptr != &hcrypt->algs; 242 list_for_each_safe(ptr, n, &hcrypt->algs) {
242 ptr = n, n = ptr->next) { 243 alg = list_entry(ptr, struct ieee80211_crypto_alg, list);
243 struct ieee80211_crypto_alg *alg = 244 if (alg) {
244 (struct ieee80211_crypto_alg *) ptr; 245 list_del(ptr);
245 list_del(ptr); 246 printk(KERN_DEBUG
246 printk(KERN_DEBUG "ieee80211_crypt: unregistered algorithm " 247 "ieee80211_crypt: unregistered algorithm '%s' (deinit)\n",
247 "'%s' (deinit)\n", alg->ops->name); 248 alg->ops->name);
248 kfree(alg); 249 kfree(alg);
250 }
249 } 251 }
250
251 kfree(hcrypt); 252 kfree(hcrypt);
252} 253}
253 254
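
The ieee80211_crypt.c change above replaces a hand-rolled pointer walk, which cast the embedded list_head directly to the containing struct, with list_for_each_safe() plus list_entry(), so deletion during iteration stays safe and the container is recovered correctly even when the list member is not the first field. The sketch below reproduces that idea with a minimal circular list and a local container_of(); everything here is invented for illustration rather than taken from the kernel list API.

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct node { struct node *next; };     /* circular list with a sentinel head */

struct alg {
        char name[16];
        struct node list;               /* deliberately not the first member */
};

/* Keep a copy of the next pointer before freeing the current entry, and
 * use container_of() to recover the enclosing struct from the list node. */
static void free_all(struct node *head)
{
        struct node *p, *n;

        for (p = head->next; p != head; p = n) {
                struct alg *a = container_of(p, struct alg, list);

                n = p->next;
                printf("unregistering '%s'\n", a->name);
                free(a);
        }
        head->next = head;
}

int main(void)
{
        struct node head = { &head };
        struct alg *a = malloc(sizeof(*a)), *b = malloc(sizeof(*b));

        strcpy(a->name, "wep");
        strcpy(b->name, "tkip");
        a->list.next = &b->list;
        b->list.next = &head;
        head.next = &a->list;

        free_all(&head);
        return 0;
}
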
diff --git a/drivers/staging/rtl8187se/r8180_core.c b/drivers/staging/rtl8187se/r8180_core.c
index 94534955e38b..66de5cc8ddf1 100644
--- a/drivers/staging/rtl8187se/r8180_core.c
+++ b/drivers/staging/rtl8187se/r8180_core.c
@@ -6161,10 +6161,10 @@ static void __exit rtl8180_pci_module_exit(void)
6161{ 6161{
6162 pci_unregister_driver (&rtl8180_pci_driver); 6162 pci_unregister_driver (&rtl8180_pci_driver);
6163 rtl8180_proc_module_remove(); 6163 rtl8180_proc_module_remove();
6164 ieee80211_crypto_deinit();
6165 ieee80211_crypto_tkip_exit(); 6164 ieee80211_crypto_tkip_exit();
6166 ieee80211_crypto_ccmp_exit(); 6165 ieee80211_crypto_ccmp_exit();
6167 ieee80211_crypto_wep_exit(); 6166 ieee80211_crypto_wep_exit();
6167 ieee80211_crypto_deinit();
6168 DMESG("Exiting"); 6168 DMESG("Exiting");
6169} 6169}
6170 6170
diff --git a/drivers/staging/winbond/wbusb.c b/drivers/staging/winbond/wbusb.c
index b003f9a7e151..f716b2e92b65 100644
--- a/drivers/staging/winbond/wbusb.c
+++ b/drivers/staging/winbond/wbusb.c
@@ -319,16 +319,18 @@ static int wb35_probe(struct usb_interface *intf, const struct usb_device_id *id
319 struct usb_device *udev = interface_to_usbdev(intf); 319 struct usb_device *udev = interface_to_usbdev(intf);
320 struct wbsoft_priv *priv; 320 struct wbsoft_priv *priv;
321 struct ieee80211_hw *dev; 321 struct ieee80211_hw *dev;
322 int err; 322 int nr, err;
323 323
324 usb_get_dev(udev); 324 usb_get_dev(udev);
325 325
326 // 20060630.2 Check the device if it already be opened 326 // 20060630.2 Check the device if it already be opened
327 err = usb_control_msg(udev, usb_rcvctrlpipe( udev, 0 ), 327 nr = usb_control_msg(udev, usb_rcvctrlpipe( udev, 0 ),
328 0x01, USB_TYPE_VENDOR|USB_RECIP_DEVICE|USB_DIR_IN, 328 0x01, USB_TYPE_VENDOR|USB_RECIP_DEVICE|USB_DIR_IN,
329 0x0, 0x400, &ltmp, 4, HZ*100 ); 329 0x0, 0x400, &ltmp, 4, HZ*100 );
330 if (err) 330 if (nr < 0) {
331 err = nr;
331 goto error; 332 goto error;
333 }
332 334
333 ltmp = cpu_to_le32(ltmp); 335 ltmp = cpu_to_le32(ltmp);
334 if (ltmp) { // Is already initialized? 336 if (ltmp) { // Is already initialized?
@@ -337,8 +339,10 @@ static int wb35_probe(struct usb_interface *intf, const struct usb_device_id *id
337 } 339 }
338 340
339 dev = ieee80211_alloc_hw(sizeof(*priv), &wbsoft_ops); 341 dev = ieee80211_alloc_hw(sizeof(*priv), &wbsoft_ops);
340 if (!dev) 342 if (!dev) {
343 err = -ENOMEM;
341 goto error; 344 goto error;
345 }
342 346
343 priv = dev->priv; 347 priv = dev->priv;
344 348
@@ -369,9 +373,11 @@ static int wb35_probe(struct usb_interface *intf, const struct usb_device_id *id
369 } 373 }
370 374
371 dev->extra_tx_headroom = 12; /* FIXME */ 375 dev->extra_tx_headroom = 12; /* FIXME */
372 dev->flags = 0; 376 dev->flags = IEEE80211_HW_SIGNAL_UNSPEC;
377 dev->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
373 378
374 dev->channel_change_time = 1000; 379 dev->channel_change_time = 1000;
380 dev->max_signal = 100;
375 dev->queues = 1; 381 dev->queues = 1;
376 382
377 dev->wiphy->bands[IEEE80211_BAND_2GHZ] = &wbsoft_band_2GHz; 383 dev->wiphy->bands[IEEE80211_BAND_2GHZ] = &wbsoft_band_2GHz;
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 326dd7f65ee9..b3d5a23ab56f 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -1376,6 +1376,15 @@ static struct usb_device_id acm_ids[] = {
1376 { USB_DEVICE(0x0572, 0x1324), /* Conexant USB MODEM RD02-D400 */ 1376 { USB_DEVICE(0x0572, 0x1324), /* Conexant USB MODEM RD02-D400 */
1377 .driver_info = NO_UNION_NORMAL, /* has no union descriptor */ 1377 .driver_info = NO_UNION_NORMAL, /* has no union descriptor */
1378 }, 1378 },
1379 { USB_DEVICE(0x22b8, 0x6425), /* Motorola MOTOMAGX phones */
1380 },
1381 { USB_DEVICE(0x0572, 0x1329), /* Hummingbird huc56s (Conexant) */
1382 .driver_info = NO_UNION_NORMAL, /* union descriptor misplaced on
1383 data interface instead of
1384 communications interface.
1385 Maybe we should define a new
1386 quirk for this. */
1387 },
1379 1388
1380 /* control interfaces with various AT-command sets */ 1389 /* control interfaces with various AT-command sets */
1381 { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM, 1390 { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM,
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index 31fb204f44c6..49e7f56e0d7f 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -653,7 +653,7 @@ int usb_get_descriptor(struct usb_device *dev, unsigned char type,
653 if (result <= 0 && result != -ETIMEDOUT) 653 if (result <= 0 && result != -ETIMEDOUT)
654 continue; 654 continue;
655 if (result > 1 && ((u8 *)buf)[1] != type) { 655 if (result > 1 && ((u8 *)buf)[1] != type) {
656 result = -EPROTO; 656 result = -ENODATA;
657 continue; 657 continue;
658 } 658 }
659 break; 659 break;
@@ -696,8 +696,13 @@ static int usb_get_string(struct usb_device *dev, unsigned short langid,
696 USB_REQ_GET_DESCRIPTOR, USB_DIR_IN, 696 USB_REQ_GET_DESCRIPTOR, USB_DIR_IN,
697 (USB_DT_STRING << 8) + index, langid, buf, size, 697 (USB_DT_STRING << 8) + index, langid, buf, size,
698 USB_CTRL_GET_TIMEOUT); 698 USB_CTRL_GET_TIMEOUT);
699 if (!(result == 0 || result == -EPIPE)) 699 if (result == 0 || result == -EPIPE)
700 break; 700 continue;
701 if (result > 1 && ((u8 *) buf)[1] != USB_DT_STRING) {
702 result = -ENODATA;
703 continue;
704 }
705 break;
701 } 706 }
702 return result; 707 return result;
703} 708}
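
The usb_get_string() change above makes its retry loop treat a short or stalled transfer as retryable, turn a reply carrying the wrong descriptor type byte into -ENODATA (also retried), and break out only on anything else. The toy loop below mirrors that control flow; fetch_once(), the retry count and the buffer contents are invented stand-ins for the real control transfer.

#include <errno.h>
#include <stdio.h>

#define RETRIES         3
#define DT_STRING       0x03

/* Stand-in for the control transfer: pretend the first replies carry the
 * wrong descriptor type byte and the last one is well formed. */
static int fetch_once(unsigned char *buf, int attempt)
{
        buf[1] = (attempt < 2) ? 0x00 : DT_STRING;
        return 4;                       /* bytes "transferred" */
}

static int get_string(unsigned char *buf)
{
        int i, result = -EPIPE;

        for (i = 0; i < RETRIES; i++) {
                result = fetch_once(buf, i);
                if (result == 0 || result == -EPIPE)
                        continue;       /* short or stalled: retry */
                if (result > 1 && buf[1] != DT_STRING) {
                        result = -ENODATA;
                        continue;       /* wrong descriptor type: retry */
                }
                break;                  /* success or a hard error */
        }
        return result;
}

int main(void)
{
        unsigned char buf[8] = { 0 };

        printf("get_string -> %d\n", get_string(buf));
        return 0;
}
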
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index 3219d137340a..e55fef52a5dc 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -191,6 +191,7 @@ config USB_GADGET_OMAP
191 boolean "OMAP USB Device Controller" 191 boolean "OMAP USB Device Controller"
192 depends on ARCH_OMAP 192 depends on ARCH_OMAP
193 select ISP1301_OMAP if MACH_OMAP_H2 || MACH_OMAP_H3 || MACH_OMAP_H4_OTG 193 select ISP1301_OMAP if MACH_OMAP_H2 || MACH_OMAP_H3 || MACH_OMAP_H4_OTG
194 select USB_OTG_UTILS if ARCH_OMAP
194 help 195 help
195 Many Texas Instruments OMAP processors have flexible full 196 Many Texas Instruments OMAP processors have flexible full
196 speed USB device controllers, with support for up to 30 197 speed USB device controllers, with support for up to 30
diff --git a/drivers/usb/gadget/f_obex.c b/drivers/usb/gadget/f_obex.c
index 80c2e7e9622f..38aa896cc5db 100644
--- a/drivers/usb/gadget/f_obex.c
+++ b/drivers/usb/gadget/f_obex.c
@@ -366,9 +366,9 @@ obex_bind(struct usb_configuration *c, struct usb_function *f)
366 f->hs_descriptors = usb_copy_descriptors(hs_function); 366 f->hs_descriptors = usb_copy_descriptors(hs_function);
367 367
368 obex->hs.obex_in = usb_find_endpoint(hs_function, 368 obex->hs.obex_in = usb_find_endpoint(hs_function,
369 f->descriptors, &obex_hs_ep_in_desc); 369 f->hs_descriptors, &obex_hs_ep_in_desc);
370 obex->hs.obex_out = usb_find_endpoint(hs_function, 370 obex->hs.obex_out = usb_find_endpoint(hs_function,
371 f->descriptors, &obex_hs_ep_out_desc); 371 f->hs_descriptors, &obex_hs_ep_out_desc);
372 } 372 }
373 373
374 /* Avoid letting this gadget enumerate until the userspace 374 /* Avoid letting this gadget enumerate until the userspace
diff --git a/drivers/usb/gadget/file_storage.c b/drivers/usb/gadget/file_storage.c
index b10fa31cc915..1ab9dac7e12d 100644
--- a/drivers/usb/gadget/file_storage.c
+++ b/drivers/usb/gadget/file_storage.c
@@ -3879,7 +3879,11 @@ static int __init check_parameters(struct fsg_dev *fsg)
3879 mod_data.protocol_type = USB_SC_SCSI; 3879 mod_data.protocol_type = USB_SC_SCSI;
3880 mod_data.protocol_name = "Transparent SCSI"; 3880 mod_data.protocol_name = "Transparent SCSI";
3881 3881
3882 if (gadget_is_sh(fsg->gadget)) 3882 /* Some peripheral controllers are known not to be able to
3883 * halt bulk endpoints correctly. If one of them is present,
3884 * disable stalls.
3885 */
3886 if (gadget_is_sh(fsg->gadget) || gadget_is_at91(fsg->gadget))
3883 mod_data.can_stall = 0; 3887 mod_data.can_stall = 0;
3884 3888
3885 if (mod_data.release == 0xffff) { // Parameter wasn't set 3889 if (mod_data.release == 0xffff) { // Parameter wasn't set
diff --git a/drivers/usb/gadget/fsl_usb2_udc.c b/drivers/usb/gadget/fsl_usb2_udc.c
index f3c6703cffda..d8d9a52a44b3 100644
--- a/drivers/usb/gadget/fsl_usb2_udc.c
+++ b/drivers/usb/gadget/fsl_usb2_udc.c
@@ -404,7 +404,10 @@ static void struct_ep_qh_setup(struct fsl_udc *udc, unsigned char ep_num,
404 } 404 }
405 if (zlt) 405 if (zlt)
406 tmp |= EP_QUEUE_HEAD_ZLT_SEL; 406 tmp |= EP_QUEUE_HEAD_ZLT_SEL;
407
407 p_QH->max_pkt_length = cpu_to_le32(tmp); 408 p_QH->max_pkt_length = cpu_to_le32(tmp);
409 p_QH->next_dtd_ptr = 1;
410 p_QH->size_ioc_int_sts = 0;
408 411
409 return; 412 return;
410} 413}
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index 4725d15d096f..e551bb38852b 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -485,6 +485,7 @@ static int ehci_init(struct usb_hcd *hcd)
485 * periodic_size can shrink by USBCMD update if hcc_params allows. 485 * periodic_size can shrink by USBCMD update if hcc_params allows.
486 */ 486 */
487 ehci->periodic_size = DEFAULT_I_TDPS; 487 ehci->periodic_size = DEFAULT_I_TDPS;
488 INIT_LIST_HEAD(&ehci->cached_itd_list);
488 if ((retval = ehci_mem_init(ehci, GFP_KERNEL)) < 0) 489 if ((retval = ehci_mem_init(ehci, GFP_KERNEL)) < 0)
489 return retval; 490 return retval;
490 491
@@ -497,6 +498,7 @@ static int ehci_init(struct usb_hcd *hcd)
497 498
498 ehci->reclaim = NULL; 499 ehci->reclaim = NULL;
499 ehci->next_uframe = -1; 500 ehci->next_uframe = -1;
501 ehci->clock_frame = -1;
500 502
501 /* 503 /*
502 * dedicate a qh for the async ring head, since we couldn't unlink 504 * dedicate a qh for the async ring head, since we couldn't unlink
diff --git a/drivers/usb/host/ehci-mem.c b/drivers/usb/host/ehci-mem.c
index 0431397836f6..10d52919abbb 100644
--- a/drivers/usb/host/ehci-mem.c
+++ b/drivers/usb/host/ehci-mem.c
@@ -128,6 +128,7 @@ static inline void qh_put (struct ehci_qh *qh)
128 128
129static void ehci_mem_cleanup (struct ehci_hcd *ehci) 129static void ehci_mem_cleanup (struct ehci_hcd *ehci)
130{ 130{
131 free_cached_itd_list(ehci);
131 if (ehci->async) 132 if (ehci->async)
132 qh_put (ehci->async); 133 qh_put (ehci->async);
133 ehci->async = NULL; 134 ehci->async = NULL;
diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c
index a081ee65bde6..07bcb931021b 100644
--- a/drivers/usb/host/ehci-sched.c
+++ b/drivers/usb/host/ehci-sched.c
@@ -1004,7 +1004,8 @@ iso_stream_put(struct ehci_hcd *ehci, struct ehci_iso_stream *stream)
1004 1004
1005 is_in = (stream->bEndpointAddress & USB_DIR_IN) ? 0x10 : 0; 1005 is_in = (stream->bEndpointAddress & USB_DIR_IN) ? 0x10 : 0;
1006 stream->bEndpointAddress &= 0x0f; 1006 stream->bEndpointAddress &= 0x0f;
1007 stream->ep->hcpriv = NULL; 1007 if (stream->ep)
1008 stream->ep->hcpriv = NULL;
1008 1009
1009 if (stream->rescheduled) { 1010 if (stream->rescheduled) {
1010 ehci_info (ehci, "ep%d%s-iso rescheduled " 1011 ehci_info (ehci, "ep%d%s-iso rescheduled "
@@ -1653,14 +1654,28 @@ itd_complete (
1653 (stream->bEndpointAddress & USB_DIR_IN) ? "in" : "out"); 1654 (stream->bEndpointAddress & USB_DIR_IN) ? "in" : "out");
1654 } 1655 }
1655 iso_stream_put (ehci, stream); 1656 iso_stream_put (ehci, stream);
1656 /* OK to recycle this ITD now that its completion callback ran. */ 1657
1657done: 1658done:
1658 usb_put_urb(urb); 1659 usb_put_urb(urb);
1659 itd->urb = NULL; 1660 itd->urb = NULL;
1660 itd->stream = NULL; 1661 if (ehci->clock_frame != itd->frame || itd->index[7] != -1) {
1661 list_move(&itd->itd_list, &stream->free_list); 1662 /* OK to recycle this ITD now. */
1662 iso_stream_put(ehci, stream); 1663 itd->stream = NULL;
1663 1664 list_move(&itd->itd_list, &stream->free_list);
1665 iso_stream_put(ehci, stream);
1666 } else {
1667 /* HW might remember this ITD, so we can't recycle it yet.
1668 * Move it to a safe place until a new frame starts.
1669 */
1670 list_move(&itd->itd_list, &ehci->cached_itd_list);
1671 if (stream->refcount == 2) {
1672 /* If iso_stream_put() were called here, stream
1673 * would be freed. Instead, just prevent reuse.
1674 */
1675 stream->ep->hcpriv = NULL;
1676 stream->ep = NULL;
1677 }
1678 }
1664 return retval; 1679 return retval;
1665} 1680}
1666 1681
@@ -2101,6 +2116,20 @@ done:
2101 2116
2102/*-------------------------------------------------------------------------*/ 2117/*-------------------------------------------------------------------------*/
2103 2118
2119static void free_cached_itd_list(struct ehci_hcd *ehci)
2120{
2121 struct ehci_itd *itd, *n;
2122
2123 list_for_each_entry_safe(itd, n, &ehci->cached_itd_list, itd_list) {
2124 struct ehci_iso_stream *stream = itd->stream;
2125 itd->stream = NULL;
2126 list_move(&itd->itd_list, &stream->free_list);
2127 iso_stream_put(ehci, stream);
2128 }
2129}
2130
2131/*-------------------------------------------------------------------------*/
2132
2104static void 2133static void
2105scan_periodic (struct ehci_hcd *ehci) 2134scan_periodic (struct ehci_hcd *ehci)
2106{ 2135{
@@ -2115,10 +2144,17 @@ scan_periodic (struct ehci_hcd *ehci)
2115 * Touches as few pages as possible: cache-friendly. 2144 * Touches as few pages as possible: cache-friendly.
2116 */ 2145 */
2117 now_uframe = ehci->next_uframe; 2146 now_uframe = ehci->next_uframe;
2118 if (HC_IS_RUNNING (ehci_to_hcd(ehci)->state)) 2147 if (HC_IS_RUNNING(ehci_to_hcd(ehci)->state)) {
2119 clock = ehci_readl(ehci, &ehci->regs->frame_index); 2148 clock = ehci_readl(ehci, &ehci->regs->frame_index);
2120 else 2149 clock_frame = (clock >> 3) % ehci->periodic_size;
2150 } else {
2121 clock = now_uframe + mod - 1; 2151 clock = now_uframe + mod - 1;
2152 clock_frame = -1;
2153 }
2154 if (ehci->clock_frame != clock_frame) {
2155 free_cached_itd_list(ehci);
2156 ehci->clock_frame = clock_frame;
2157 }
2122 clock %= mod; 2158 clock %= mod;
2123 clock_frame = clock >> 3; 2159 clock_frame = clock >> 3;
2124 2160
@@ -2277,6 +2313,10 @@ restart:
2277 /* rescan the rest of this frame, then ... */ 2313 /* rescan the rest of this frame, then ... */
2278 clock = now; 2314 clock = now;
2279 clock_frame = clock >> 3; 2315 clock_frame = clock >> 3;
2316 if (ehci->clock_frame != clock_frame) {
2317 free_cached_itd_list(ehci);
2318 ehci->clock_frame = clock_frame;
2319 }
2280 } else { 2320 } else {
2281 now_uframe++; 2321 now_uframe++;
2282 now_uframe %= mod; 2322 now_uframe %= mod;
diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h
index fb7054ccf4fc..262b00c9b334 100644
--- a/drivers/usb/host/ehci.h
+++ b/drivers/usb/host/ehci.h
@@ -87,6 +87,10 @@ struct ehci_hcd { /* one per controller */
87 int next_uframe; /* scan periodic, start here */ 87 int next_uframe; /* scan periodic, start here */
88 unsigned periodic_sched; /* periodic activity count */ 88 unsigned periodic_sched; /* periodic activity count */
89 89
90 /* list of itds completed while clock_frame was still active */
91 struct list_head cached_itd_list;
92 unsigned clock_frame;
93
90 /* per root hub port */ 94 /* per root hub port */
91 unsigned long reset_done [EHCI_MAX_ROOT_PORTS]; 95 unsigned long reset_done [EHCI_MAX_ROOT_PORTS];
92 96
@@ -220,6 +224,8 @@ timer_action (struct ehci_hcd *ehci, enum ehci_timer_action action)
220 } 224 }
221} 225}
222 226
227static void free_cached_itd_list(struct ehci_hcd *ehci);
228
223/*-------------------------------------------------------------------------*/ 229/*-------------------------------------------------------------------------*/
224 230
225#include <linux/usb/ehci_def.h> 231#include <linux/usb/ehci_def.h>
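
Taken together, the EHCI hunks above park an ITD whose frame the controller may still be reading on a new cached_itd_list and only recycle it from free_cached_itd_list() once clock_frame advances. The toy below models that defer-until-the-frame-moves idea; the desc struct and function names are made up for the sketch, and the real driver's additional itd->index[7] test is omitted.

#include <stdio.h>
#include <stdlib.h>

struct desc {
        int frame;                      /* frame this descriptor belongs to */
        struct desc *next;
};

static struct desc *cached;             /* completed but not yet reclaimable */
static int current_frame;

/* A descriptor finished in the frame the controller is still scanning is
 * parked rather than freed, so the hardware never reads recycled memory. */
static void complete_desc(struct desc *d)
{
        if (d->frame != current_frame) {
                free(d);
        } else {
                d->next = cached;
                cached = d;
        }
}

/* Once the frame counter moves on, everything parked becomes safe to free. */
static void frame_advanced(int new_frame)
{
        current_frame = new_frame;
        while (cached) {
                struct desc *d = cached;

                cached = d->next;
                free(d);
        }
}

int main(void)
{
        struct desc *d = malloc(sizeof(*d));

        current_frame = 7;
        d->frame = 7;
        complete_desc(d);
        printf("parked after completion: %s\n", cached ? "yes" : "no");
        frame_advanced(8);
        printf("parked after frame change: %s\n", cached ? "yes" : "no");
        return 0;
}
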
diff --git a/drivers/usb/musb/davinci.c b/drivers/usb/musb/davinci.c
index 5a8fd5d57a11..2dc7606f319c 100644
--- a/drivers/usb/musb/davinci.c
+++ b/drivers/usb/musb/davinci.c
@@ -377,18 +377,8 @@ int __init musb_platform_init(struct musb *musb)
377 u32 revision; 377 u32 revision;
378 378
379 musb->mregs += DAVINCI_BASE_OFFSET; 379 musb->mregs += DAVINCI_BASE_OFFSET;
380#if 0
381 /* REVISIT there's something odd about clocking, this
382 * didn't appear do the job ...
383 */
384 musb->clock = clk_get(pDevice, "usb");
385 if (IS_ERR(musb->clock))
386 return PTR_ERR(musb->clock);
387 380
388 status = clk_enable(musb->clock); 381 clk_enable(musb->clock);
389 if (status < 0)
390 return -ENODEV;
391#endif
392 382
393 /* returns zero if e.g. not clocked */ 383 /* returns zero if e.g. not clocked */
394 revision = musb_readl(tibase, DAVINCI_USB_VERSION_REG); 384 revision = musb_readl(tibase, DAVINCI_USB_VERSION_REG);
@@ -453,5 +443,8 @@ int musb_platform_exit(struct musb *musb)
453 } 443 }
454 444
455 phy_off(); 445 phy_off();
446
447 clk_disable(musb->clock);
448
456 return 0; 449 return 0;
457} 450}
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index 2cc34fa05b73..af77e4659006 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -115,7 +115,7 @@
115 115
116 116
117unsigned musb_debug; 117unsigned musb_debug;
118module_param(musb_debug, uint, S_IRUGO | S_IWUSR); 118module_param_named(debug, musb_debug, uint, S_IRUGO | S_IWUSR);
119MODULE_PARM_DESC(debug, "Debug message level. Default = 0"); 119MODULE_PARM_DESC(debug, "Debug message level. Default = 0");
120 120
121#define DRIVER_AUTHOR "Mentor Graphics, Texas Instruments, Nokia" 121#define DRIVER_AUTHOR "Mentor Graphics, Texas Instruments, Nokia"
@@ -767,6 +767,7 @@ static irqreturn_t musb_stage2_irq(struct musb *musb, u8 int_usb,
767#ifdef CONFIG_USB_MUSB_HDRC_HCD 767#ifdef CONFIG_USB_MUSB_HDRC_HCD
768 case OTG_STATE_A_HOST: 768 case OTG_STATE_A_HOST:
769 case OTG_STATE_A_SUSPEND: 769 case OTG_STATE_A_SUSPEND:
770 usb_hcd_resume_root_hub(musb_to_hcd(musb));
770 musb_root_disconnect(musb); 771 musb_root_disconnect(musb);
771 if (musb->a_wait_bcon != 0) 772 if (musb->a_wait_bcon != 0)
772 musb_platform_try_idle(musb, jiffies 773 musb_platform_try_idle(musb, jiffies
@@ -1815,7 +1816,7 @@ static void musb_free(struct musb *musb)
1815#ifdef CONFIG_SYSFS 1816#ifdef CONFIG_SYSFS
1816 device_remove_file(musb->controller, &dev_attr_mode); 1817 device_remove_file(musb->controller, &dev_attr_mode);
1817 device_remove_file(musb->controller, &dev_attr_vbus); 1818 device_remove_file(musb->controller, &dev_attr_vbus);
1818#ifdef CONFIG_USB_MUSB_OTG 1819#ifdef CONFIG_USB_GADGET_MUSB_HDRC
1819 device_remove_file(musb->controller, &dev_attr_srp); 1820 device_remove_file(musb->controller, &dev_attr_srp);
1820#endif 1821#endif
1821#endif 1822#endif
@@ -2063,7 +2064,7 @@ fail2:
2063#ifdef CONFIG_SYSFS 2064#ifdef CONFIG_SYSFS
2064 device_remove_file(musb->controller, &dev_attr_mode); 2065 device_remove_file(musb->controller, &dev_attr_mode);
2065 device_remove_file(musb->controller, &dev_attr_vbus); 2066 device_remove_file(musb->controller, &dev_attr_vbus);
2066#ifdef CONFIG_USB_MUSB_OTG 2067#ifdef CONFIG_USB_GADGET_MUSB_HDRC
2067 device_remove_file(musb->controller, &dev_attr_srp); 2068 device_remove_file(musb->controller, &dev_attr_srp);
2068#endif 2069#endif
2069#endif 2070#endif
@@ -2243,10 +2244,10 @@ static int __init musb_init(void)
2243 return platform_driver_probe(&musb_driver, musb_probe); 2244 return platform_driver_probe(&musb_driver, musb_probe);
2244} 2245}
2245 2246
2246/* make us init after usbcore and before usb 2247/* make us init after usbcore and i2c (transceivers, regulators, etc)
2247 * gadget and host-side drivers start to register 2248 * and before usb gadget and host-side drivers start to register
2248 */ 2249 */
2249subsys_initcall(musb_init); 2250fs_initcall(musb_init);
2250 2251
2251static void __exit musb_cleanup(void) 2252static void __exit musb_cleanup(void)
2252{ 2253{
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index 4ea305387981..c7ebd0867fcc 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -575,7 +575,7 @@ static void rxstate(struct musb *musb, struct musb_request *req)
575 struct usb_request *request = &req->request; 575 struct usb_request *request = &req->request;
576 struct musb_ep *musb_ep = &musb->endpoints[epnum].ep_out; 576 struct musb_ep *musb_ep = &musb->endpoints[epnum].ep_out;
577 void __iomem *epio = musb->endpoints[epnum].regs; 577 void __iomem *epio = musb->endpoints[epnum].regs;
578 u16 fifo_count = 0; 578 unsigned fifo_count = 0;
579 u16 len = musb_ep->packet_sz; 579 u16 len = musb_ep->packet_sz;
580 580
581 csr = musb_readw(epio, MUSB_RXCSR); 581 csr = musb_readw(epio, MUSB_RXCSR);
@@ -687,7 +687,7 @@ static void rxstate(struct musb *musb, struct musb_request *req)
687 len, fifo_count, 687 len, fifo_count,
688 musb_ep->packet_sz); 688 musb_ep->packet_sz);
689 689
690 fifo_count = min(len, fifo_count); 690 fifo_count = min_t(unsigned, len, fifo_count);
691 691
692#ifdef CONFIG_USB_TUSB_OMAP_DMA 692#ifdef CONFIG_USB_TUSB_OMAP_DMA
693 if (tusb_dma_omap() && musb_ep->dma) { 693 if (tusb_dma_omap() && musb_ep->dma) {
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index a035ceccf950..6dbbd0786a6a 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -335,16 +335,11 @@ musb_save_toggle(struct musb_hw_ep *ep, int is_in, struct urb *urb)
335static struct musb_qh * 335static struct musb_qh *
336musb_giveback(struct musb_qh *qh, struct urb *urb, int status) 336musb_giveback(struct musb_qh *qh, struct urb *urb, int status)
337{ 337{
338 int is_in;
339 struct musb_hw_ep *ep = qh->hw_ep; 338 struct musb_hw_ep *ep = qh->hw_ep;
340 struct musb *musb = ep->musb; 339 struct musb *musb = ep->musb;
340 int is_in = usb_pipein(urb->pipe);
341 int ready = qh->is_ready; 341 int ready = qh->is_ready;
342 342
343 if (ep->is_shared_fifo)
344 is_in = 1;
345 else
346 is_in = usb_pipein(urb->pipe);
347
348 /* save toggle eagerly, for paranoia */ 343 /* save toggle eagerly, for paranoia */
349 switch (qh->type) { 344 switch (qh->type) {
350 case USB_ENDPOINT_XFER_BULK: 345 case USB_ENDPOINT_XFER_BULK:
@@ -432,7 +427,7 @@ musb_advance_schedule(struct musb *musb, struct urb *urb,
432 else 427 else
433 qh = musb_giveback(qh, urb, urb->status); 428 qh = musb_giveback(qh, urb, urb->status);
434 429
435 if (qh && qh->is_ready && !list_empty(&qh->hep->urb_list)) { 430 if (qh != NULL && qh->is_ready) {
436 DBG(4, "... next ep%d %cX urb %p\n", 431 DBG(4, "... next ep%d %cX urb %p\n",
437 hw_ep->epnum, is_in ? 'R' : 'T', 432 hw_ep->epnum, is_in ? 'R' : 'T',
438 next_urb(qh)); 433 next_urb(qh));
@@ -942,8 +937,8 @@ static bool musb_h_ep0_continue(struct musb *musb, u16 len, struct urb *urb)
942 switch (musb->ep0_stage) { 937 switch (musb->ep0_stage) {
943 case MUSB_EP0_IN: 938 case MUSB_EP0_IN:
944 fifo_dest = urb->transfer_buffer + urb->actual_length; 939 fifo_dest = urb->transfer_buffer + urb->actual_length;
945 fifo_count = min(len, ((u16) (urb->transfer_buffer_length 940 fifo_count = min_t(size_t, len, urb->transfer_buffer_length -
946 - urb->actual_length))); 941 urb->actual_length);
947 if (fifo_count < len) 942 if (fifo_count < len)
948 urb->status = -EOVERFLOW; 943 urb->status = -EOVERFLOW;
949 944
@@ -976,10 +971,9 @@ static bool musb_h_ep0_continue(struct musb *musb, u16 len, struct urb *urb)
976 } 971 }
977 /* FALLTHROUGH */ 972 /* FALLTHROUGH */
978 case MUSB_EP0_OUT: 973 case MUSB_EP0_OUT:
979 fifo_count = min(qh->maxpacket, ((u16) 974 fifo_count = min_t(size_t, qh->maxpacket,
980 (urb->transfer_buffer_length 975 urb->transfer_buffer_length -
981 - urb->actual_length))); 976 urb->actual_length);
982
983 if (fifo_count) { 977 if (fifo_count) {
984 fifo_dest = (u8 *) (urb->transfer_buffer 978 fifo_dest = (u8 *) (urb->transfer_buffer
985 + urb->actual_length); 979 + urb->actual_length);
@@ -1161,7 +1155,8 @@ void musb_host_tx(struct musb *musb, u8 epnum)
1161 struct urb *urb; 1155 struct urb *urb;
1162 struct musb_hw_ep *hw_ep = musb->endpoints + epnum; 1156 struct musb_hw_ep *hw_ep = musb->endpoints + epnum;
1163 void __iomem *epio = hw_ep->regs; 1157 void __iomem *epio = hw_ep->regs;
1164 struct musb_qh *qh = hw_ep->out_qh; 1158 struct musb_qh *qh = hw_ep->is_shared_fifo ? hw_ep->in_qh
1159 : hw_ep->out_qh;
1165 u32 status = 0; 1160 u32 status = 0;
1166 void __iomem *mbase = musb->mregs; 1161 void __iomem *mbase = musb->mregs;
1167 struct dma_channel *dma; 1162 struct dma_channel *dma;
@@ -1308,7 +1303,8 @@ void musb_host_tx(struct musb *musb, u8 epnum)
1308 * packets before updating TXCSR ... other docs disagree ... 1303 * packets before updating TXCSR ... other docs disagree ...
1309 */ 1304 */
1310 /* PIO: start next packet in this URB */ 1305 /* PIO: start next packet in this URB */
1311 wLength = min(qh->maxpacket, (u16) wLength); 1306 if (wLength > qh->maxpacket)
1307 wLength = qh->maxpacket;
1312 musb_write_fifo(hw_ep, wLength, buf); 1308 musb_write_fifo(hw_ep, wLength, buf);
1313 qh->segsize = wLength; 1309 qh->segsize = wLength;
1314 1310
@@ -1867,19 +1863,21 @@ static int musb_urb_enqueue(
1867 } 1863 }
1868 qh->type_reg = type_reg; 1864 qh->type_reg = type_reg;
1869 1865
1870 /* precompute rxinterval/txinterval register */ 1866 /* Precompute RXINTERVAL/TXINTERVAL register */
1871 interval = min((u8)16, epd->bInterval); /* log encoding */
1872 switch (qh->type) { 1867 switch (qh->type) {
1873 case USB_ENDPOINT_XFER_INT: 1868 case USB_ENDPOINT_XFER_INT:
1874 /* fullspeed uses linear encoding */ 1869 /*
1875 if (USB_SPEED_FULL == urb->dev->speed) { 1870 * Full/low speeds use the linear encoding,
1876 interval = epd->bInterval; 1871 * high speed uses the logarithmic encoding.
1877 if (!interval) 1872 */
1878 interval = 1; 1873 if (urb->dev->speed <= USB_SPEED_FULL) {
1874 interval = max_t(u8, epd->bInterval, 1);
1875 break;
1879 } 1876 }
1880 /* FALLTHROUGH */ 1877 /* FALLTHROUGH */
1881 case USB_ENDPOINT_XFER_ISOC: 1878 case USB_ENDPOINT_XFER_ISOC:
1882 /* iso always uses log encoding */ 1879 /* ISO always uses logarithmic encoding */
1880 interval = min_t(u8, epd->bInterval, 16);
1883 break; 1881 break;
1884 default: 1882 default:
1885 /* REVISIT we actually want to use NAK limits, hinting to the 1883 /* REVISIT we actually want to use NAK limits, hinting to the
@@ -2037,9 +2035,9 @@ static int musb_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
2037 goto done; 2035 goto done;
2038 2036
2039 /* Any URB not actively programmed into endpoint hardware can be 2037 /* Any URB not actively programmed into endpoint hardware can be
2040 * immediately given back. Such an URB must be at the head of its 2038 * immediately given back; that's any URB not at the head of an
2041 * endpoint queue, unless someday we get real DMA queues. And even 2039 * endpoint queue, unless someday we get real DMA queues. And even
2042 * then, it might not be known to the hardware... 2040 * if it's at the head, it might not be known to the hardware...
2043 * 2041 *
2044 * Otherwise abort current transfer, pending dma, etc.; urb->status 2042 * Otherwise abort current transfer, pending dma, etc.; urb->status
2045 * has already been updated. This is a synchronous abort; it'd be 2043 * has already been updated. This is a synchronous abort; it'd be
@@ -2078,6 +2076,15 @@ static int musb_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
2078 qh->is_ready = 0; 2076 qh->is_ready = 0;
2079 __musb_giveback(musb, urb, 0); 2077 __musb_giveback(musb, urb, 0);
2080 qh->is_ready = ready; 2078 qh->is_ready = ready;
2079
2080 /* If nothing else (usually musb_giveback) is using it
2081 * and its URB list has emptied, recycle this qh.
2082 */
2083 if (ready && list_empty(&qh->hep->urb_list)) {
2084 qh->hep->hcpriv = NULL;
2085 list_del(&qh->ring);
2086 kfree(qh);
2087 }
2081 } else 2088 } else
2082 ret = musb_cleanup_urb(urb, qh, urb->pipe & USB_DIR_IN); 2089 ret = musb_cleanup_urb(urb, qh, urb->pipe & USB_DIR_IN);
2083done: 2090done:
@@ -2093,15 +2100,16 @@ musb_h_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)
2093 unsigned long flags; 2100 unsigned long flags;
2094 struct musb *musb = hcd_to_musb(hcd); 2101 struct musb *musb = hcd_to_musb(hcd);
2095 u8 is_in = epnum & USB_DIR_IN; 2102 u8 is_in = epnum & USB_DIR_IN;
2096 struct musb_qh *qh = hep->hcpriv; 2103 struct musb_qh *qh;
2097 struct urb *urb, *tmp; 2104 struct urb *urb;
2098 struct list_head *sched; 2105 struct list_head *sched;
2099 2106
2100 if (!qh)
2101 return;
2102
2103 spin_lock_irqsave(&musb->lock, flags); 2107 spin_lock_irqsave(&musb->lock, flags);
2104 2108
2109 qh = hep->hcpriv;
2110 if (qh == NULL)
2111 goto exit;
2112
2105 switch (qh->type) { 2113 switch (qh->type) {
2106 case USB_ENDPOINT_XFER_CONTROL: 2114 case USB_ENDPOINT_XFER_CONTROL:
2107 sched = &musb->control; 2115 sched = &musb->control;
@@ -2135,13 +2143,28 @@ musb_h_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)
2135 2143
2136 /* cleanup */ 2144 /* cleanup */
2137 musb_cleanup_urb(urb, qh, urb->pipe & USB_DIR_IN); 2145 musb_cleanup_urb(urb, qh, urb->pipe & USB_DIR_IN);
2138 } else
2139 urb = NULL;
2140 2146
2141 /* then just nuke all the others */ 2147 /* Then nuke all the others ... and advance the
2142 list_for_each_entry_safe_from(urb, tmp, &hep->urb_list, urb_list) 2148 * queue on hw_ep (e.g. bulk ring) when we're done.
2143 musb_giveback(qh, urb, -ESHUTDOWN); 2149 */
2150 while (!list_empty(&hep->urb_list)) {
2151 urb = next_urb(qh);
2152 urb->status = -ESHUTDOWN;
2153 musb_advance_schedule(musb, urb, qh->hw_ep, is_in);
2154 }
2155 } else {
2156 /* Just empty the queue; the hardware is busy with
2157 * other transfers, and since !qh->is_ready nothing
2158 * will activate any of these as it advances.
2159 */
2160 while (!list_empty(&hep->urb_list))
2161 __musb_giveback(musb, next_urb(qh), -ESHUTDOWN);
2144 2162
2163 hep->hcpriv = NULL;
2164 list_del(&qh->ring);
2165 kfree(qh);
2166 }
2167exit:
2145 spin_unlock_irqrestore(&musb->lock, flags); 2168 spin_unlock_irqrestore(&musb->lock, flags);
2146} 2169}
2147 2170
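The musb_host.c hunks above all turn on the same rule: a struct musb_qh (the driver's per-endpoint bookkeeping) may be freed only with musb->lock held, only once the endpoint's URB list is empty, and always together with clearing hep->hcpriv and unlinking the qh from its schedule ring so nothing can reach it afterwards. Below is a condensed sketch of that teardown order, reusing the calls the patch itself uses; the in_use argument stands in for the qh->is_ready/"ready" bookkeeping and is an assumption, not a musb field.

    /* Sketch only: the caller holds musb->lock, as the patched
     * musb_urb_dequeue() and musb_h_disable() paths do.
     */
    static void maybe_recycle_qh(struct usb_host_endpoint *hep,
                                 struct musb_qh *qh, bool in_use)
    {
        if (in_use || !list_empty(&hep->urb_list))
            return;                 /* still busy, or another user holds it */

        hep->hcpriv = NULL;         /* the next submit allocates a fresh qh */
        list_del(&qh->ring);        /* unhook it from the hardware schedule */
        kfree(qh);
    }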
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index bfd0b68ceccd..b7c132bded7f 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -294,7 +294,11 @@ static int option_send_setup(struct tty_struct *tty, struct usb_serial_port *po
294 294
295/* Ericsson products */ 295/* Ericsson products */
296#define ERICSSON_VENDOR_ID 0x0bdb 296#define ERICSSON_VENDOR_ID 0x0bdb
297#define ERICSSON_PRODUCT_F3507G 0x1900 297#define ERICSSON_PRODUCT_F3507G_1 0x1900
298#define ERICSSON_PRODUCT_F3507G_2 0x1902
299
300#define BENQ_VENDOR_ID 0x04a5
301#define BENQ_PRODUCT_H10 0x4068
298 302
299static struct usb_device_id option_ids[] = { 303static struct usb_device_id option_ids[] = {
300 { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) }, 304 { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
@@ -509,7 +513,10 @@ static struct usb_device_id option_ids[] = {
509 { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_MF626) }, 513 { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_MF626) },
510 { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_MF628) }, 514 { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_MF628) },
511 { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_CDMA_TECH) }, 515 { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_CDMA_TECH) },
512 { USB_DEVICE(ERICSSON_VENDOR_ID, ERICSSON_PRODUCT_F3507G) }, 516 { USB_DEVICE(ERICSSON_VENDOR_ID, ERICSSON_PRODUCT_F3507G_1) },
517 { USB_DEVICE(ERICSSON_VENDOR_ID, ERICSSON_PRODUCT_F3507G_2) },
518 { USB_DEVICE(BENQ_VENDOR_ID, BENQ_PRODUCT_H10) },
519 { USB_DEVICE(0x1da5, 0x4515) }, /* BenQ H20 */
513 { } /* Terminating entry */ 520 { } /* Terminating entry */
514}; 521};
515MODULE_DEVICE_TABLE(usb, option_ids); 522MODULE_DEVICE_TABLE(usb, option_ids);
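The option.c change follows the stock usb-serial ID-table recipe: define the vendor/product constants, add USB_DEVICE() entries (bare hex IDs are also accepted, as the BenQ H20 line shows), and keep the empty terminating entry so MODULE_DEVICE_TABLE() can export the list for hotplug matching. A minimal, self-contained sketch of that recipe with placeholder IDs, not the devices added above:

    #include <linux/module.h>
    #include <linux/usb.h>

    #define EXAMPLE_VENDOR_ID   0x1234      /* placeholders, not real devices */
    #define EXAMPLE_PRODUCT_ID  0x5678

    static const struct usb_device_id example_ids[] = {
        { USB_DEVICE(EXAMPLE_VENDOR_ID, EXAMPLE_PRODUCT_ID) },
        { USB_DEVICE(0xabcd, 0x0001) },     /* bare numeric IDs also work */
        { }                                 /* terminating entry */
    };
    MODULE_DEVICE_TABLE(usb, example_ids);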
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index 50dc33a6065b..6f59c8e510ea 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -907,13 +907,13 @@ UNUSUAL_DEV( 0x05e3, 0x0701, 0x0000, 0xffff,
907 "Genesys Logic", 907 "Genesys Logic",
908 "USB to IDE Optical", 908 "USB to IDE Optical",
909 US_SC_DEVICE, US_PR_DEVICE, NULL, 909 US_SC_DEVICE, US_PR_DEVICE, NULL,
910 US_FL_GO_SLOW | US_FL_MAX_SECTORS_64 ), 910 US_FL_GO_SLOW | US_FL_MAX_SECTORS_64 | US_FL_IGNORE_RESIDUE ),
911 911
912UNUSUAL_DEV( 0x05e3, 0x0702, 0x0000, 0xffff, 912UNUSUAL_DEV( 0x05e3, 0x0702, 0x0000, 0xffff,
913 "Genesys Logic", 913 "Genesys Logic",
914 "USB to IDE Disk", 914 "USB to IDE Disk",
915 US_SC_DEVICE, US_PR_DEVICE, NULL, 915 US_SC_DEVICE, US_PR_DEVICE, NULL,
916 US_FL_GO_SLOW | US_FL_MAX_SECTORS_64 ), 916 US_FL_GO_SLOW | US_FL_MAX_SECTORS_64 | US_FL_IGNORE_RESIDUE ),
917 917
918/* Reported by Ben Efros <ben@pc-doctor.com> */ 918/* Reported by Ben Efros <ben@pc-doctor.com> */
919UNUSUAL_DEV( 0x05e3, 0x0723, 0x9451, 0x9451, 919UNUSUAL_DEV( 0x05e3, 0x0723, 0x9451, 0x9451,
diff --git a/drivers/w1/slaves/Kconfig b/drivers/w1/slaves/Kconfig
index 8d0b1fb1e52e..1f51366417b9 100644
--- a/drivers/w1/slaves/Kconfig
+++ b/drivers/w1/slaves/Kconfig
@@ -16,6 +16,12 @@ config W1_SLAVE_SMEM
16 Say Y here if you want to connect 1-wire 16 Say Y here if you want to connect 1-wire
17 simple 64bit memory rom(ds2401/ds2411/ds1990*) to your wire. 17 simple 64bit memory rom(ds2401/ds2411/ds1990*) to your wire.
18 18
19config W1_SLAVE_DS2431
20 tristate "1kb EEPROM family support (DS2431)"
21 help
22 Say Y here if you want to use a 1-wire
23 1kb EEPROM family device (DS2431)
24
19config W1_SLAVE_DS2433 25config W1_SLAVE_DS2433
20 tristate "4kb EEPROM family support (DS2433)" 26 tristate "4kb EEPROM family support (DS2433)"
21 help 27 help
diff --git a/drivers/w1/slaves/Makefile b/drivers/w1/slaves/Makefile
index 990f400b6d22..f1f51f19b129 100644
--- a/drivers/w1/slaves/Makefile
+++ b/drivers/w1/slaves/Makefile
@@ -4,6 +4,7 @@
4 4
5obj-$(CONFIG_W1_SLAVE_THERM) += w1_therm.o 5obj-$(CONFIG_W1_SLAVE_THERM) += w1_therm.o
6obj-$(CONFIG_W1_SLAVE_SMEM) += w1_smem.o 6obj-$(CONFIG_W1_SLAVE_SMEM) += w1_smem.o
7obj-$(CONFIG_W1_SLAVE_DS2431) += w1_ds2431.o
7obj-$(CONFIG_W1_SLAVE_DS2433) += w1_ds2433.o 8obj-$(CONFIG_W1_SLAVE_DS2433) += w1_ds2433.o
8obj-$(CONFIG_W1_SLAVE_DS2760) += w1_ds2760.o 9obj-$(CONFIG_W1_SLAVE_DS2760) += w1_ds2760.o
9obj-$(CONFIG_W1_SLAVE_BQ27000) += w1_bq27000.o 10obj-$(CONFIG_W1_SLAVE_BQ27000) += w1_bq27000.o
diff --git a/drivers/w1/slaves/w1_ds2433.c b/drivers/w1/slaves/w1_ds2433.c
index 858c16a544c2..139447148822 100644
--- a/drivers/w1/slaves/w1_ds2433.c
+++ b/drivers/w1/slaves/w1_ds2433.c
@@ -156,6 +156,9 @@ out_up:
156 */ 156 */
157static int w1_f23_write(struct w1_slave *sl, int addr, int len, const u8 *data) 157static int w1_f23_write(struct w1_slave *sl, int addr, int len, const u8 *data)
158{ 158{
159#ifdef CONFIG_W1_SLAVE_DS2433_CRC
160 struct w1_f23_data *f23 = sl->family_data;
161#endif
159 u8 wrbuf[4]; 162 u8 wrbuf[4];
160 u8 rdbuf[W1_PAGE_SIZE + 3]; 163 u8 rdbuf[W1_PAGE_SIZE + 3];
161 u8 es = (addr + len - 1) & 0x1f; 164 u8 es = (addr + len - 1) & 0x1f;
@@ -196,7 +199,9 @@ static int w1_f23_write(struct w1_slave *sl, int addr, int len, const u8 *data)
196 199
197 /* Reset the bus to wake up the EEPROM (this may not be needed) */ 200 /* Reset the bus to wake up the EEPROM (this may not be needed) */
198 w1_reset_bus(sl->master); 201 w1_reset_bus(sl->master);
199 202#ifdef CONFIG_W1_SLAVE_DS2433_CRC
203 f23->validcrc &= ~(1 << (addr >> W1_PAGE_BITS));
204#endif
200 return 0; 205 return 0;
201} 206}
202 207
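The w1_ds2433.c hunk clears one bit of a per-page "CRC already verified" bitmap after a write, so the next read re-validates that page; the page index is simply the byte address shifted right by W1_PAGE_BITS. A small user-space model of that bookkeeping (the 32-byte page size is inferred from the driver's addr & 0x1f math; the names are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_BITS 5     /* 32-byte pages, matching the (addr & 0x1f) mask */

    struct eeprom_cache {
        uint32_t validcrc;  /* bit n set => page n's CRC has been verified */
    };

    /* a write dirties exactly one page, so re-check it on the next read */
    static void invalidate_page(struct eeprom_cache *c, unsigned int addr)
    {
        c->validcrc &= ~(1u << (addr >> PAGE_BITS));
    }

    int main(void)
    {
        struct eeprom_cache c = { .validcrc = 0xffffffffu };

        invalidate_page(&c, 0x47);              /* byte 0x47 sits in page 2 */
        printf("validcrc = %#x\n", c.validcrc); /* 0xfffffffb: bit 2 cleared */
        return 0;
    }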
diff --git a/fs/Makefile b/fs/Makefile
index 38bc735c67ad..dc20db348679 100644
--- a/fs/Makefile
+++ b/fs/Makefile
@@ -69,10 +69,12 @@ obj-$(CONFIG_DLM) += dlm/
69# Do not add any filesystems before this line 69# Do not add any filesystems before this line
70obj-$(CONFIG_REISERFS_FS) += reiserfs/ 70obj-$(CONFIG_REISERFS_FS) += reiserfs/
71obj-$(CONFIG_EXT3_FS) += ext3/ # Before ext2 so root fs can be ext3 71obj-$(CONFIG_EXT3_FS) += ext3/ # Before ext2 so root fs can be ext3
72obj-$(CONFIG_EXT4_FS) += ext4/ # Before ext2 so root fs can be ext4 72obj-$(CONFIG_EXT2_FS) += ext2/
73# We place ext4 after ext2 so plain ext2 root fs's are mounted using ext2
74# unless explicitly requested by rootfstype
75obj-$(CONFIG_EXT4_FS) += ext4/
73obj-$(CONFIG_JBD) += jbd/ 76obj-$(CONFIG_JBD) += jbd/
74obj-$(CONFIG_JBD2) += jbd2/ 77obj-$(CONFIG_JBD2) += jbd2/
75obj-$(CONFIG_EXT2_FS) += ext2/
76obj-$(CONFIG_CRAMFS) += cramfs/ 78obj-$(CONFIG_CRAMFS) += cramfs/
77obj-$(CONFIG_SQUASHFS) += squashfs/ 79obj-$(CONFIG_SQUASHFS) += squashfs/
78obj-y += ramfs/ 80obj-y += ramfs/
diff --git a/fs/bio.c b/fs/bio.c
index 72ab251cdb9c..124b95c4d582 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -302,7 +302,7 @@ void bio_init(struct bio *bio)
302struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs) 302struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)
303{ 303{
304 struct bio *bio = NULL; 304 struct bio *bio = NULL;
305 void *p; 305 void *uninitialized_var(p);
306 306
307 if (bs) { 307 if (bs) {
308 p = mempool_alloc(bs->bio_pool, gfp_mask); 308 p = mempool_alloc(bs->bio_pool, gfp_mask);
diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h
index a8c9693b75ac..72677ce2b74f 100644
--- a/fs/btrfs/btrfs_inode.h
+++ b/fs/btrfs/btrfs_inode.h
@@ -66,6 +66,9 @@ struct btrfs_inode {
66 */ 66 */
67 struct list_head delalloc_inodes; 67 struct list_head delalloc_inodes;
68 68
69 /* the space_info for where this inode's data allocations are done */
70 struct btrfs_space_info *space_info;
71
69 /* full 64 bit generation number, struct vfs_inode doesn't have a big 72 /* full 64 bit generation number, struct vfs_inode doesn't have a big
70 * enough field for this. 73 * enough field for this.
71 */ 74 */
@@ -94,6 +97,11 @@ struct btrfs_inode {
94 */ 97 */
95 u64 delalloc_bytes; 98 u64 delalloc_bytes;
96 99
100 /* total number of bytes that may be used for this inode for
101 * delalloc
102 */
103 u64 reserved_bytes;
104
97 /* 105 /*
98 * the size of the file stored in the metadata on disk. data=ordered 106 * the size of the file stored in the metadata on disk. data=ordered
99 * means the in-memory i_size might be larger than the size on disk 107 * means the in-memory i_size might be larger than the size on disk
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 766b31ae3186..82491ba8fa40 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -596,13 +596,27 @@ struct btrfs_block_group_item {
596 596
597struct btrfs_space_info { 597struct btrfs_space_info {
598 u64 flags; 598 u64 flags;
599 u64 total_bytes; 599
600 u64 bytes_used; 600 u64 total_bytes; /* total bytes in the space */
601 u64 bytes_pinned; 601 u64 bytes_used; /* total bytes used on disk */
602 u64 bytes_reserved; 602 u64 bytes_pinned; /* total bytes pinned, will be freed when the
603 u64 bytes_readonly; 603 transaction finishes */
604 int full; 604 u64 bytes_reserved; /* total bytes the allocator has reserved for
605 int force_alloc; 605 current allocations */
606 u64 bytes_readonly; /* total bytes that are read only */
607
608 /* delalloc accounting */
609 u64 bytes_delalloc; /* number of bytes reserved for allocation,
610 this space is not necessarily reserved yet
611 by the allocator */
612 u64 bytes_may_use; /* number of bytes that may be used for
613 delalloc */
614
615 int full; /* indicates that we cannot allocate any more
616 chunks for this space */
617 int force_alloc; /* set if we need to force a chunk alloc for
618 this space */
619
606 struct list_head list; 620 struct list_head list;
607 621
608 /* for block groups in our same type */ 622 /* for block groups in our same type */
@@ -1782,6 +1796,16 @@ int btrfs_add_dead_reloc_root(struct btrfs_root *root);
1782int btrfs_cleanup_reloc_trees(struct btrfs_root *root); 1796int btrfs_cleanup_reloc_trees(struct btrfs_root *root);
1783int btrfs_reloc_clone_csums(struct inode *inode, u64 file_pos, u64 len); 1797int btrfs_reloc_clone_csums(struct inode *inode, u64 file_pos, u64 len);
1784u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags); 1798u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags);
1799void btrfs_set_inode_space_info(struct btrfs_root *root, struct inode *inode);
1800int btrfs_check_metadata_free_space(struct btrfs_root *root);
1801int btrfs_check_data_free_space(struct btrfs_root *root, struct inode *inode,
1802 u64 bytes);
1803void btrfs_free_reserved_data_space(struct btrfs_root *root,
1804 struct inode *inode, u64 bytes);
1805void btrfs_delalloc_reserve_space(struct btrfs_root *root, struct inode *inode,
1806 u64 bytes);
1807void btrfs_delalloc_free_space(struct btrfs_root *root, struct inode *inode,
1808 u64 bytes);
1785/* ctree.c */ 1809/* ctree.c */
1786int btrfs_previous_item(struct btrfs_root *root, 1810int btrfs_previous_item(struct btrfs_root *root,
1787 struct btrfs_path *path, u64 min_objectid, 1811 struct btrfs_path *path, u64 min_objectid,
@@ -2027,8 +2051,6 @@ int btrfs_merge_bio_hook(struct page *page, unsigned long offset,
2027unsigned long btrfs_force_ra(struct address_space *mapping, 2051unsigned long btrfs_force_ra(struct address_space *mapping,
2028 struct file_ra_state *ra, struct file *file, 2052 struct file_ra_state *ra, struct file *file,
2029 pgoff_t offset, pgoff_t last_index); 2053 pgoff_t offset, pgoff_t last_index);
2030int btrfs_check_free_space(struct btrfs_root *root, u64 num_required,
2031 int for_del);
2032int btrfs_page_mkwrite(struct vm_area_struct *vma, struct page *page); 2054int btrfs_page_mkwrite(struct vm_area_struct *vma, struct page *page);
2033int btrfs_readpage(struct file *file, struct page *page); 2055int btrfs_readpage(struct file *file, struct page *page);
2034void btrfs_delete_inode(struct inode *inode); 2056void btrfs_delete_inode(struct inode *inode);
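The reworked btrfs_space_info only pays off because every allocation check reduces to one subtraction: a request fits when total_bytes minus everything already spoken for (used, delalloc, reserved, pinned, readonly, may_use) still covers it, and metadata is additionally throttled at 80% of the space (95% once the space is full), as the extent-tree.c hunks below show. A user-space model of both checks; the struct and helper names here are illustrative, not the kernel's:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct space_info {
        uint64_t total_bytes, bytes_used, bytes_pinned, bytes_reserved;
        uint64_t bytes_readonly, bytes_delalloc, bytes_may_use;
        bool full;
    };

    /* mirrors the check at the top of btrfs_check_data_free_space() */
    static bool data_space_available(const struct space_info *s, uint64_t bytes)
    {
        return s->total_bytes - s->bytes_used - s->bytes_delalloc -
               s->bytes_reserved - s->bytes_pinned - s->bytes_readonly -
               s->bytes_may_use >= bytes;
    }

    /* mirrors the 80%/95% threshold in btrfs_check_metadata_free_space() */
    static bool metadata_space_ok(const struct space_info *s)
    {
        uint64_t thresh = s->total_bytes * (s->full ? 95 : 80) / 100;

        return s->bytes_used + s->bytes_reserved + s->bytes_pinned +
               s->bytes_readonly <= thresh;
    }

    int main(void)
    {
        struct space_info s = { .total_bytes = 100 << 20, .bytes_used = 70 << 20 };

        printf("4 MiB data write fits: %d\n", data_space_available(&s, 4 << 20));
        printf("metadata below threshold: %d\n", metadata_space_ok(&s));
        return 0;
    }

Splitting the accounting this way lets data writes be refused per space_info rather than by the old global 85%-full heuristic that inode.c deletes further down.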
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 0a5d796c9f7e..6b5966aacf44 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -60,6 +60,10 @@ static int update_block_group(struct btrfs_trans_handle *trans,
60 u64 bytenr, u64 num_bytes, int alloc, 60 u64 bytenr, u64 num_bytes, int alloc,
61 int mark_free); 61 int mark_free);
62 62
63static int do_chunk_alloc(struct btrfs_trans_handle *trans,
64 struct btrfs_root *extent_root, u64 alloc_bytes,
65 u64 flags, int force);
66
63static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits) 67static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
64{ 68{
65 return (cache->flags & bits) == bits; 69 return (cache->flags & bits) == bits;
@@ -1909,6 +1913,7 @@ static int update_space_info(struct btrfs_fs_info *info, u64 flags,
1909 found->bytes_pinned = 0; 1913 found->bytes_pinned = 0;
1910 found->bytes_reserved = 0; 1914 found->bytes_reserved = 0;
1911 found->bytes_readonly = 0; 1915 found->bytes_readonly = 0;
1916 found->bytes_delalloc = 0;
1912 found->full = 0; 1917 found->full = 0;
1913 found->force_alloc = 0; 1918 found->force_alloc = 0;
1914 *space_info = found; 1919 *space_info = found;
@@ -1972,6 +1977,233 @@ u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
1972 return flags; 1977 return flags;
1973} 1978}
1974 1979
1980static u64 btrfs_get_alloc_profile(struct btrfs_root *root, u64 data)
1981{
1982 struct btrfs_fs_info *info = root->fs_info;
1983 u64 alloc_profile;
1984
1985 if (data) {
1986 alloc_profile = info->avail_data_alloc_bits &
1987 info->data_alloc_profile;
1988 data = BTRFS_BLOCK_GROUP_DATA | alloc_profile;
1989 } else if (root == root->fs_info->chunk_root) {
1990 alloc_profile = info->avail_system_alloc_bits &
1991 info->system_alloc_profile;
1992 data = BTRFS_BLOCK_GROUP_SYSTEM | alloc_profile;
1993 } else {
1994 alloc_profile = info->avail_metadata_alloc_bits &
1995 info->metadata_alloc_profile;
1996 data = BTRFS_BLOCK_GROUP_METADATA | alloc_profile;
1997 }
1998
1999 return btrfs_reduce_alloc_profile(root, data);
2000}
2001
2002void btrfs_set_inode_space_info(struct btrfs_root *root, struct inode *inode)
2003{
2004 u64 alloc_target;
2005
2006 alloc_target = btrfs_get_alloc_profile(root, 1);
2007 BTRFS_I(inode)->space_info = __find_space_info(root->fs_info,
2008 alloc_target);
2009}
2010
2011/*
2012 * for now this just makes sure we have at least 5% of our metadata space free
2013 * for use.
2014 */
2015int btrfs_check_metadata_free_space(struct btrfs_root *root)
2016{
2017 struct btrfs_fs_info *info = root->fs_info;
2018 struct btrfs_space_info *meta_sinfo;
2019 u64 alloc_target, thresh;
2020 int committed = 0, ret;
2021
2022 /* get the space info for where the metadata will live */
2023 alloc_target = btrfs_get_alloc_profile(root, 0);
2024 meta_sinfo = __find_space_info(info, alloc_target);
2025
2026again:
2027 spin_lock(&meta_sinfo->lock);
2028 if (!meta_sinfo->full)
2029 thresh = meta_sinfo->total_bytes * 80;
2030 else
2031 thresh = meta_sinfo->total_bytes * 95;
2032
2033 do_div(thresh, 100);
2034
2035 if (meta_sinfo->bytes_used + meta_sinfo->bytes_reserved +
2036 meta_sinfo->bytes_pinned + meta_sinfo->bytes_readonly > thresh) {
2037 struct btrfs_trans_handle *trans;
2038 if (!meta_sinfo->full) {
2039 meta_sinfo->force_alloc = 1;
2040 spin_unlock(&meta_sinfo->lock);
2041
2042 trans = btrfs_start_transaction(root, 1);
2043 if (!trans)
2044 return -ENOMEM;
2045
2046 ret = do_chunk_alloc(trans, root->fs_info->extent_root,
2047 2 * 1024 * 1024, alloc_target, 0);
2048 btrfs_end_transaction(trans, root);
2049 goto again;
2050 }
2051 spin_unlock(&meta_sinfo->lock);
2052
2053 if (!committed) {
2054 committed = 1;
2055 trans = btrfs_join_transaction(root, 1);
2056 if (!trans)
2057 return -ENOMEM;
2058 ret = btrfs_commit_transaction(trans, root);
2059 if (ret)
2060 return ret;
2061 goto again;
2062 }
2063 return -ENOSPC;
2064 }
2065 spin_unlock(&meta_sinfo->lock);
2066
2067 return 0;
2068}
2069
2070/*
2071 * This will check the space that the inode allocates from to make sure we have
2072 * enough space for bytes.
2073 */
2074int btrfs_check_data_free_space(struct btrfs_root *root, struct inode *inode,
2075 u64 bytes)
2076{
2077 struct btrfs_space_info *data_sinfo;
2078 int ret = 0, committed = 0;
2079
2080 /* make sure bytes are sectorsize aligned */
2081 bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1);
2082
2083 data_sinfo = BTRFS_I(inode)->space_info;
2084again:
2085 /* make sure we have enough space to handle the data first */
2086 spin_lock(&data_sinfo->lock);
2087 if (data_sinfo->total_bytes - data_sinfo->bytes_used -
2088 data_sinfo->bytes_delalloc - data_sinfo->bytes_reserved -
2089 data_sinfo->bytes_pinned - data_sinfo->bytes_readonly -
2090 data_sinfo->bytes_may_use < bytes) {
2091 struct btrfs_trans_handle *trans;
2092
2093 /*
2094 * if we don't have enough free bytes in this space then we need
2095 * to alloc a new chunk.
2096 */
2097 if (!data_sinfo->full) {
2098 u64 alloc_target;
2099
2100 data_sinfo->force_alloc = 1;
2101 spin_unlock(&data_sinfo->lock);
2102
2103 alloc_target = btrfs_get_alloc_profile(root, 1);
2104 trans = btrfs_start_transaction(root, 1);
2105 if (!trans)
2106 return -ENOMEM;
2107
2108 ret = do_chunk_alloc(trans, root->fs_info->extent_root,
2109 bytes + 2 * 1024 * 1024,
2110 alloc_target, 0);
2111 btrfs_end_transaction(trans, root);
2112 if (ret)
2113 return ret;
2114 goto again;
2115 }
2116 spin_unlock(&data_sinfo->lock);
2117
2118 /* commit the current transaction and try again */
2119 if (!committed) {
2120 committed = 1;
2121 trans = btrfs_join_transaction(root, 1);
2122 if (!trans)
2123 return -ENOMEM;
2124 ret = btrfs_commit_transaction(trans, root);
2125 if (ret)
2126 return ret;
2127 goto again;
2128 }
2129
2130 printk(KERN_ERR "no space left, need %llu, %llu delalloc bytes"
2131 ", %llu bytes_used, %llu bytes_reserved, "
2132 "%llu bytes_pinned, %llu bytes_readonly, %llu may use"
2133 "%llu total\n", bytes, data_sinfo->bytes_delalloc,
2134 data_sinfo->bytes_used, data_sinfo->bytes_reserved,
2135 data_sinfo->bytes_pinned, data_sinfo->bytes_readonly,
2136 data_sinfo->bytes_may_use, data_sinfo->total_bytes);
2137 return -ENOSPC;
2138 }
2139 data_sinfo->bytes_may_use += bytes;
2140 BTRFS_I(inode)->reserved_bytes += bytes;
2141 spin_unlock(&data_sinfo->lock);
2142
2143 return btrfs_check_metadata_free_space(root);
2144}
2145
2146/*
2147 * if there was an error for whatever reason after calling
2148 * btrfs_check_data_free_space, call this so we can cleanup the counters.
2149 */
2150void btrfs_free_reserved_data_space(struct btrfs_root *root,
2151 struct inode *inode, u64 bytes)
2152{
2153 struct btrfs_space_info *data_sinfo;
2154
2155 /* make sure bytes are sectorsize aligned */
2156 bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1);
2157
2158 data_sinfo = BTRFS_I(inode)->space_info;
2159 spin_lock(&data_sinfo->lock);
2160 data_sinfo->bytes_may_use -= bytes;
2161 BTRFS_I(inode)->reserved_bytes -= bytes;
2162 spin_unlock(&data_sinfo->lock);
2163}
2164
2165/* called when we are adding a delalloc extent to the inode's io_tree */
2166void btrfs_delalloc_reserve_space(struct btrfs_root *root, struct inode *inode,
2167 u64 bytes)
2168{
2169 struct btrfs_space_info *data_sinfo;
2170
2171 /* get the space info for where this inode will be storing its data */
2172 data_sinfo = BTRFS_I(inode)->space_info;
2173
2174 /* make sure we have enough space to handle the data first */
2175 spin_lock(&data_sinfo->lock);
2176 data_sinfo->bytes_delalloc += bytes;
2177
2178 /*
2179 * we are adding a delalloc extent without calling
2180 * btrfs_check_data_free_space first. This happens on a weird
2181 * writepage condition, but shouldn't hurt our accounting
2182 */
2183 if (unlikely(bytes > BTRFS_I(inode)->reserved_bytes)) {
2184 data_sinfo->bytes_may_use -= BTRFS_I(inode)->reserved_bytes;
2185 BTRFS_I(inode)->reserved_bytes = 0;
2186 } else {
2187 data_sinfo->bytes_may_use -= bytes;
2188 BTRFS_I(inode)->reserved_bytes -= bytes;
2189 }
2190
2191 spin_unlock(&data_sinfo->lock);
2192}
2193
2194/* called when we are clearing a delalloc extent from the inode's io_tree */
2195void btrfs_delalloc_free_space(struct btrfs_root *root, struct inode *inode,
2196 u64 bytes)
2197{
2198 struct btrfs_space_info *info;
2199
2200 info = BTRFS_I(inode)->space_info;
2201
2202 spin_lock(&info->lock);
2203 info->bytes_delalloc -= bytes;
2204 spin_unlock(&info->lock);
2205}
2206
1975static int do_chunk_alloc(struct btrfs_trans_handle *trans, 2207static int do_chunk_alloc(struct btrfs_trans_handle *trans,
1976 struct btrfs_root *extent_root, u64 alloc_bytes, 2208 struct btrfs_root *extent_root, u64 alloc_bytes,
1977 u64 flags, int force) 2209 u64 flags, int force)
@@ -3105,6 +3337,10 @@ static void dump_space_info(struct btrfs_space_info *info, u64 bytes)
3105 (unsigned long long)(info->total_bytes - info->bytes_used - 3337 (unsigned long long)(info->total_bytes - info->bytes_used -
3106 info->bytes_pinned - info->bytes_reserved), 3338 info->bytes_pinned - info->bytes_reserved),
3107 (info->full) ? "" : "not "); 3339 (info->full) ? "" : "not ");
3340 printk(KERN_INFO "space_info total=%llu, pinned=%llu, delalloc=%llu,"
3341 " may_use=%llu, used=%llu\n", info->total_bytes,
3342 info->bytes_pinned, info->bytes_delalloc, info->bytes_may_use,
3343 info->bytes_used);
3108 3344
3109 down_read(&info->groups_sem); 3345 down_read(&info->groups_sem);
3110 list_for_each_entry(cache, &info->block_groups, list) { 3346 list_for_each_entry(cache, &info->block_groups, list) {
@@ -3131,24 +3367,10 @@ static int __btrfs_reserve_extent(struct btrfs_trans_handle *trans,
3131{ 3367{
3132 int ret; 3368 int ret;
3133 u64 search_start = 0; 3369 u64 search_start = 0;
3134 u64 alloc_profile;
3135 struct btrfs_fs_info *info = root->fs_info; 3370 struct btrfs_fs_info *info = root->fs_info;
3136 3371
3137 if (data) { 3372 data = btrfs_get_alloc_profile(root, data);
3138 alloc_profile = info->avail_data_alloc_bits &
3139 info->data_alloc_profile;
3140 data = BTRFS_BLOCK_GROUP_DATA | alloc_profile;
3141 } else if (root == root->fs_info->chunk_root) {
3142 alloc_profile = info->avail_system_alloc_bits &
3143 info->system_alloc_profile;
3144 data = BTRFS_BLOCK_GROUP_SYSTEM | alloc_profile;
3145 } else {
3146 alloc_profile = info->avail_metadata_alloc_bits &
3147 info->metadata_alloc_profile;
3148 data = BTRFS_BLOCK_GROUP_METADATA | alloc_profile;
3149 }
3150again: 3373again:
3151 data = btrfs_reduce_alloc_profile(root, data);
3152 /* 3374 /*
3153 * the only place that sets empty_size is btrfs_realloc_node, which 3375 * the only place that sets empty_size is btrfs_realloc_node, which
3154 * is not called recursively on allocations 3376 * is not called recursively on allocations
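The contract behind the new extent-tree.c helpers is strictly paired: a writer calls btrfs_check_data_free_space() before dirtying pages (bumping bytes_may_use and the inode's reserved_bytes) and must call btrfs_free_reserved_data_space() on every failure path; once the delalloc bit is actually set, the set_bit/clear_bit hooks move the accounting over to bytes_delalloc. A hedged sketch of a caller obeying that contract; do_prepare() and do_copy() are hypothetical stand-ins for the prepare_pages()/btrfs_copy_from_user() steps seen in the file.c hunks that follow.

    /* Sketch of the reservation discipline the new helpers impose;
     * the interesting part is the error path.
     */
    static int write_chunk(struct btrfs_root *root, struct inode *inode,
                           u64 bytes)
    {
        int ret;

        ret = btrfs_check_data_free_space(root, inode, bytes);
        if (ret)
            return ret;                     /* nothing was reserved */

        ret = do_prepare(inode, bytes);     /* think prepare_pages() */
        if (!ret)
            ret = do_copy(inode, bytes);    /* think btrfs_copy_from_user() */

        if (ret)
            /* undo the bytes_may_use / reserved_bytes bump */
            btrfs_free_reserved_data_space(root, inode, bytes);

        /* on success the delalloc set_bit hook later converts the
         * reservation into bytes_delalloc via btrfs_delalloc_reserve_space()
         */
        return ret;
    }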
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 872f104576e5..dc78954861b3 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -1091,19 +1091,24 @@ static ssize_t btrfs_file_write(struct file *file, const char __user *buf,
1091 WARN_ON(num_pages > nrptrs); 1091 WARN_ON(num_pages > nrptrs);
1092 memset(pages, 0, sizeof(struct page *) * nrptrs); 1092 memset(pages, 0, sizeof(struct page *) * nrptrs);
1093 1093
1094 ret = btrfs_check_free_space(root, write_bytes, 0); 1094 ret = btrfs_check_data_free_space(root, inode, write_bytes);
1095 if (ret) 1095 if (ret)
1096 goto out; 1096 goto out;
1097 1097
1098 ret = prepare_pages(root, file, pages, num_pages, 1098 ret = prepare_pages(root, file, pages, num_pages,
1099 pos, first_index, last_index, 1099 pos, first_index, last_index,
1100 write_bytes); 1100 write_bytes);
1101 if (ret) 1101 if (ret) {
1102 btrfs_free_reserved_data_space(root, inode,
1103 write_bytes);
1102 goto out; 1104 goto out;
1105 }
1103 1106
1104 ret = btrfs_copy_from_user(pos, num_pages, 1107 ret = btrfs_copy_from_user(pos, num_pages,
1105 write_bytes, pages, buf); 1108 write_bytes, pages, buf);
1106 if (ret) { 1109 if (ret) {
1110 btrfs_free_reserved_data_space(root, inode,
1111 write_bytes);
1107 btrfs_drop_pages(pages, num_pages); 1112 btrfs_drop_pages(pages, num_pages);
1108 goto out; 1113 goto out;
1109 } 1114 }
@@ -1111,8 +1116,11 @@ static ssize_t btrfs_file_write(struct file *file, const char __user *buf,
1111 ret = dirty_and_release_pages(NULL, root, file, pages, 1116 ret = dirty_and_release_pages(NULL, root, file, pages,
1112 num_pages, pos, write_bytes); 1117 num_pages, pos, write_bytes);
1113 btrfs_drop_pages(pages, num_pages); 1118 btrfs_drop_pages(pages, num_pages);
1114 if (ret) 1119 if (ret) {
1120 btrfs_free_reserved_data_space(root, inode,
1121 write_bytes);
1115 goto out; 1122 goto out;
1123 }
1116 1124
1117 if (will_write) { 1125 if (will_write) {
1118 btrfs_fdatawrite_range(inode->i_mapping, pos, 1126 btrfs_fdatawrite_range(inode->i_mapping, pos,
@@ -1136,6 +1144,8 @@ static ssize_t btrfs_file_write(struct file *file, const char __user *buf,
1136 } 1144 }
1137out: 1145out:
1138 mutex_unlock(&inode->i_mutex); 1146 mutex_unlock(&inode->i_mutex);
1147 if (ret)
1148 err = ret;
1139 1149
1140out_nolock: 1150out_nolock:
1141 kfree(pages); 1151 kfree(pages);
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 3cee77ae03c8..7d4f948bc22a 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -102,34 +102,6 @@ static int btrfs_init_inode_security(struct inode *inode, struct inode *dir)
102} 102}
103 103
104/* 104/*
105 * a very lame attempt at stopping writes when the FS is 85% full. There
106 * are countless ways this is incorrect, but it is better than nothing.
107 */
108int btrfs_check_free_space(struct btrfs_root *root, u64 num_required,
109 int for_del)
110{
111 u64 total;
112 u64 used;
113 u64 thresh;
114 int ret = 0;
115
116 spin_lock(&root->fs_info->delalloc_lock);
117 total = btrfs_super_total_bytes(&root->fs_info->super_copy);
118 used = btrfs_super_bytes_used(&root->fs_info->super_copy);
119 if (for_del)
120 thresh = total * 90;
121 else
122 thresh = total * 85;
123
124 do_div(thresh, 100);
125
126 if (used + root->fs_info->delalloc_bytes + num_required > thresh)
127 ret = -ENOSPC;
128 spin_unlock(&root->fs_info->delalloc_lock);
129 return ret;
130}
131
132/*
133 * this does all the hard work for inserting an inline extent into 105 * this does all the hard work for inserting an inline extent into
134 * the btree. The caller should have done a btrfs_drop_extents so that 106 * the btree. The caller should have done a btrfs_drop_extents so that
135 * no overlapping inline items exist in the btree 107 * no overlapping inline items exist in the btree
@@ -1190,6 +1162,7 @@ static int btrfs_set_bit_hook(struct inode *inode, u64 start, u64 end,
1190 */ 1162 */
1191 if (!(old & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) { 1163 if (!(old & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) {
1192 struct btrfs_root *root = BTRFS_I(inode)->root; 1164 struct btrfs_root *root = BTRFS_I(inode)->root;
1165 btrfs_delalloc_reserve_space(root, inode, end - start + 1);
1193 spin_lock(&root->fs_info->delalloc_lock); 1166 spin_lock(&root->fs_info->delalloc_lock);
1194 BTRFS_I(inode)->delalloc_bytes += end - start + 1; 1167 BTRFS_I(inode)->delalloc_bytes += end - start + 1;
1195 root->fs_info->delalloc_bytes += end - start + 1; 1168 root->fs_info->delalloc_bytes += end - start + 1;
@@ -1223,9 +1196,12 @@ static int btrfs_clear_bit_hook(struct inode *inode, u64 start, u64 end,
1223 (unsigned long long)end - start + 1, 1196 (unsigned long long)end - start + 1,
1224 (unsigned long long) 1197 (unsigned long long)
1225 root->fs_info->delalloc_bytes); 1198 root->fs_info->delalloc_bytes);
1199 btrfs_delalloc_free_space(root, inode, (u64)-1);
1226 root->fs_info->delalloc_bytes = 0; 1200 root->fs_info->delalloc_bytes = 0;
1227 BTRFS_I(inode)->delalloc_bytes = 0; 1201 BTRFS_I(inode)->delalloc_bytes = 0;
1228 } else { 1202 } else {
1203 btrfs_delalloc_free_space(root, inode,
1204 end - start + 1);
1229 root->fs_info->delalloc_bytes -= end - start + 1; 1205 root->fs_info->delalloc_bytes -= end - start + 1;
1230 BTRFS_I(inode)->delalloc_bytes -= end - start + 1; 1206 BTRFS_I(inode)->delalloc_bytes -= end - start + 1;
1231 } 1207 }
@@ -2245,10 +2221,6 @@ static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
2245 2221
2246 root = BTRFS_I(dir)->root; 2222 root = BTRFS_I(dir)->root;
2247 2223
2248 ret = btrfs_check_free_space(root, 1, 1);
2249 if (ret)
2250 goto fail;
2251
2252 trans = btrfs_start_transaction(root, 1); 2224 trans = btrfs_start_transaction(root, 1);
2253 2225
2254 btrfs_set_trans_block_group(trans, dir); 2226 btrfs_set_trans_block_group(trans, dir);
@@ -2261,7 +2233,6 @@ static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
2261 nr = trans->blocks_used; 2233 nr = trans->blocks_used;
2262 2234
2263 btrfs_end_transaction_throttle(trans, root); 2235 btrfs_end_transaction_throttle(trans, root);
2264fail:
2265 btrfs_btree_balance_dirty(root, nr); 2236 btrfs_btree_balance_dirty(root, nr);
2266 return ret; 2237 return ret;
2267} 2238}
@@ -2284,10 +2255,6 @@ static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
2284 return -ENOTEMPTY; 2255 return -ENOTEMPTY;
2285 } 2256 }
2286 2257
2287 ret = btrfs_check_free_space(root, 1, 1);
2288 if (ret)
2289 goto fail;
2290
2291 trans = btrfs_start_transaction(root, 1); 2258 trans = btrfs_start_transaction(root, 1);
2292 btrfs_set_trans_block_group(trans, dir); 2259 btrfs_set_trans_block_group(trans, dir);
2293 2260
@@ -2304,7 +2271,6 @@ static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
2304fail_trans: 2271fail_trans:
2305 nr = trans->blocks_used; 2272 nr = trans->blocks_used;
2306 ret = btrfs_end_transaction_throttle(trans, root); 2273 ret = btrfs_end_transaction_throttle(trans, root);
2307fail:
2308 btrfs_btree_balance_dirty(root, nr); 2274 btrfs_btree_balance_dirty(root, nr);
2309 2275
2310 if (ret && !err) 2276 if (ret && !err)
@@ -2818,7 +2784,7 @@ int btrfs_cont_expand(struct inode *inode, loff_t size)
2818 if (size <= hole_start) 2784 if (size <= hole_start)
2819 return 0; 2785 return 0;
2820 2786
2821 err = btrfs_check_free_space(root, 1, 0); 2787 err = btrfs_check_metadata_free_space(root);
2822 if (err) 2788 if (err)
2823 return err; 2789 return err;
2824 2790
@@ -3014,6 +2980,7 @@ static noinline void init_btrfs_i(struct inode *inode)
3014 bi->last_trans = 0; 2980 bi->last_trans = 0;
3015 bi->logged_trans = 0; 2981 bi->logged_trans = 0;
3016 bi->delalloc_bytes = 0; 2982 bi->delalloc_bytes = 0;
2983 bi->reserved_bytes = 0;
3017 bi->disk_i_size = 0; 2984 bi->disk_i_size = 0;
3018 bi->flags = 0; 2985 bi->flags = 0;
3019 bi->index_cnt = (u64)-1; 2986 bi->index_cnt = (u64)-1;
@@ -3035,6 +3002,7 @@ static int btrfs_init_locked_inode(struct inode *inode, void *p)
3035 inode->i_ino = args->ino; 3002 inode->i_ino = args->ino;
3036 init_btrfs_i(inode); 3003 init_btrfs_i(inode);
3037 BTRFS_I(inode)->root = args->root; 3004 BTRFS_I(inode)->root = args->root;
3005 btrfs_set_inode_space_info(args->root, inode);
3038 return 0; 3006 return 0;
3039} 3007}
3040 3008
@@ -3455,6 +3423,7 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
3455 BTRFS_I(inode)->index_cnt = 2; 3423 BTRFS_I(inode)->index_cnt = 2;
3456 BTRFS_I(inode)->root = root; 3424 BTRFS_I(inode)->root = root;
3457 BTRFS_I(inode)->generation = trans->transid; 3425 BTRFS_I(inode)->generation = trans->transid;
3426 btrfs_set_inode_space_info(root, inode);
3458 3427
3459 if (mode & S_IFDIR) 3428 if (mode & S_IFDIR)
3460 owner = 0; 3429 owner = 0;
@@ -3602,7 +3571,7 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
3602 if (!new_valid_dev(rdev)) 3571 if (!new_valid_dev(rdev))
3603 return -EINVAL; 3572 return -EINVAL;
3604 3573
3605 err = btrfs_check_free_space(root, 1, 0); 3574 err = btrfs_check_metadata_free_space(root);
3606 if (err) 3575 if (err)
3607 goto fail; 3576 goto fail;
3608 3577
@@ -3665,7 +3634,7 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry,
3665 u64 objectid; 3634 u64 objectid;
3666 u64 index = 0; 3635 u64 index = 0;
3667 3636
3668 err = btrfs_check_free_space(root, 1, 0); 3637 err = btrfs_check_metadata_free_space(root);
3669 if (err) 3638 if (err)
3670 goto fail; 3639 goto fail;
3671 trans = btrfs_start_transaction(root, 1); 3640 trans = btrfs_start_transaction(root, 1);
@@ -3733,7 +3702,7 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
3733 return -ENOENT; 3702 return -ENOENT;
3734 3703
3735 btrfs_inc_nlink(inode); 3704 btrfs_inc_nlink(inode);
3736 err = btrfs_check_free_space(root, 1, 0); 3705 err = btrfs_check_metadata_free_space(root);
3737 if (err) 3706 if (err)
3738 goto fail; 3707 goto fail;
3739 err = btrfs_set_inode_index(dir, &index); 3708 err = btrfs_set_inode_index(dir, &index);
@@ -3779,7 +3748,7 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
3779 u64 index = 0; 3748 u64 index = 0;
3780 unsigned long nr = 1; 3749 unsigned long nr = 1;
3781 3750
3782 err = btrfs_check_free_space(root, 1, 0); 3751 err = btrfs_check_metadata_free_space(root);
3783 if (err) 3752 if (err)
3784 goto out_unlock; 3753 goto out_unlock;
3785 3754
@@ -4336,7 +4305,7 @@ int btrfs_page_mkwrite(struct vm_area_struct *vma, struct page *page)
4336 u64 page_start; 4305 u64 page_start;
4337 u64 page_end; 4306 u64 page_end;
4338 4307
4339 ret = btrfs_check_free_space(root, PAGE_CACHE_SIZE, 0); 4308 ret = btrfs_check_data_free_space(root, inode, PAGE_CACHE_SIZE);
4340 if (ret) 4309 if (ret)
4341 goto out; 4310 goto out;
4342 4311
@@ -4349,6 +4318,7 @@ again:
4349 4318
4350 if ((page->mapping != inode->i_mapping) || 4319 if ((page->mapping != inode->i_mapping) ||
4351 (page_start >= size)) { 4320 (page_start >= size)) {
4321 btrfs_free_reserved_data_space(root, inode, PAGE_CACHE_SIZE);
4352 /* page got truncated out from underneath us */ 4322 /* page got truncated out from underneath us */
4353 goto out_unlock; 4323 goto out_unlock;
4354 } 4324 }
@@ -4631,7 +4601,7 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
4631 if (old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID) 4601 if (old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)
4632 return -EXDEV; 4602 return -EXDEV;
4633 4603
4634 ret = btrfs_check_free_space(root, 1, 0); 4604 ret = btrfs_check_metadata_free_space(root);
4635 if (ret) 4605 if (ret)
4636 goto out_unlock; 4606 goto out_unlock;
4637 4607
@@ -4749,7 +4719,7 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
4749 if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(root)) 4719 if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(root))
4750 return -ENAMETOOLONG; 4720 return -ENAMETOOLONG;
4751 4721
4752 err = btrfs_check_free_space(root, 1, 0); 4722 err = btrfs_check_metadata_free_space(root);
4753 if (err) 4723 if (err)
4754 goto out_fail; 4724 goto out_fail;
4755 4725
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 988fdc8b49eb..bca729fc80c8 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -70,7 +70,7 @@ static noinline int create_subvol(struct btrfs_root *root,
70 u64 index = 0; 70 u64 index = 0;
71 unsigned long nr = 1; 71 unsigned long nr = 1;
72 72
73 ret = btrfs_check_free_space(root, 1, 0); 73 ret = btrfs_check_metadata_free_space(root);
74 if (ret) 74 if (ret)
75 goto fail_commit; 75 goto fail_commit;
76 76
@@ -203,7 +203,7 @@ static int create_snapshot(struct btrfs_root *root, struct dentry *dentry,
203 if (!root->ref_cows) 203 if (!root->ref_cows)
204 return -EINVAL; 204 return -EINVAL;
205 205
206 ret = btrfs_check_free_space(root, 1, 0); 206 ret = btrfs_check_metadata_free_space(root);
207 if (ret) 207 if (ret)
208 goto fail_unlock; 208 goto fail_unlock;
209 209
@@ -374,7 +374,7 @@ static int btrfs_defrag_file(struct file *file)
374 unsigned long i; 374 unsigned long i;
375 int ret; 375 int ret;
376 376
377 ret = btrfs_check_free_space(root, inode->i_size, 0); 377 ret = btrfs_check_data_free_space(root, inode, inode->i_size);
378 if (ret) 378 if (ret)
379 return -ENOSPC; 379 return -ENOSPC;
380 380
diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
index 39bd4d38e889..45e59d3c7f1f 100644
--- a/fs/compat_ioctl.c
+++ b/fs/compat_ioctl.c
@@ -1913,6 +1913,9 @@ COMPATIBLE_IOCTL(FIONREAD) /* This is also TIOCINQ */
1913/* 0x00 */ 1913/* 0x00 */
1914COMPATIBLE_IOCTL(FIBMAP) 1914COMPATIBLE_IOCTL(FIBMAP)
1915COMPATIBLE_IOCTL(FIGETBSZ) 1915COMPATIBLE_IOCTL(FIGETBSZ)
1916/* 'X' - originally XFS but some now in the VFS */
1917COMPATIBLE_IOCTL(FIFREEZE)
1918COMPATIBLE_IOCTL(FITHAW)
1916/* RAID */ 1919/* RAID */
1917COMPATIBLE_IOCTL(RAID_VERSION) 1920COMPATIBLE_IOCTL(RAID_VERSION)
1918COMPATIBLE_IOCTL(GET_ARRAY_INFO) 1921COMPATIBLE_IOCTL(GET_ARRAY_INFO)
diff --git a/fs/dcache.c b/fs/dcache.c
index 937df0fb0da5..07e2d4a44bda 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -1180,7 +1180,7 @@ struct dentry *d_obtain_alias(struct inode *inode)
1180 iput(inode); 1180 iput(inode);
1181 return res; 1181 return res;
1182} 1182}
1183EXPORT_SYMBOL_GPL(d_obtain_alias); 1183EXPORT_SYMBOL(d_obtain_alias);
1184 1184
1185/** 1185/**
1186 * d_splice_alias - splice a disconnected dentry into the tree if one exists 1186 * d_splice_alias - splice a disconnected dentry into the tree if one exists
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
index 9a50b8052dcf..de9459b4cb94 100644
--- a/fs/ext4/balloc.c
+++ b/fs/ext4/balloc.c
@@ -609,7 +609,9 @@ int ext4_claim_free_blocks(struct ext4_sb_info *sbi,
609 */ 609 */
610int ext4_should_retry_alloc(struct super_block *sb, int *retries) 610int ext4_should_retry_alloc(struct super_block *sb, int *retries)
611{ 611{
612 if (!ext4_has_free_blocks(EXT4_SB(sb), 1) || (*retries)++ > 3) 612 if (!ext4_has_free_blocks(EXT4_SB(sb), 1) ||
613 (*retries)++ > 3 ||
614 !EXT4_SB(sb)->s_journal)
613 return 0; 615 return 0;
614 616
615 jbd_debug(1, "%s: retrying operation after ENOSPC\n", sb->s_id); 617 jbd_debug(1, "%s: retrying operation after ENOSPC\n", sb->s_id);
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index 4fb86a0061d0..f18a919be70b 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -715,6 +715,13 @@ struct inode *ext4_new_inode(handle_t *handle, struct inode *dir, int mode)
715 715
716 if (sbi->s_log_groups_per_flex) { 716 if (sbi->s_log_groups_per_flex) {
717 ret2 = find_group_flex(sb, dir, &group); 717 ret2 = find_group_flex(sb, dir, &group);
718 if (ret2 == -1) {
719 ret2 = find_group_other(sb, dir, &group);
720 if (ret2 == 0 && printk_ratelimit())
721 printk(KERN_NOTICE "ext4: find_group_flex "
722 "failed, fallback succeeded dir %lu\n",
723 dir->i_ino);
724 }
718 goto got_group; 725 goto got_group;
719 } 726 }
720 727
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index cbd2ca99d113..c7fed5b18745 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -1368,6 +1368,10 @@ retry:
1368 goto out; 1368 goto out;
1369 } 1369 }
1370 1370
1371 /* We cannot recurse into the filesystem as the transaction is already
1372 * started */
1373 flags |= AOP_FLAG_NOFS;
1374
1371 page = grab_cache_page_write_begin(mapping, index, flags); 1375 page = grab_cache_page_write_begin(mapping, index, flags);
1372 if (!page) { 1376 if (!page) {
1373 ext4_journal_stop(handle); 1377 ext4_journal_stop(handle);
@@ -1377,7 +1381,7 @@ retry:
1377 *pagep = page; 1381 *pagep = page;
1378 1382
1379 ret = block_write_begin(file, mapping, pos, len, flags, pagep, fsdata, 1383 ret = block_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
1380 ext4_get_block); 1384 ext4_get_block);
1381 1385
1382 if (!ret && ext4_should_journal_data(inode)) { 1386 if (!ret && ext4_should_journal_data(inode)) {
1383 ret = walk_page_buffers(handle, page_buffers(page), 1387 ret = walk_page_buffers(handle, page_buffers(page),
@@ -2540,7 +2544,7 @@ retry:
2540 2544
2541 ext4_journal_stop(handle); 2545 ext4_journal_stop(handle);
2542 2546
2543 if (mpd.retval == -ENOSPC) { 2547 if ((mpd.retval == -ENOSPC) && sbi->s_journal) {
2544 /* commit the transaction which would 2548 /* commit the transaction which would
2545 * free blocks released in the transaction 2549 * free blocks released in the transaction
2546 * and try again 2550 * and try again
@@ -2667,6 +2671,9 @@ retry:
2667 ret = PTR_ERR(handle); 2671 ret = PTR_ERR(handle);
2668 goto out; 2672 goto out;
2669 } 2673 }
2674 /* We cannot recurse into the filesystem as the transaction is already
2675 * started */
2676 flags |= AOP_FLAG_NOFS;
2670 2677
2671 page = grab_cache_page_write_begin(mapping, index, flags); 2678 page = grab_cache_page_write_begin(mapping, index, flags);
2672 if (!page) { 2679 if (!page) {
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index a5732c58f676..39d1993cfa13 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -3091,7 +3091,6 @@ static int ext4_freeze(struct super_block *sb)
3091 3091
3092 /* Journal blocked and flushed, clear needs_recovery flag. */ 3092 /* Journal blocked and flushed, clear needs_recovery flag. */
3093 EXT4_CLEAR_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER); 3093 EXT4_CLEAR_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER);
3094 ext4_commit_super(sb, EXT4_SB(sb)->s_es, 1);
3095 error = ext4_commit_super(sb, EXT4_SB(sb)->s_es, 1); 3094 error = ext4_commit_super(sb, EXT4_SB(sb)->s_es, 1);
3096 if (error) 3095 if (error)
3097 goto out; 3096 goto out;
diff --git a/fs/jffs2/background.c b/fs/jffs2/background.c
index 3cceef4ad2b7..e9580104b6ba 100644
--- a/fs/jffs2/background.c
+++ b/fs/jffs2/background.c
@@ -95,13 +95,17 @@ static int jffs2_garbage_collect_thread(void *_c)
95 spin_unlock(&c->erase_completion_lock); 95 spin_unlock(&c->erase_completion_lock);
96 96
97 97
98 /* This thread is purely an optimisation. But if it runs when 98 /* Problem - immediately after bootup, the GCD spends a lot
99 other things could be running, it actually makes things a 99 * of time in places like jffs2_kill_fragtree(); so much so
100 lot worse. Use yield() and put it at the back of the runqueue 100 * that userspace processes (like gdm and X) are starved
101 every time. Especially during boot, pulling an inode in 101 * despite plenty of cond_resched()s and renicing. Yield()
102 with read_inode() is much preferable to having the GC thread 102 * doesn't help, either (presumably because userspace and GCD
103 get there first. */ 103 * are generally competing for a higher latency resource -
104 yield(); 104 * disk).
105 * This forces the GCD to slow the hell down. Pulling an
106 * inode in with read_inode() is much preferable to having
107 * the GC thread get there first. */
108 schedule_timeout_interruptible(msecs_to_jiffies(50));
105 109
106 /* Put_super will send a SIGKILL and then wait on the sem. 110 /* Put_super will send a SIGKILL and then wait on the sem.
107 */ 111 */
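The background.c change swaps yield() for a fixed 50 ms interruptible sleep, the usual way to make a housekeeping thread defer to interactive work without spinning in the scheduler. A minimal kthread loop using the same calls; this is a generic sketch, not jffs2's actual thread, which also handles signals and the SIGKILL sent from put_super:

    #include <linux/jiffies.h>
    #include <linux/kthread.h>
    #include <linux/sched.h>

    static int housekeeping_thread(void *data)
    {
        while (!kthread_should_stop()) {
            /* ... do one unit of background work ... */

            /* Throttle: a short sleep, not yield(), so interactive tasks
             * win the disk and CPU instead of starving behind this thread.
             */
            schedule_timeout_interruptible(msecs_to_jiffies(50));
        }
        return 0;
    }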
diff --git a/fs/jffs2/readinode.c b/fs/jffs2/readinode.c
index 6ca08ad887c0..1fc1e92356ee 100644
--- a/fs/jffs2/readinode.c
+++ b/fs/jffs2/readinode.c
@@ -220,7 +220,7 @@ static int jffs2_add_tn_to_tree(struct jffs2_sb_info *c,
220 struct jffs2_tmp_dnode_info *tn) 220 struct jffs2_tmp_dnode_info *tn)
221{ 221{
222 uint32_t fn_end = tn->fn->ofs + tn->fn->size; 222 uint32_t fn_end = tn->fn->ofs + tn->fn->size;
223 struct jffs2_tmp_dnode_info *this; 223 struct jffs2_tmp_dnode_info *this, *ptn;
224 224
225 dbg_readinode("insert fragment %#04x-%#04x, ver %u at %08x\n", tn->fn->ofs, fn_end, tn->version, ref_offset(tn->fn->raw)); 225 dbg_readinode("insert fragment %#04x-%#04x, ver %u at %08x\n", tn->fn->ofs, fn_end, tn->version, ref_offset(tn->fn->raw));
226 226
@@ -251,11 +251,18 @@ static int jffs2_add_tn_to_tree(struct jffs2_sb_info *c,
251 if (this) { 251 if (this) {
252 /* If the node is coincident with another at a lower address, 252 /* If the node is coincident with another at a lower address,
253 back up until the other node is found. It may be relevant */ 253 back up until the other node is found. It may be relevant */
254 while (this->overlapped) 254 while (this->overlapped) {
255 this = tn_prev(this); 255 ptn = tn_prev(this);
256 256 if (!ptn) {
257 /* First node should never be marked overlapped */ 257 /*
258 BUG_ON(!this); 258 * We killed a node which set the overlapped
259 * flags during the scan. Fix it up.
260 */
261 this->overlapped = 0;
262 break;
263 }
264 this = ptn;
265 }
259 dbg_readinode("'this' found %#04x-%#04x (%s)\n", this->fn->ofs, this->fn->ofs + this->fn->size, this->fn ? "data" : "hole"); 266 dbg_readinode("'this' found %#04x-%#04x (%s)\n", this->fn->ofs, this->fn->ofs + this->fn->size, this->fn ? "data" : "hole");
260 } 267 }
261 268
@@ -360,7 +367,17 @@ static int jffs2_add_tn_to_tree(struct jffs2_sb_info *c,
360 } 367 }
361 if (!this->overlapped) 368 if (!this->overlapped)
362 break; 369 break;
363 this = tn_prev(this); 370
371 ptn = tn_prev(this);
372 if (!ptn) {
373 /*
374 * We killed a node which set the overlapped
375 * flags during the scan. Fix it up.
376 */
377 this->overlapped = 0;
378 break;
379 }
380 this = ptn;
364 } 381 }
365 } 382 }
366 383
@@ -456,8 +473,15 @@ static int jffs2_build_inode_fragtree(struct jffs2_sb_info *c,
456 eat_last(&rii->tn_root, &last->rb); 473 eat_last(&rii->tn_root, &last->rb);
457 ver_insert(&ver_root, last); 474 ver_insert(&ver_root, last);
458 475
459 if (unlikely(last->overlapped)) 476 if (unlikely(last->overlapped)) {
460 continue; 477 if (pen)
478 continue;
479 /*
480 * We killed a node which set the overlapped
481 * flags during the scan. Fix it up.
482 */
483 last->overlapped = 0;
484 }
461 485
462 /* Now we have a bunch of nodes in reverse version 486 /* Now we have a bunch of nodes in reverse version
463 order, in the tree at ver_root. Most of the time, 487 order, in the tree at ver_root. Most of the time,
diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c
index 60fe74035db5..3a9e5deed74d 100644
--- a/fs/ocfs2/alloc.c
+++ b/fs/ocfs2/alloc.c
@@ -4796,6 +4796,29 @@ out:
4796 return ret; 4796 return ret;
4797} 4797}
4798 4798
4799static int ocfs2_replace_extent_rec(struct inode *inode,
4800 handle_t *handle,
4801 struct ocfs2_path *path,
4802 struct ocfs2_extent_list *el,
4803 int split_index,
4804 struct ocfs2_extent_rec *split_rec)
4805{
4806 int ret;
4807
4808 ret = ocfs2_path_bh_journal_access(handle, inode, path,
4809 path_num_items(path) - 1);
4810 if (ret) {
4811 mlog_errno(ret);
4812 goto out;
4813 }
4814
4815 el->l_recs[split_index] = *split_rec;
4816
4817 ocfs2_journal_dirty(handle, path_leaf_bh(path));
4818out:
4819 return ret;
4820}
4821
4799/* 4822/*
4800 * Mark part or all of the extent record at split_index in the leaf 4823 * Mark part or all of the extent record at split_index in the leaf
4801 * pointed to by path as written. This removes the unwritten 4824 * pointed to by path as written. This removes the unwritten
@@ -4885,7 +4908,9 @@ static int __ocfs2_mark_extent_written(struct inode *inode,
4885 4908
4886 if (ctxt.c_contig_type == CONTIG_NONE) { 4909 if (ctxt.c_contig_type == CONTIG_NONE) {
4887 if (ctxt.c_split_covers_rec) 4910 if (ctxt.c_split_covers_rec)
4888 el->l_recs[split_index] = *split_rec; 4911 ret = ocfs2_replace_extent_rec(inode, handle,
4912 path, el,
4913 split_index, split_rec);
4889 else 4914 else
4890 ret = ocfs2_split_and_insert(inode, handle, path, et, 4915 ret = ocfs2_split_and_insert(inode, handle, path, et,
4891 &last_eb_bh, split_index, 4916 &last_eb_bh, split_index,
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
index 54e182a27caf..0a2813947853 100644
--- a/fs/ocfs2/dlm/dlmmaster.c
+++ b/fs/ocfs2/dlm/dlmmaster.c
@@ -1849,12 +1849,12 @@ int dlm_assert_master_handler(struct o2net_msg *msg, u32 len, void *data,
1849 if (!mle) { 1849 if (!mle) {
1850 if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN && 1850 if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN &&
1851 res->owner != assert->node_idx) { 1851 res->owner != assert->node_idx) {
1852 mlog(ML_ERROR, "assert_master from " 1852 mlog(ML_ERROR, "DIE! Mastery assert from %u, "
1853 "%u, but current owner is " 1853 "but current owner is %u! (%.*s)\n",
1854 "%u! (%.*s)\n", 1854 assert->node_idx, res->owner, namelen,
1855 assert->node_idx, res->owner, 1855 name);
1856 namelen, name); 1856 __dlm_print_one_lock_resource(res);
1857 goto kill; 1857 BUG();
1858 } 1858 }
1859 } else if (mle->type != DLM_MLE_MIGRATION) { 1859 } else if (mle->type != DLM_MLE_MIGRATION) {
1860 if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) { 1860 if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) {
diff --git a/fs/ocfs2/dlm/dlmthread.c b/fs/ocfs2/dlm/dlmthread.c
index d1295203029f..4060bb328bc8 100644
--- a/fs/ocfs2/dlm/dlmthread.c
+++ b/fs/ocfs2/dlm/dlmthread.c
@@ -181,8 +181,7 @@ static int dlm_purge_lockres(struct dlm_ctxt *dlm,
181 181
182 spin_lock(&res->spinlock); 182 spin_lock(&res->spinlock);
183 /* This ensures that clear refmap is sent after the set */ 183 /* This ensures that clear refmap is sent after the set */
184 __dlm_wait_on_lockres_flags(res, (DLM_LOCK_RES_SETREF_INPROG | 184 __dlm_wait_on_lockres_flags(res, DLM_LOCK_RES_SETREF_INPROG);
185 DLM_LOCK_RES_MIGRATING));
186 spin_unlock(&res->spinlock); 185 spin_unlock(&res->spinlock);
187 186
188 /* clear our bit from the master's refmap, ignore errors */ 187 /* clear our bit from the master's refmap, ignore errors */
diff --git a/fs/ocfs2/dlm/dlmunlock.c b/fs/ocfs2/dlm/dlmunlock.c
index 86ca085ef324..fcf879ed6930 100644
--- a/fs/ocfs2/dlm/dlmunlock.c
+++ b/fs/ocfs2/dlm/dlmunlock.c
@@ -117,11 +117,11 @@ static enum dlm_status dlmunlock_common(struct dlm_ctxt *dlm,
117 else 117 else
118 BUG_ON(res->owner == dlm->node_num); 118 BUG_ON(res->owner == dlm->node_num);
119 119
120 spin_lock(&dlm->spinlock); 120 spin_lock(&dlm->ast_lock);
121 /* We want to be sure that we're not freeing a lock 121 /* We want to be sure that we're not freeing a lock
122 * that still has AST's pending... */ 122 * that still has AST's pending... */
123 in_use = !list_empty(&lock->ast_list); 123 in_use = !list_empty(&lock->ast_list);
124 spin_unlock(&dlm->spinlock); 124 spin_unlock(&dlm->ast_lock);
125 if (in_use) { 125 if (in_use) {
126 mlog(ML_ERROR, "lockres %.*s: Someone is calling dlmunlock " 126 mlog(ML_ERROR, "lockres %.*s: Someone is calling dlmunlock "
127 "while waiting for an ast!", res->lockname.len, 127 "while waiting for an ast!", res->lockname.len,
diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
index 206a2370876a..7219a86d34cc 100644
--- a/fs/ocfs2/dlmglue.c
+++ b/fs/ocfs2/dlmglue.c
@@ -320,9 +320,14 @@ static void ocfs2_schedule_blocked_lock(struct ocfs2_super *osb,
320 struct ocfs2_lock_res *lockres); 320 struct ocfs2_lock_res *lockres);
321static inline void ocfs2_recover_from_dlm_error(struct ocfs2_lock_res *lockres, 321static inline void ocfs2_recover_from_dlm_error(struct ocfs2_lock_res *lockres,
322 int convert); 322 int convert);
323#define ocfs2_log_dlm_error(_func, _err, _lockres) do { \ 323#define ocfs2_log_dlm_error(_func, _err, _lockres) do { \
324 mlog(ML_ERROR, "DLM error %d while calling %s on resource %s\n", \ 324 if ((_lockres)->l_type != OCFS2_LOCK_TYPE_DENTRY) \
325 _err, _func, _lockres->l_name); \ 325 mlog(ML_ERROR, "DLM error %d while calling %s on resource %s\n", \
326 _err, _func, _lockres->l_name); \
327 else \
328 mlog(ML_ERROR, "DLM error %d while calling %s on resource %.*s%08x\n", \
329 _err, _func, OCFS2_DENTRY_LOCK_INO_START - 1, (_lockres)->l_name, \
330 (unsigned int)ocfs2_get_dentry_lock_ino(_lockres)); \
326} while (0) 331} while (0)
327static int ocfs2_downconvert_thread(void *arg); 332static int ocfs2_downconvert_thread(void *arg);
328static void ocfs2_downconvert_on_unlock(struct ocfs2_super *osb, 333static void ocfs2_downconvert_on_unlock(struct ocfs2_super *osb,
diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
index 077384135f4e..946d3c34b90b 100644
--- a/fs/ocfs2/ocfs2.h
+++ b/fs/ocfs2/ocfs2.h
@@ -341,6 +341,9 @@ struct ocfs2_super
341 struct ocfs2_node_map osb_recovering_orphan_dirs; 341 struct ocfs2_node_map osb_recovering_orphan_dirs;
342 unsigned int *osb_orphan_wipes; 342 unsigned int *osb_orphan_wipes;
343 wait_queue_head_t osb_wipe_event; 343 wait_queue_head_t osb_wipe_event;
344
345 /* used to protect metaecc calculation check of xattr. */
346 spinlock_t osb_xattr_lock;
344}; 347};
345 348
346#define OCFS2_SB(sb) ((struct ocfs2_super *)(sb)->s_fs_info) 349#define OCFS2_SB(sb) ((struct ocfs2_super *)(sb)->s_fs_info)
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index b1cb38fbe807..7ac83a81ee55 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -1537,6 +1537,13 @@ static int ocfs2_get_sector(struct super_block *sb,
1537 unlock_buffer(*bh); 1537 unlock_buffer(*bh);
1538 ll_rw_block(READ, 1, bh); 1538 ll_rw_block(READ, 1, bh);
1539 wait_on_buffer(*bh); 1539 wait_on_buffer(*bh);
1540 if (!buffer_uptodate(*bh)) {
1541 mlog_errno(-EIO);
1542 brelse(*bh);
1543 *bh = NULL;
1544 return -EIO;
1545 }
1546
1540 return 0; 1547 return 0;
1541} 1548}
1542 1549
@@ -1747,6 +1754,7 @@ static int ocfs2_initialize_super(struct super_block *sb,
1747 INIT_LIST_HEAD(&osb->blocked_lock_list); 1754 INIT_LIST_HEAD(&osb->blocked_lock_list);
1748 osb->blocked_lock_count = 0; 1755 osb->blocked_lock_count = 0;
1749 spin_lock_init(&osb->osb_lock); 1756 spin_lock_init(&osb->osb_lock);
1757 spin_lock_init(&osb->osb_xattr_lock);
1750 ocfs2_init_inode_steal_slot(osb); 1758 ocfs2_init_inode_steal_slot(osb);
1751 1759
1752 atomic_set(&osb->alloc_stats.moves, 0); 1760 atomic_set(&osb->alloc_stats.moves, 0);
diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c
index 915039fffe6e..4ddd788add67 100644
--- a/fs/ocfs2/xattr.c
+++ b/fs/ocfs2/xattr.c
@@ -82,13 +82,14 @@ struct ocfs2_xattr_set_ctxt {
82 82
83#define OCFS2_XATTR_ROOT_SIZE (sizeof(struct ocfs2_xattr_def_value_root)) 83#define OCFS2_XATTR_ROOT_SIZE (sizeof(struct ocfs2_xattr_def_value_root))
84#define OCFS2_XATTR_INLINE_SIZE 80 84#define OCFS2_XATTR_INLINE_SIZE 80
85#define OCFS2_XATTR_HEADER_GAP 4
85#define OCFS2_XATTR_FREE_IN_IBODY (OCFS2_MIN_XATTR_INLINE_SIZE \ 86#define OCFS2_XATTR_FREE_IN_IBODY (OCFS2_MIN_XATTR_INLINE_SIZE \
86 - sizeof(struct ocfs2_xattr_header) \ 87 - sizeof(struct ocfs2_xattr_header) \
87 - sizeof(__u32)) 88 - OCFS2_XATTR_HEADER_GAP)
88#define OCFS2_XATTR_FREE_IN_BLOCK(ptr) ((ptr)->i_sb->s_blocksize \ 89#define OCFS2_XATTR_FREE_IN_BLOCK(ptr) ((ptr)->i_sb->s_blocksize \
89 - sizeof(struct ocfs2_xattr_block) \ 90 - sizeof(struct ocfs2_xattr_block) \
90 - sizeof(struct ocfs2_xattr_header) \ 91 - sizeof(struct ocfs2_xattr_header) \
91 - sizeof(__u32)) 92 - OCFS2_XATTR_HEADER_GAP)
92 93
93static struct ocfs2_xattr_def_value_root def_xv = { 94static struct ocfs2_xattr_def_value_root def_xv = {
94 .xv.xr_list.l_count = cpu_to_le16(1), 95 .xv.xr_list.l_count = cpu_to_le16(1),
@@ -274,10 +275,12 @@ static int ocfs2_read_xattr_bucket(struct ocfs2_xattr_bucket *bucket,
274 bucket->bu_blocks, bucket->bu_bhs, 0, 275 bucket->bu_blocks, bucket->bu_bhs, 0,
275 NULL); 276 NULL);
276 if (!rc) { 277 if (!rc) {
278 spin_lock(&OCFS2_SB(bucket->bu_inode->i_sb)->osb_xattr_lock);
277 rc = ocfs2_validate_meta_ecc_bhs(bucket->bu_inode->i_sb, 279 rc = ocfs2_validate_meta_ecc_bhs(bucket->bu_inode->i_sb,
278 bucket->bu_bhs, 280 bucket->bu_bhs,
279 bucket->bu_blocks, 281 bucket->bu_blocks,
280 &bucket_xh(bucket)->xh_check); 282 &bucket_xh(bucket)->xh_check);
283 spin_unlock(&OCFS2_SB(bucket->bu_inode->i_sb)->osb_xattr_lock);
281 if (rc) 284 if (rc)
282 mlog_errno(rc); 285 mlog_errno(rc);
283 } 286 }
@@ -310,9 +313,11 @@ static void ocfs2_xattr_bucket_journal_dirty(handle_t *handle,
310{ 313{
311 int i; 314 int i;
312 315
316 spin_lock(&OCFS2_SB(bucket->bu_inode->i_sb)->osb_xattr_lock);
313 ocfs2_compute_meta_ecc_bhs(bucket->bu_inode->i_sb, 317 ocfs2_compute_meta_ecc_bhs(bucket->bu_inode->i_sb,
314 bucket->bu_bhs, bucket->bu_blocks, 318 bucket->bu_bhs, bucket->bu_blocks,
315 &bucket_xh(bucket)->xh_check); 319 &bucket_xh(bucket)->xh_check);
320 spin_unlock(&OCFS2_SB(bucket->bu_inode->i_sb)->osb_xattr_lock);
316 321
317 for (i = 0; i < bucket->bu_blocks; i++) 322 for (i = 0; i < bucket->bu_blocks; i++)
318 ocfs2_journal_dirty(handle, bucket->bu_bhs[i]); 323 ocfs2_journal_dirty(handle, bucket->bu_bhs[i]);
@@ -1507,7 +1512,7 @@ static int ocfs2_xattr_set_entry(struct inode *inode,
1507 last += 1; 1512 last += 1;
1508 } 1513 }
1509 1514
1510 free = min_offs - ((void *)last - xs->base) - sizeof(__u32); 1515 free = min_offs - ((void *)last - xs->base) - OCFS2_XATTR_HEADER_GAP;
1511 if (free < 0) 1516 if (free < 0)
1512 return -EIO; 1517 return -EIO;
1513 1518
@@ -2190,7 +2195,7 @@ static int ocfs2_xattr_can_be_in_inode(struct inode *inode,
2190 last += 1; 2195 last += 1;
2191 } 2196 }
2192 2197
2193 free = min_offs - ((void *)last - xs->base) - sizeof(__u32); 2198 free = min_offs - ((void *)last - xs->base) - OCFS2_XATTR_HEADER_GAP;
2194 if (free < 0) 2199 if (free < 0)
2195 return 0; 2200 return 0;
2196 2201
@@ -2592,8 +2597,9 @@ static int __ocfs2_xattr_set_handle(struct inode *inode,
2592 2597
2593 if (!ret) { 2598 if (!ret) {
2594 /* Update inode ctime. */ 2599 /* Update inode ctime. */
2595 ret = ocfs2_journal_access(ctxt->handle, inode, xis->inode_bh, 2600 ret = ocfs2_journal_access_di(ctxt->handle, inode,
2596 OCFS2_JOURNAL_ACCESS_WRITE); 2601 xis->inode_bh,
2602 OCFS2_JOURNAL_ACCESS_WRITE);
2597 if (ret) { 2603 if (ret) {
2598 mlog_errno(ret); 2604 mlog_errno(ret);
2599 goto out; 2605 goto out;
@@ -5060,8 +5066,8 @@ try_again:
5060 xh_free_start = le16_to_cpu(xh->xh_free_start); 5066 xh_free_start = le16_to_cpu(xh->xh_free_start);
5061 header_size = sizeof(struct ocfs2_xattr_header) + 5067 header_size = sizeof(struct ocfs2_xattr_header) +
5062 count * sizeof(struct ocfs2_xattr_entry); 5068 count * sizeof(struct ocfs2_xattr_entry);
5063 max_free = OCFS2_XATTR_BUCKET_SIZE - 5069 max_free = OCFS2_XATTR_BUCKET_SIZE - header_size -
5064 le16_to_cpu(xh->xh_name_value_len) - header_size; 5070 le16_to_cpu(xh->xh_name_value_len) - OCFS2_XATTR_HEADER_GAP;
5065 5071
5066 mlog_bug_on_msg(header_size > blocksize, "bucket %llu has header size " 5072 mlog_bug_on_msg(header_size > blocksize, "bucket %llu has header size "
5067 "of %u which exceed block size\n", 5073 "of %u which exceed block size\n",
@@ -5094,7 +5100,7 @@ try_again:
5094 need = 0; 5100 need = 0;
5095 } 5101 }
5096 5102
5097 free = xh_free_start - header_size; 5103 free = xh_free_start - header_size - OCFS2_XATTR_HEADER_GAP;
5098 /* 5104 /*
5099 * We need to make sure the new name/value pair 5105 * We need to make sure the new name/value pair
5100 * can exist in the same block. 5106 * can exist in the same block.
@@ -5127,7 +5133,8 @@ try_again:
5127 } 5133 }
5128 5134
5129 xh_free_start = le16_to_cpu(xh->xh_free_start); 5135 xh_free_start = le16_to_cpu(xh->xh_free_start);
5130 free = xh_free_start - header_size; 5136 free = xh_free_start - header_size
5137 - OCFS2_XATTR_HEADER_GAP;
5131 if (xh_free_start % blocksize < need) 5138 if (xh_free_start % blocksize < need)
5132 free -= xh_free_start % blocksize; 5139 free -= xh_free_start % blocksize;
5133 5140
diff --git a/fs/proc/inode.c b/fs/proc/inode.c
index 3e76bb9b3ad6..d8bb5c671f42 100644
--- a/fs/proc/inode.c
+++ b/fs/proc/inode.c
@@ -485,8 +485,10 @@ struct inode *proc_get_inode(struct super_block *sb, unsigned int ino,
485 } 485 }
486 } 486 }
487 unlock_new_inode(inode); 487 unlock_new_inode(inode);
488 } else 488 } else {
489 module_put(de->owner); 489 module_put(de->owner);
490 de_put(de);
491 }
490 return inode; 492 return inode;
491 493
492out_ino: 494out_ino:
diff --git a/fs/proc/page.c b/fs/proc/page.c
index 767d95a6d1b1..2d1345112a42 100644
--- a/fs/proc/page.c
+++ b/fs/proc/page.c
@@ -107,7 +107,7 @@ static ssize_t kpageflags_read(struct file *file, char __user *buf,
107 else 107 else
108 kflags = ppage->flags; 108 kflags = ppage->flags;
109 109
110 uflags = kpf_copy_bit(KPF_LOCKED, PG_locked, kflags) | 110 uflags = kpf_copy_bit(kflags, KPF_LOCKED, PG_locked) |
111 kpf_copy_bit(kflags, KPF_ERROR, PG_error) | 111 kpf_copy_bit(kflags, KPF_ERROR, PG_error) |
112 kpf_copy_bit(kflags, KPF_REFERENCED, PG_referenced) | 112 kpf_copy_bit(kflags, KPF_REFERENCED, PG_referenced) |
113 kpf_copy_bit(kflags, KPF_UPTODATE, PG_uptodate) | 113 kpf_copy_bit(kflags, KPF_UPTODATE, PG_uptodate) |
diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
index 0b0d236c2154..c7d4b2e606a5 100644
--- a/include/drm/drm_crtc_helper.h
+++ b/include/drm/drm_crtc_helper.h
@@ -76,6 +76,7 @@ struct drm_encoder_helper_funcs {
76 void (*mode_set)(struct drm_encoder *encoder, 76 void (*mode_set)(struct drm_encoder *encoder,
77 struct drm_display_mode *mode, 77 struct drm_display_mode *mode,
78 struct drm_display_mode *adjusted_mode); 78 struct drm_display_mode *adjusted_mode);
79 struct drm_crtc *(*get_crtc)(struct drm_encoder *encoder);
79 /* detect for DAC style encoders */ 80 /* detect for DAC style encoders */
80 enum drm_connector_status (*detect)(struct drm_encoder *encoder, 81 enum drm_connector_status (*detect)(struct drm_encoder *encoder,
81 struct drm_connector *connector); 82 struct drm_connector *connector);
diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h
index c707c15f5164..ff8d27af4786 100644
--- a/include/drm/drm_edid.h
+++ b/include/drm/drm_edid.h
@@ -58,10 +58,10 @@ struct detailed_pixel_timing {
58 u8 hsync_pulse_width_lo; 58 u8 hsync_pulse_width_lo;
59 u8 vsync_pulse_width_lo:4; 59 u8 vsync_pulse_width_lo:4;
60 u8 vsync_offset_lo:4; 60 u8 vsync_offset_lo:4;
61 u8 hsync_pulse_width_hi:2;
62 u8 hsync_offset_hi:2;
63 u8 vsync_pulse_width_hi:2; 61 u8 vsync_pulse_width_hi:2;
64 u8 vsync_offset_hi:2; 62 u8 vsync_offset_hi:2;
63 u8 hsync_pulse_width_hi:2;
64 u8 hsync_offset_hi:2;
65 u8 width_mm_lo; 65 u8 width_mm_lo;
66 u8 height_mm_lo; 66 u8 height_mm_lo;
67 u8 height_mm_hi:4; 67 u8 height_mm_hi:4;
diff --git a/include/linux/Kbuild b/include/linux/Kbuild
index b97cdc516a8f..106c3ba50844 100644
--- a/include/linux/Kbuild
+++ b/include/linux/Kbuild
@@ -52,6 +52,7 @@ header-y += const.h
52header-y += cgroupstats.h 52header-y += cgroupstats.h
53header-y += cramfs_fs.h 53header-y += cramfs_fs.h
54header-y += cycx_cfm.h 54header-y += cycx_cfm.h
55header-y += dcbnl.h
55header-y += dlmconstants.h 56header-y += dlmconstants.h
56header-y += dlm_device.h 57header-y += dlm_device.h
57header-y += dlm_netlink.h 58header-y += dlm_netlink.h
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index dcaa0fd84b02..465d6babc847 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -708,6 +708,8 @@ struct req_iterator {
708}; 708};
709 709
710/* This should not be used directly - use rq_for_each_segment */ 710/* This should not be used directly - use rq_for_each_segment */
711#define for_each_bio(_bio) \
712 for (; _bio; _bio = _bio->bi_next)
711#define __rq_for_each_bio(_bio, rq) \ 713#define __rq_for_each_bio(_bio, rq) \
712 if ((rq->bio)) \ 714 if ((rq->bio)) \
713 for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next) 715 for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)
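The new for_each_bio() macro simply walks an already-known chain via bi_next, while the existing __rq_for_each_bio() starts from a request's first bio. A minimal usage sketch (not part of this patch; count_request_bios() is a hypothetical helper used only for illustration):

#include <linux/blkdev.h>

/* Hypothetical helper: count the bios chained onto a request. */
static unsigned int count_request_bios(struct request *rq)
{
	struct bio *bio;
	unsigned int n = 0;

	/* Visits rq->bio, then follows bi_next until the chain ends. */
	__rq_for_each_bio(bio, rq)
		n++;

	return n;
}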
diff --git a/include/linux/dcbnl.h b/include/linux/dcbnl.h
index b0ef274e0031..7d2e10006188 100644
--- a/include/linux/dcbnl.h
+++ b/include/linux/dcbnl.h
@@ -20,10 +20,12 @@
20#ifndef __LINUX_DCBNL_H__ 20#ifndef __LINUX_DCBNL_H__
21#define __LINUX_DCBNL_H__ 21#define __LINUX_DCBNL_H__
22 22
23#include <linux/types.h>
24
23#define DCB_PROTO_VERSION 1 25#define DCB_PROTO_VERSION 1
24 26
25struct dcbmsg { 27struct dcbmsg {
26 unsigned char dcb_family; 28 __u8 dcb_family;
27 __u8 cmd; 29 __u8 cmd;
28 __u16 dcb_pad; 30 __u16 dcb_pad;
29}; 31};
diff --git a/include/linux/decompress/bunzip2.h b/include/linux/decompress/bunzip2.h
new file mode 100644
index 000000000000..115272137a9c
--- /dev/null
+++ b/include/linux/decompress/bunzip2.h
@@ -0,0 +1,10 @@
1#ifndef DECOMPRESS_BUNZIP2_H
2#define DECOMPRESS_BUNZIP2_H
3
4int bunzip2(unsigned char *inbuf, int len,
5 int(*fill)(void*, unsigned int),
6 int(*flush)(void*, unsigned int),
7 unsigned char *output,
8 int *pos,
9 void(*error)(char *x));
10#endif
diff --git a/include/linux/decompress/generic.h b/include/linux/decompress/generic.h
new file mode 100644
index 000000000000..6dfb856327bb
--- /dev/null
+++ b/include/linux/decompress/generic.h
@@ -0,0 +1,33 @@
1#ifndef DECOMPRESS_GENERIC_H
2#define DECOMPRESS_GENERIC_H
3
 4/* Minimal chunk size to be read.
5 *Bzip2 prefers at least 4096
6 *Lzma prefers 0x10000 */
7#define COMPR_IOBUF_SIZE 4096
8
9typedef int (*decompress_fn) (unsigned char *inbuf, int len,
10 int(*fill)(void*, unsigned int),
11 int(*writebb)(void*, unsigned int),
12 unsigned char *output,
13 int *posp,
14 void(*error)(char *x));
15
16/* inbuf - input buffer
 17 *len - length of pre-read data in inbuf
 18 *fill - function to fill inbuf if empty
 19 *writebb - function to write out the output buffer
 20 *posp - if non-null, input position (number of bytes read) will be
 21 * returned here
 22 *
 23 *If len != 0, inbuf already holds that much pre-read data and fill
 24 *should not be called
 25 *If len = 0, the inbuf is allocated but empty; its size is COMPR_IOBUF_SIZE
 26 *and fill should be called (repeatedly) to read data, at most
 27 *COMPR_IOBUF_SIZE bytes at a time */
28
29/* Utility routine to detect the decompression method */
30decompress_fn decompress_method(const unsigned char *inbuf, int len,
31 const char **name);
32
33#endif
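Callers pick a decompressor at run time with decompress_method() and then invoke it through the common decompress_fn signature; init/do_mounts_rd.c and init/initramfs.c later in this patch are the in-tree users. A minimal sketch, assuming buffer-to-buffer operation (fill and flush both NULL, caller-supplied output buffer) as used by the pre-boot wrappers; unpack_buffer() and report_error() are hypothetical names, not from the patch:

#include <linux/kernel.h>
#include <linux/decompress/generic.h>

static void report_error(char *msg)
{
	printk(KERN_ERR "unpack: %s\n", msg);
}

/* Hypothetical helper: detect the format of 'buf' and decompress it
 * into a caller-provided 'out' buffer. */
static int unpack_buffer(unsigned char *buf, int len, unsigned char *out)
{
	const char *name;
	decompress_fn decomp = decompress_method(buf, len, &name);

	if (!decomp)
		return -1;	/* unknown magic or decompressor not configured */

	/* len != 0, so no fill callback is needed; with flush == NULL this
	 * sketch assumes the result is written straight into 'out' and that
	 * 'out' is large enough for the decompressed data. */
	return decomp(buf, len, NULL, NULL, out, NULL, report_error);
}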
diff --git a/include/linux/decompress/inflate.h b/include/linux/decompress/inflate.h
new file mode 100644
index 000000000000..f9b06ccc3e5c
--- /dev/null
+++ b/include/linux/decompress/inflate.h
@@ -0,0 +1,13 @@
1#ifndef INFLATE_H
2#define INFLATE_H
3
4/* Other housekeeping constants */
5#define INBUFSIZ 4096
6
7int gunzip(unsigned char *inbuf, int len,
8 int(*fill)(void*, unsigned int),
9 int(*flush)(void*, unsigned int),
10 unsigned char *output,
11 int *pos,
12 void(*error_fn)(char *x));
13#endif
diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
new file mode 100644
index 000000000000..12ff8c3f1d05
--- /dev/null
+++ b/include/linux/decompress/mm.h
@@ -0,0 +1,87 @@
1/*
2 * linux/compr_mm.h
3 *
4 * Memory management for pre-boot and ramdisk uncompressors
5 *
6 * Authors: Alain Knaff <alain@knaff.lu>
7 *
8 */
9
10#ifndef DECOMPR_MM_H
11#define DECOMPR_MM_H
12
13#ifdef STATIC
14
15/* Code active when included from pre-boot environment: */
16
17/* A trivial malloc implementation, adapted from
18 * malloc by Hannu Savolainen 1993 and Matthias Urlichs 1994
19 */
20static unsigned long malloc_ptr;
21static int malloc_count;
22
23static void *malloc(int size)
24{
25 void *p;
26
27 if (size < 0)
28 error("Malloc error");
29 if (!malloc_ptr)
30 malloc_ptr = free_mem_ptr;
31
32 malloc_ptr = (malloc_ptr + 3) & ~3; /* Align */
33
34 p = (void *)malloc_ptr;
35 malloc_ptr += size;
36
37 if (free_mem_end_ptr && malloc_ptr >= free_mem_end_ptr)
38 error("Out of memory");
39
40 malloc_count++;
41 return p;
42}
43
44static void free(void *where)
45{
46 malloc_count--;
47 if (!malloc_count)
48 malloc_ptr = free_mem_ptr;
49}
50
51#define large_malloc(a) malloc(a)
52#define large_free(a) free(a)
53
54#define set_error_fn(x)
55
56#define INIT
57
58#else /* STATIC */
59
60/* Code active when compiled standalone for use when loading ramdisk: */
61
62#include <linux/kernel.h>
63#include <linux/fs.h>
64#include <linux/string.h>
65#include <linux/vmalloc.h>
66
67/* Use defines rather than static inline in order to avoid spurious
68 * warnings when not needed (indeed large_malloc / large_free are not
 69 * needed by inflate). */
70
71#define malloc(a) kmalloc(a, GFP_KERNEL)
72#define free(a) kfree(a)
73
74#define large_malloc(a) vmalloc(a)
75#define large_free(a) vfree(a)
76
77static void(*error)(char *m);
78#define set_error_fn(x) error = x;
79
80#define INIT __init
81#define STATIC
82
83#include <linux/init.h>
84
85#endif /* STATIC */
86
87#endif /* DECOMPR_MM_H */
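decompress/mm.h gives the decompressors two personalities: with STATIC defined (pre-boot), malloc()/free() carve memory from a bump allocator bounded by free_mem_ptr/free_mem_end_ptr, which the including boot stub must provide; otherwise they map onto kmalloc()/kfree() and vmalloc()/vfree(). A rough sketch of the pre-boot side, with made-up heap addresses and a purely illustrative error handler (real stubs, such as the x86 boot code elsewhere in this merge, supply their own):

/* Pre-boot usage sketch: define STATIC plus the heap window expected by
 * decompress/mm.h, then pull in a decompressor source file directly. */
#define STATIC static

static unsigned long free_mem_ptr = 0x00100000;     /* hypothetical heap start */
static unsigned long free_mem_end_ptr = 0x00200000; /* hypothetical heap end */

static void error(char *x)
{
	/* No console in this sketch; a real stub would print x, then halt. */
	while (1)
		;
}

#include "../../../../lib/decompress_inflate.c"	/* path depends on the stub's location */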
diff --git a/include/linux/decompress/unlzma.h b/include/linux/decompress/unlzma.h
new file mode 100644
index 000000000000..7796538f1bf4
--- /dev/null
+++ b/include/linux/decompress/unlzma.h
@@ -0,0 +1,12 @@
1#ifndef DECOMPRESS_UNLZMA_H
2#define DECOMPRESS_UNLZMA_H
3
4int unlzma(unsigned char *, int,
5 int(*fill)(void*, unsigned int),
6 int(*flush)(void*, unsigned int),
7 unsigned char *output,
8 int *posp,
9 void(*error)(char *x)
10 );
11
12#endif
diff --git a/include/linux/i2c-dev.h b/include/linux/i2c-dev.h
index 311315b56b61..fd53bfd26470 100644
--- a/include/linux/i2c-dev.h
+++ b/include/linux/i2c-dev.h
@@ -33,7 +33,7 @@
33 */ 33 */
34#define I2C_RETRIES 0x0701 /* number of times a device address should 34#define I2C_RETRIES 0x0701 /* number of times a device address should
35 be polled when not acknowledging */ 35 be polled when not acknowledging */
36#define I2C_TIMEOUT 0x0702 /* set timeout in jiffies - call with int */ 36#define I2C_TIMEOUT 0x0702 /* set timeout in units of 10 ms */
37 37
38/* NOTE: Slave address is 7 or 10 bits, but 10-bit addresses 38/* NOTE: Slave address is 7 or 10 bits, but 10-bit addresses
39 * are NOT supported! (due to code brokenness) 39 * are NOT supported! (due to code brokenness)
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index fcfbfea3af72..c86c3b07604c 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -361,7 +361,7 @@ struct i2c_adapter {
361 struct mutex bus_lock; 361 struct mutex bus_lock;
362 struct mutex clist_lock; 362 struct mutex clist_lock;
363 363
364 int timeout; 364 int timeout; /* in jiffies */
365 int retries; 365 int retries;
366 struct device dev; /* the adapter device */ 366 struct device dev; /* the adapter device */
367 367
diff --git a/include/linux/ide.h b/include/linux/ide.h
index 194da5a4b0d6..fe235b65207e 100644
--- a/include/linux/ide.h
+++ b/include/linux/ide.h
@@ -663,7 +663,7 @@ typedef struct ide_drive_s ide_drive_t;
663#define to_ide_device(dev) container_of(dev, ide_drive_t, gendev) 663#define to_ide_device(dev) container_of(dev, ide_drive_t, gendev)
664 664
665#define to_ide_drv(obj, cont_type) \ 665#define to_ide_drv(obj, cont_type) \
666 container_of(obj, struct cont_type, kref) 666 container_of(obj, struct cont_type, dev)
667 667
668#define ide_drv_g(disk, cont_type) \ 668#define ide_drv_g(disk, cont_type) \
669 container_of((disk)->private_data, struct cont_type, driver) 669 container_of((disk)->private_data, struct cont_type, driver)
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index f8ff918c208f..e1ff5b14310e 100644
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -210,6 +210,7 @@ static inline struct sk_buff *__vlan_put_tag(struct sk_buff *skb, u16 vlan_tci)
210 210
211 /* Move the mac addresses to the beginning of the new header. */ 211 /* Move the mac addresses to the beginning of the new header. */
212 memmove(skb->data, skb->data + VLAN_HLEN, 2 * VLAN_ETH_ALEN); 212 memmove(skb->data, skb->data + VLAN_HLEN, 2 * VLAN_ETH_ALEN);
213 skb->mac_header -= VLAN_HLEN;
213 214
214 /* first, the ethernet type */ 215 /* first, the ethernet type */
215 veth->h_vlan_proto = htons(ETH_P_8021Q); 216 veth->h_vlan_proto = htons(ETH_P_8021Q);
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
index c4f6c101dbcd..d2e3cbfba14f 100644
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -194,6 +194,7 @@ static inline void dmar_writeq(void __iomem *addr, u64 val)
194/* FSTS_REG */ 194/* FSTS_REG */
195#define DMA_FSTS_PPF ((u32)2) 195#define DMA_FSTS_PPF ((u32)2)
196#define DMA_FSTS_PFO ((u32)1) 196#define DMA_FSTS_PFO ((u32)1)
197#define DMA_FSTS_IQE (1 << 4)
197#define dma_fsts_fault_record_index(s) (((s) >> 8) & 0xff) 198#define dma_fsts_fault_record_index(s) (((s) >> 8) & 0xff)
198 199
199/* FRCD_REG, 32 bits access */ 200/* FRCD_REG, 32 bits access */
@@ -328,7 +329,7 @@ extern int qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
328 unsigned int size_order, u64 type, 329 unsigned int size_order, u64 type,
329 int non_present_entry_flush); 330 int non_present_entry_flush);
330 331
331extern void qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu); 332extern int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu);
332 333
333extern void *intel_alloc_coherent(struct device *, size_t, dma_addr_t *, gfp_t); 334extern void *intel_alloc_coherent(struct device *, size_t, dma_addr_t *, gfp_t);
334extern void intel_free_coherent(struct device *, size_t, void *, dma_addr_t); 335extern void intel_free_coherent(struct device *, size_t, void *, dma_addr_t);
diff --git a/include/linux/io-mapping.h b/include/linux/io-mapping.h
index 82df31726a54..0adb0f91568c 100644
--- a/include/linux/io-mapping.h
+++ b/include/linux/io-mapping.h
@@ -30,11 +30,14 @@
30 * See Documentation/io_mapping.txt 30 * See Documentation/io_mapping.txt
31 */ 31 */
32 32
33/* this struct isn't actually defined anywhere */
34struct io_mapping;
35
36#ifdef CONFIG_HAVE_ATOMIC_IOMAP 33#ifdef CONFIG_HAVE_ATOMIC_IOMAP
37 34
35struct io_mapping {
36 resource_size_t base;
37 unsigned long size;
38 pgprot_t prot;
39};
40
38/* 41/*
39 * For small address space machines, mapping large objects 42 * For small address space machines, mapping large objects
40 * into the kernel virtual space isn't practical. Where 43 * into the kernel virtual space isn't practical. Where
@@ -43,23 +46,40 @@ struct io_mapping;
43 */ 46 */
44 47
45static inline struct io_mapping * 48static inline struct io_mapping *
46io_mapping_create_wc(unsigned long base, unsigned long size) 49io_mapping_create_wc(resource_size_t base, unsigned long size)
47{ 50{
48 return (struct io_mapping *) base; 51 struct io_mapping *iomap;
52
53 if (!is_io_mapping_possible(base, size))
54 return NULL;
55
56 iomap = kmalloc(sizeof(*iomap), GFP_KERNEL);
57 if (!iomap)
58 return NULL;
59
60 iomap->base = base;
61 iomap->size = size;
62 iomap->prot = pgprot_writecombine(__pgprot(__PAGE_KERNEL));
63 return iomap;
49} 64}
50 65
51static inline void 66static inline void
52io_mapping_free(struct io_mapping *mapping) 67io_mapping_free(struct io_mapping *mapping)
53{ 68{
69 kfree(mapping);
54} 70}
55 71
56/* Atomic map/unmap */ 72/* Atomic map/unmap */
57static inline void * 73static inline void *
58io_mapping_map_atomic_wc(struct io_mapping *mapping, unsigned long offset) 74io_mapping_map_atomic_wc(struct io_mapping *mapping, unsigned long offset)
59{ 75{
60 offset += (unsigned long) mapping; 76 resource_size_t phys_addr;
61 return iomap_atomic_prot_pfn(offset >> PAGE_SHIFT, KM_USER0, 77 unsigned long pfn;
62 __pgprot(__PAGE_KERNEL_WC)); 78
79 BUG_ON(offset >= mapping->size);
80 phys_addr = mapping->base + offset;
81 pfn = (unsigned long) (phys_addr >> PAGE_SHIFT);
82 return iomap_atomic_prot_pfn(pfn, KM_USER0, mapping->prot);
63} 83}
64 84
65static inline void 85static inline void
@@ -71,8 +91,12 @@ io_mapping_unmap_atomic(void *vaddr)
71static inline void * 91static inline void *
72io_mapping_map_wc(struct io_mapping *mapping, unsigned long offset) 92io_mapping_map_wc(struct io_mapping *mapping, unsigned long offset)
73{ 93{
74 offset += (unsigned long) mapping; 94 resource_size_t phys_addr;
75 return ioremap_wc(offset, PAGE_SIZE); 95
96 BUG_ON(offset >= mapping->size);
97 phys_addr = mapping->base + offset;
98
99 return ioremap_wc(phys_addr, PAGE_SIZE);
76} 100}
77 101
78static inline void 102static inline void
@@ -83,9 +107,12 @@ io_mapping_unmap(void *vaddr)
83 107
84#else 108#else
85 109
110/* this struct isn't actually defined anywhere */
111struct io_mapping;
112
86/* Create the io_mapping object*/ 113/* Create the io_mapping object*/
87static inline struct io_mapping * 114static inline struct io_mapping *
88io_mapping_create_wc(unsigned long base, unsigned long size) 115io_mapping_create_wc(resource_size_t base, unsigned long size)
89{ 116{
90 return (struct io_mapping *) ioremap_wc(base, size); 117 return (struct io_mapping *) ioremap_wc(base, size);
91} 118}
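With this change io_mapping_create_wc() allocates a small descriptor (base, size, protection) instead of encoding the base address in the returned pointer, and the map helpers compute the physical address from it. A sketch of the resulting lifecycle (illustrative only; touch_first_dword() and the BAR arguments are hypothetical):

#include <linux/errno.h>
#include <linux/io.h>
#include <linux/io-mapping.h>

/* Hypothetical helper: write one dword at the start of a device BAR
 * through a write-combining io_mapping. */
static int touch_first_dword(resource_size_t bar_base, unsigned long bar_size)
{
	struct io_mapping *map;
	void *vaddr;

	map = io_mapping_create_wc(bar_base, bar_size);
	if (!map)
		return -ENOMEM;

	vaddr = io_mapping_map_atomic_wc(map, 0);	/* page at offset 0 */
	writel(0, (void __iomem *)vaddr);
	io_mapping_unmap_atomic(vaddr);

	io_mapping_free(map);
	return 0;
}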
diff --git a/include/linux/netfilter/xt_NFLOG.h b/include/linux/netfilter/xt_NFLOG.h
index cdcd0ed58f7a..4b36aeb46a10 100644
--- a/include/linux/netfilter/xt_NFLOG.h
+++ b/include/linux/netfilter/xt_NFLOG.h
@@ -2,7 +2,7 @@
2#define _XT_NFLOG_TARGET 2#define _XT_NFLOG_TARGET
3 3
4#define XT_NFLOG_DEFAULT_GROUP 0x1 4#define XT_NFLOG_DEFAULT_GROUP 0x1
5#define XT_NFLOG_DEFAULT_THRESHOLD 1 5#define XT_NFLOG_DEFAULT_THRESHOLD 0
6 6
7#define XT_NFLOG_MASK 0x0 7#define XT_NFLOG_MASK 0x0
8 8
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index cf2cb50f77d1..9dcf956ad18a 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -416,15 +416,6 @@ extern void skb_over_panic(struct sk_buff *skb, int len,
416 void *here); 416 void *here);
417extern void skb_under_panic(struct sk_buff *skb, int len, 417extern void skb_under_panic(struct sk_buff *skb, int len,
418 void *here); 418 void *here);
419extern void skb_truesize_bug(struct sk_buff *skb);
420
421static inline void skb_truesize_check(struct sk_buff *skb)
422{
423 int len = sizeof(struct sk_buff) + skb->len;
424
425 if (unlikely((int)skb->truesize < len))
426 skb_truesize_bug(skb);
427}
428 419
429extern int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb, 420extern int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
430 int getfrag(void *from, char *to, int offset, 421 int getfrag(void *from, char *to, int offset,
diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h
index 315bcd375224..cc4f45361dbb 100644
--- a/include/linux/user_namespace.h
+++ b/include/linux/user_namespace.h
@@ -13,6 +13,7 @@ struct user_namespace {
13 struct kref kref; 13 struct kref kref;
14 struct hlist_head uidhash_table[UIDHASH_SZ]; 14 struct hlist_head uidhash_table[UIDHASH_SZ];
15 struct user_struct *creator; 15 struct user_struct *creator;
16 struct work_struct destroyer;
16}; 17};
17 18
18extern struct user_namespace init_user_ns; 19extern struct user_namespace init_user_ns;
diff --git a/include/net/netfilter/nf_conntrack_core.h b/include/net/netfilter/nf_conntrack_core.h
index e78afe7f28e3..c25068e38516 100644
--- a/include/net/netfilter/nf_conntrack_core.h
+++ b/include/net/netfilter/nf_conntrack_core.h
@@ -59,7 +59,7 @@ static inline int nf_conntrack_confirm(struct sk_buff *skb)
59 struct nf_conn *ct = (struct nf_conn *)skb->nfct; 59 struct nf_conn *ct = (struct nf_conn *)skb->nfct;
60 int ret = NF_ACCEPT; 60 int ret = NF_ACCEPT;
61 61
62 if (ct) { 62 if (ct && ct != &nf_conntrack_untracked) {
63 if (!nf_ct_is_confirmed(ct) && !nf_ct_is_dying(ct)) 63 if (!nf_ct_is_confirmed(ct) && !nf_ct_is_dying(ct))
64 ret = __nf_conntrack_confirm(skb); 64 ret = __nf_conntrack_confirm(skb);
65 nf_ct_deliver_cached_events(ct); 65 nf_ct_deliver_cached_events(ct);
diff --git a/include/net/sock.h b/include/net/sock.h
index ce3b5b622683..eefeeaf7fc46 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -860,7 +860,6 @@ static inline void sk_mem_uncharge(struct sock *sk, int size)
860 860
861static inline void sk_wmem_free_skb(struct sock *sk, struct sk_buff *skb) 861static inline void sk_wmem_free_skb(struct sock *sk, struct sk_buff *skb)
862{ 862{
863 skb_truesize_check(skb);
864 sock_set_flag(sk, SOCK_QUEUE_SHRUNK); 863 sock_set_flag(sk, SOCK_QUEUE_SHRUNK);
865 sk->sk_wmem_queued -= skb->truesize; 864 sk->sk_wmem_queued -= skb->truesize;
866 sk_mem_uncharge(sk, skb->truesize); 865 sk_mem_uncharge(sk, skb->truesize);
diff --git a/init/Kconfig b/init/Kconfig
index f068071fcc5d..95a66131403a 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -101,6 +101,66 @@ config LOCALVERSION_AUTO
101 101
102 which is done within the script "scripts/setlocalversion".) 102 which is done within the script "scripts/setlocalversion".)
103 103
104config HAVE_KERNEL_GZIP
105 bool
106
107config HAVE_KERNEL_BZIP2
108 bool
109
110config HAVE_KERNEL_LZMA
111 bool
112
113choice
114 prompt "Kernel compression mode"
115 default KERNEL_GZIP
116 depends on HAVE_KERNEL_GZIP || HAVE_KERNEL_BZIP2 || HAVE_KERNEL_LZMA
117 help
 118 The Linux kernel is a kind of self-extracting executable.
119 Several compression algorithms are available, which differ
120 in efficiency, compression and decompression speed.
121 Compression speed is only relevant when building a kernel.
122 Decompression speed is relevant at each boot.
123
124 If you have any problems with bzip2 or lzma compressed
125 kernels, mail me (Alain Knaff) <alain@knaff.lu>. (An older
126 version of this functionality (bzip2 only), for 2.4, was
127 supplied by Christian Ludwig)
128
 129 High compression options are mostly useful for users who
 130 are low on disk space (embedded systems) but for whom RAM
 131 size matters less.
132
 133 If in doubt, select 'gzip'.
134
135config KERNEL_GZIP
136 bool "Gzip"
137 depends on HAVE_KERNEL_GZIP
138 help
139 The old and tried gzip compression. Its compression ratio is
140 the poorest among the 3 choices; however its speed (both
141 compression and decompression) is the fastest.
142
143config KERNEL_BZIP2
144 bool "Bzip2"
145 depends on HAVE_KERNEL_BZIP2
146 help
 147 Its compression ratio and speed are intermediate.
 148 Decompression speed is the slowest among the three. The kernel
 149 size is about 10% smaller with bzip2 than with gzip.
150 Bzip2 uses a large amount of memory. For modern kernels you
 151 will need at least 8MB of RAM for booting.
152
153config KERNEL_LZMA
154 bool "LZMA"
155 depends on HAVE_KERNEL_LZMA
156 help
157 The most recent compression algorithm.
 158 Its compression ratio is the best; its decompression speed falls
 159 between the other two. Compression is the slowest. The kernel size is about 33%
160 smaller with LZMA in comparison to gzip.
161
162endchoice
163
104config SWAP 164config SWAP
105 bool "Support for paging of anonymous memory (swap)" 165 bool "Support for paging of anonymous memory (swap)"
106 depends on MMU && BLOCK 166 depends on MMU && BLOCK
diff --git a/init/do_mounts_rd.c b/init/do_mounts_rd.c
index 0f0f0cf3ba9a..027a402708de 100644
--- a/init/do_mounts_rd.c
+++ b/init/do_mounts_rd.c
@@ -11,6 +11,9 @@
11#include "do_mounts.h" 11#include "do_mounts.h"
12#include "../fs/squashfs/squashfs_fs.h" 12#include "../fs/squashfs/squashfs_fs.h"
13 13
14#include <linux/decompress/generic.h>
15
16
14int __initdata rd_prompt = 1;/* 1 = prompt for RAM disk, 0 = don't prompt */ 17int __initdata rd_prompt = 1;/* 1 = prompt for RAM disk, 0 = don't prompt */
15 18
16static int __init prompt_ramdisk(char *str) 19static int __init prompt_ramdisk(char *str)
@@ -29,7 +32,7 @@ static int __init ramdisk_start_setup(char *str)
29} 32}
30__setup("ramdisk_start=", ramdisk_start_setup); 33__setup("ramdisk_start=", ramdisk_start_setup);
31 34
32static int __init crd_load(int in_fd, int out_fd); 35static int __init crd_load(int in_fd, int out_fd, decompress_fn deco);
33 36
34/* 37/*
35 * This routine tries to find a RAM disk image to load, and returns the 38 * This routine tries to find a RAM disk image to load, and returns the
@@ -38,15 +41,15 @@ static int __init crd_load(int in_fd, int out_fd);
38 * numbers could not be found. 41 * numbers could not be found.
39 * 42 *
40 * We currently check for the following magic numbers: 43 * We currently check for the following magic numbers:
41 * minix 44 * minix
42 * ext2 45 * ext2
43 * romfs 46 * romfs
44 * cramfs 47 * cramfs
45 * squashfs 48 * squashfs
46 * gzip 49 * gzip
47 */ 50 */
48static int __init 51static int __init
49identify_ramdisk_image(int fd, int start_block) 52identify_ramdisk_image(int fd, int start_block, decompress_fn *decompressor)
50{ 53{
51 const int size = 512; 54 const int size = 512;
52 struct minix_super_block *minixsb; 55 struct minix_super_block *minixsb;
@@ -56,6 +59,7 @@ identify_ramdisk_image(int fd, int start_block)
56 struct squashfs_super_block *squashfsb; 59 struct squashfs_super_block *squashfsb;
57 int nblocks = -1; 60 int nblocks = -1;
58 unsigned char *buf; 61 unsigned char *buf;
62 const char *compress_name;
59 63
60 buf = kmalloc(size, GFP_KERNEL); 64 buf = kmalloc(size, GFP_KERNEL);
61 if (!buf) 65 if (!buf)
@@ -69,18 +73,19 @@ identify_ramdisk_image(int fd, int start_block)
69 memset(buf, 0xe5, size); 73 memset(buf, 0xe5, size);
70 74
71 /* 75 /*
72 * Read block 0 to test for gzipped kernel 76 * Read block 0 to test for compressed kernel
73 */ 77 */
74 sys_lseek(fd, start_block * BLOCK_SIZE, 0); 78 sys_lseek(fd, start_block * BLOCK_SIZE, 0);
75 sys_read(fd, buf, size); 79 sys_read(fd, buf, size);
76 80
77 /* 81 *decompressor = decompress_method(buf, size, &compress_name);
78 * If it matches the gzip magic numbers, return 0 82 if (compress_name) {
79 */ 83 printk(KERN_NOTICE "RAMDISK: %s image found at block %d\n",
80 if (buf[0] == 037 && ((buf[1] == 0213) || (buf[1] == 0236))) { 84 compress_name, start_block);
81 printk(KERN_NOTICE 85 if (!*decompressor)
82 "RAMDISK: Compressed image found at block %d\n", 86 printk(KERN_EMERG
83 start_block); 87 "RAMDISK: %s decompressor not configured!\n",
88 compress_name);
84 nblocks = 0; 89 nblocks = 0;
85 goto done; 90 goto done;
86 } 91 }
@@ -142,7 +147,7 @@ identify_ramdisk_image(int fd, int start_block)
142 printk(KERN_NOTICE 147 printk(KERN_NOTICE
143 "RAMDISK: Couldn't find valid RAM disk image starting at %d.\n", 148 "RAMDISK: Couldn't find valid RAM disk image starting at %d.\n",
144 start_block); 149 start_block);
145 150
146done: 151done:
147 sys_lseek(fd, start_block * BLOCK_SIZE, 0); 152 sys_lseek(fd, start_block * BLOCK_SIZE, 0);
148 kfree(buf); 153 kfree(buf);
@@ -157,6 +162,7 @@ int __init rd_load_image(char *from)
157 int nblocks, i, disk; 162 int nblocks, i, disk;
158 char *buf = NULL; 163 char *buf = NULL;
159 unsigned short rotate = 0; 164 unsigned short rotate = 0;
165 decompress_fn decompressor = NULL;
160#if !defined(CONFIG_S390) && !defined(CONFIG_PPC_ISERIES) 166#if !defined(CONFIG_S390) && !defined(CONFIG_PPC_ISERIES)
161 char rotator[4] = { '|' , '/' , '-' , '\\' }; 167 char rotator[4] = { '|' , '/' , '-' , '\\' };
162#endif 168#endif
@@ -169,12 +175,12 @@ int __init rd_load_image(char *from)
169 if (in_fd < 0) 175 if (in_fd < 0)
170 goto noclose_input; 176 goto noclose_input;
171 177
172 nblocks = identify_ramdisk_image(in_fd, rd_image_start); 178 nblocks = identify_ramdisk_image(in_fd, rd_image_start, &decompressor);
173 if (nblocks < 0) 179 if (nblocks < 0)
174 goto done; 180 goto done;
175 181
176 if (nblocks == 0) { 182 if (nblocks == 0) {
177 if (crd_load(in_fd, out_fd) == 0) 183 if (crd_load(in_fd, out_fd, decompressor) == 0)
178 goto successful_load; 184 goto successful_load;
179 goto done; 185 goto done;
180 } 186 }
@@ -200,7 +206,7 @@ int __init rd_load_image(char *from)
200 nblocks, rd_blocks); 206 nblocks, rd_blocks);
201 goto done; 207 goto done;
202 } 208 }
203 209
204 /* 210 /*
205 * OK, time to copy in the data 211 * OK, time to copy in the data
206 */ 212 */
@@ -273,138 +279,48 @@ int __init rd_load_disk(int n)
273 return rd_load_image("/dev/root"); 279 return rd_load_image("/dev/root");
274} 280}
275 281
276/*
277 * gzip declarations
278 */
279
280#define OF(args) args
281
282#ifndef memzero
283#define memzero(s, n) memset ((s), 0, (n))
284#endif
285
286typedef unsigned char uch;
287typedef unsigned short ush;
288typedef unsigned long ulg;
289
290#define INBUFSIZ 4096
291#define WSIZE 0x8000 /* window size--must be a power of two, and */
292 /* at least 32K for zip's deflate method */
293
294static uch *inbuf;
295static uch *window;
296
297static unsigned insize; /* valid bytes in inbuf */
298static unsigned inptr; /* index of next byte to be processed in inbuf */
299static unsigned outcnt; /* bytes in output buffer */
300static int exit_code; 282static int exit_code;
301static int unzip_error; 283static int decompress_error;
302static long bytes_out;
303static int crd_infd, crd_outfd; 284static int crd_infd, crd_outfd;
304 285
305#define get_byte() (inptr < insize ? inbuf[inptr++] : fill_inbuf()) 286static int __init compr_fill(void *buf, unsigned int len)
306
307/* Diagnostic functions (stubbed out) */
308#define Assert(cond,msg)
309#define Trace(x)
310#define Tracev(x)
311#define Tracevv(x)
312#define Tracec(c,x)
313#define Tracecv(c,x)
314
315#define STATIC static
316#define INIT __init
317
318static int __init fill_inbuf(void);
319static void __init flush_window(void);
320static void __init error(char *m);
321
322#define NO_INFLATE_MALLOC
323
324#include "../lib/inflate.c"
325
326/* ===========================================================================
327 * Fill the input buffer. This is called only when the buffer is empty
328 * and at least one byte is really needed.
329 * Returning -1 does not guarantee that gunzip() will ever return.
330 */
331static int __init fill_inbuf(void)
332{ 287{
333 if (exit_code) return -1; 288 int r = sys_read(crd_infd, buf, len);
334 289 if (r < 0)
335 insize = sys_read(crd_infd, inbuf, INBUFSIZ); 290 printk(KERN_ERR "RAMDISK: error while reading compressed data");
336 if (insize == 0) { 291 else if (r == 0)
337 error("RAMDISK: ran out of compressed data"); 292 printk(KERN_ERR "RAMDISK: EOF while reading compressed data");
338 return -1; 293 return r;
339 }
340
341 inptr = 1;
342
343 return inbuf[0];
344} 294}
345 295
346/* =========================================================================== 296static int __init compr_flush(void *window, unsigned int outcnt)
347 * Write the output window window[0..outcnt-1] and update crc and bytes_out.
348 * (Used for the decompressed data only.)
349 */
350static void __init flush_window(void)
351{ 297{
352 ulg c = crc; /* temporary variable */ 298 int written = sys_write(crd_outfd, window, outcnt);
353 unsigned n, written; 299 if (written != outcnt) {
354 uch *in, ch; 300 if (decompress_error == 0)
355 301 printk(KERN_ERR
356 written = sys_write(crd_outfd, window, outcnt); 302 "RAMDISK: incomplete write (%d != %d)\n",
357 if (written != outcnt && unzip_error == 0) { 303 written, outcnt);
358 printk(KERN_ERR "RAMDISK: incomplete write (%d != %d) %ld\n", 304 decompress_error = 1;
359 written, outcnt, bytes_out); 305 return -1;
360 unzip_error = 1; 306 }
361 } 307 return outcnt;
362 in = window;
363 for (n = 0; n < outcnt; n++) {
364 ch = *in++;
365 c = crc_32_tab[((int)c ^ ch) & 0xff] ^ (c >> 8);
366 }
367 crc = c;
368 bytes_out += (ulg)outcnt;
369 outcnt = 0;
370} 308}
371 309
372static void __init error(char *x) 310static void __init error(char *x)
373{ 311{
374 printk(KERN_ERR "%s\n", x); 312 printk(KERN_ERR "%s\n", x);
375 exit_code = 1; 313 exit_code = 1;
376 unzip_error = 1; 314 decompress_error = 1;
377} 315}
378 316
379static int __init crd_load(int in_fd, int out_fd) 317static int __init crd_load(int in_fd, int out_fd, decompress_fn deco)
380{ 318{
381 int result; 319 int result;
382
383 insize = 0; /* valid bytes in inbuf */
384 inptr = 0; /* index of next byte to be processed in inbuf */
385 outcnt = 0; /* bytes in output buffer */
386 exit_code = 0;
387 bytes_out = 0;
388 crc = (ulg)0xffffffffL; /* shift register contents */
389
390 crd_infd = in_fd; 320 crd_infd = in_fd;
391 crd_outfd = out_fd; 321 crd_outfd = out_fd;
392 inbuf = kmalloc(INBUFSIZ, GFP_KERNEL); 322 result = deco(NULL, 0, compr_fill, compr_flush, NULL, NULL, error);
393 if (!inbuf) { 323 if (decompress_error)
394 printk(KERN_ERR "RAMDISK: Couldn't allocate gzip buffer\n");
395 return -1;
396 }
397 window = kmalloc(WSIZE, GFP_KERNEL);
398 if (!window) {
399 printk(KERN_ERR "RAMDISK: Couldn't allocate gzip window\n");
400 kfree(inbuf);
401 return -1;
402 }
403 makecrc();
404 result = gunzip();
405 if (unzip_error)
406 result = 1; 324 result = 1;
407 kfree(inbuf);
408 kfree(window);
409 return result; 325 return result;
410} 326}
diff --git a/init/initramfs.c b/init/initramfs.c
index d9c941c0c3ca..7dcde7ea6603 100644
--- a/init/initramfs.c
+++ b/init/initramfs.c
@@ -390,11 +390,13 @@ static int __init write_buffer(char *buf, unsigned len)
390 return len - count; 390 return len - count;
391} 391}
392 392
393static void __init flush_buffer(char *buf, unsigned len) 393static int __init flush_buffer(void *bufv, unsigned len)
394{ 394{
395 char *buf = (char *) bufv;
395 int written; 396 int written;
397 int origLen = len;
396 if (message) 398 if (message)
397 return; 399 return -1;
398 while ((written = write_buffer(buf, len)) < len && !message) { 400 while ((written = write_buffer(buf, len)) < len && !message) {
399 char c = buf[written]; 401 char c = buf[written];
400 if (c == '0') { 402 if (c == '0') {
@@ -408,84 +410,28 @@ static void __init flush_buffer(char *buf, unsigned len)
408 } else 410 } else
409 error("junk in compressed archive"); 411 error("junk in compressed archive");
410 } 412 }
413 return origLen;
411} 414}
412 415
413/* 416static unsigned my_inptr; /* index of next byte to be processed in inbuf */
414 * gzip declarations
415 */
416 417
417#define OF(args) args 418#include <linux/decompress/generic.h>
418
419#ifndef memzero
420#define memzero(s, n) memset ((s), 0, (n))
421#endif
422
423typedef unsigned char uch;
424typedef unsigned short ush;
425typedef unsigned long ulg;
426
427#define WSIZE 0x8000 /* window size--must be a power of two, and */
428 /* at least 32K for zip's deflate method */
429
430static uch *inbuf;
431static uch *window;
432
433static unsigned insize; /* valid bytes in inbuf */
434static unsigned inptr; /* index of next byte to be processed in inbuf */
435static unsigned outcnt; /* bytes in output buffer */
436static long bytes_out;
437
438#define get_byte() (inptr < insize ? inbuf[inptr++] : -1)
439
440/* Diagnostic functions (stubbed out) */
441#define Assert(cond,msg)
442#define Trace(x)
443#define Tracev(x)
444#define Tracevv(x)
445#define Tracec(c,x)
446#define Tracecv(c,x)
447
448#define STATIC static
449#define INIT __init
450
451static void __init flush_window(void);
452static void __init error(char *m);
453
454#define NO_INFLATE_MALLOC
455
456#include "../lib/inflate.c"
457
458/* ===========================================================================
459 * Write the output window window[0..outcnt-1] and update crc and bytes_out.
460 * (Used for the decompressed data only.)
461 */
462static void __init flush_window(void)
463{
464 ulg c = crc; /* temporary variable */
465 unsigned n;
466 uch *in, ch;
467
468 flush_buffer(window, outcnt);
469 in = window;
470 for (n = 0; n < outcnt; n++) {
471 ch = *in++;
472 c = crc_32_tab[((int)c ^ ch) & 0xff] ^ (c >> 8);
473 }
474 crc = c;
475 bytes_out += (ulg)outcnt;
476 outcnt = 0;
477}
478 419
479static char * __init unpack_to_rootfs(char *buf, unsigned len, int check_only) 420static char * __init unpack_to_rootfs(char *buf, unsigned len, int check_only)
480{ 421{
481 int written; 422 int written;
423 decompress_fn decompress;
424 const char *compress_name;
425 static __initdata char msg_buf[64];
426
482 dry_run = check_only; 427 dry_run = check_only;
483 header_buf = kmalloc(110, GFP_KERNEL); 428 header_buf = kmalloc(110, GFP_KERNEL);
484 symlink_buf = kmalloc(PATH_MAX + N_ALIGN(PATH_MAX) + 1, GFP_KERNEL); 429 symlink_buf = kmalloc(PATH_MAX + N_ALIGN(PATH_MAX) + 1, GFP_KERNEL);
485 name_buf = kmalloc(N_ALIGN(PATH_MAX), GFP_KERNEL); 430 name_buf = kmalloc(N_ALIGN(PATH_MAX), GFP_KERNEL);
486 window = kmalloc(WSIZE, GFP_KERNEL); 431
487 if (!window || !header_buf || !symlink_buf || !name_buf) 432 if (!header_buf || !symlink_buf || !name_buf)
488 panic("can't allocate buffers"); 433 panic("can't allocate buffers");
434
489 state = Start; 435 state = Start;
490 this_header = 0; 436 this_header = 0;
491 message = NULL; 437 message = NULL;
@@ -505,22 +451,25 @@ static char * __init unpack_to_rootfs(char *buf, unsigned len, int check_only)
505 continue; 451 continue;
506 } 452 }
507 this_header = 0; 453 this_header = 0;
508 insize = len; 454 decompress = decompress_method(buf, len, &compress_name);
509 inbuf = buf; 455 if (decompress)
510 inptr = 0; 456 decompress(buf, len, NULL, flush_buffer, NULL,
511 outcnt = 0; /* bytes in output buffer */ 457 &my_inptr, error);
512 bytes_out = 0; 458 else if (compress_name) {
513 crc = (ulg)0xffffffffL; /* shift register contents */ 459 if (!message) {
514 makecrc(); 460 snprintf(msg_buf, sizeof msg_buf,
515 gunzip(); 461 "compression method %s not configured",
462 compress_name);
463 message = msg_buf;
464 }
465 }
516 if (state != Reset) 466 if (state != Reset)
517 error("junk in gzipped archive"); 467 error("junk in compressed archive");
518 this_header = saved_offset + inptr; 468 this_header = saved_offset + my_inptr;
519 buf += inptr; 469 buf += my_inptr;
520 len -= inptr; 470 len -= my_inptr;
521 } 471 }
522 dir_utime(); 472 dir_utime();
523 kfree(window);
524 kfree(name_buf); 473 kfree(name_buf);
525 kfree(symlink_buf); 474 kfree(symlink_buf);
526 kfree(header_buf); 475 kfree(header_buf);
@@ -579,7 +528,7 @@ static int __init populate_rootfs(void)
579 char *err = unpack_to_rootfs(__initramfs_start, 528 char *err = unpack_to_rootfs(__initramfs_start,
580 __initramfs_end - __initramfs_start, 0); 529 __initramfs_end - __initramfs_start, 0);
581 if (err) 530 if (err)
582 panic(err); 531 panic(err); /* Failed to decompress INTERNAL initramfs */
583 if (initrd_start) { 532 if (initrd_start) {
584#ifdef CONFIG_BLK_DEV_RAM 533#ifdef CONFIG_BLK_DEV_RAM
585 int fd; 534 int fd;
@@ -605,9 +554,12 @@ static int __init populate_rootfs(void)
605 printk(KERN_INFO "Unpacking initramfs..."); 554 printk(KERN_INFO "Unpacking initramfs...");
606 err = unpack_to_rootfs((char *)initrd_start, 555 err = unpack_to_rootfs((char *)initrd_start,
607 initrd_end - initrd_start, 0); 556 initrd_end - initrd_start, 0);
608 if (err) 557 if (err) {
609 panic(err); 558 printk(" failed!\n");
610 printk(" done\n"); 559 printk(KERN_EMERG "%s\n", err);
560 } else {
561 printk(" done\n");
562 }
611 free_initrd(); 563 free_initrd();
612#endif 564#endif
613 } 565 }
diff --git a/kernel/seccomp.c b/kernel/seccomp.c
index ad64fcb731f2..57d4b13b631d 100644
--- a/kernel/seccomp.c
+++ b/kernel/seccomp.c
@@ -8,6 +8,7 @@
8 8
9#include <linux/seccomp.h> 9#include <linux/seccomp.h>
10#include <linux/sched.h> 10#include <linux/sched.h>
11#include <linux/compat.h>
11 12
12/* #define SECCOMP_DEBUG 1 */ 13/* #define SECCOMP_DEBUG 1 */
13#define NR_SECCOMP_MODES 1 14#define NR_SECCOMP_MODES 1
@@ -22,7 +23,7 @@ static int mode1_syscalls[] = {
22 0, /* null terminated */ 23 0, /* null terminated */
23}; 24};
24 25
25#ifdef TIF_32BIT 26#ifdef CONFIG_COMPAT
26static int mode1_syscalls_32[] = { 27static int mode1_syscalls_32[] = {
27 __NR_seccomp_read_32, __NR_seccomp_write_32, __NR_seccomp_exit_32, __NR_seccomp_sigreturn_32, 28 __NR_seccomp_read_32, __NR_seccomp_write_32, __NR_seccomp_exit_32, __NR_seccomp_sigreturn_32,
28 0, /* null terminated */ 29 0, /* null terminated */
@@ -37,8 +38,8 @@ void __secure_computing(int this_syscall)
37 switch (mode) { 38 switch (mode) {
38 case 1: 39 case 1:
39 syscall = mode1_syscalls; 40 syscall = mode1_syscalls;
40#ifdef TIF_32BIT 41#ifdef CONFIG_COMPAT
41 if (test_thread_flag(TIF_32BIT)) 42 if (is_compat_task())
42 syscall = mode1_syscalls_32; 43 syscall = mode1_syscalls_32;
43#endif 44#endif
44 do { 45 do {
diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
index 79084311ee57..076c7c8215b0 100644
--- a/kernel/user_namespace.c
+++ b/kernel/user_namespace.c
@@ -60,12 +60,25 @@ int create_user_ns(struct cred *new)
60 return 0; 60 return 0;
61} 61}
62 62
63void free_user_ns(struct kref *kref) 63/*
64 * Deferred destructor for a user namespace. This is required because
65 * free_user_ns() may be called with uidhash_lock held, but we need to call
66 * back to free_uid() which will want to take the lock again.
67 */
68static void free_user_ns_work(struct work_struct *work)
64{ 69{
65 struct user_namespace *ns; 70 struct user_namespace *ns =
66 71 container_of(work, struct user_namespace, destroyer);
67 ns = container_of(kref, struct user_namespace, kref);
68 free_uid(ns->creator); 72 free_uid(ns->creator);
69 kfree(ns); 73 kfree(ns);
70} 74}
75
76void free_user_ns(struct kref *kref)
77{
78 struct user_namespace *ns =
79 container_of(kref, struct user_namespace, kref);
80
81 INIT_WORK(&ns->destroyer, free_user_ns_work);
82 schedule_work(&ns->destroyer);
83}
71EXPORT_SYMBOL(free_user_ns); 84EXPORT_SYMBOL(free_user_ns);
diff --git a/lib/Kconfig b/lib/Kconfig
index 03c2c24b9083..daa481824d9c 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -98,6 +98,20 @@ config LZO_DECOMPRESS
98 tristate 98 tristate
99 99
100# 100#
101# These all provide a common interface (hence the apparent duplication with
102# ZLIB_INFLATE; DECOMPRESS_GZIP is just a wrapper.)
103#
104config DECOMPRESS_GZIP
105 select ZLIB_INFLATE
106 tristate
107
108config DECOMPRESS_BZIP2
109 tristate
110
111config DECOMPRESS_LZMA
112 tristate
113
114#
101# Generic allocator support is selected if needed 115# Generic allocator support is selected if needed
102# 116#
103config GENERIC_ALLOCATOR 117config GENERIC_ALLOCATOR
diff --git a/lib/Makefile b/lib/Makefile
index 32b0e64ded27..790de7c25d0d 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -11,7 +11,8 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \
11 rbtree.o radix-tree.o dump_stack.o \ 11 rbtree.o radix-tree.o dump_stack.o \
12 idr.o int_sqrt.o extable.o prio_tree.o \ 12 idr.o int_sqrt.o extable.o prio_tree.o \
13 sha1.o irq_regs.o reciprocal_div.o argv_split.o \ 13 sha1.o irq_regs.o reciprocal_div.o argv_split.o \
14 proportions.o prio_heap.o ratelimit.o show_mem.o is_single_threaded.o 14 proportions.o prio_heap.o ratelimit.o show_mem.o \
15 is_single_threaded.o decompress.o
15 16
16lib-$(CONFIG_MMU) += ioremap.o 17lib-$(CONFIG_MMU) += ioremap.o
17lib-$(CONFIG_SMP) += cpumask.o 18lib-$(CONFIG_SMP) += cpumask.o
@@ -65,6 +66,10 @@ obj-$(CONFIG_REED_SOLOMON) += reed_solomon/
65obj-$(CONFIG_LZO_COMPRESS) += lzo/ 66obj-$(CONFIG_LZO_COMPRESS) += lzo/
66obj-$(CONFIG_LZO_DECOMPRESS) += lzo/ 67obj-$(CONFIG_LZO_DECOMPRESS) += lzo/
67 68
69lib-$(CONFIG_DECOMPRESS_GZIP) += decompress_inflate.o
70lib-$(CONFIG_DECOMPRESS_BZIP2) += decompress_bunzip2.o
71lib-$(CONFIG_DECOMPRESS_LZMA) += decompress_unlzma.o
72
68obj-$(CONFIG_TEXTSEARCH) += textsearch.o 73obj-$(CONFIG_TEXTSEARCH) += textsearch.o
69obj-$(CONFIG_TEXTSEARCH_KMP) += ts_kmp.o 74obj-$(CONFIG_TEXTSEARCH_KMP) += ts_kmp.o
70obj-$(CONFIG_TEXTSEARCH_BM) += ts_bm.o 75obj-$(CONFIG_TEXTSEARCH_BM) += ts_bm.o
diff --git a/lib/decompress.c b/lib/decompress.c
new file mode 100644
index 000000000000..d2842f571674
--- /dev/null
+++ b/lib/decompress.c
@@ -0,0 +1,54 @@
1/*
2 * decompress.c
3 *
4 * Detect the decompression method based on magic number
5 */
6
7#include <linux/decompress/generic.h>
8
9#include <linux/decompress/bunzip2.h>
10#include <linux/decompress/unlzma.h>
11#include <linux/decompress/inflate.h>
12
13#include <linux/types.h>
14#include <linux/string.h>
15
16#ifndef CONFIG_DECOMPRESS_GZIP
17# define gunzip NULL
18#endif
19#ifndef CONFIG_DECOMPRESS_BZIP2
20# define bunzip2 NULL
21#endif
22#ifndef CONFIG_DECOMPRESS_LZMA
23# define unlzma NULL
24#endif
25
26static const struct compress_format {
27 unsigned char magic[2];
28 const char *name;
29 decompress_fn decompressor;
30} compressed_formats[] = {
31 { {037, 0213}, "gzip", gunzip },
32 { {037, 0236}, "gzip", gunzip },
33 { {0x42, 0x5a}, "bzip2", bunzip2 },
34 { {0x5d, 0x00}, "lzma", unlzma },
35 { {0, 0}, NULL, NULL }
36};
37
38decompress_fn decompress_method(const unsigned char *inbuf, int len,
39 const char **name)
40{
41 const struct compress_format *cf;
42
43 if (len < 2)
44 return NULL; /* Need at least this much... */
45
46 for (cf = compressed_formats; cf->name; cf++) {
47 if (!memcmp(inbuf, cf->magic, 2))
48 break;
49
50 }
51 if (name)
52 *name = cf->name;
53 return cf->decompressor;
54}
diff --git a/lib/decompress_bunzip2.c b/lib/decompress_bunzip2.c
new file mode 100644
index 000000000000..5d3ddb5fcfd9
--- /dev/null
+++ b/lib/decompress_bunzip2.c
@@ -0,0 +1,735 @@
1/* vi: set sw = 4 ts = 4: */
2/* Small bzip2 deflate implementation, by Rob Landley (rob@landley.net).
3
4 Based on bzip2 decompression code by Julian R Seward (jseward@acm.org),
5 which also acknowledges contributions by Mike Burrows, David Wheeler,
6 Peter Fenwick, Alistair Moffat, Radford Neal, Ian H. Witten,
7 Robert Sedgewick, and Jon L. Bentley.
8
9 This code is licensed under the LGPLv2:
 10 LGPL (http://www.gnu.org/copyleft/lgpl.html)
11*/
12
13/*
14 Size and speed optimizations by Manuel Novoa III (mjn3@codepoet.org).
15
16 More efficient reading of Huffman codes, a streamlined read_bunzip()
17 function, and various other tweaks. In (limited) tests, approximately
18 20% faster than bzcat on x86 and about 10% faster on arm.
19
 20 Note that about 2/3 of the time is spent in read_bunzip() reversing
21 the Burrows-Wheeler transformation. Much of that time is delay
22 resulting from cache misses.
23
24 I would ask that anyone benefiting from this work, especially those
25 using it in commercial products, consider making a donation to my local
26 non-profit hospice organization in the name of the woman I loved, who
27 passed away Feb. 12, 2003.
28
29 In memory of Toni W. Hagan
30
31 Hospice of Acadiana, Inc.
32 2600 Johnston St., Suite 200
33 Lafayette, LA 70503-3240
34
35 Phone (337) 232-1234 or 1-800-738-2226
36 Fax (337) 232-1297
37
38 http://www.hospiceacadiana.com/
39
40 Manuel
41 */
42
43/*
44 Made it fit for running in Linux Kernel by Alain Knaff (alain@knaff.lu)
45*/
46
47
48#ifndef STATIC
49#include <linux/decompress/bunzip2.h>
50#endif /* !STATIC */
51
52#include <linux/decompress/mm.h>
53
54#ifndef INT_MAX
55#define INT_MAX 0x7fffffff
56#endif
57
58/* Constants for Huffman coding */
59#define MAX_GROUPS 6
60#define GROUP_SIZE 50 /* 64 would have been more efficient */
61#define MAX_HUFCODE_BITS 20 /* Longest Huffman code allowed */
62#define MAX_SYMBOLS 258 /* 256 literals + RUNA + RUNB */
63#define SYMBOL_RUNA 0
64#define SYMBOL_RUNB 1
65
66/* Status return values */
67#define RETVAL_OK 0
68#define RETVAL_LAST_BLOCK (-1)
69#define RETVAL_NOT_BZIP_DATA (-2)
70#define RETVAL_UNEXPECTED_INPUT_EOF (-3)
71#define RETVAL_UNEXPECTED_OUTPUT_EOF (-4)
72#define RETVAL_DATA_ERROR (-5)
73#define RETVAL_OUT_OF_MEMORY (-6)
74#define RETVAL_OBSOLETE_INPUT (-7)
75
76/* Other housekeeping constants */
77#define BZIP2_IOBUF_SIZE 4096
78
79/* This is what we know about each Huffman coding group */
80struct group_data {
 81 /* We have an extra slot at the end of limit[] for a sentinel value. */
82 int limit[MAX_HUFCODE_BITS+1];
83 int base[MAX_HUFCODE_BITS];
84 int permute[MAX_SYMBOLS];
85 int minLen, maxLen;
86};
87
88/* Structure holding all the housekeeping data, including IO buffers and
89 memory that persists between calls to bunzip */
90struct bunzip_data {
91 /* State for interrupting output loop */
92 int writeCopies, writePos, writeRunCountdown, writeCount, writeCurrent;
93 /* I/O tracking data (file handles, buffers, positions, etc.) */
94 int (*fill)(void*, unsigned int);
95 int inbufCount, inbufPos /*, outbufPos*/;
96 unsigned char *inbuf /*,*outbuf*/;
97 unsigned int inbufBitCount, inbufBits;
98 /* The CRC values stored in the block header and calculated from the
99 data */
100 unsigned int crc32Table[256], headerCRC, totalCRC, writeCRC;
101 /* Intermediate buffer and its size (in bytes) */
102 unsigned int *dbuf, dbufSize;
103 /* These things are a bit too big to go on the stack */
104 unsigned char selectors[32768]; /* nSelectors = 15 bits */
105 struct group_data groups[MAX_GROUPS]; /* Huffman coding tables */
106 int io_error; /* non-zero if we have IO error */
107};
108
109
110/* Return the next nnn bits of input. All reads from the compressed input
111 are done through this function. All reads are big endian */
112static unsigned int INIT get_bits(struct bunzip_data *bd, char bits_wanted)
113{
114 unsigned int bits = 0;
115
116 /* If we need to get more data from the byte buffer, do so.
117 (Loop getting one byte at a time to enforce endianness and avoid
118 unaligned access.) */
119 while (bd->inbufBitCount < bits_wanted) {
120 /* If we need to read more data from file into byte buffer, do
121 so */
122 if (bd->inbufPos == bd->inbufCount) {
123 if (bd->io_error)
124 return 0;
125 bd->inbufCount = bd->fill(bd->inbuf, BZIP2_IOBUF_SIZE);
126 if (bd->inbufCount <= 0) {
127 bd->io_error = RETVAL_UNEXPECTED_INPUT_EOF;
128 return 0;
129 }
130 bd->inbufPos = 0;
131 }
132 /* Avoid 32-bit overflow (dump bit buffer to top of output) */
133 if (bd->inbufBitCount >= 24) {
134 bits = bd->inbufBits&((1 << bd->inbufBitCount)-1);
135 bits_wanted -= bd->inbufBitCount;
136 bits <<= bits_wanted;
137 bd->inbufBitCount = 0;
138 }
139 /* Grab next 8 bits of input from buffer. */
140 bd->inbufBits = (bd->inbufBits << 8)|bd->inbuf[bd->inbufPos++];
141 bd->inbufBitCount += 8;
142 }
143 /* Calculate result */
144 bd->inbufBitCount -= bits_wanted;
145 bits |= (bd->inbufBits >> bd->inbufBitCount)&((1 << bits_wanted)-1);
146
147 return bits;
148}
149
150/* Unpacks the next block and sets up for the inverse burrows-wheeler step. */
151
152static int INIT get_next_block(struct bunzip_data *bd)
153{
154 struct group_data *hufGroup = NULL;
155 int *base = NULL;
156 int *limit = NULL;
157 int dbufCount, nextSym, dbufSize, groupCount, selector,
158 i, j, k, t, runPos, symCount, symTotal, nSelectors,
159 byteCount[256];
160 unsigned char uc, symToByte[256], mtfSymbol[256], *selectors;
161 unsigned int *dbuf, origPtr;
162
163 dbuf = bd->dbuf;
164 dbufSize = bd->dbufSize;
165 selectors = bd->selectors;
166
167 /* Read in header signature and CRC, then validate signature.
168 (last block signature means CRC is for whole file, return now) */
169 i = get_bits(bd, 24);
170 j = get_bits(bd, 24);
171 bd->headerCRC = get_bits(bd, 32);
172 if ((i == 0x177245) && (j == 0x385090))
173 return RETVAL_LAST_BLOCK;
174 if ((i != 0x314159) || (j != 0x265359))
175 return RETVAL_NOT_BZIP_DATA;
176 /* We can add support for blockRandomised if anybody complains.
177 There was some code for this in busybox 1.0.0-pre3, but nobody ever
178 noticed that it didn't actually work. */
179 if (get_bits(bd, 1))
180 return RETVAL_OBSOLETE_INPUT;
181 origPtr = get_bits(bd, 24);
182 if (origPtr > dbufSize)
183 return RETVAL_DATA_ERROR;
184 /* mapping table: if some byte values are never used (encoding things
185 like ascii text), the compression code removes the gaps to have fewer
186 symbols to deal with, and writes a sparse bitfield indicating which
187 values were present. We make a translation table to convert the
188 symbols back to the corresponding bytes. */
189 t = get_bits(bd, 16);
190 symTotal = 0;
191 for (i = 0; i < 16; i++) {
192 if (t&(1 << (15-i))) {
193 k = get_bits(bd, 16);
194 for (j = 0; j < 16; j++)
195 if (k&(1 << (15-j)))
196 symToByte[symTotal++] = (16*i)+j;
197 }
198 }
199 /* How many different Huffman coding groups does this block use? */
200 groupCount = get_bits(bd, 3);
201 if (groupCount < 2 || groupCount > MAX_GROUPS)
202 return RETVAL_DATA_ERROR;
203 /* nSelectors: Every GROUP_SIZE many symbols we select a new
204 Huffman coding group. Read in the group selector list,
205 which is stored as MTF encoded bit runs. (MTF = Move To
206 Front, as each value is used it's moved to the start of the
207 list.) */
208 nSelectors = get_bits(bd, 15);
209 if (!nSelectors)
210 return RETVAL_DATA_ERROR;
211 for (i = 0; i < groupCount; i++)
212 mtfSymbol[i] = i;
213 for (i = 0; i < nSelectors; i++) {
214 /* Get next value */
215 for (j = 0; get_bits(bd, 1); j++)
216 if (j >= groupCount)
217 return RETVAL_DATA_ERROR;
218 /* Decode MTF to get the next selector */
219 uc = mtfSymbol[j];
220 for (; j; j--)
221 mtfSymbol[j] = mtfSymbol[j-1];
222 mtfSymbol[0] = selectors[i] = uc;
223 }
224 /* Read the Huffman coding tables for each group, which code
225 for symTotal literal symbols, plus two run symbols (RUNA,
226 RUNB) */
227 symCount = symTotal+2;
228 for (j = 0; j < groupCount; j++) {
229 unsigned char length[MAX_SYMBOLS], temp[MAX_HUFCODE_BITS+1];
230 int minLen, maxLen, pp;
231 /* Read Huffman code lengths for each symbol. They're
232 stored in a way similar to mtf; record a starting
233 value for the first symbol, and an offset from the
 234 previous value for every symbol after that.
235 (Subtracting 1 before the loop and then adding it
236 back at the end is an optimization that makes the
237 test inside the loop simpler: symbol length 0
238 becomes negative, so an unsigned inequality catches
239 it.) */
240 t = get_bits(bd, 5)-1;
241 for (i = 0; i < symCount; i++) {
242 for (;;) {
243 if (((unsigned)t) > (MAX_HUFCODE_BITS-1))
244 return RETVAL_DATA_ERROR;
245
246 /* If first bit is 0, stop. Else
247 second bit indicates whether to
248 increment or decrement the value.
249 Optimization: grab 2 bits and unget
250 the second if the first was 0. */
251
252 k = get_bits(bd, 2);
253 if (k < 2) {
254 bd->inbufBitCount++;
255 break;
256 }
257 /* Add one if second bit 1, else
258 * subtract 1. Avoids if/else */
259 t += (((k+1)&2)-1);
260 }
261 /* Correct for the initial -1, to get the
262 * final symbol length */
263 length[i] = t+1;
264 }
265 /* Find largest and smallest lengths in this group */
266 minLen = maxLen = length[0];
267
268 for (i = 1; i < symCount; i++) {
269 if (length[i] > maxLen)
270 maxLen = length[i];
271 else if (length[i] < minLen)
272 minLen = length[i];
273 }
274
275 /* Calculate permute[], base[], and limit[] tables from
276 * length[].
277 *
278 * permute[] is the lookup table for converting
279 * Huffman coded symbols into decoded symbols. base[]
280 * is the amount to subtract from the value of a
281 * Huffman symbol of a given length when using
282 * permute[].
283 *
284 * limit[] indicates the largest numerical value a
285 * symbol with a given number of bits can have. This
286 * is how the Huffman codes can vary in length: each
287 * code with a value > limit[length] needs another
288 * bit.
289 */
290 hufGroup = bd->groups+j;
291 hufGroup->minLen = minLen;
292 hufGroup->maxLen = maxLen;
293 /* Note that minLen can't be smaller than 1, so we
294 adjust the base and limit array pointers so we're
295 not always wasting the first entry. We do this
296 again when using them (during symbol decoding).*/
297 base = hufGroup->base-1;
298 limit = hufGroup->limit-1;
299 /* Calculate permute[]. Concurrently, initialize
300 * temp[] and limit[]. */
301 pp = 0;
302 for (i = minLen; i <= maxLen; i++) {
303 temp[i] = limit[i] = 0;
304 for (t = 0; t < symCount; t++)
305 if (length[t] == i)
306 hufGroup->permute[pp++] = t;
307 }
308 /* Count symbols coded for at each bit length */
309 for (i = 0; i < symCount; i++)
310 temp[length[i]]++;
311 /* Calculate limit[] (the largest symbol-coding value
312 *at each bit length, which is (previous limit <<
313 *1)+symbols at this level), and base[] (number of
314 *symbols to ignore at each bit length, which is limit
315 *minus the cumulative count of symbols coded for
316 *already). */
317 pp = t = 0;
318 for (i = minLen; i < maxLen; i++) {
319 pp += temp[i];
320 /* We read the largest possible symbol size
321 and then unget bits after determining how
322 many we need, and those extra bits could be
323 set to anything. (They're noise from
324 future symbols.) At each level we're
325 really only interested in the first few
326 bits, so here we set all the trailing
327 to-be-ignored bits to 1 so they don't
328 affect the value > limit[length]
329 comparison. */
330 limit[i] = (pp << (maxLen - i)) - 1;
331 pp <<= 1;
332 base[i+1] = pp-(t += temp[i]);
333 }
334 limit[maxLen+1] = INT_MAX; /* Sentinel value for
335 * reading next sym. */
336 limit[maxLen] = pp+temp[maxLen]-1;
337 base[minLen] = 0;
338 }
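
A worked, standalone illustration (not part of the patch) of the permute[]/base[]/limit[] scheme described in the comments above, for a four-symbol canonical code with lengths 1, 2, 3, 3. Unlike the kernel code it indexes base[] and limit[] directly instead of offsetting the array pointers by one; all names here are illustrative only.

#include <stdio.h>
#include <limits.h>

#define NSYM 4

int main(void)
{
        /* Code lengths for symbols A..D: 1, 2, 3, 3 bits
         * (canonical codes A=0, B=10, C=110, D=111) */
        int length[NSYM] = { 1, 2, 3, 3 };
        int permute[NSYM], temp[8] = { 0 }, base[8] = { 0 }, limit[8];
        int minLen = 1, maxLen = 3, pp, t, i, j;

        /* permute[]: symbol indices sorted by code length */
        pp = 0;
        for (i = minLen; i <= maxLen; i++)
                for (t = 0; t < NSYM; t++)
                        if (length[t] == i)
                                permute[pp++] = t;
        /* temp[]: number of symbols coded at each bit length */
        for (i = 0; i < NSYM; i++)
                temp[length[i]]++;

        /* Same recurrence as in the loop above */
        pp = t = 0;
        for (i = minLen; i < maxLen; i++) {
                pp += temp[i];
                limit[i] = (pp << (maxLen - i)) - 1;
                pp <<= 1;
                base[i + 1] = pp - (t += temp[i]);
        }
        limit[maxLen] = pp + temp[maxLen] - 1;
        limit[maxLen + 1] = INT_MAX;
        base[minLen] = 0;

        /* Decode every possible maxLen-bit read */
        for (j = 0; j < (1 << maxLen); j++) {
                i = minLen;
                while (j > limit[i])
                        i++;
                printf("read %d%d%d -> %d-bit code, symbol %c\n",
                       (j >> 2) & 1, (j >> 1) & 1, j & 1, i,
                       'A' + permute[(j >> (maxLen - i)) - base[i]]);
        }
        return 0;
}

Reads 000..011 decode to A, 100..101 to B, 110 to C and 111 to D, with the trailing bits of the over-long read ignored, which is exactly what the limit[] comparison and the unget in the decode loop below achieve.
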
339 /* We've finished reading and digesting the block header. Now
340 read this block's Huffman coded symbols from the file and
341 undo the Huffman coding and run length encoding, saving the
342 result into dbuf[dbufCount++] = uc */
343
344 /* Initialize symbol occurrence counters and symbol Move To
345 * Front table */
346 for (i = 0; i < 256; i++) {
347 byteCount[i] = 0;
348 mtfSymbol[i] = (unsigned char)i;
349 }
350 /* Loop through compressed symbols. */
351 runPos = dbufCount = symCount = selector = 0;
352 for (;;) {
353 /* Determine which Huffman coding group to use. */
354 if (!(symCount--)) {
355 symCount = GROUP_SIZE-1;
356 if (selector >= nSelectors)
357 return RETVAL_DATA_ERROR;
358 hufGroup = bd->groups+selectors[selector++];
359 base = hufGroup->base-1;
360 limit = hufGroup->limit-1;
361 }
362 /* Read next Huffman-coded symbol. */
363 /* Note: It is far cheaper to read maxLen bits and
364 back up than it is to read minLen bits and then an
365 additional bit at a time, testing as we go.
366 Because there is a trailing last block (with file
367 CRC), there is no danger of the overread causing an
368 unexpected EOF for a valid compressed file. As a
369 further optimization, we do the read inline
370 (falling back to a call to get_bits if the buffer
371 runs dry). The following (up to got_huff_bits:) is
372 equivalent to j = get_bits(bd, hufGroup->maxLen);
373 */
374 while (bd->inbufBitCount < hufGroup->maxLen) {
375 if (bd->inbufPos == bd->inbufCount) {
376 j = get_bits(bd, hufGroup->maxLen);
377 goto got_huff_bits;
378 }
379 bd->inbufBits =
380 (bd->inbufBits << 8)|bd->inbuf[bd->inbufPos++];
381 bd->inbufBitCount += 8;
382 };
383 bd->inbufBitCount -= hufGroup->maxLen;
384 j = (bd->inbufBits >> bd->inbufBitCount)&
385 ((1 << hufGroup->maxLen)-1);
386got_huff_bits:
387 /* Figure out how many bits are in the next symbol and
388 * unget extras */
389 i = hufGroup->minLen;
390 while (j > limit[i])
391 ++i;
392 bd->inbufBitCount += (hufGroup->maxLen - i);
393 /* Huffman decode value to get nextSym (with bounds checking) */
394 if ((i > hufGroup->maxLen)
395 || (((unsigned)(j = (j>>(hufGroup->maxLen-i))-base[i]))
396 >= MAX_SYMBOLS))
397 return RETVAL_DATA_ERROR;
398 nextSym = hufGroup->permute[j];
399 /* We have now decoded the symbol, which indicates
400 either a new literal byte, or a repeated run of the
401 most recent literal byte. First, check if nextSym
402 indicates a repeated run, and if so loop collecting
403 how many times to repeat the last literal. */
404 if (((unsigned)nextSym) <= SYMBOL_RUNB) { /* RUNA or RUNB */
405 /* If this is the start of a new run, zero out
406 * counter */
407 if (!runPos) {
408 runPos = 1;
409 t = 0;
410 }
411 /* Neat trick that saves 1 symbol: instead of
412 or-ing 0 or 1 at each bit position, add 1
413 or 2 instead. For example, 1011 is 1 << 0
414 + 1 << 1 + 2 << 2. 1010 is 2 << 0 + 2 << 1
415 + 1 << 2. You can make any bit pattern
416 that way using 1 less symbol than the basic
417 or 0/1 method (except all bits 0, which
418 would use no symbols, but a run of length 0
419 doesn't mean anything in this context).
420 Thus space is saved. */
421 t += (runPos << nextSym);
422 /* +runPos if RUNA; +2*runPos if RUNB */
423
424 runPos <<= 1;
425 continue;
426 }
427 /* When we hit the first non-run symbol after a run,
428 we now know how many times to repeat the last
429 literal, so append that many copies to our buffer
430 of decoded symbols (dbuf) now. (The last literal
431 used is the one at the head of the mtfSymbol
432 array.) */
433 if (runPos) {
434 runPos = 0;
435 if (dbufCount+t >= dbufSize)
436 return RETVAL_DATA_ERROR;
437
438 uc = symToByte[mtfSymbol[0]];
439 byteCount[uc] += t;
440 while (t--)
441 dbuf[dbufCount++] = uc;
442 }
443 /* Is this the terminating symbol? */
444 if (nextSym > symTotal)
445 break;
446 /* At this point, nextSym indicates a new literal
447 character. Subtract one to get the position in the
448 MTF array at which this literal is currently to be
449 found. (Note that the result can't be -1 or 0,
450 because 0 and 1 are RUNA and RUNB. But another
451 instance of the first symbol in the mtf array,
452 position 0, would have been handled as part of a
453 run above. Therefore 1 unused mtf position minus 2
454 non-literal nextSym values equals -1.) */
455 if (dbufCount >= dbufSize)
456 return RETVAL_DATA_ERROR;
457 i = nextSym - 1;
458 uc = mtfSymbol[i];
459 /* Adjust the MTF array. Since we typically expect to
460 *move only a small number of symbols, and are bound
461 *by 256 in any case, using memmove here would
462 *typically be bigger and slower due to function call
463 *overhead and other assorted setup costs. */
464 do {
465 mtfSymbol[i] = mtfSymbol[i-1];
466 } while (--i);
467 mtfSymbol[0] = uc;
468 uc = symToByte[uc];
469 /* We have our literal byte. Save it into dbuf. */
470 byteCount[uc]++;
471 dbuf[dbufCount++] = (unsigned int)uc;
472 }
473 /* At this point, we've read all the Huffman-coded symbols
474 (and repeated runs) for this block from the input stream,
475 and decoded them into the intermediate buffer. There are
476 dbufCount many decoded bytes in dbuf[]. Now undo the
477 Burrows-Wheeler transform on dbuf. See
478 http://dogma.net/markn/articles/bwt/bwt.htm
479 */
480 /* Turn byteCount into cumulative occurrence counts of 0 to n-1. */
481 j = 0;
482 for (i = 0; i < 256; i++) {
483 k = j+byteCount[i];
484 byteCount[i] = j;
485 j = k;
486 }
487 /* Figure out what order dbuf would be in if we sorted it. */
488 for (i = 0; i < dbufCount; i++) {
489 uc = (unsigned char)(dbuf[i] & 0xff);
490 dbuf[byteCount[uc]] |= (i << 8);
491 byteCount[uc]++;
492 }
493 /* Decode first byte by hand to initialize "previous" byte.
494 Note that it doesn't get output, and if the first three
495 characters are identical it doesn't qualify as a run (hence
496 writeRunCountdown = 5). */
497 if (dbufCount) {
498 if (origPtr >= dbufCount)
499 return RETVAL_DATA_ERROR;
500 bd->writePos = dbuf[origPtr];
501 bd->writeCurrent = (unsigned char)(bd->writePos&0xff);
502 bd->writePos >>= 8;
503 bd->writeRunCountdown = 5;
504 }
505 bd->writeCount = dbufCount;
506
507 return RETVAL_OK;
508}
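
The RUNA/RUNB handling in the decode loop above encodes run lengths in a bijective base-2 scheme: RUNA adds the current runPos, RUNB adds twice that, and runPos doubles after every symbol. A minimal standalone sketch (not part of the patch; run_length() is a made-up name):

#include <stdio.h>

/* 0 = RUNA, 1 = RUNB, exactly as nextSym is tested above */
static unsigned int run_length(const int *syms, int n)
{
        unsigned int t = 0, runPos = 1;
        int i;

        for (i = 0; i < n; i++) {
                t += runPos << syms[i];  /* +runPos for RUNA, +2*runPos for RUNB */
                runPos <<= 1;
        }
        return t;
}

int main(void)
{
        int seq[4] = { 1, 0, 1, 1 };            /* RUNB RUNA RUNB RUNB */

        printf("%u\n", run_length(seq, 4));     /* prints 28 */
        return 0;
}
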
509
510/* Undo Burrows-Wheeler transform on intermediate buffer to produce output.
511 If start_bunzip was initialized with out_fd = -1, then up to len bytes of
512 data are written to outbuf. Return value is number of bytes written or
513 error (all errors are negative numbers). If out_fd != -1, outbuf and len
514 are ignored, data is written to out_fd and return is RETVAL_OK or error.
515*/
516
517static int INIT read_bunzip(struct bunzip_data *bd, char *outbuf, int len)
518{
519 const unsigned int *dbuf;
520 int pos, xcurrent, previous, gotcount;
521
522 /* If last read was short due to end of file, return last block now */
523 if (bd->writeCount < 0)
524 return bd->writeCount;
525
526 gotcount = 0;
527 dbuf = bd->dbuf;
528 pos = bd->writePos;
529 xcurrent = bd->writeCurrent;
530
531 /* We will always have pending decoded data to write into the output
532 buffer unless this is the very first call (in which case we haven't
533 Huffman-decoded a block into the intermediate buffer yet). */
534
535 if (bd->writeCopies) {
536 /* Inside the loop, writeCopies means extra copies (beyond 1) */
537 --bd->writeCopies;
538 /* Loop outputting bytes */
539 for (;;) {
540 /* If the output buffer is full, snapshot
541 * state and return */
542 if (gotcount >= len) {
543 bd->writePos = pos;
544 bd->writeCurrent = xcurrent;
545 bd->writeCopies++;
546 return len;
547 }
548 /* Write next byte into output buffer, updating CRC */
549 outbuf[gotcount++] = xcurrent;
550 bd->writeCRC = (((bd->writeCRC) << 8)
551 ^bd->crc32Table[((bd->writeCRC) >> 24)
552 ^xcurrent]);
553 /* Loop now if we're outputting multiple
554 * copies of this byte */
555 if (bd->writeCopies) {
556 --bd->writeCopies;
557 continue;
558 }
559decode_next_byte:
560 if (!bd->writeCount--)
561 break;
562 /* Follow sequence vector to undo
563 * Burrows-Wheeler transform */
564 previous = xcurrent;
565 pos = dbuf[pos];
566 xcurrent = pos&0xff;
567 pos >>= 8;
568 /* After 3 consecutive copies of the same
569 byte, the 4th is a repeat count. We count
570 down from 4 instead of counting up because
571 testing for non-zero is faster */
572 if (--bd->writeRunCountdown) {
573 if (xcurrent != previous)
574 bd->writeRunCountdown = 4;
575 } else {
576 /* We have a repeated run, this byte
577 * indicates the count */
578 bd->writeCopies = xcurrent;
579 xcurrent = previous;
580 bd->writeRunCountdown = 5;
581 /* Sometimes there are just 3 bytes
582 * (run length 0) */
583 if (!bd->writeCopies)
584 goto decode_next_byte;
585 /* Subtract the 1 copy we'd output
586 * anyway to get extras */
587 --bd->writeCopies;
588 }
589 }
590 /* Decompression of this block completed successfully */
591 bd->writeCRC = ~bd->writeCRC;
592 bd->totalCRC = ((bd->totalCRC << 1) |
593 (bd->totalCRC >> 31)) ^ bd->writeCRC;
594 /* If this block had a CRC error, force file level CRC error. */
595 if (bd->writeCRC != bd->headerCRC) {
596 bd->totalCRC = bd->headerCRC+1;
597 return RETVAL_LAST_BLOCK;
598 }
599 }
600
601 /* Refill the intermediate buffer by Huffman-decoding next
602 * block of input */
603 /* (previous is just a convenient unused temp variable here) */
604 previous = get_next_block(bd);
605 if (previous) {
606 bd->writeCount = previous;
607 return (previous != RETVAL_LAST_BLOCK) ? previous : gotcount;
608 }
609 bd->writeCRC = 0xffffffffUL;
610 pos = bd->writePos;
611 xcurrent = bd->writeCurrent;
612 goto decode_next_byte;
613}
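
The inverse Burrows-Wheeler transform spread across the byteCount/dbuf bookkeeping at the end of get_next_block() and the pointer-following loop in read_bunzip() can be condensed into one standalone sketch (not part of the patch; function and variable names are illustrative, and the run-length handling is omitted):

#include <stdio.h>

#define MAXLEN 64

static void inverse_bwt(const unsigned char *last, int n, int origPtr,
                        unsigned char *out)
{
        unsigned int dbuf[MAXLEN], byteCount[256] = { 0 };
        unsigned int pos;
        int i, j, k;

        /* dbuf starts out holding the BWT string (the last column) */
        for (i = 0; i < n; i++) {
                dbuf[i] = last[i];
                byteCount[last[i]]++;
        }
        /* Turn byteCount into cumulative occurrence counts */
        for (i = 0, j = 0; i < 256; i++) {
                k = j + byteCount[i];
                byteCount[i] = j;
                j = k;
        }
        /* Store in the upper bits of each sorted position the index of the
         * entry that continues it (the "sequence vector") */
        for (i = 0; i < n; i++)
                dbuf[byteCount[dbuf[i] & 0xff]++] |= i << 8;
        /* Decode the first byte "by hand" (it is not emitted), then follow
         * the sequence vector, just like read_bunzip() above */
        pos = dbuf[origPtr] >> 8;
        for (i = 0; i < n; i++) {
                out[i] = dbuf[pos] & 0xff;
                pos = dbuf[pos] >> 8;
        }
        out[n] = '\0';
}

int main(void)
{
        unsigned char out[MAXLEN + 1];

        /* The rotation-based BWT of "banana" is "nnbaaa", with the original
         * string at row 3 of the sorted rotations */
        inverse_bwt((const unsigned char *)"nnbaaa", 6, 3, out);
        printf("%s\n", out);                    /* prints "banana" */
        return 0;
}
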
614
615static int INIT nofill(void *buf, unsigned int len)
616{
617 return -1;
618}
619
620/* Allocate the structure, read file header. If in_fd ==-1, inbuf must contain
621 a complete bunzip file (len bytes long). If in_fd!=-1, inbuf and len are
622 ignored, and data is read from file handle into temporary buffer. */
623static int INIT start_bunzip(struct bunzip_data **bdp, void *inbuf, int len,
624 int (*fill)(void*, unsigned int))
625{
626 struct bunzip_data *bd;
627 unsigned int i, j, c;
628 const unsigned int BZh0 =
629 (((unsigned int)'B') << 24)+(((unsigned int)'Z') << 16)
630 +(((unsigned int)'h') << 8)+(unsigned int)'0';
631
632 /* Figure out how much data to allocate */
633 i = sizeof(struct bunzip_data);
634
635 /* Allocate bunzip_data. Most fields initialize to zero. */
636 bd = *bdp = malloc(i);
637 memset(bd, 0, sizeof(struct bunzip_data));
638 /* Setup input buffer */
639 bd->inbuf = inbuf;
640 bd->inbufCount = len;
641 if (fill != NULL)
642 bd->fill = fill;
643 else
644 bd->fill = nofill;
645
646 /* Init the CRC32 table (big endian) */
647 for (i = 0; i < 256; i++) {
648 c = i << 24;
649 for (j = 8; j; j--)
650 c = c&0x80000000 ? (c << 1)^0x04c11db7 : (c << 1);
651 bd->crc32Table[i] = c;
652 }
653
654 /* Ensure that file starts with "BZh['1'-'9']." */
655 i = get_bits(bd, 32);
656 if (((unsigned int)(i-BZh0-1)) >= 9)
657 return RETVAL_NOT_BZIP_DATA;
658
659 /* Fourth byte (ascii '1'-'9') indicates block size in units of 100k of
660 uncompressed data. Allocate intermediate buffer for block. */
661 bd->dbufSize = 100000*(i-BZh0);
662
663 bd->dbuf = large_malloc(bd->dbufSize * sizeof(int));
664 return RETVAL_OK;
665}
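
As a standalone check (not part of the patch), the table built here and the byte-at-a-time update used later for writeCRC in read_bunzip() look like this; bzip2 uses the MSB-first (big-endian) CRC-32 variant, so the usual reflected zlib table does not apply.

#include <stdio.h>
#include <stdint.h>

static uint32_t crc32Table[256];

/* Same big-endian CRC-32 table as start_bunzip() builds */
static void crc_init(void)
{
        uint32_t c;
        int i, j;

        for (i = 0; i < 256; i++) {
                c = (uint32_t)i << 24;
                for (j = 8; j; j--)
                        c = c & 0x80000000 ? (c << 1) ^ 0x04c11db7 : c << 1;
                crc32Table[i] = c;
        }
}

/* Same per-byte update as the writeCRC computation in read_bunzip() */
static uint32_t crc_buf(const unsigned char *buf, int len)
{
        uint32_t crc = 0xffffffff;
        int i;

        for (i = 0; i < len; i++)
                crc = (crc << 8) ^ crc32Table[(crc >> 24) ^ buf[i]];
        return ~crc;
}

int main(void)
{
        crc_init();
        /* Expected: fc891918, the published CRC-32/BZIP2 check value */
        printf("%08x\n", (unsigned)crc_buf((const unsigned char *)"123456789", 9));
        return 0;
}
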
666
667/* Example usage: decompress src_fd to dst_fd. (Stops at end of bzip2 data,
668 not end of file.) */
669STATIC int INIT bunzip2(unsigned char *buf, int len,
670 int(*fill)(void*, unsigned int),
671 int(*flush)(void*, unsigned int),
672 unsigned char *outbuf,
673 int *pos,
674 void(*error_fn)(char *x))
675{
676 struct bunzip_data *bd;
677 int i = -1;
678 unsigned char *inbuf;
679
680 set_error_fn(error_fn);
681 if (flush)
682 outbuf = malloc(BZIP2_IOBUF_SIZE);
683 else
684 len -= 4; /* Uncompressed size hack active in pre-boot
685 environment */
686 if (!outbuf) {
687 error("Could not allocate output buffer");
688 return -1;
689 }
690 if (buf)
691 inbuf = buf;
692 else
693 inbuf = malloc(BZIP2_IOBUF_SIZE);
694 if (!inbuf) {
695 error("Could not allocate input buffer");
696 goto exit_0;
697 }
698 i = start_bunzip(&bd, inbuf, len, fill);
699 if (!i) {
700 for (;;) {
701 i = read_bunzip(bd, outbuf, BZIP2_IOBUF_SIZE);
702 if (i <= 0)
703 break;
704 if (!flush)
705 outbuf += i;
706 else
707 if (i != flush(outbuf, i)) {
708 i = RETVAL_UNEXPECTED_OUTPUT_EOF;
709 break;
710 }
711 }
712 }
713 /* Check CRC and release memory */
714 if (i == RETVAL_LAST_BLOCK) {
715 if (bd->headerCRC != bd->totalCRC)
716 error("Data integrity error when decompressing.");
717 else
718 i = RETVAL_OK;
719 } else if (i == RETVAL_UNEXPECTED_OUTPUT_EOF) {
720 error("Compressed file ends unexpectedly");
721 }
722 if (bd->dbuf)
723 large_free(bd->dbuf);
724 if (pos)
725 *pos = bd->inbufPos;
726 free(bd);
727 if (!buf)
728 free(inbuf);
729exit_0:
730 if (flush)
731 free(outbuf);
732 return i;
733}
734
735#define decompress bunzip2
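
A hedged sketch of how a caller might wire the fill()/flush() callbacks that bunzip2() above expects. This is not part of the patch: the src_fd/dst_fd globals and helper names are assumptions for illustration, and in the kernel the real callers are the initramfs unpacking code and the pre-boot decompressors.

#include <string.h>
#include <unistd.h>

static int src_fd, dst_fd;

static int fill_from_fd(void *buf, unsigned int len)
{
        return read(src_fd, buf, len);          /* <= 0 is treated as EOF/error */
}

static int flush_to_fd(void *buf, unsigned int len)
{
        return write(dst_fd, buf, len);         /* must return len on success */
}

static void report_error(char *msg)
{
        write(2, msg, strlen(msg));
}

/* bunzip2(NULL, 0, fill_from_fd, flush_to_fd, NULL, NULL, report_error);
 * With buf == NULL the decompressor allocates its own input buffer, and with
 * flush != NULL it allocates and drains its own output buffer. */
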
diff --git a/lib/decompress_inflate.c b/lib/decompress_inflate.c
new file mode 100644
index 000000000000..839a329b4fc4
--- /dev/null
+++ b/lib/decompress_inflate.c
@@ -0,0 +1,167 @@
1#ifdef STATIC
2/* Pre-boot environment: included */
3
4/* prevent inclusion of _LINUX_KERNEL_H in pre-boot environment: lots
5 * of errors about console_printk etc... on ARM */
6#define _LINUX_KERNEL_H
7
8#include "zlib_inflate/inftrees.c"
9#include "zlib_inflate/inffast.c"
10#include "zlib_inflate/inflate.c"
11
12#else /* STATIC */
13/* initramfs et al: linked */
14
15#include <linux/zutil.h>
16
17#include "zlib_inflate/inftrees.h"
18#include "zlib_inflate/inffast.h"
19#include "zlib_inflate/inflate.h"
20
21#include "zlib_inflate/infutil.h"
22
23#endif /* STATIC */
24
25#include <linux/decompress/mm.h>
26
27#define INBUF_LEN (16*1024)
28
29/* Included from initramfs et al code */
30STATIC int INIT gunzip(unsigned char *buf, int len,
31 int(*fill)(void*, unsigned int),
32 int(*flush)(void*, unsigned int),
33 unsigned char *out_buf,
34 int *pos,
35 void(*error_fn)(char *x)) {
36 u8 *zbuf;
37 struct z_stream_s *strm;
38 int rc;
39 size_t out_len;
40
41 set_error_fn(error_fn);
42 rc = -1;
43 if (flush) {
44 out_len = 0x8000; /* 32 K */
45 out_buf = malloc(out_len);
46 } else {
47 out_len = 0x7fffffff; /* no limit */
48 }
49 if (!out_buf) {
50 error("Out of memory while allocating output buffer");
51 goto gunzip_nomem1;
52 }
53
54 if (buf)
55 zbuf = buf;
56 else {
57 zbuf = malloc(INBUF_LEN);
58 len = 0;
59 }
60 if (!zbuf) {
61 error("Out of memory while allocating input buffer");
62 goto gunzip_nomem2;
63 }
64
65 strm = malloc(sizeof(*strm));
66 if (strm == NULL) {
67 error("Out of memory while allocating z_stream");
68 goto gunzip_nomem3;
69 }
70
71 strm->workspace = malloc(flush ? zlib_inflate_workspacesize() :
72 sizeof(struct inflate_state));
73 if (strm->workspace == NULL) {
74 error("Out of memory while allocating workspace");
75 goto gunzip_nomem4;
76 }
77
78 if (len == 0)
79 len = fill(zbuf, INBUF_LEN);
80
81 /* verify the gzip header */
82 if (len < 10 ||
83 zbuf[0] != 0x1f || zbuf[1] != 0x8b || zbuf[2] != 0x08) {
84 if (pos)
85 *pos = 0;
86 error("Not a gzip file");
87 goto gunzip_5;
88 }
89
90 /* skip over gzip header (1f,8b,08... 10 bytes total +
91 * possible asciz filename)
92 */
93 strm->next_in = zbuf + 10;
94 /* skip over asciz filename */
95 if (zbuf[3] & 0x8) {
96 while (strm->next_in[0])
97 strm->next_in++;
98 strm->next_in++;
99 }
100 strm->avail_in = len - (strm->next_in - zbuf);
101
102 strm->next_out = out_buf;
103 strm->avail_out = out_len;
104
105 rc = zlib_inflateInit2(strm, -MAX_WBITS);
106
107 if (!flush) {
108 WS(strm)->inflate_state.wsize = 0;
109 WS(strm)->inflate_state.window = NULL;
110 }
111
112 while (rc == Z_OK) {
113 if (strm->avail_in == 0) {
114 /* TODO: handle case where both pos and fill are set */
115 len = fill(zbuf, INBUF_LEN);
116 if (len < 0) {
117 rc = -1;
118 error("read error");
119 break;
120 }
121 strm->next_in = zbuf;
122 strm->avail_in = len;
123 }
124 rc = zlib_inflate(strm, 0);
125
126 /* Write any data generated */
127 if (flush && strm->next_out > out_buf) {
128 int l = strm->next_out - out_buf;
129 if (l != flush(out_buf, l)) {
130 rc = -1;
131 error("write error");
132 break;
133 }
134 strm->next_out = out_buf;
135 strm->avail_out = out_len;
136 }
137
138 /* after Z_FINISH, only Z_STREAM_END is "we unpacked it all" */
139 if (rc == Z_STREAM_END) {
140 rc = 0;
141 break;
142 } else if (rc != Z_OK) {
143 error("uncompression error");
144 rc = -1;
145 }
146 }
147
148 zlib_inflateEnd(strm);
149 if (pos)
150 /* add + 8 to skip over trailer */
151 *pos = strm->next_in - zbuf+8;
152
153gunzip_5:
154 free(strm->workspace);
155gunzip_nomem4:
156 free(strm);
157gunzip_nomem3:
158 if (!buf)
159 free(zbuf);
160gunzip_nomem2:
161 if (flush)
162 free(out_buf);
163gunzip_nomem1:
164 return rc; /* returns Z_OK (0) if successful */
165}
166
167#define decompress gunzip
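
The header handling in gunzip() above (magic check, then skipping the optional NUL-terminated file name) can be expressed as a small standalone helper. This is a sketch, not part of the patch; like the patch it ignores the optional FEXTRA, FCOMMENT and FHCRC fields a gzip member may also carry.

#include <stdio.h>

/* Returns the offset of the raw deflate data, or -1 if not a gzip member */
static int gzip_payload_offset(const unsigned char *buf, int len)
{
        int off = 10;                   /* fixed-size part of the header */

        if (len < 10 || buf[0] != 0x1f || buf[1] != 0x8b || buf[2] != 0x08)
                return -1;
        if (buf[3] & 0x08) {            /* FNAME: NUL-terminated file name */
                while (off < len && buf[off])
                        off++;
                off++;                  /* skip the terminating NUL */
        }
        return off <= len ? off : -1;
}

int main(void)
{
        /* Header of "gzip -c" output for a file named "a": magic, deflate,
         * FNAME flag, mtime, XFL, OS, then "a\0" */
        unsigned char hdr[12] = { 0x1f, 0x8b, 0x08, 0x08, 0, 0, 0, 0,
                                  0x00, 0x03, 'a', 0 };

        printf("%d\n", gzip_payload_offset(hdr, 12));   /* prints 12 */
        return 0;
}
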
diff --git a/lib/decompress_unlzma.c b/lib/decompress_unlzma.c
new file mode 100644
index 000000000000..546f2f4c157e
--- /dev/null
+++ b/lib/decompress_unlzma.c
@@ -0,0 +1,647 @@
1/* Lzma decompressor for Linux kernel. Shamelessly snarfed
2 *from busybox 1.1.1
3 *
4 *Linux kernel adaptation
5 *Copyright (C) 2006 Alain < alain@knaff.lu >
6 *
7 *Based on small lzma deflate implementation/Small range coder
8 *implementation for lzma.
9 *Copyright (C) 2006 Aurelien Jacobs < aurel@gnuage.org >
10 *
11 *Based on LzmaDecode.c from the LZMA SDK 4.22 (http://www.7-zip.org/)
12 *Copyright (C) 1999-2005 Igor Pavlov
13 *
14 *Copyrights of the parts, see headers below.
15 *
16 *
17 *This program is free software; you can redistribute it and/or
18 *modify it under the terms of the GNU Lesser General Public
19 *License as published by the Free Software Foundation; either
20 *version 2.1 of the License, or (at your option) any later version.
21 *
22 *This program is distributed in the hope that it will be useful,
23 *but WITHOUT ANY WARRANTY; without even the implied warranty of
24 *MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
25 *Lesser General Public License for more details.
26 *
27 *You should have received a copy of the GNU Lesser General Public
28 *License along with this library; if not, write to the Free Software
29 *Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
30 */
31
32#ifndef STATIC
33#include <linux/decompress/unlzma.h>
34#endif /* STATIC */
35
36#include <linux/decompress/mm.h>
37
38#define MIN(a, b) (((a) < (b)) ? (a) : (b))
39
40static long long INIT read_int(unsigned char *ptr, int size)
41{
42 int i;
43 long long ret = 0;
44
45 for (i = 0; i < size; i++)
46 ret = (ret << 8) | ptr[size-i-1];
47 return ret;
48}
49
50#define ENDIAN_CONVERT(x) \
51 x = (typeof(x))read_int((unsigned char *)&x, sizeof(x))
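
A standalone sketch (not part of the patch) of what ENDIAN_CONVERT() accomplishes: the LZMA header fields are stored little-endian on disk, and read_int() reassembles them from raw memory bytes, so the result is the same on any host byte order. read_int() is restated here only to make the sketch self-contained; typeof is the GCC extension the kernel already relies on.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

static long long read_int(unsigned char *ptr, int size)
{
        long long ret = 0;
        int i;

        for (i = 0; i < size; i++)
                ret = (ret << 8) | ptr[size - i - 1];
        return ret;
}

#define ENDIAN_CONVERT(x) \
        x = (typeof(x))read_int((unsigned char *)&x, sizeof(x))

int main(void)
{
        uint32_t dict_size;
        unsigned char raw[4] = { 0x00, 0x00, 0x01, 0x00 };      /* 65536, LE */

        memcpy(&dict_size, raw, 4);
        ENDIAN_CONVERT(dict_size);
        printf("%u\n", (unsigned)dict_size);    /* prints 65536 on any host */
        return 0;
}
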
52
53
54/* Small range coder implementation for lzma.
55 *Copyright (C) 2006 Aurelien Jacobs < aurel@gnuage.org >
56 *
57 *Based on LzmaDecode.c from the LZMA SDK 4.22 (http://www.7-zip.org/)
58 *Copyright (c) 1999-2005 Igor Pavlov
59 */
60
61#include <linux/compiler.h>
62
63#define LZMA_IOBUF_SIZE 0x10000
64
65struct rc {
66 int (*fill)(void*, unsigned int);
67 uint8_t *ptr;
68 uint8_t *buffer;
69 uint8_t *buffer_end;
70 int buffer_size;
71 uint32_t code;
72 uint32_t range;
73 uint32_t bound;
74};
75
76
77#define RC_TOP_BITS 24
78#define RC_MOVE_BITS 5
79#define RC_MODEL_TOTAL_BITS 11
80
81
82/* Called twice: once at startup and once in rc_normalize() */
83static void INIT rc_read(struct rc *rc)
84{
85 rc->buffer_size = rc->fill((char *)rc->buffer, LZMA_IOBUF_SIZE);
86 if (rc->buffer_size <= 0)
87 error("unexpected EOF");
88 rc->ptr = rc->buffer;
89 rc->buffer_end = rc->buffer + rc->buffer_size;
90}
91
92/* Called once */
93static inline void INIT rc_init(struct rc *rc,
94 int (*fill)(void*, unsigned int),
95 char *buffer, int buffer_size)
96{
97 rc->fill = fill;
98 rc->buffer = (uint8_t *)buffer;
99 rc->buffer_size = buffer_size;
100 rc->buffer_end = rc->buffer + rc->buffer_size;
101 rc->ptr = rc->buffer;
102
103 rc->code = 0;
104 rc->range = 0xFFFFFFFF;
105}
106
107static inline void INIT rc_init_code(struct rc *rc)
108{
109 int i;
110
111 for (i = 0; i < 5; i++) {
112 if (rc->ptr >= rc->buffer_end)
113 rc_read(rc);
114 rc->code = (rc->code << 8) | *rc->ptr++;
115 }
116}
117
118
119/* Called once. TODO: bb_maybe_free() */
120static inline void INIT rc_free(struct rc *rc)
121{
122 free(rc->buffer);
123}
124
125/* Called twice, but one callsite is in inline'd rc_is_bit_0_helper() */
126static void INIT rc_do_normalize(struct rc *rc)
127{
128 if (rc->ptr >= rc->buffer_end)
129 rc_read(rc);
130 rc->range <<= 8;
131 rc->code = (rc->code << 8) | *rc->ptr++;
132}
133static inline void INIT rc_normalize(struct rc *rc)
134{
135 if (rc->range < (1 << RC_TOP_BITS))
136 rc_do_normalize(rc);
137}
138
139/* Called 9 times */
140/* Why does rc_is_bit_0_helper exist?
141 *Because we want to always expose (rc->code < rc->bound) to the optimizer
142 */
143static inline uint32_t INIT rc_is_bit_0_helper(struct rc *rc, uint16_t *p)
144{
145 rc_normalize(rc);
146 rc->bound = *p * (rc->range >> RC_MODEL_TOTAL_BITS);
147 return rc->bound;
148}
149static inline int INIT rc_is_bit_0(struct rc *rc, uint16_t *p)
150{
151 uint32_t t = rc_is_bit_0_helper(rc, p);
152 return rc->code < t;
153}
154
155/* Called ~10 times, but very small, thus inlined */
156static inline void INIT rc_update_bit_0(struct rc *rc, uint16_t *p)
157{
158 rc->range = rc->bound;
159 *p += ((1 << RC_MODEL_TOTAL_BITS) - *p) >> RC_MOVE_BITS;
160}
161static inline void rc_update_bit_1(struct rc *rc, uint16_t *p)
162{
163 rc->range -= rc->bound;
164 rc->code -= rc->bound;
165 *p -= *p >> RC_MOVE_BITS;
166}
167
168/* Called 4 times in unlzma loop */
169static int INIT rc_get_bit(struct rc *rc, uint16_t *p, int *symbol)
170{
171 if (rc_is_bit_0(rc, p)) {
172 rc_update_bit_0(rc, p);
173 *symbol *= 2;
174 return 0;
175 } else {
176 rc_update_bit_1(rc, p);
177 *symbol = *symbol * 2 + 1;
178 return 1;
179 }
180}
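
A standalone sketch (not part of the patch) of the adaptive model behind rc_update_bit_0()/rc_update_bit_1(): each context probability is an 11-bit estimate that the next bit is 0, nudged 1/32 of the remaining distance toward whichever bit was actually decoded.

#include <stdio.h>
#include <stdint.h>

#define RC_MODEL_TOTAL_BITS 11
#define RC_MOVE_BITS 5

int main(void)
{
        uint16_t p = 1 << (RC_MODEL_TOTAL_BITS - 1);    /* start at 50% */
        int i;

        for (i = 0; i < 8; i++) {                       /* observe eight 0-bits */
                p += ((1 << RC_MODEL_TOTAL_BITS) - p) >> RC_MOVE_BITS;
                printf("p(bit==0) ~ %u/2048\n", (unsigned)p);
        }
        for (i = 0; i < 8; i++) {                       /* then eight 1-bits */
                p -= p >> RC_MOVE_BITS;
                printf("p(bit==0) ~ %u/2048\n", (unsigned)p);
        }
        return 0;
}
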
181
182/* Called once */
183static inline int INIT rc_direct_bit(struct rc *rc)
184{
185 rc_normalize(rc);
186 rc->range >>= 1;
187 if (rc->code >= rc->range) {
188 rc->code -= rc->range;
189 return 1;
190 }
191 return 0;
192}
193
194/* Called twice */
195static inline void INIT
196rc_bit_tree_decode(struct rc *rc, uint16_t *p, int num_levels, int *symbol)
197{
198 int i = num_levels;
199
200 *symbol = 1;
201 while (i--)
202 rc_get_bit(rc, p + *symbol, symbol);
203 *symbol -= 1 << num_levels;
204}
205
206
207/*
208 * Small lzma deflate implementation.
209 * Copyright (C) 2006 Aurelien Jacobs < aurel@gnuage.org >
210 *
211 * Based on LzmaDecode.c from the LZMA SDK 4.22 (http://www.7-zip.org/)
212 * Copyright (C) 1999-2005 Igor Pavlov
213 */
214
215
216struct lzma_header {
217 uint8_t pos;
218 uint32_t dict_size;
219 uint64_t dst_size;
220} __attribute__ ((packed)) ;
221
222
223#define LZMA_BASE_SIZE 1846
224#define LZMA_LIT_SIZE 768
225
226#define LZMA_NUM_POS_BITS_MAX 4
227
228#define LZMA_LEN_NUM_LOW_BITS 3
229#define LZMA_LEN_NUM_MID_BITS 3
230#define LZMA_LEN_NUM_HIGH_BITS 8
231
232#define LZMA_LEN_CHOICE 0
233#define LZMA_LEN_CHOICE_2 (LZMA_LEN_CHOICE + 1)
234#define LZMA_LEN_LOW (LZMA_LEN_CHOICE_2 + 1)
235#define LZMA_LEN_MID (LZMA_LEN_LOW \
236 + (1 << (LZMA_NUM_POS_BITS_MAX + LZMA_LEN_NUM_LOW_BITS)))
237#define LZMA_LEN_HIGH (LZMA_LEN_MID \
238 +(1 << (LZMA_NUM_POS_BITS_MAX + LZMA_LEN_NUM_MID_BITS)))
239#define LZMA_NUM_LEN_PROBS (LZMA_LEN_HIGH + (1 << LZMA_LEN_NUM_HIGH_BITS))
240
241#define LZMA_NUM_STATES 12
242#define LZMA_NUM_LIT_STATES 7
243
244#define LZMA_START_POS_MODEL_INDEX 4
245#define LZMA_END_POS_MODEL_INDEX 14
246#define LZMA_NUM_FULL_DISTANCES (1 << (LZMA_END_POS_MODEL_INDEX >> 1))
247
248#define LZMA_NUM_POS_SLOT_BITS 6
249#define LZMA_NUM_LEN_TO_POS_STATES 4
250
251#define LZMA_NUM_ALIGN_BITS 4
252
253#define LZMA_MATCH_MIN_LEN 2
254
255#define LZMA_IS_MATCH 0
256#define LZMA_IS_REP (LZMA_IS_MATCH + (LZMA_NUM_STATES << LZMA_NUM_POS_BITS_MAX))
257#define LZMA_IS_REP_G0 (LZMA_IS_REP + LZMA_NUM_STATES)
258#define LZMA_IS_REP_G1 (LZMA_IS_REP_G0 + LZMA_NUM_STATES)
259#define LZMA_IS_REP_G2 (LZMA_IS_REP_G1 + LZMA_NUM_STATES)
260#define LZMA_IS_REP_0_LONG (LZMA_IS_REP_G2 + LZMA_NUM_STATES)
261#define LZMA_POS_SLOT (LZMA_IS_REP_0_LONG \
262 + (LZMA_NUM_STATES << LZMA_NUM_POS_BITS_MAX))
263#define LZMA_SPEC_POS (LZMA_POS_SLOT \
264 +(LZMA_NUM_LEN_TO_POS_STATES << LZMA_NUM_POS_SLOT_BITS))
265#define LZMA_ALIGN (LZMA_SPEC_POS \
266 + LZMA_NUM_FULL_DISTANCES - LZMA_END_POS_MODEL_INDEX)
267#define LZMA_LEN_CODER (LZMA_ALIGN + (1 << LZMA_NUM_ALIGN_BITS))
268#define LZMA_REP_LEN_CODER (LZMA_LEN_CODER + LZMA_NUM_LEN_PROBS)
269#define LZMA_LITERAL (LZMA_REP_LEN_CODER + LZMA_NUM_LEN_PROBS)
270
271
272struct writer {
273 uint8_t *buffer;
274 uint8_t previous_byte;
275 size_t buffer_pos;
276 int bufsize;
277 size_t global_pos;
278 int(*flush)(void*, unsigned int);
279 struct lzma_header *header;
280};
281
282struct cstate {
283 int state;
284 uint32_t rep0, rep1, rep2, rep3;
285};
286
287static inline size_t INIT get_pos(struct writer *wr)
288{
289 return
290 wr->global_pos + wr->buffer_pos;
291}
292
293static inline uint8_t INIT peek_old_byte(struct writer *wr,
294 uint32_t offs)
295{
296 if (!wr->flush) {
297 int32_t pos;
298 while (offs > wr->header->dict_size)
299 offs -= wr->header->dict_size;
300 pos = wr->buffer_pos - offs;
301 return wr->buffer[pos];
302 } else {
303 uint32_t pos = wr->buffer_pos - offs;
304 while (pos >= wr->header->dict_size)
305 pos += wr->header->dict_size;
306 return wr->buffer[pos];
307 }
308
309}
310
311static inline void INIT write_byte(struct writer *wr, uint8_t byte)
312{
313 wr->buffer[wr->buffer_pos++] = wr->previous_byte = byte;
314 if (wr->flush && wr->buffer_pos == wr->header->dict_size) {
315 wr->buffer_pos = 0;
316 wr->global_pos += wr->header->dict_size;
317 wr->flush((char *)wr->buffer, wr->header->dict_size);
318 }
319}
320
321
322static inline void INIT copy_byte(struct writer *wr, uint32_t offs)
323{
324 write_byte(wr, peek_old_byte(wr, offs));
325}
326
327static inline void INIT copy_bytes(struct writer *wr,
328 uint32_t rep0, int len)
329{
330 do {
331 copy_byte(wr, rep0);
332 len--;
333 } while (len != 0 && wr->buffer_pos < wr->header->dst_size);
334}
335
336static inline void INIT process_bit0(struct writer *wr, struct rc *rc,
337 struct cstate *cst, uint16_t *p,
338 int pos_state, uint16_t *prob,
339 int lc, uint32_t literal_pos_mask) {
340 int mi = 1;
341 rc_update_bit_0(rc, prob);
342 prob = (p + LZMA_LITERAL +
343 (LZMA_LIT_SIZE
344 * (((get_pos(wr) & literal_pos_mask) << lc)
345 + (wr->previous_byte >> (8 - lc))))
346 );
347
348 if (cst->state >= LZMA_NUM_LIT_STATES) {
349 int match_byte = peek_old_byte(wr, cst->rep0);
350 do {
351 int bit;
352 uint16_t *prob_lit;
353
354 match_byte <<= 1;
355 bit = match_byte & 0x100;
356 prob_lit = prob + 0x100 + bit + mi;
357 if (rc_get_bit(rc, prob_lit, &mi)) {
358 if (!bit)
359 break;
360 } else {
361 if (bit)
362 break;
363 }
364 } while (mi < 0x100);
365 }
366 while (mi < 0x100) {
367 uint16_t *prob_lit = prob + mi;
368 rc_get_bit(rc, prob_lit, &mi);
369 }
370 write_byte(wr, mi);
371 if (cst->state < 4)
372 cst->state = 0;
373 else if (cst->state < 10)
374 cst->state -= 3;
375 else
376 cst->state -= 6;
377}
378
379static inline void INIT process_bit1(struct writer *wr, struct rc *rc,
380 struct cstate *cst, uint16_t *p,
381 int pos_state, uint16_t *prob) {
382 int offset;
383 uint16_t *prob_len;
384 int num_bits;
385 int len;
386
387 rc_update_bit_1(rc, prob);
388 prob = p + LZMA_IS_REP + cst->state;
389 if (rc_is_bit_0(rc, prob)) {
390 rc_update_bit_0(rc, prob);
391 cst->rep3 = cst->rep2;
392 cst->rep2 = cst->rep1;
393 cst->rep1 = cst->rep0;
394 cst->state = cst->state < LZMA_NUM_LIT_STATES ? 0 : 3;
395 prob = p + LZMA_LEN_CODER;
396 } else {
397 rc_update_bit_1(rc, prob);
398 prob = p + LZMA_IS_REP_G0 + cst->state;
399 if (rc_is_bit_0(rc, prob)) {
400 rc_update_bit_0(rc, prob);
401 prob = (p + LZMA_IS_REP_0_LONG
402 + (cst->state <<
403 LZMA_NUM_POS_BITS_MAX) +
404 pos_state);
405 if (rc_is_bit_0(rc, prob)) {
406 rc_update_bit_0(rc, prob);
407
408 cst->state = cst->state < LZMA_NUM_LIT_STATES ?
409 9 : 11;
410 copy_byte(wr, cst->rep0);
411 return;
412 } else {
413 rc_update_bit_1(rc, prob);
414 }
415 } else {
416 uint32_t distance;
417
418 rc_update_bit_1(rc, prob);
419 prob = p + LZMA_IS_REP_G1 + cst->state;
420 if (rc_is_bit_0(rc, prob)) {
421 rc_update_bit_0(rc, prob);
422 distance = cst->rep1;
423 } else {
424 rc_update_bit_1(rc, prob);
425 prob = p + LZMA_IS_REP_G2 + cst->state;
426 if (rc_is_bit_0(rc, prob)) {
427 rc_update_bit_0(rc, prob);
428 distance = cst->rep2;
429 } else {
430 rc_update_bit_1(rc, prob);
431 distance = cst->rep3;
432 cst->rep3 = cst->rep2;
433 }
434 cst->rep2 = cst->rep1;
435 }
436 cst->rep1 = cst->rep0;
437 cst->rep0 = distance;
438 }
439 cst->state = cst->state < LZMA_NUM_LIT_STATES ? 8 : 11;
440 prob = p + LZMA_REP_LEN_CODER;
441 }
442
443 prob_len = prob + LZMA_LEN_CHOICE;
444 if (rc_is_bit_0(rc, prob_len)) {
445 rc_update_bit_0(rc, prob_len);
446 prob_len = (prob + LZMA_LEN_LOW
447 + (pos_state <<
448 LZMA_LEN_NUM_LOW_BITS));
449 offset = 0;
450 num_bits = LZMA_LEN_NUM_LOW_BITS;
451 } else {
452 rc_update_bit_1(rc, prob_len);
453 prob_len = prob + LZMA_LEN_CHOICE_2;
454 if (rc_is_bit_0(rc, prob_len)) {
455 rc_update_bit_0(rc, prob_len);
456 prob_len = (prob + LZMA_LEN_MID
457 + (pos_state <<
458 LZMA_LEN_NUM_MID_BITS));
459 offset = 1 << LZMA_LEN_NUM_LOW_BITS;
460 num_bits = LZMA_LEN_NUM_MID_BITS;
461 } else {
462 rc_update_bit_1(rc, prob_len);
463 prob_len = prob + LZMA_LEN_HIGH;
464 offset = ((1 << LZMA_LEN_NUM_LOW_BITS)
465 + (1 << LZMA_LEN_NUM_MID_BITS));
466 num_bits = LZMA_LEN_NUM_HIGH_BITS;
467 }
468 }
469
470 rc_bit_tree_decode(rc, prob_len, num_bits, &len);
471 len += offset;
472
473 if (cst->state < 4) {
474 int pos_slot;
475
476 cst->state += LZMA_NUM_LIT_STATES;
477 prob =
478 p + LZMA_POS_SLOT +
479 ((len <
480 LZMA_NUM_LEN_TO_POS_STATES ? len :
481 LZMA_NUM_LEN_TO_POS_STATES - 1)
482 << LZMA_NUM_POS_SLOT_BITS);
483 rc_bit_tree_decode(rc, prob,
484 LZMA_NUM_POS_SLOT_BITS,
485 &pos_slot);
486 if (pos_slot >= LZMA_START_POS_MODEL_INDEX) {
487 int i, mi;
488 num_bits = (pos_slot >> 1) - 1;
489 cst->rep0 = 2 | (pos_slot & 1);
490 if (pos_slot < LZMA_END_POS_MODEL_INDEX) {
491 cst->rep0 <<= num_bits;
492 prob = p + LZMA_SPEC_POS +
493 cst->rep0 - pos_slot - 1;
494 } else {
495 num_bits -= LZMA_NUM_ALIGN_BITS;
496 while (num_bits--)
497 cst->rep0 = (cst->rep0 << 1) |
498 rc_direct_bit(rc);
499 prob = p + LZMA_ALIGN;
500 cst->rep0 <<= LZMA_NUM_ALIGN_BITS;
501 num_bits = LZMA_NUM_ALIGN_BITS;
502 }
503 i = 1;
504 mi = 1;
505 while (num_bits--) {
506 if (rc_get_bit(rc, prob + mi, &mi))
507 cst->rep0 |= i;
508 i <<= 1;
509 }
510 } else
511 cst->rep0 = pos_slot;
512 if (++(cst->rep0) == 0)
513 return;
514 }
515
516 len += LZMA_MATCH_MIN_LEN;
517
518 copy_bytes(wr, cst->rep0, len);
519}
520
521
522
523STATIC inline int INIT unlzma(unsigned char *buf, int in_len,
524 int(*fill)(void*, unsigned int),
525 int(*flush)(void*, unsigned int),
526 unsigned char *output,
527 int *posp,
528 void(*error_fn)(char *x)
529 )
530{
531 struct lzma_header header;
532 int lc, pb, lp;
533 uint32_t pos_state_mask;
534 uint32_t literal_pos_mask;
535 uint16_t *p;
536 int num_probs;
537 struct rc rc;
538 int i, mi;
539 struct writer wr;
540 struct cstate cst;
541 unsigned char *inbuf;
542 int ret = -1;
543
544 set_error_fn(error_fn);
545 if (!flush)
546 in_len -= 4; /* Uncompressed size hack active in pre-boot
547 environment */
548 if (buf)
549 inbuf = buf;
550 else
551 inbuf = malloc(LZMA_IOBUF_SIZE);
552 if (!inbuf) {
553 error("Could not allocate input buffer");
554 goto exit_0;
555 }
556
557 cst.state = 0;
558 cst.rep0 = cst.rep1 = cst.rep2 = cst.rep3 = 1;
559
560 wr.header = &header;
561 wr.flush = flush;
562 wr.global_pos = 0;
563 wr.previous_byte = 0;
564 wr.buffer_pos = 0;
565
566 rc_init(&rc, fill, inbuf, in_len);
567
568 for (i = 0; i < sizeof(header); i++) {
569 if (rc.ptr >= rc.buffer_end)
570 rc_read(&rc);
571 ((unsigned char *)&header)[i] = *rc.ptr++;
572 }
573
574 if (header.pos >= (9 * 5 * 5))
575 error("bad header");
576
577 mi = 0;
578 lc = header.pos;
579 while (lc >= 9) {
580 mi++;
581 lc -= 9;
582 }
583 pb = 0;
584 lp = mi;
585 while (lp >= 5) {
586 pb++;
587 lp -= 5;
588 }
589 pos_state_mask = (1 << pb) - 1;
590 literal_pos_mask = (1 << lp) - 1;
591
592 ENDIAN_CONVERT(header.dict_size);
593 ENDIAN_CONVERT(header.dst_size);
594
595 if (header.dict_size == 0)
596 header.dict_size = 1;
597
598 if (output)
599 wr.buffer = output;
600 else {
601 wr.bufsize = MIN(header.dst_size, header.dict_size);
602 wr.buffer = large_malloc(wr.bufsize);
603 }
604 if (wr.buffer == NULL)
605 goto exit_1;
606
607 num_probs = LZMA_BASE_SIZE + (LZMA_LIT_SIZE << (lc + lp));
608 p = (uint16_t *) large_malloc(num_probs * sizeof(*p));
609 if (p == 0)
610 goto exit_2;
611 num_probs = LZMA_LITERAL + (LZMA_LIT_SIZE << (lc + lp));
612 for (i = 0; i < num_probs; i++)
613 p[i] = (1 << RC_MODEL_TOTAL_BITS) >> 1;
614
615 rc_init_code(&rc);
616
617 while (get_pos(&wr) < header.dst_size) {
618 int pos_state = get_pos(&wr) & pos_state_mask;
619 uint16_t *prob = p + LZMA_IS_MATCH +
620 (cst.state << LZMA_NUM_POS_BITS_MAX) + pos_state;
621 if (rc_is_bit_0(&rc, prob))
622 process_bit0(&wr, &rc, &cst, p, pos_state, prob,
623 lc, literal_pos_mask);
624 else {
625 process_bit1(&wr, &rc, &cst, p, pos_state, prob);
626 if (cst.rep0 == 0)
627 break;
628 }
629 }
630
631 if (posp)
632 *posp = rc.ptr-rc.buffer;
633 if (wr.flush)
634 wr.flush(wr.buffer, wr.buffer_pos);
635 ret = 0;
636 large_free(p);
637exit_2:
638 if (!output)
639 large_free(wr.buffer);
640exit_1:
641 if (!buf)
642 free(inbuf);
643exit_0:
644 return ret;
645}
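
The properties byte decoded near the top of unlzma() packs three parameters as (pb * 5 + lp) * 9 + lc; the loops above undo that encoding by repeated subtraction. A standalone sketch (not part of the patch) of the same arithmetic using division, for the common default properties byte 0x5d:

#include <stdio.h>

int main(void)
{
        int props = 0x5d, lc, lp, pb;

        lc = props % 9;                 /* literal context bits */
        lp = (props / 9) % 5;           /* literal position bits */
        pb = props / 9 / 5;             /* position bits */
        printf("lc=%d lp=%d pb=%d\n", lc, lp, pb);      /* lc=3 lp=0 pb=2 */
        return 0;
}
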
646
647#define decompress unlzma
diff --git a/lib/zlib_inflate/inflate.h b/lib/zlib_inflate/inflate.h
index df8a6c92052d..3d17b3d1b21f 100644
--- a/lib/zlib_inflate/inflate.h
+++ b/lib/zlib_inflate/inflate.h
@@ -1,3 +1,6 @@
1#ifndef INFLATE_H
2#define INFLATE_H
3
1/* inflate.h -- internal inflate state definition 4/* inflate.h -- internal inflate state definition
2 * Copyright (C) 1995-2004 Mark Adler 5 * Copyright (C) 1995-2004 Mark Adler
3 * For conditions of distribution and use, see copyright notice in zlib.h 6 * For conditions of distribution and use, see copyright notice in zlib.h
@@ -105,3 +108,4 @@ struct inflate_state {
105 unsigned short work[288]; /* work area for code table building */ 108 unsigned short work[288]; /* work area for code table building */
106 code codes[ENOUGH]; /* space for code tables */ 109 code codes[ENOUGH]; /* space for code tables */
107}; 110};
111#endif
diff --git a/lib/zlib_inflate/inftrees.h b/lib/zlib_inflate/inftrees.h
index 5f5219b1240e..b70b4731ac7a 100644
--- a/lib/zlib_inflate/inftrees.h
+++ b/lib/zlib_inflate/inftrees.h
@@ -1,3 +1,6 @@
1#ifndef INFTREES_H
2#define INFTREES_H
3
1/* inftrees.h -- header to use inftrees.c 4/* inftrees.h -- header to use inftrees.c
2 * Copyright (C) 1995-2005 Mark Adler 5 * Copyright (C) 1995-2005 Mark Adler
3 * For conditions of distribution and use, see copyright notice in zlib.h 6 * For conditions of distribution and use, see copyright notice in zlib.h
@@ -53,3 +56,4 @@ typedef enum {
53extern int zlib_inflate_table (codetype type, unsigned short *lens, 56extern int zlib_inflate_table (codetype type, unsigned short *lens,
54 unsigned codes, code **table, 57 unsigned codes, code **table,
55 unsigned *bits, unsigned short *work); 58 unsigned *bits, unsigned short *work);
59#endif
diff --git a/mm/filemap.c b/mm/filemap.c
index 23acefe51808..126d3973b3d1 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1823,7 +1823,7 @@ static size_t __iovec_copy_from_user_inatomic(char *vaddr,
1823 int copy = min(bytes, iov->iov_len - base); 1823 int copy = min(bytes, iov->iov_len - base);
1824 1824
1825 base = 0; 1825 base = 0;
1826 left = __copy_from_user_inatomic_nocache(vaddr, buf, copy); 1826 left = __copy_from_user_inatomic(vaddr, buf, copy);
1827 copied += copy; 1827 copied += copy;
1828 bytes -= copy; 1828 bytes -= copy;
1829 vaddr += copy; 1829 vaddr += copy;
@@ -1851,8 +1851,7 @@ size_t iov_iter_copy_from_user_atomic(struct page *page,
1851 if (likely(i->nr_segs == 1)) { 1851 if (likely(i->nr_segs == 1)) {
1852 int left; 1852 int left;
1853 char __user *buf = i->iov->iov_base + i->iov_offset; 1853 char __user *buf = i->iov->iov_base + i->iov_offset;
1854 left = __copy_from_user_inatomic_nocache(kaddr + offset, 1854 left = __copy_from_user_inatomic(kaddr + offset, buf, bytes);
1855 buf, bytes);
1856 copied = bytes - left; 1855 copied = bytes - left;
1857 } else { 1856 } else {
1858 copied = __iovec_copy_from_user_inatomic(kaddr + offset, 1857 copied = __iovec_copy_from_user_inatomic(kaddr + offset,
@@ -1880,7 +1879,7 @@ size_t iov_iter_copy_from_user(struct page *page,
1880 if (likely(i->nr_segs == 1)) { 1879 if (likely(i->nr_segs == 1)) {
1881 int left; 1880 int left;
1882 char __user *buf = i->iov->iov_base + i->iov_offset; 1881 char __user *buf = i->iov->iov_base + i->iov_offset;
1883 left = __copy_from_user_nocache(kaddr + offset, buf, bytes); 1882 left = __copy_from_user(kaddr + offset, buf, bytes);
1884 copied = bytes - left; 1883 copied = bytes - left;
1885 } else { 1884 } else {
1886 copied = __iovec_copy_from_user_inatomic(kaddr + offset, 1885 copied = __iovec_copy_from_user_inatomic(kaddr + offset,
diff --git a/mm/shmem.c b/mm/shmem.c
index 19d566ccdeea..4103a239ce84 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -169,13 +169,13 @@ static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
169 */ 169 */
170static inline int shmem_acct_size(unsigned long flags, loff_t size) 170static inline int shmem_acct_size(unsigned long flags, loff_t size)
171{ 171{
172 return (flags & VM_ACCOUNT) ? 172 return (flags & VM_NORESERVE) ?
173 security_vm_enough_memory_kern(VM_ACCT(size)) : 0; 173 0 : security_vm_enough_memory_kern(VM_ACCT(size));
174} 174}
175 175
176static inline void shmem_unacct_size(unsigned long flags, loff_t size) 176static inline void shmem_unacct_size(unsigned long flags, loff_t size)
177{ 177{
178 if (flags & VM_ACCOUNT) 178 if (!(flags & VM_NORESERVE))
179 vm_unacct_memory(VM_ACCT(size)); 179 vm_unacct_memory(VM_ACCT(size));
180} 180}
181 181
@@ -187,13 +187,13 @@ static inline void shmem_unacct_size(unsigned long flags, loff_t size)
187 */ 187 */
188static inline int shmem_acct_block(unsigned long flags) 188static inline int shmem_acct_block(unsigned long flags)
189{ 189{
190 return (flags & VM_ACCOUNT) ? 190 return (flags & VM_NORESERVE) ?
191 0 : security_vm_enough_memory_kern(VM_ACCT(PAGE_CACHE_SIZE)); 191 security_vm_enough_memory_kern(VM_ACCT(PAGE_CACHE_SIZE)) : 0;
192} 192}
193 193
194static inline void shmem_unacct_blocks(unsigned long flags, long pages) 194static inline void shmem_unacct_blocks(unsigned long flags, long pages)
195{ 195{
196 if (!(flags & VM_ACCOUNT)) 196 if (flags & VM_NORESERVE)
197 vm_unacct_memory(pages * VM_ACCT(PAGE_CACHE_SIZE)); 197 vm_unacct_memory(pages * VM_ACCT(PAGE_CACHE_SIZE));
198} 198}
199 199
@@ -1515,8 +1515,8 @@ static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
1515 return 0; 1515 return 0;
1516} 1516}
1517 1517
1518static struct inode * 1518static struct inode *shmem_get_inode(struct super_block *sb, int mode,
1519shmem_get_inode(struct super_block *sb, int mode, dev_t dev) 1519 dev_t dev, unsigned long flags)
1520{ 1520{
1521 struct inode *inode; 1521 struct inode *inode;
1522 struct shmem_inode_info *info; 1522 struct shmem_inode_info *info;
@@ -1537,6 +1537,7 @@ shmem_get_inode(struct super_block *sb, int mode, dev_t dev)
1537 info = SHMEM_I(inode); 1537 info = SHMEM_I(inode);
1538 memset(info, 0, (char *)inode - (char *)info); 1538 memset(info, 0, (char *)inode - (char *)info);
1539 spin_lock_init(&info->lock); 1539 spin_lock_init(&info->lock);
1540 info->flags = flags & VM_NORESERVE;
1540 INIT_LIST_HEAD(&info->swaplist); 1541 INIT_LIST_HEAD(&info->swaplist);
1541 1542
1542 switch (mode & S_IFMT) { 1543 switch (mode & S_IFMT) {
@@ -1779,9 +1780,10 @@ static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
1779static int 1780static int
1780shmem_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev) 1781shmem_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
1781{ 1782{
1782 struct inode *inode = shmem_get_inode(dir->i_sb, mode, dev); 1783 struct inode *inode;
1783 int error = -ENOSPC; 1784 int error = -ENOSPC;
1784 1785
1786 inode = shmem_get_inode(dir->i_sb, mode, dev, VM_NORESERVE);
1785 if (inode) { 1787 if (inode) {
1786 error = security_inode_init_security(inode, dir, NULL, NULL, 1788 error = security_inode_init_security(inode, dir, NULL, NULL,
1787 NULL); 1789 NULL);
@@ -1920,7 +1922,7 @@ static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *s
1920 if (len > PAGE_CACHE_SIZE) 1922 if (len > PAGE_CACHE_SIZE)
1921 return -ENAMETOOLONG; 1923 return -ENAMETOOLONG;
1922 1924
1923 inode = shmem_get_inode(dir->i_sb, S_IFLNK|S_IRWXUGO, 0); 1925 inode = shmem_get_inode(dir->i_sb, S_IFLNK|S_IRWXUGO, 0, VM_NORESERVE);
1924 if (!inode) 1926 if (!inode)
1925 return -ENOSPC; 1927 return -ENOSPC;
1926 1928
@@ -2332,7 +2334,7 @@ static int shmem_fill_super(struct super_block *sb,
2332 sb->s_flags |= MS_POSIXACL; 2334 sb->s_flags |= MS_POSIXACL;
2333#endif 2335#endif
2334 2336
2335 inode = shmem_get_inode(sb, S_IFDIR | sbinfo->mode, 0); 2337 inode = shmem_get_inode(sb, S_IFDIR | sbinfo->mode, 0, VM_NORESERVE);
2336 if (!inode) 2338 if (!inode)
2337 goto failed; 2339 goto failed;
2338 inode->i_uid = sbinfo->uid; 2340 inode->i_uid = sbinfo->uid;
@@ -2574,12 +2576,12 @@ int shmem_unuse(swp_entry_t entry, struct page *page)
2574 return 0; 2576 return 0;
2575} 2577}
2576 2578
2577#define shmem_file_operations ramfs_file_operations 2579#define shmem_vm_ops generic_file_vm_ops
2578#define shmem_vm_ops generic_file_vm_ops 2580#define shmem_file_operations ramfs_file_operations
2579#define shmem_get_inode ramfs_get_inode 2581#define shmem_get_inode(sb, mode, dev, flags) ramfs_get_inode(sb, mode, dev)
2580#define shmem_acct_size(a, b) 0 2582#define shmem_acct_size(flags, size) 0
2581#define shmem_unacct_size(a, b) do {} while (0) 2583#define shmem_unacct_size(flags, size) do {} while (0)
2582#define SHMEM_MAX_BYTES LLONG_MAX 2584#define SHMEM_MAX_BYTES LLONG_MAX
2583 2585
2584#endif /* CONFIG_SHMEM */ 2586#endif /* CONFIG_SHMEM */
2585 2587
@@ -2589,7 +2591,7 @@ int shmem_unuse(swp_entry_t entry, struct page *page)
2589 * shmem_file_setup - get an unlinked file living in tmpfs 2591 * shmem_file_setup - get an unlinked file living in tmpfs
2590 * @name: name for dentry (to be seen in /proc/<pid>/maps 2592 * @name: name for dentry (to be seen in /proc/<pid>/maps
2591 * @size: size to be set for the file 2593 * @size: size to be set for the file
2592 * @flags: vm_flags 2594 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
2593 */ 2595 */
2594struct file *shmem_file_setup(char *name, loff_t size, unsigned long flags) 2596struct file *shmem_file_setup(char *name, loff_t size, unsigned long flags)
2595{ 2597{
@@ -2623,13 +2625,10 @@ struct file *shmem_file_setup(char *name, loff_t size, unsigned long flags)
2623 goto put_dentry; 2625 goto put_dentry;
2624 2626
2625 error = -ENOSPC; 2627 error = -ENOSPC;
2626 inode = shmem_get_inode(root->d_sb, S_IFREG | S_IRWXUGO, 0); 2628 inode = shmem_get_inode(root->d_sb, S_IFREG | S_IRWXUGO, 0, flags);
2627 if (!inode) 2629 if (!inode)
2628 goto close_file; 2630 goto close_file;
2629 2631
2630#ifdef CONFIG_SHMEM
2631 SHMEM_I(inode)->flags = (flags & VM_NORESERVE) ? 0 : VM_ACCOUNT;
2632#endif
2633 d_instantiate(dentry, inode); 2632 d_instantiate(dentry, inode);
2634 inode->i_size = size; 2633 inode->i_size = size;
2635 inode->i_nlink = 0; /* It is unlinked */ 2634 inode->i_nlink = 0; /* It is unlinked */
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index fb6f59935fb2..af58324c361a 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -333,6 +333,7 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
333 unsigned long addr; 333 unsigned long addr;
334 int purged = 0; 334 int purged = 0;
335 335
336 BUG_ON(!size);
336 BUG_ON(size & ~PAGE_MASK); 337 BUG_ON(size & ~PAGE_MASK);
337 338
338 va = kmalloc_node(sizeof(struct vmap_area), 339 va = kmalloc_node(sizeof(struct vmap_area),
@@ -344,6 +345,9 @@ retry:
344 addr = ALIGN(vstart, align); 345 addr = ALIGN(vstart, align);
345 346
346 spin_lock(&vmap_area_lock); 347 spin_lock(&vmap_area_lock);
348 if (addr + size - 1 < addr)
349 goto overflow;
350
347 /* XXX: could have a last_hole cache */ 351 /* XXX: could have a last_hole cache */
348 n = vmap_area_root.rb_node; 352 n = vmap_area_root.rb_node;
349 if (n) { 353 if (n) {
@@ -375,6 +379,8 @@ retry:
375 379
376 while (addr + size > first->va_start && addr + size <= vend) { 380 while (addr + size > first->va_start && addr + size <= vend) {
377 addr = ALIGN(first->va_end + PAGE_SIZE, align); 381 addr = ALIGN(first->va_end + PAGE_SIZE, align);
382 if (addr + size - 1 < addr)
383 goto overflow;
378 384
379 n = rb_next(&first->rb_node); 385 n = rb_next(&first->rb_node);
380 if (n) 386 if (n)
@@ -385,6 +391,7 @@ retry:
385 } 391 }
386found: 392found:
387 if (addr + size > vend) { 393 if (addr + size > vend) {
394overflow:
388 spin_unlock(&vmap_area_lock); 395 spin_unlock(&vmap_area_lock);
389 if (!purged) { 396 if (!purged) {
390 purge_vmap_area_lazy(); 397 purge_vmap_area_lazy();
@@ -508,6 +515,7 @@ static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
508 static DEFINE_SPINLOCK(purge_lock); 515 static DEFINE_SPINLOCK(purge_lock);
509 LIST_HEAD(valist); 516 LIST_HEAD(valist);
510 struct vmap_area *va; 517 struct vmap_area *va;
518 struct vmap_area *n_va;
511 int nr = 0; 519 int nr = 0;
512 520
513 /* 521 /*
@@ -547,7 +555,7 @@ static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
547 555
548 if (nr) { 556 if (nr) {
549 spin_lock(&vmap_area_lock); 557 spin_lock(&vmap_area_lock);
550 list_for_each_entry(va, &valist, purge_list) 558 list_for_each_entry_safe(va, n_va, &valist, purge_list)
551 __free_vmap_area(va); 559 __free_vmap_area(va);
552 spin_unlock(&vmap_area_lock); 560 spin_unlock(&vmap_area_lock);
553 } 561 }
@@ -1347,6 +1355,7 @@ EXPORT_SYMBOL(vfree);
1347void vunmap(const void *addr) 1355void vunmap(const void *addr)
1348{ 1356{
1349 BUG_ON(in_interrupt()); 1357 BUG_ON(in_interrupt());
1358 might_sleep();
1350 __vunmap(addr, 0); 1359 __vunmap(addr, 0);
1351} 1360}
1352EXPORT_SYMBOL(vunmap); 1361EXPORT_SYMBOL(vunmap);
@@ -1366,6 +1375,8 @@ void *vmap(struct page **pages, unsigned int count,
1366{ 1375{
1367 struct vm_struct *area; 1376 struct vm_struct *area;
1368 1377
1378 might_sleep();
1379
1369 if (count > num_physpages) 1380 if (count > num_physpages)
1370 return NULL; 1381 return NULL;
1371 1382
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index e9db889d6222..2886d2fb9ab5 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -1,12 +1,16 @@
1#include <linux/skbuff.h> 1#include <linux/skbuff.h>
2#include <linux/netdevice.h> 2#include <linux/netdevice.h>
3#include <linux/if_vlan.h> 3#include <linux/if_vlan.h>
4#include <linux/netpoll.h>
4#include "vlan.h" 5#include "vlan.h"
5 6
6/* VLAN rx hw acceleration helper. This acts like netif_{rx,receive_skb}(). */ 7/* VLAN rx hw acceleration helper. This acts like netif_{rx,receive_skb}(). */
7int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp, 8int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp,
8 u16 vlan_tci, int polling) 9 u16 vlan_tci, int polling)
9{ 10{
11 if (netpoll_rx(skb))
12 return NET_RX_DROP;
13
10 if (skb_bond_should_drop(skb)) 14 if (skb_bond_should_drop(skb))
11 goto drop; 15 goto drop;
12 16
@@ -100,6 +104,9 @@ int vlan_gro_receive(struct napi_struct *napi, struct vlan_group *grp,
100{ 104{
101 int err = NET_RX_SUCCESS; 105 int err = NET_RX_SUCCESS;
102 106
107 if (netpoll_receive_skb(skb))
108 return NET_RX_DROP;
109
103 switch (vlan_gro_common(napi, grp, vlan_tci, skb)) { 110 switch (vlan_gro_common(napi, grp, vlan_tci, skb)) {
104 case -1: 111 case -1:
105 return netif_receive_skb(skb); 112 return netif_receive_skb(skb);
@@ -126,6 +133,9 @@ int vlan_gro_frags(struct napi_struct *napi, struct vlan_group *grp,
126 if (!skb) 133 if (!skb)
127 goto out; 134 goto out;
128 135
136 if (netpoll_receive_skb(skb))
137 goto out;
138
129 err = NET_RX_SUCCESS; 139 err = NET_RX_SUCCESS;
130 140
131 switch (vlan_gro_common(napi, grp, vlan_tci, skb)) { 141 switch (vlan_gro_common(napi, grp, vlan_tci, skb)) {
diff --git a/net/core/dev.c b/net/core/dev.c
index a17e00662363..72b0d26fd46d 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2488,6 +2488,9 @@ static int __napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
2488 2488
2489int napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb) 2489int napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
2490{ 2490{
2491 if (netpoll_receive_skb(skb))
2492 return NET_RX_DROP;
2493
2491 switch (__napi_gro_receive(napi, skb)) { 2494 switch (__napi_gro_receive(napi, skb)) {
2492 case -1: 2495 case -1:
2493 return netif_receive_skb(skb); 2496 return netif_receive_skb(skb);
@@ -2558,6 +2561,9 @@ int napi_gro_frags(struct napi_struct *napi, struct napi_gro_fraginfo *info)
2558 if (!skb) 2561 if (!skb)
2559 goto out; 2562 goto out;
2560 2563
2564 if (netpoll_receive_skb(skb))
2565 goto out;
2566
2561 err = NET_RX_SUCCESS; 2567 err = NET_RX_SUCCESS;
2562 2568
2563 switch (__napi_gro_receive(napi, skb)) { 2569 switch (__napi_gro_receive(napi, skb)) {
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index 55151faaf90c..2adb1a7d361f 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -32,24 +32,14 @@ static __net_init int setup_net(struct net *net)
32{ 32{
33 /* Must be called with net_mutex held */ 33 /* Must be called with net_mutex held */
34 struct pernet_operations *ops; 34 struct pernet_operations *ops;
35 int error; 35 int error = 0;
36 struct net_generic *ng;
37 36
38 atomic_set(&net->count, 1); 37 atomic_set(&net->count, 1);
38
39#ifdef NETNS_REFCNT_DEBUG 39#ifdef NETNS_REFCNT_DEBUG
40 atomic_set(&net->use_count, 0); 40 atomic_set(&net->use_count, 0);
41#endif 41#endif
42 42
43 error = -ENOMEM;
44 ng = kzalloc(sizeof(struct net_generic) +
45 INITIAL_NET_GEN_PTRS * sizeof(void *), GFP_KERNEL);
46 if (ng == NULL)
47 goto out;
48
49 ng->len = INITIAL_NET_GEN_PTRS;
50 rcu_assign_pointer(net->gen, ng);
51
52 error = 0;
53 list_for_each_entry(ops, &pernet_list, list) { 43 list_for_each_entry(ops, &pernet_list, list) {
54 if (ops->init) { 44 if (ops->init) {
55 error = ops->init(net); 45 error = ops->init(net);
@@ -70,24 +60,50 @@ out_undo:
70 } 60 }
71 61
72 rcu_barrier(); 62 rcu_barrier();
73 kfree(ng);
74 goto out; 63 goto out;
75} 64}
76 65
66static struct net_generic *net_alloc_generic(void)
67{
68 struct net_generic *ng;
69 size_t generic_size = sizeof(struct net_generic) +
70 INITIAL_NET_GEN_PTRS * sizeof(void *);
71
72 ng = kzalloc(generic_size, GFP_KERNEL);
73 if (ng)
74 ng->len = INITIAL_NET_GEN_PTRS;
75
76 return ng;
77}
78
77#ifdef CONFIG_NET_NS 79#ifdef CONFIG_NET_NS
78static struct kmem_cache *net_cachep; 80static struct kmem_cache *net_cachep;
79static struct workqueue_struct *netns_wq; 81static struct workqueue_struct *netns_wq;
80 82
81static struct net *net_alloc(void) 83static struct net *net_alloc(void)
82{ 84{
83 return kmem_cache_zalloc(net_cachep, GFP_KERNEL); 85 struct net *net = NULL;
86 struct net_generic *ng;
87
88 ng = net_alloc_generic();
89 if (!ng)
90 goto out;
91
92 net = kmem_cache_zalloc(net_cachep, GFP_KERNEL);
93 if (!net)
94 goto out_free;
95
96 rcu_assign_pointer(net->gen, ng);
97out:
98 return net;
99
100out_free:
101 kfree(ng);
102 goto out;
84} 103}
85 104
86static void net_free(struct net *net) 105static void net_free(struct net *net)
87{ 106{
88 if (!net)
89 return;
90
91#ifdef NETNS_REFCNT_DEBUG 107#ifdef NETNS_REFCNT_DEBUG
92 if (unlikely(atomic_read(&net->use_count) != 0)) { 108 if (unlikely(atomic_read(&net->use_count) != 0)) {
93 printk(KERN_EMERG "network namespace not free! Usage: %d\n", 109 printk(KERN_EMERG "network namespace not free! Usage: %d\n",
@@ -112,27 +128,28 @@ struct net *copy_net_ns(unsigned long flags, struct net *old_net)
112 err = -ENOMEM; 128 err = -ENOMEM;
113 new_net = net_alloc(); 129 new_net = net_alloc();
114 if (!new_net) 130 if (!new_net)
115 goto out; 131 goto out_err;
116 132
117 mutex_lock(&net_mutex); 133 mutex_lock(&net_mutex);
118 err = setup_net(new_net); 134 err = setup_net(new_net);
119 if (err) 135 if (!err) {
120 goto out_unlock; 136 rtnl_lock();
121 137 list_add_tail(&new_net->list, &net_namespace_list);
122 rtnl_lock(); 138 rtnl_unlock();
123 list_add_tail(&new_net->list, &net_namespace_list); 139 }
124 rtnl_unlock();
125
126
127out_unlock:
128 mutex_unlock(&net_mutex); 140 mutex_unlock(&net_mutex);
141
142 if (err)
143 goto out_free;
129out: 144out:
130 put_net(old_net); 145 put_net(old_net);
131 if (err) {
132 net_free(new_net);
133 new_net = ERR_PTR(err);
134 }
135 return new_net; 146 return new_net;
147
148out_free:
149 net_free(new_net);
150out_err:
151 new_net = ERR_PTR(err);
152 goto out;
136} 153}
137 154
138static void cleanup_net(struct work_struct *work) 155static void cleanup_net(struct work_struct *work)
@@ -188,6 +205,7 @@ struct net *copy_net_ns(unsigned long flags, struct net *old_net)
188 205
189static int __init net_ns_init(void) 206static int __init net_ns_init(void)
190{ 207{
208 struct net_generic *ng;
191 int err; 209 int err;
192 210
193 printk(KERN_INFO "net_namespace: %zd bytes\n", sizeof(struct net)); 211 printk(KERN_INFO "net_namespace: %zd bytes\n", sizeof(struct net));
@@ -202,6 +220,12 @@ static int __init net_ns_init(void)
202 panic("Could not create netns workq"); 220 panic("Could not create netns workq");
203#endif 221#endif
204 222
223 ng = net_alloc_generic();
224 if (!ng)
225 panic("Could not allocate generic netns");
226
227 rcu_assign_pointer(init_net.gen, ng);
228
205 mutex_lock(&net_mutex); 229 mutex_lock(&net_mutex);
206 err = setup_net(&init_net); 230 err = setup_net(&init_net);
207 231
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index da74b844f4ea..c6a6b166f8d6 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -143,14 +143,6 @@ void skb_under_panic(struct sk_buff *skb, int sz, void *here)
143 BUG(); 143 BUG();
144} 144}
145 145
146void skb_truesize_bug(struct sk_buff *skb)
147{
148 WARN(net_ratelimit(), KERN_ERR "SKB BUG: Invalid truesize (%u) "
149 "len=%u, sizeof(sk_buff)=%Zd\n",
150 skb->truesize, skb->len, sizeof(struct sk_buff));
151}
152EXPORT_SYMBOL(skb_truesize_bug);
153
154/* Allocate a new skbuff. We do this ourselves so we can fill in a few 146/* Allocate a new skbuff. We do this ourselves so we can fill in a few
155 * 'private' fields and also do memory statistics to find all the 147 * 'private' fields and also do memory statistics to find all the
156 * [BEEP] leaks. 148 * [BEEP] leaks.
diff --git a/net/core/sock.c b/net/core/sock.c
index 6f2e1337975d..5f97caa158e8 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -696,7 +696,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
696 if (len < 0) 696 if (len < 0)
697 return -EINVAL; 697 return -EINVAL;
698 698
699 v.val = 0; 699 memset(&v, 0, sizeof(v));
700 700
701 switch(optname) { 701 switch(optname) {
702 case SO_DEBUG: 702 case SO_DEBUG:
@@ -1137,7 +1137,6 @@ void sock_rfree(struct sk_buff *skb)
1137{ 1137{
1138 struct sock *sk = skb->sk; 1138 struct sock *sk = skb->sk;
1139 1139
1140 skb_truesize_check(skb);
1141 atomic_sub(skb->truesize, &sk->sk_rmem_alloc); 1140 atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
1142 sk_mem_uncharge(skb->sk, skb->truesize); 1141 sk_mem_uncharge(skb->sk, skb->truesize);
1143} 1142}
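The sock_getsockopt() change above swaps "v.val = 0" for memset(&v, 0, sizeof(v)): the option value is a union, and zeroing only one member leaves the remaining bytes as uninitialized stack that can later be copied out to userspace. A small standalone illustration (union optval is a made-up type, not the kernel's):

#include <stdio.h>
#include <string.h>

union optval {
    int val;
    char buf[16];
};

int main(void)
{
    union optval v;

    v.val = 0;                 /* defines only the first sizeof(int) of the 16 bytes */
    /* v.buf[sizeof(int)..15] would still be indeterminate stack contents here */

    memset(&v, 0, sizeof(v));  /* defines every byte before any of them is copied out */
    printf("last byte after memset: %d\n", v.buf[sizeof(v.buf) - 1]);
    return 0;
}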
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index a6961d75c7ea..c28976a7e596 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1374,7 +1374,8 @@ static u8 tcp_sacktag_one(struct sk_buff *skb, struct sock *sk,
1374 1374
1375static int tcp_shifted_skb(struct sock *sk, struct sk_buff *skb, 1375static int tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
1376 struct tcp_sacktag_state *state, 1376 struct tcp_sacktag_state *state,
1377 unsigned int pcount, int shifted, int mss) 1377 unsigned int pcount, int shifted, int mss,
1378 int dup_sack)
1378{ 1379{
1379 struct tcp_sock *tp = tcp_sk(sk); 1380 struct tcp_sock *tp = tcp_sk(sk);
1380 struct sk_buff *prev = tcp_write_queue_prev(sk, skb); 1381 struct sk_buff *prev = tcp_write_queue_prev(sk, skb);
@@ -1410,7 +1411,7 @@ static int tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
1410 } 1411 }
1411 1412
1412 /* We discard results */ 1413 /* We discard results */
1413 tcp_sacktag_one(skb, sk, state, 0, pcount); 1414 tcp_sacktag_one(skb, sk, state, dup_sack, pcount);
1414 1415
1415 /* Difference in this won't matter, both ACKed by the same cumul. ACK */ 1416 /* Difference in this won't matter, both ACKed by the same cumul. ACK */
1416 TCP_SKB_CB(prev)->sacked |= (TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS); 1417 TCP_SKB_CB(prev)->sacked |= (TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS);
@@ -1561,7 +1562,7 @@ static struct sk_buff *tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb,
1561 1562
1562 if (!skb_shift(prev, skb, len)) 1563 if (!skb_shift(prev, skb, len))
1563 goto fallback; 1564 goto fallback;
1564 if (!tcp_shifted_skb(sk, skb, state, pcount, len, mss)) 1565 if (!tcp_shifted_skb(sk, skb, state, pcount, len, mss, dup_sack))
1565 goto out; 1566 goto out;
1566 1567
1567 /* Hole filled allows collapsing with the next as well, this is very 1568 /* Hole filled allows collapsing with the next as well, this is very
@@ -1580,7 +1581,7 @@ static struct sk_buff *tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb,
1580 len = skb->len; 1581 len = skb->len;
1581 if (skb_shift(prev, skb, len)) { 1582 if (skb_shift(prev, skb, len)) {
1582 pcount += tcp_skb_pcount(skb); 1583 pcount += tcp_skb_pcount(skb);
1583 tcp_shifted_skb(sk, skb, state, tcp_skb_pcount(skb), len, mss); 1584 tcp_shifted_skb(sk, skb, state, tcp_skb_pcount(skb), len, mss, 0);
1584 } 1585 }
1585 1586
1586out: 1587out:
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index dda42f0bd7a3..da2c3b8794f2 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2023,7 +2023,6 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
2023 last_lost = tp->snd_una; 2023 last_lost = tp->snd_una;
2024 } 2024 }
2025 2025
2026 /* First pass: retransmit lost packets. */
2027 tcp_for_write_queue_from(skb, sk) { 2026 tcp_for_write_queue_from(skb, sk) {
2028 __u8 sacked = TCP_SKB_CB(skb)->sacked; 2027 __u8 sacked = TCP_SKB_CB(skb)->sacked;
2029 2028
diff --git a/net/ipv4/tcp_scalable.c b/net/ipv4/tcp_scalable.c
index 2747ec7bfb63..4660b088a8ce 100644
--- a/net/ipv4/tcp_scalable.c
+++ b/net/ipv4/tcp_scalable.c
@@ -1,6 +1,6 @@
1/* Tom Kelly's Scalable TCP 1/* Tom Kelly's Scalable TCP
2 * 2 *
3 * See htt://www-lce.eng.cam.ac.uk/~ctk21/scalable/ 3 * See http://www.deneholme.net/tom/scalable/
4 * 4 *
5 * John Heffner <jheffner@sc.edu> 5 * John Heffner <jheffner@sc.edu>
6 */ 6 */
diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c
index 8fe267feb81e..1bcc3431859e 100644
--- a/net/ipv6/inet6_hashtables.c
+++ b/net/ipv6/inet6_hashtables.c
@@ -258,11 +258,11 @@ unique:
258 258
259 if (twp != NULL) { 259 if (twp != NULL) {
260 *twp = tw; 260 *twp = tw;
261 NET_INC_STATS_BH(twsk_net(tw), LINUX_MIB_TIMEWAITRECYCLED); 261 NET_INC_STATS_BH(net, LINUX_MIB_TIMEWAITRECYCLED);
262 } else if (tw != NULL) { 262 } else if (tw != NULL) {
263 /* Silly. Should hash-dance instead... */ 263 /* Silly. Should hash-dance instead... */
264 inet_twsk_deschedule(tw, death_row); 264 inet_twsk_deschedule(tw, death_row);
265 NET_INC_STATS_BH(twsk_net(tw), LINUX_MIB_TIMEWAITRECYCLED); 265 NET_INC_STATS_BH(net, LINUX_MIB_TIMEWAITRECYCLED);
266 266
267 inet_twsk_put(tw); 267 inet_twsk_put(tw);
268 } 268 }
diff --git a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
index c323643ffcf9..72dbb6d1a6b3 100644
--- a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
+++ b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
@@ -201,8 +201,9 @@ icmpv6_error(struct net *net, struct sk_buff *skb, unsigned int dataoff,
201 201
202 if (net->ct.sysctl_checksum && hooknum == NF_INET_PRE_ROUTING && 202 if (net->ct.sysctl_checksum && hooknum == NF_INET_PRE_ROUTING &&
203 nf_ip6_checksum(skb, hooknum, dataoff, IPPROTO_ICMPV6)) { 203 nf_ip6_checksum(skb, hooknum, dataoff, IPPROTO_ICMPV6)) {
204 nf_log_packet(PF_INET6, 0, skb, NULL, NULL, NULL, 204 if (LOG_INVALID(net, IPPROTO_ICMPV6))
205 "nf_ct_icmpv6: ICMPv6 checksum failed\n"); 205 nf_log_packet(PF_INET6, 0, skb, NULL, NULL, NULL,
206 "nf_ct_icmpv6: ICMPv6 checksum failed ");
206 return -NF_ACCEPT; 207 return -NF_ACCEPT;
207 } 208 }
208 209
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index fa49dc7fe100..c712e9fc6bba 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -39,7 +39,7 @@
39#endif 39#endif
40 40
41#define NFULNL_NLBUFSIZ_DEFAULT NLMSG_GOODSIZE 41#define NFULNL_NLBUFSIZ_DEFAULT NLMSG_GOODSIZE
42#define NFULNL_TIMEOUT_DEFAULT HZ /* every second */ 42#define NFULNL_TIMEOUT_DEFAULT 100 /* every second */
43#define NFULNL_QTHRESH_DEFAULT 100 /* 100 packets */ 43#define NFULNL_QTHRESH_DEFAULT 100 /* 100 packets */
44#define NFULNL_COPY_RANGE_MAX 0xFFFF /* max packet size is limited by 16-bit struct nfattr nfa_len field */ 44#define NFULNL_COPY_RANGE_MAX 0xFFFF /* max packet size is limited by 16-bit struct nfattr nfa_len field */
45 45
@@ -590,8 +590,10 @@ nfulnl_log_packet(u_int8_t pf,
590 590
591 qthreshold = inst->qthreshold; 591 qthreshold = inst->qthreshold;
592 /* per-rule qthreshold overrides per-instance */ 592 /* per-rule qthreshold overrides per-instance */
593 if (qthreshold > li->u.ulog.qthreshold) 593 if (li->u.ulog.qthreshold)
594 qthreshold = li->u.ulog.qthreshold; 594 if (qthreshold > li->u.ulog.qthreshold)
595 qthreshold = li->u.ulog.qthreshold;
596
595 597
596 switch (inst->copy_mode) { 598 switch (inst->copy_mode) {
597 case NFULNL_COPY_META: 599 case NFULNL_COPY_META:
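The nfulnl_log_packet() hunk above makes the per-rule queue threshold override the per-instance one only when the rule actually set it; previously an unset (zero) per-rule value clamped the effective threshold down to zero. A condensed model of that decision, with effective_qthreshold() as an illustrative name:

#include <stdio.h>

/* Per-rule threshold wins, but only when it is non-zero ("set") and
 * smaller than the per-instance value; zero means "unset". */
static unsigned int effective_qthreshold(unsigned int instance_qth,
                                         unsigned int rule_qth)
{
    unsigned int qth = instance_qth;

    if (rule_qth && qth > rule_qth)
        qth = rule_qth;
    return qth;
}

int main(void)
{
    printf("%u\n", effective_qthreshold(100, 0));   /* 100: rule unset       */
    printf("%u\n", effective_qthreshold(100, 20));  /* 20: rule overrides    */
    printf("%u\n", effective_qthreshold(10, 20));   /* 10: smaller one kept  */
    return 0;
}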
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index bfbf521f6ea5..5baccfa5a0de 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -827,59 +827,143 @@ static const struct file_operations xt_table_ops = {
827 .release = seq_release_net, 827 .release = seq_release_net,
828}; 828};
829 829
830static void *xt_match_seq_start(struct seq_file *seq, loff_t *pos) 830/*
831 * Traverse state for ip{,6}_{tables,matches} for helping crossing
832 * the multi-AF mutexes.
833 */
834struct nf_mttg_trav {
835 struct list_head *head, *curr;
836 uint8_t class, nfproto;
837};
838
839enum {
840 MTTG_TRAV_INIT,
841 MTTG_TRAV_NFP_UNSPEC,
842 MTTG_TRAV_NFP_SPEC,
843 MTTG_TRAV_DONE,
844};
845
846static void *xt_mttg_seq_next(struct seq_file *seq, void *v, loff_t *ppos,
847 bool is_target)
831{ 848{
832 struct proc_dir_entry *pde = (struct proc_dir_entry *)seq->private; 849 static const uint8_t next_class[] = {
833 u_int16_t af = (unsigned long)pde->data; 850 [MTTG_TRAV_NFP_UNSPEC] = MTTG_TRAV_NFP_SPEC,
851 [MTTG_TRAV_NFP_SPEC] = MTTG_TRAV_DONE,
852 };
853 struct nf_mttg_trav *trav = seq->private;
854
855 switch (trav->class) {
856 case MTTG_TRAV_INIT:
857 trav->class = MTTG_TRAV_NFP_UNSPEC;
858 mutex_lock(&xt[NFPROTO_UNSPEC].mutex);
859 trav->head = trav->curr = is_target ?
860 &xt[NFPROTO_UNSPEC].target : &xt[NFPROTO_UNSPEC].match;
861 break;
862 case MTTG_TRAV_NFP_UNSPEC:
863 trav->curr = trav->curr->next;
864 if (trav->curr != trav->head)
865 break;
866 mutex_unlock(&xt[NFPROTO_UNSPEC].mutex);
867 mutex_lock(&xt[trav->nfproto].mutex);
868 trav->head = trav->curr = is_target ?
869 &xt[trav->nfproto].target : &xt[trav->nfproto].match;
870 trav->class = next_class[trav->class];
871 break;
872 case MTTG_TRAV_NFP_SPEC:
873 trav->curr = trav->curr->next;
874 if (trav->curr != trav->head)
875 break;
876 /* fallthru, _stop will unlock */
877 default:
878 return NULL;
879 }
834 880
835 mutex_lock(&xt[af].mutex); 881 if (ppos != NULL)
836 return seq_list_start(&xt[af].match, *pos); 882 ++*ppos;
883 return trav;
837} 884}
838 885
839static void *xt_match_seq_next(struct seq_file *seq, void *v, loff_t *pos) 886static void *xt_mttg_seq_start(struct seq_file *seq, loff_t *pos,
887 bool is_target)
840{ 888{
841 struct proc_dir_entry *pde = (struct proc_dir_entry *)seq->private; 889 struct nf_mttg_trav *trav = seq->private;
842 u_int16_t af = (unsigned long)pde->data; 890 unsigned int j;
843 891
844 return seq_list_next(v, &xt[af].match, pos); 892 trav->class = MTTG_TRAV_INIT;
893 for (j = 0; j < *pos; ++j)
894 if (xt_mttg_seq_next(seq, NULL, NULL, is_target) == NULL)
895 return NULL;
896 return trav;
845} 897}
846 898
847static void xt_match_seq_stop(struct seq_file *seq, void *v) 899static void xt_mttg_seq_stop(struct seq_file *seq, void *v)
848{ 900{
849 struct proc_dir_entry *pde = seq->private; 901 struct nf_mttg_trav *trav = seq->private;
850 u_int16_t af = (unsigned long)pde->data; 902
903 switch (trav->class) {
904 case MTTG_TRAV_NFP_UNSPEC:
905 mutex_unlock(&xt[NFPROTO_UNSPEC].mutex);
906 break;
907 case MTTG_TRAV_NFP_SPEC:
908 mutex_unlock(&xt[trav->nfproto].mutex);
909 break;
910 }
911}
851 912
852 mutex_unlock(&xt[af].mutex); 913static void *xt_match_seq_start(struct seq_file *seq, loff_t *pos)
914{
915 return xt_mttg_seq_start(seq, pos, false);
853} 916}
854 917
855static int xt_match_seq_show(struct seq_file *seq, void *v) 918static void *xt_match_seq_next(struct seq_file *seq, void *v, loff_t *ppos)
856{ 919{
857 struct xt_match *match = list_entry(v, struct xt_match, list); 920 return xt_mttg_seq_next(seq, v, ppos, false);
921}
858 922
859 if (strlen(match->name)) 923static int xt_match_seq_show(struct seq_file *seq, void *v)
860 return seq_printf(seq, "%s\n", match->name); 924{
861 else 925 const struct nf_mttg_trav *trav = seq->private;
862 return 0; 926 const struct xt_match *match;
927
928 switch (trav->class) {
929 case MTTG_TRAV_NFP_UNSPEC:
930 case MTTG_TRAV_NFP_SPEC:
931 if (trav->curr == trav->head)
932 return 0;
933 match = list_entry(trav->curr, struct xt_match, list);
934 return (*match->name == '\0') ? 0 :
935 seq_printf(seq, "%s\n", match->name);
936 }
937 return 0;
863} 938}
864 939
865static const struct seq_operations xt_match_seq_ops = { 940static const struct seq_operations xt_match_seq_ops = {
866 .start = xt_match_seq_start, 941 .start = xt_match_seq_start,
867 .next = xt_match_seq_next, 942 .next = xt_match_seq_next,
868 .stop = xt_match_seq_stop, 943 .stop = xt_mttg_seq_stop,
869 .show = xt_match_seq_show, 944 .show = xt_match_seq_show,
870}; 945};
871 946
872static int xt_match_open(struct inode *inode, struct file *file) 947static int xt_match_open(struct inode *inode, struct file *file)
873{ 948{
949 struct seq_file *seq;
950 struct nf_mttg_trav *trav;
874 int ret; 951 int ret;
875 952
876 ret = seq_open(file, &xt_match_seq_ops); 953 trav = kmalloc(sizeof(*trav), GFP_KERNEL);
877 if (!ret) { 954 if (trav == NULL)
878 struct seq_file *seq = file->private_data; 955 return -ENOMEM;
879 956
880 seq->private = PDE(inode); 957 ret = seq_open(file, &xt_match_seq_ops);
958 if (ret < 0) {
959 kfree(trav);
960 return ret;
881 } 961 }
882 return ret; 962
963 seq = file->private_data;
964 seq->private = trav;
965 trav->nfproto = (unsigned long)PDE(inode)->data;
966 return 0;
883} 967}
884 968
885static const struct file_operations xt_match_ops = { 969static const struct file_operations xt_match_ops = {
@@ -887,62 +971,63 @@ static const struct file_operations xt_match_ops = {
887 .open = xt_match_open, 971 .open = xt_match_open,
888 .read = seq_read, 972 .read = seq_read,
889 .llseek = seq_lseek, 973 .llseek = seq_lseek,
890 .release = seq_release, 974 .release = seq_release_private,
891}; 975};
892 976
893static void *xt_target_seq_start(struct seq_file *seq, loff_t *pos) 977static void *xt_target_seq_start(struct seq_file *seq, loff_t *pos)
894{ 978{
895 struct proc_dir_entry *pde = (struct proc_dir_entry *)seq->private; 979 return xt_mttg_seq_start(seq, pos, true);
896 u_int16_t af = (unsigned long)pde->data;
897
898 mutex_lock(&xt[af].mutex);
899 return seq_list_start(&xt[af].target, *pos);
900} 980}
901 981
902static void *xt_target_seq_next(struct seq_file *seq, void *v, loff_t *pos) 982static void *xt_target_seq_next(struct seq_file *seq, void *v, loff_t *ppos)
903{ 983{
904 struct proc_dir_entry *pde = (struct proc_dir_entry *)seq->private; 984 return xt_mttg_seq_next(seq, v, ppos, true);
905 u_int16_t af = (unsigned long)pde->data;
906
907 return seq_list_next(v, &xt[af].target, pos);
908}
909
910static void xt_target_seq_stop(struct seq_file *seq, void *v)
911{
912 struct proc_dir_entry *pde = seq->private;
913 u_int16_t af = (unsigned long)pde->data;
914
915 mutex_unlock(&xt[af].mutex);
916} 985}
917 986
918static int xt_target_seq_show(struct seq_file *seq, void *v) 987static int xt_target_seq_show(struct seq_file *seq, void *v)
919{ 988{
920 struct xt_target *target = list_entry(v, struct xt_target, list); 989 const struct nf_mttg_trav *trav = seq->private;
921 990 const struct xt_target *target;
922 if (strlen(target->name)) 991
923 return seq_printf(seq, "%s\n", target->name); 992 switch (trav->class) {
924 else 993 case MTTG_TRAV_NFP_UNSPEC:
925 return 0; 994 case MTTG_TRAV_NFP_SPEC:
995 if (trav->curr == trav->head)
996 return 0;
997 target = list_entry(trav->curr, struct xt_target, list);
998 return (*target->name == '\0') ? 0 :
999 seq_printf(seq, "%s\n", target->name);
1000 }
1001 return 0;
926} 1002}
927 1003
928static const struct seq_operations xt_target_seq_ops = { 1004static const struct seq_operations xt_target_seq_ops = {
929 .start = xt_target_seq_start, 1005 .start = xt_target_seq_start,
930 .next = xt_target_seq_next, 1006 .next = xt_target_seq_next,
931 .stop = xt_target_seq_stop, 1007 .stop = xt_mttg_seq_stop,
932 .show = xt_target_seq_show, 1008 .show = xt_target_seq_show,
933}; 1009};
934 1010
935static int xt_target_open(struct inode *inode, struct file *file) 1011static int xt_target_open(struct inode *inode, struct file *file)
936{ 1012{
1013 struct seq_file *seq;
1014 struct nf_mttg_trav *trav;
937 int ret; 1015 int ret;
938 1016
939 ret = seq_open(file, &xt_target_seq_ops); 1017 trav = kmalloc(sizeof(*trav), GFP_KERNEL);
940 if (!ret) { 1018 if (trav == NULL)
941 struct seq_file *seq = file->private_data; 1019 return -ENOMEM;
942 1020
943 seq->private = PDE(inode); 1021 ret = seq_open(file, &xt_target_seq_ops);
1022 if (ret < 0) {
1023 kfree(trav);
1024 return ret;
944 } 1025 }
945 return ret; 1026
1027 seq = file->private_data;
1028 seq->private = trav;
1029 trav->nfproto = (unsigned long)PDE(inode)->data;
1030 return 0;
946} 1031}
947 1032
948static const struct file_operations xt_target_ops = { 1033static const struct file_operations xt_target_ops = {
@@ -950,7 +1035,7 @@ static const struct file_operations xt_target_ops = {
950 .open = xt_target_open, 1035 .open = xt_target_open,
951 .read = seq_read, 1036 .read = seq_read,
952 .llseek = seq_lseek, 1037 .llseek = seq_lseek,
953 .release = seq_release, 1038 .release = seq_release_private,
954}; 1039};
955 1040
956#define FORMAT_TABLES "_tables_names" 1041#define FORMAT_TABLES "_tables_names"
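The x_tables.c rewrite above replaces the per-family seq_list iterators with an explicit traversal state (struct nf_mttg_trav) that walks the NFPROTO_UNSPEC match/target list first and then the per-family list, switching mutexes at the boundary and letting _stop() unlock whichever mutex the current state implies; _start() replays *pos steps to resume. The userspace sketch below models only the state transitions: locking is reduced to comments, fixed string arrays stand in for the xt[] lists, and all names are illustrative.

#include <stddef.h>
#include <stdio.h>

enum trav_state { TRAV_INIT, TRAV_UNSPEC, TRAV_SPEC, TRAV_DONE };

struct trav {
    enum trav_state state;
    size_t idx;
};

static const char *unspec_list[] = { "comment", "limit" };
static const char *family_list[] = { "tcp", "udp", "icmp" };

static const char *trav_next(struct trav *t)
{
    switch (t->state) {
    case TRAV_INIT:
        t->state = TRAV_UNSPEC;       /* the kernel would lock the UNSPEC mutex here */
        t->idx = 0;
        break;
    case TRAV_UNSPEC:
        if (++t->idx < sizeof(unspec_list) / sizeof(unspec_list[0]))
            break;
        t->state = TRAV_SPEC;         /* cross over: unlock UNSPEC, lock the family */
        t->idx = 0;
        break;
    case TRAV_SPEC:
        if (++t->idx < sizeof(family_list) / sizeof(family_list[0]))
            break;
        t->state = TRAV_DONE;         /* _stop() would unlock the family mutex */
        return NULL;
    default:
        return NULL;
    }
    return t->state == TRAV_UNSPEC ? unspec_list[t->idx]
                                   : family_list[t->idx];
}

int main(void)
{
    struct trav t = { TRAV_INIT, 0 };
    const char *name;

    while ((name = trav_next(&t)) != NULL)
        printf("%s\n", name);
    return 0;
}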
diff --git a/net/netfilter/xt_recent.c b/net/netfilter/xt_recent.c
index fe80b614a400..791e030ea903 100644
--- a/net/netfilter/xt_recent.c
+++ b/net/netfilter/xt_recent.c
@@ -542,7 +542,7 @@ recent_mt_proc_write(struct file *file, const char __user *input,
542 struct recent_entry *e; 542 struct recent_entry *e;
543 char buf[sizeof("+b335:1d35:1e55:dead:c0de:1715:5afe:c0de")]; 543 char buf[sizeof("+b335:1d35:1e55:dead:c0de:1715:5afe:c0de")];
544 const char *c = buf; 544 const char *c = buf;
545 union nf_inet_addr addr; 545 union nf_inet_addr addr = {};
546 u_int16_t family; 546 u_int16_t family;
547 bool add, succ; 547 bool add, succ;
548 548
diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c
index f6b4fa97df70..e36e94ab4e10 100644
--- a/net/sched/sch_drr.c
+++ b/net/sched/sch_drr.c
@@ -66,11 +66,15 @@ static int drr_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
66{ 66{
67 struct drr_sched *q = qdisc_priv(sch); 67 struct drr_sched *q = qdisc_priv(sch);
68 struct drr_class *cl = (struct drr_class *)*arg; 68 struct drr_class *cl = (struct drr_class *)*arg;
69 struct nlattr *opt = tca[TCA_OPTIONS];
69 struct nlattr *tb[TCA_DRR_MAX + 1]; 70 struct nlattr *tb[TCA_DRR_MAX + 1];
70 u32 quantum; 71 u32 quantum;
71 int err; 72 int err;
72 73
73 err = nla_parse_nested(tb, TCA_DRR_MAX, tca[TCA_OPTIONS], drr_policy); 74 if (!opt)
75 return -EINVAL;
76
77 err = nla_parse_nested(tb, TCA_DRR_MAX, opt, drr_policy);
74 if (err < 0) 78 if (err < 0)
75 return err; 79 return err;
76 80
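The sch_drr change above simply refuses a change-class request whose TCA_OPTIONS attribute is absent instead of handing a NULL attribute to nla_parse_nested(). A trivial standalone model of that guard (fake_attr and parse_options() are made-up names):

#include <errno.h>
#include <stdio.h>

struct fake_attr { int len; };

static int parse_options(const struct fake_attr *opt)
{
    if (!opt)
        return -EINVAL;   /* reject a request that carries no options */
    /* ... the nla_parse_nested() equivalent would run here ... */
    return 0;
}

int main(void)
{
    printf("%d\n", parse_options(NULL));                            /* -EINVAL */
    printf("%d\n", parse_options(&(struct fake_attr){ .len = 4 })); /* 0       */
    return 0;
}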
diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib
index e06365775bdf..3b949a354470 100644
--- a/scripts/Makefile.lib
+++ b/scripts/Makefile.lib
@@ -186,3 +186,17 @@ quiet_cmd_gzip = GZIP $@
186cmd_gzip = gzip -f -9 < $< > $@ 186cmd_gzip = gzip -f -9 < $< > $@
187 187
188 188
189# Bzip2
190# ---------------------------------------------------------------------------
191
192# Bzip2 does not include size in file... so we have to fake that
193size_append=$(CONFIG_SHELL) $(srctree)/scripts/bin_size
194
195quiet_cmd_bzip2 = BZIP2 $@
196cmd_bzip2 = (bzip2 -9 < $< && $(size_append) $<) > $@ || (rm -f $@ ; false)
197
198# Lzma
199# ---------------------------------------------------------------------------
200
201quiet_cmd_lzma = LZMA $@
202cmd_lzma = (lzma -9 -c $< && $(size_append) $<) >$@ || (rm -f $@ ; false)
diff --git a/scripts/bin_size b/scripts/bin_size
new file mode 100644
index 000000000000..43e1b360cee6
--- /dev/null
+++ b/scripts/bin_size
@@ -0,0 +1,10 @@
1#!/bin/sh
2
3if [ $# = 0 ] ; then
4 echo Usage: $0 file
5fi
6
7size_dec=`stat -c "%s" $1`
8size_hex_echo_string=`printf "%08x" $size_dec |
9 sed 's/\(..\)\(..\)\(..\)\(..\)/\\\\x\4\\\\x\3\\\\x\2\\\\x\1/g'`
10/bin/echo -ne $size_hex_echo_string
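scripts/bin_size, added above, appends the input file's size as four raw little-endian bytes after the compressed stream; as the Makefile.lib comment notes, bzip2 does not store that length itself, so it is carried out of band (and the same helper is reused by cmd_lzma). The stat/printf/sed pipeline is equivalent to this small C routine, where append_le32_size() is an illustrative name:

#include <stdint.h>
#include <stdio.h>

static void append_le32_size(FILE *out, uint32_t size)
{
    unsigned char b[4] = {
        size & 0xff,            /* least significant byte first */
        (size >> 8) & 0xff,
        (size >> 16) & 0xff,
        (size >> 24) & 0xff,
    };

    fwrite(b, 1, sizeof(b), out);
}

int main(void)
{
    append_le32_size(stdout, 0x00123456u);  /* emits bytes 56 34 12 00 */
    return 0;
}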
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index 45eb0ae98eba..2d5ece798c4c 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -10,7 +10,7 @@ use strict;
10my $P = $0; 10my $P = $0;
11$P =~ s@.*/@@g; 11$P =~ s@.*/@@g;
12 12
13my $V = '0.27'; 13my $V = '0.28';
14 14
15use Getopt::Long qw(:config no_auto_abbrev); 15use Getopt::Long qw(:config no_auto_abbrev);
16 16
@@ -110,7 +110,8 @@ our $Sparse = qr{
110 __iomem| 110 __iomem|
111 __must_check| 111 __must_check|
112 __init_refok| 112 __init_refok|
113 __kprobes 113 __kprobes|
114 __ref
114 }x; 115 }x;
115our $Attribute = qr{ 116our $Attribute = qr{
116 const| 117 const|
@@ -1240,7 +1241,8 @@ sub process {
1240 $realfile =~ s@^([^/]*)/@@; 1241 $realfile =~ s@^([^/]*)/@@;
1241 1242
1242 $p1_prefix = $1; 1243 $p1_prefix = $1;
1243 if ($tree && $p1_prefix ne '' && -e "$root/$p1_prefix") { 1244 if (!$file && $tree && $p1_prefix ne '' &&
1245 -e "$root/$p1_prefix") {
1244 WARN("patch prefix '$p1_prefix' exists, appears to be a -p0 patch\n"); 1246 WARN("patch prefix '$p1_prefix' exists, appears to be a -p0 patch\n");
1245 } 1247 }
1246 1248
@@ -1583,9 +1585,9 @@ sub process {
1583 } 1585 }
1584# TEST: allow direct testing of the attribute matcher. 1586# TEST: allow direct testing of the attribute matcher.
1585 if ($dbg_attr) { 1587 if ($dbg_attr) {
1586 if ($line =~ /^.\s*$Attribute\s*$/) { 1588 if ($line =~ /^.\s*$Modifier\s*$/) {
1587 ERROR("TEST: is attr\n" . $herecurr); 1589 ERROR("TEST: is attr\n" . $herecurr);
1588 } elsif ($dbg_attr > 1 && $line =~ /^.+($Attribute)/) { 1590 } elsif ($dbg_attr > 1 && $line =~ /^.+($Modifier)/) {
1589 ERROR("TEST: is not attr ($1 is)\n". $herecurr); 1591 ERROR("TEST: is not attr ($1 is)\n". $herecurr);
1590 } 1592 }
1591 next; 1593 next;
@@ -1657,7 +1659,7 @@ sub process {
1657 1659
1658# * goes on variable not on type 1660# * goes on variable not on type
1659 # (char*[ const]) 1661 # (char*[ const])
1660 if ($line =~ m{\($NonptrType(\s*\*[\s\*]*(?:$Modifier\s*)*)\)}) { 1662 if ($line =~ m{\($NonptrType(\s*(?:$Modifier\b\s*|\*\s*)+)\)}) {
1661 my ($from, $to) = ($1, $1); 1663 my ($from, $to) = ($1, $1);
1662 1664
1663 # Should start with a space. 1665 # Should start with a space.
@@ -1672,7 +1674,7 @@ sub process {
1672 if ($from ne $to) { 1674 if ($from ne $to) {
1673 ERROR("\"(foo$from)\" should be \"(foo$to)\"\n" . $herecurr); 1675 ERROR("\"(foo$from)\" should be \"(foo$to)\"\n" . $herecurr);
1674 } 1676 }
1675 } elsif ($line =~ m{\b$NonptrType(\s*\*[\s\*]*(?:$Modifier\s*)?)($Ident)}) { 1677 } elsif ($line =~ m{\b$NonptrType(\s*(?:$Modifier\b\s*|\*\s*)+)($Ident)}) {
1676 my ($from, $to, $ident) = ($1, $1, $2); 1678 my ($from, $to, $ident) = ($1, $1, $2);
1677 1679
1678 # Should start with a space. 1680 # Should start with a space.
@@ -1685,8 +1687,8 @@ sub process {
1685 # Modifiers should have spaces. 1687 # Modifiers should have spaces.
1686 $to =~ s/(\b$Modifier$)/$1 /; 1688 $to =~ s/(\b$Modifier$)/$1 /;
1687 1689
1688 #print "from<$from> to<$to>\n"; 1690 #print "from<$from> to<$to> ident<$ident>\n";
1689 if ($from ne $to) { 1691 if ($from ne $to && $ident !~ /^$Modifier$/) {
1690 ERROR("\"foo${from}bar\" should be \"foo${to}bar\"\n" . $herecurr); 1692 ERROR("\"foo${from}bar\" should be \"foo${to}bar\"\n" . $herecurr);
1691 } 1693 }
1692 } 1694 }
@@ -1885,11 +1887,11 @@ sub process {
1885 if ($ctx !~ /[WEBC]x./ && $ca !~ /(?:\)|!|~|\*|-|\&|\||\+\+|\-\-|\{)$/) { 1887 if ($ctx !~ /[WEBC]x./ && $ca !~ /(?:\)|!|~|\*|-|\&|\||\+\+|\-\-|\{)$/) {
1886 ERROR("space required before that '$op' $at\n" . $hereptr); 1888 ERROR("space required before that '$op' $at\n" . $hereptr);
1887 } 1889 }
1888 if ($op eq '*' && $cc =~/\s*const\b/) { 1890 if ($op eq '*' && $cc =~/\s*$Modifier\b/) {
1889 # A unary '*' may be const 1891 # A unary '*' may be const
1890 1892
1891 } elsif ($ctx =~ /.xW/) { 1893 } elsif ($ctx =~ /.xW/) {
1892 ERROR("space prohibited after that '$op' $at\n" . $hereptr); 1894 ERROR("space prohibited after that '$op' $at\n" . $hereptr);
1893 } 1895 }
1894 1896
1895 # unary ++ and unary -- are allowed no space on one side. 1897 # unary ++ and unary -- are allowed no space on one side.
@@ -2560,7 +2562,7 @@ sub process {
2560 if ($line =~ /\bin_atomic\s*\(/) { 2562 if ($line =~ /\bin_atomic\s*\(/) {
2561 if ($realfile =~ m@^drivers/@) { 2563 if ($realfile =~ m@^drivers/@) {
2562 ERROR("do not use in_atomic in drivers\n" . $herecurr); 2564 ERROR("do not use in_atomic in drivers\n" . $herecurr);
2563 } else { 2565 } elsif ($realfile !~ m@^kernel/@) {
2564 WARN("use of in_atomic() is incorrect outside core kernel code\n" . $herecurr); 2566 WARN("use of in_atomic() is incorrect outside core kernel code\n" . $herecurr);
2565 } 2567 }
2566 } 2568 }
diff --git a/scripts/gen_initramfs_list.sh b/scripts/gen_initramfs_list.sh
index 5f3415f28736..3eea8f15131b 100644
--- a/scripts/gen_initramfs_list.sh
+++ b/scripts/gen_initramfs_list.sh
@@ -5,7 +5,7 @@
5# Released under the terms of the GNU GPL 5# Released under the terms of the GNU GPL
6# 6#
7# Generate a cpio packed initramfs. It uses gen_init_cpio to generate 7# Generate a cpio packed initramfs. It uses gen_init_cpio to generate
8# the cpio archive, and gzip to pack it. 8# the cpio archive, and then compresses it.
9# The script may also be used to generate the inputfile used for gen_init_cpio 9# The script may also be used to generate the inputfile used for gen_init_cpio
10# This script assumes that gen_init_cpio is located in usr/ directory 10# This script assumes that gen_init_cpio is located in usr/ directory
11 11
@@ -16,8 +16,8 @@ usage() {
16cat << EOF 16cat << EOF
17Usage: 17Usage:
18$0 [-o <file>] [-u <uid>] [-g <gid>] {-d | <cpio_source>} ... 18$0 [-o <file>] [-u <uid>] [-g <gid>] {-d | <cpio_source>} ...
19 -o <file> Create gzipped initramfs file named <file> using 19 -o <file> Create compressed initramfs file named <file> using
20 gen_init_cpio and gzip 20 gen_init_cpio and compressor depending on the extension
21 -u <uid> User ID to map to user ID 0 (root). 21 -u <uid> User ID to map to user ID 0 (root).
22 <uid> is only meaningful if <cpio_source> is a 22 <uid> is only meaningful if <cpio_source> is a
23 directory. "squash" forces all files to uid 0. 23 directory. "squash" forces all files to uid 0.
@@ -225,6 +225,7 @@ cpio_list=
225output="/dev/stdout" 225output="/dev/stdout"
226output_file="" 226output_file=""
227is_cpio_compressed= 227is_cpio_compressed=
228compr="gzip -9 -f"
228 229
229arg="$1" 230arg="$1"
230case "$arg" in 231case "$arg" in
@@ -233,11 +234,15 @@ case "$arg" in
233 echo "deps_initramfs := \\" 234 echo "deps_initramfs := \\"
234 shift 235 shift
235 ;; 236 ;;
236 "-o") # generate gzipped cpio image named $1 237 "-o") # generate compressed cpio image named $1
237 shift 238 shift
238 output_file="$1" 239 output_file="$1"
239 cpio_list="$(mktemp ${TMPDIR:-/tmp}/cpiolist.XXXXXX)" 240 cpio_list="$(mktemp ${TMPDIR:-/tmp}/cpiolist.XXXXXX)"
240 output=${cpio_list} 241 output=${cpio_list}
242 echo "$output_file" | grep -q "\.gz$" && compr="gzip -9 -f"
243 echo "$output_file" | grep -q "\.bz2$" && compr="bzip2 -9 -f"
244 echo "$output_file" | grep -q "\.lzma$" && compr="lzma -9 -f"
245 echo "$output_file" | grep -q "\.cpio$" && compr="cat"
241 shift 246 shift
242 ;; 247 ;;
243esac 248esac
@@ -274,7 +279,7 @@ while [ $# -gt 0 ]; do
274 esac 279 esac
275done 280done
276 281
277# If output_file is set we will generate cpio archive and gzip it 282# If output_file is set we will generate cpio archive and compress it
278# we are careful to delete tmp files 283# we are careful to delete tmp files
279if [ ! -z ${output_file} ]; then 284if [ ! -z ${output_file} ]; then
280 if [ -z ${cpio_file} ]; then 285 if [ -z ${cpio_file} ]; then
@@ -287,7 +292,8 @@ if [ ! -z ${output_file} ]; then
287 if [ "${is_cpio_compressed}" = "compressed" ]; then 292 if [ "${is_cpio_compressed}" = "compressed" ]; then
288 cat ${cpio_tfile} > ${output_file} 293 cat ${cpio_tfile} > ${output_file}
289 else 294 else
290 cat ${cpio_tfile} | gzip -f -9 - > ${output_file} 295 (cat ${cpio_tfile} | ${compr} - > ${output_file}) \
296 || (rm -f ${output_file} ; false)
291 fi 297 fi
292 [ -z ${cpio_file} ] && rm ${cpio_tfile} 298 [ -z ${cpio_file} ] && rm ${cpio_tfile}
293fi 299fi
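gen_initramfs_list.sh now picks the compressor from the -o file name's extension (.gz, .bz2, .lzma, or a plain .cpio for no compression), defaulting to gzip. The same selection logic, condensed into C purely for illustration (pick_compressor() is a made-up helper):

#include <stdio.h>
#include <string.h>

static const char *pick_compressor(const char *output)
{
    static const struct { const char *suffix, *cmd; } map[] = {
        { ".gz",   "gzip -9 -f"  },
        { ".bz2",  "bzip2 -9 -f" },
        { ".lzma", "lzma -9 -f"  },
        { ".cpio", "cat"         },
    };
    size_t i, olen = strlen(output);

    for (i = 0; i < sizeof(map) / sizeof(map[0]); i++) {
        size_t slen = strlen(map[i].suffix);

        if (olen >= slen && !strcmp(output + olen - slen, map[i].suffix))
            return map[i].cmd;
    }
    return "gzip -9 -f";  /* default, as in the script */
}

int main(void)
{
    printf("%s\n", pick_compressor("initramfs_data.cpio.lzma"));  /* lzma -9 -f */
    printf("%s\n", pick_compressor("initramfs_data.cpio"));       /* cat        */
    return 0;
}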
diff --git a/security/selinux/netlabel.c b/security/selinux/netlabel.c
index 3f4b26647386..350794ab9b42 100644
--- a/security/selinux/netlabel.c
+++ b/security/selinux/netlabel.c
@@ -386,11 +386,12 @@ int selinux_netlbl_inode_permission(struct inode *inode, int mask)
386 if (!S_ISSOCK(inode->i_mode) || 386 if (!S_ISSOCK(inode->i_mode) ||
387 ((mask & (MAY_WRITE | MAY_APPEND)) == 0)) 387 ((mask & (MAY_WRITE | MAY_APPEND)) == 0))
388 return 0; 388 return 0;
389
390 sock = SOCKET_I(inode); 389 sock = SOCKET_I(inode);
391 sk = sock->sk; 390 sk = sock->sk;
391 if (sk == NULL)
392 return 0;
392 sksec = sk->sk_security; 393 sksec = sk->sk_security;
393 if (sksec->nlbl_state != NLBL_REQUIRE) 394 if (sksec == NULL || sksec->nlbl_state != NLBL_REQUIRE)
394 return 0; 395 return 0;
395 396
396 local_bh_disable(); 397 local_bh_disable();
diff --git a/sound/core/oss/rate.c b/sound/core/oss/rate.c
index a466443c4a26..2fa9299a440d 100644
--- a/sound/core/oss/rate.c
+++ b/sound/core/oss/rate.c
@@ -157,7 +157,7 @@ static void resample_shrink(struct snd_pcm_plugin *plugin,
157 while (dst_frames1 > 0) { 157 while (dst_frames1 > 0) {
158 S1 = S2; 158 S1 = S2;
159 if (src_frames1-- > 0) { 159 if (src_frames1-- > 0) {
160 S1 = *src; 160 S2 = *src;
161 src += src_step; 161 src += src_step;
162 } 162 }
163 if (pos & ~R_MASK) { 163 if (pos & ~R_MASK) {
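The one-character fix in resample_shrink() above matters because the loop is meant to shift the previously read sample into S1 and load the freshly read input sample into S2; writing the new sample into S1 left S2 stale. The toy 2:1 decimator below only mirrors that S1/S2 hand-off pattern and does not reproduce the ALSA plugin's fixed-point position tracking.

#include <stdio.h>

static void shrink_by_two(const short *src, int src_frames,
                          short *dst, int dst_frames)
{
    short S1 = 0, S2 = 0;
    int out;

    for (out = 0; out < dst_frames; out++) {
        S1 = S2;                  /* previous sample moves into S1        */
        if (src_frames-- > 0)
            S2 = *src++;          /* the fix: the new sample goes into S2 */
        if (src_frames-- > 0) {
            S1 = S2;
            S2 = *src++;
        }
        dst[out] = (short)((S1 + S2) / 2);  /* average of neighbouring samples */
    }
}

int main(void)
{
    const short in[8] = { 0, 10, 20, 30, 40, 50, 60, 70 };
    short out[4];
    int i;

    shrink_by_two(in, 8, out, 4);
    for (i = 0; i < 4; i++)
        printf("%d ", out[i]);
    printf("\n");
    return 0;
}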
diff --git a/sound/pci/aw2/aw2-alsa.c b/sound/pci/aw2/aw2-alsa.c
index 3f00ddf450f8..c7c54e7748e9 100644
--- a/sound/pci/aw2/aw2-alsa.c
+++ b/sound/pci/aw2/aw2-alsa.c
@@ -165,7 +165,7 @@ module_param_array(enable, bool, NULL, 0444);
165MODULE_PARM_DESC(enable, "Enable Audiowerk2 soundcard."); 165MODULE_PARM_DESC(enable, "Enable Audiowerk2 soundcard.");
166 166
167static struct pci_device_id snd_aw2_ids[] = { 167static struct pci_device_id snd_aw2_ids[] = {
168 {PCI_VENDOR_ID_SAA7146, PCI_DEVICE_ID_SAA7146, PCI_ANY_ID, PCI_ANY_ID, 168 {PCI_VENDOR_ID_SAA7146, PCI_DEVICE_ID_SAA7146, 0, 0,
169 0, 0, 0}, 169 0, 0, 0},
170 {0} 170 {0}
171}; 171};
diff --git a/sound/pci/emu10k1/emu10k1_main.c b/sound/pci/emu10k1/emu10k1_main.c
index 7958006a1d66..101a1c13a20d 100644
--- a/sound/pci/emu10k1/emu10k1_main.c
+++ b/sound/pci/emu10k1/emu10k1_main.c
@@ -1528,6 +1528,7 @@ static struct snd_emu_chip_details emu_chip_details[] = {
1528 .ca0151_chip = 1, 1528 .ca0151_chip = 1,
1529 .spk71 = 1, 1529 .spk71 = 1,
1530 .spdif_bug = 1, 1530 .spdif_bug = 1,
1531 .invert_shared_spdif = 1, /* digital/analog switch swapped */
1531 .ac97_chip = 1} , 1532 .ac97_chip = 1} ,
1532 {.vendor = 0x1102, .device = 0x0004, .subsystem = 0x10021102, 1533 {.vendor = 0x1102, .device = 0x0004, .subsystem = 0x10021102,
1533 .driver = "Audigy2", .name = "SB Audigy 2 Platinum [SB0240P]", 1534 .driver = "Audigy2", .name = "SB Audigy 2 Platinum [SB0240P]",
diff --git a/sound/pci/hda/hda_hwdep.c b/sound/pci/hda/hda_hwdep.c
index 482fb0304ca9..4ae51dcb81af 100644
--- a/sound/pci/hda/hda_hwdep.c
+++ b/sound/pci/hda/hda_hwdep.c
@@ -277,18 +277,19 @@ static ssize_t init_verbs_store(struct device *dev,
277{ 277{
278 struct snd_hwdep *hwdep = dev_get_drvdata(dev); 278 struct snd_hwdep *hwdep = dev_get_drvdata(dev);
279 struct hda_codec *codec = hwdep->private_data; 279 struct hda_codec *codec = hwdep->private_data;
280 char *p; 280 struct hda_verb *v;
281 struct hda_verb verb, *v; 281 int nid, verb, param;
282 282
283 verb.nid = simple_strtoul(buf, &p, 0); 283 if (sscanf(buf, "%i %i %i", &nid, &verb, &param) != 3)
284 verb.verb = simple_strtoul(p, &p, 0); 284 return -EINVAL;
285 verb.param = simple_strtoul(p, &p, 0); 285 if (!nid || !verb)
286 if (!verb.nid || !verb.verb || !verb.param)
287 return -EINVAL; 286 return -EINVAL;
288 v = snd_array_new(&codec->init_verbs); 287 v = snd_array_new(&codec->init_verbs);
289 if (!v) 288 if (!v)
290 return -ENOMEM; 289 return -ENOMEM;
291 *v = verb; 290 v->nid = nid;
291 v->verb = verb;
292 v->param = param;
292 return count; 293 return count;
293} 294}
294 295
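The init_verbs_store() rewrite above drops the chained simple_strtoul() calls, which cannot tell how many fields the user actually supplied, in favour of one sscanf() whose return value confirms that nid, verb and param were all present ("%i" also accepts hex input). A userspace sketch of that parse; parse_verb() is an illustrative name:

#include <stdio.h>

static int parse_verb(const char *buf, int *nid, int *verb, int *param)
{
    if (sscanf(buf, "%i %i %i", nid, verb, param) != 3)
        return -1;               /* not enough fields supplied       */
    if (!*nid || !*verb)
        return -1;               /* nid/verb of 0 are still rejected */
    return 0;
}

int main(void)
{
    int nid, verb, param;

    printf("%d\n", parse_verb("0x14 0x707 0x40", &nid, &verb, &param)); /* 0  */
    printf("%d\n", parse_verb("0x14 0x707", &nid, &verb, &param));      /* -1 */
    return 0;
}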
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index c8d9178f47e5..5e909e0da04b 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -2095,6 +2095,8 @@ static struct snd_pci_quirk probe_mask_list[] __devinitdata = {
2095 SND_PCI_QUIRK(0x1028, 0x20ac, "Dell Studio Desktop", 0x01), 2095 SND_PCI_QUIRK(0x1028, 0x20ac, "Dell Studio Desktop", 0x01),
2096 /* including bogus ALC268 in slot#2 that conflicts with ALC888 */ 2096 /* including bogus ALC268 in slot#2 that conflicts with ALC888 */
2097 SND_PCI_QUIRK(0x17c0, 0x4085, "Medion MD96630", 0x01), 2097 SND_PCI_QUIRK(0x17c0, 0x4085, "Medion MD96630", 0x01),
2098 /* conflict of ALC268 in slot#3 (digital I/O); a temporary fix */
2099 SND_PCI_QUIRK(0x1179, 0xff00, "Toshiba laptop", 0x03),
2098 {} 2100 {}
2099}; 2101};
2100 2102
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index ed8fcbd60003..6c26afcb8262 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -7017,6 +7017,7 @@ static int patch_alc882(struct hda_codec *codec)
7017 case 0x106b3e00: /* iMac 24 Aluminium */ 7017 case 0x106b3e00: /* iMac 24 Aluminium */
7018 board_config = ALC885_IMAC24; 7018 board_config = ALC885_IMAC24;
7019 break; 7019 break;
7020 case 0x106b00a0: /* MacBookPro3,1 - Another revision */
7020 case 0x106b00a1: /* Macbook (might be wrong - PCI SSID?) */ 7021 case 0x106b00a1: /* Macbook (might be wrong - PCI SSID?) */
7021 case 0x106b00a4: /* MacbookPro4,1 */ 7022 case 0x106b00a4: /* MacbookPro4,1 */
7022 case 0x106b2c00: /* Macbook Pro rev3 */ 7023 case 0x106b2c00: /* Macbook Pro rev3 */
@@ -8469,6 +8470,8 @@ static struct snd_pci_quirk alc883_cfg_tbl[] = {
8469 ALC888_ACER_ASPIRE_4930G), 8470 ALC888_ACER_ASPIRE_4930G),
8470 SND_PCI_QUIRK(0x1025, 0x015e, "Acer Aspire 6930G", 8471 SND_PCI_QUIRK(0x1025, 0x015e, "Acer Aspire 6930G",
8471 ALC888_ACER_ASPIRE_4930G), 8472 ALC888_ACER_ASPIRE_4930G),
8473 SND_PCI_QUIRK(0x1025, 0x0166, "Acer Aspire 6530G",
8474 ALC888_ACER_ASPIRE_4930G),
8472 SND_PCI_QUIRK(0x1025, 0, "Acer laptop", ALC883_ACER), /* default Acer */ 8475 SND_PCI_QUIRK(0x1025, 0, "Acer laptop", ALC883_ACER), /* default Acer */
8473 SND_PCI_QUIRK(0x1028, 0x020d, "Dell Inspiron 530", ALC888_6ST_DELL), 8476 SND_PCI_QUIRK(0x1028, 0x020d, "Dell Inspiron 530", ALC888_6ST_DELL),
8474 SND_PCI_QUIRK(0x103c, 0x2a3d, "HP Pavillion", ALC883_6ST_DIG), 8477 SND_PCI_QUIRK(0x103c, 0x2a3d, "HP Pavillion", ALC883_6ST_DIG),
@@ -10554,6 +10557,7 @@ static struct snd_pci_quirk alc262_cfg_tbl[] = {
10554 SND_PCI_QUIRK(0x103c, 0x1309, "HP xw4*00", ALC262_HP_BPC), 10557 SND_PCI_QUIRK(0x103c, 0x1309, "HP xw4*00", ALC262_HP_BPC),
10555 SND_PCI_QUIRK(0x103c, 0x130a, "HP xw6*00", ALC262_HP_BPC), 10558 SND_PCI_QUIRK(0x103c, 0x130a, "HP xw6*00", ALC262_HP_BPC),
10556 SND_PCI_QUIRK(0x103c, 0x130b, "HP xw8*00", ALC262_HP_BPC), 10559 SND_PCI_QUIRK(0x103c, 0x130b, "HP xw8*00", ALC262_HP_BPC),
10560 SND_PCI_QUIRK(0x103c, 0x170b, "HP xw*", ALC262_HP_BPC),
10557 SND_PCI_QUIRK(0x103c, 0x2800, "HP D7000", ALC262_HP_BPC_D7000_WL), 10561 SND_PCI_QUIRK(0x103c, 0x2800, "HP D7000", ALC262_HP_BPC_D7000_WL),
10558 SND_PCI_QUIRK(0x103c, 0x2801, "HP D7000", ALC262_HP_BPC_D7000_WF), 10562 SND_PCI_QUIRK(0x103c, 0x2801, "HP D7000", ALC262_HP_BPC_D7000_WF),
10559 SND_PCI_QUIRK(0x103c, 0x2802, "HP D7000", ALC262_HP_BPC_D7000_WL), 10563 SND_PCI_QUIRK(0x103c, 0x2802, "HP D7000", ALC262_HP_BPC_D7000_WL),
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
index 8027edf3c8f2..3bc427645da8 100644
--- a/sound/pci/hda/patch_sigmatel.c
+++ b/sound/pci/hda/patch_sigmatel.c
@@ -4989,7 +4989,7 @@ again:
4989 case STAC_DELL_M4_3: 4989 case STAC_DELL_M4_3:
4990 spec->num_dmics = 1; 4990 spec->num_dmics = 1;
4991 spec->num_smuxes = 0; 4991 spec->num_smuxes = 0;
4992 spec->num_dmuxes = 0; 4992 spec->num_dmuxes = 1;
4993 break; 4993 break;
4994 default: 4994 default:
4995 spec->num_dmics = STAC92HD71BXX_NUM_DMICS; 4995 spec->num_dmics = STAC92HD71BXX_NUM_DMICS;
diff --git a/sound/pci/pcxhr/pcxhr.h b/sound/pci/pcxhr/pcxhr.h
index 84131a916c92..69d87dee6995 100644
--- a/sound/pci/pcxhr/pcxhr.h
+++ b/sound/pci/pcxhr/pcxhr.h
@@ -97,12 +97,12 @@ struct pcxhr_mgr {
97 int capture_chips; 97 int capture_chips;
98 int fw_file_set; 98 int fw_file_set;
99 int firmware_num; 99 int firmware_num;
100 int is_hr_stereo:1; 100 unsigned int is_hr_stereo:1;
101 int board_has_aes1:1; /* if 1 board has AES1 plug and SRC */ 101 unsigned int board_has_aes1:1; /* if 1 board has AES1 plug and SRC */
102 int board_has_analog:1; /* if 0 the board is digital only */ 102 unsigned int board_has_analog:1; /* if 0 the board is digital only */
103 int board_has_mic:1; /* if 1 the board has microphone input */ 103 unsigned int board_has_mic:1; /* if 1 the board has microphone input */
104 int board_aes_in_192k:1;/* if 1 the aes input plugs do support 192kHz */ 104 unsigned int board_aes_in_192k:1;/* if 1 the aes input plugs do support 192kHz */
105 int mono_capture:1; /* if 1 the board does mono capture */ 105 unsigned int mono_capture:1; /* if 1 the board does mono capture */
106 106
107 struct snd_dma_buffer hostport; 107 struct snd_dma_buffer hostport;
108 108
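The pcxhr.h hunk above turns the one-bit flags from plain int into unsigned int bit-fields. The signedness of a plain int bit-field is implementation-defined, and GCC treats it as signed by default, so a signed 1-bit field can only hold 0 and -1: storing 1 reads back as -1 and comparisons against 1 silently fail. A minimal demonstration (the struct names are made up):

#include <stdio.h>

struct flags_signed   { int          mono_capture:1; };
struct flags_unsigned { unsigned int mono_capture:1; };

int main(void)
{
    int one = 1;
    struct flags_signed   s;
    struct flags_unsigned u;

    s.mono_capture = one;   /* a signed 1-bit field can only represent 0 and -1 */
    u.mono_capture = one;

    printf("signed bit-field reads back as %d\n", s.mono_capture);    /* -1 on gcc */
    printf("unsigned bit-field reads back as %u\n", u.mono_capture);  /*  1        */
    printf("signed == 1? %s\n", s.mono_capture == 1 ? "yes" : "no");
    return 0;
}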
diff --git a/usr/Kconfig b/usr/Kconfig
index 86cecb59dd07..43a3a0fe8f29 100644
--- a/usr/Kconfig
+++ b/usr/Kconfig
@@ -44,3 +44,92 @@ config INITRAMFS_ROOT_GID
44 owned by group root in the initial ramdisk image. 44 owned by group root in the initial ramdisk image.
45 45
46 If you are not sure, leave it set to "0". 46 If you are not sure, leave it set to "0".
47
48config RD_GZIP
49 bool "Initial ramdisk compressed using gzip"
50 default y
51 depends on BLK_DEV_INITRD=y
52 select DECOMPRESS_GZIP
53 help
54 Support loading of a gzip encoded initial ramdisk or cpio buffer.
55 If unsure, say Y.
56
57config RD_BZIP2
58 bool "Initial ramdisk compressed using bzip2"
59 default n
60 depends on BLK_DEV_INITRD=y
61 select DECOMPRESS_BZIP2
62 help
63 Support loading of a bzip2 encoded initial ramdisk or cpio buffer
64 If unsure, say N.
65
66config RD_LZMA
67 bool "Initial ramdisk compressed using lzma"
68 default n
69 depends on BLK_DEV_INITRD=y
70 select DECOMPRESS_LZMA
71 help
72 Support loading of a lzma encoded initial ramdisk or cpio buffer
73 If unsure, say N.
74
75choice
76 prompt "Built-in initramfs compression mode"
77 help
78 This setting is only meaningful if the INITRAMFS_SOURCE is
79 set. It decides by which algorithm the INITRAMFS_SOURCE will
80 be compressed.
81 Several compression algorithms are available, which differ
82 in efficiency, compression and decompression speed.
83 Compression speed is only relevant when building a kernel.
84 Decompression speed is relevant at each boot.
85
86 If you have any problems with bzip2 or lzma compressed
87 initramfs, mail me (Alain Knaff) <alain@knaff.lu>.
88
89 High compression options are mostly useful for users who
90 are low on disk space (embedded systems), but for whom ram
91 size matters less.
92
93 If in doubt, select 'gzip'
94
95config INITRAMFS_COMPRESSION_NONE
96 bool "None"
97 help
98 Do not compress the built-in initramfs at all. This may
99 sound wasteful in space, but, you should be aware that the
100 built-in initramfs will be compressed at a later stage
101 anyways along with the rest of the kernel, on those
102 architectures that support this.
103 However, not compressing the initramfs may lead to slightly
104 higher memory consumption during a short time at boot, while
105 both the cpio image and the unpacked filesystem image will
106 be present in memory simultaneously
107
108config INITRAMFS_COMPRESSION_GZIP
109 bool "Gzip"
110 depends on RD_GZIP
111 help
112 The old and tried gzip compression. Its compression ratio is
113 the poorest among the 3 choices; however its speed (both
114 compression and decompression) is the fastest.
115
116config INITRAMFS_COMPRESSION_BZIP2
117 bool "Bzip2"
118 depends on RD_BZIP2
119 help
120 Its compression ratio and speed is intermediate.
121 Decompression speed is slowest among the three. The initramfs
122 size is about 10% smaller with bzip2, in comparison to gzip.
123 Bzip2 uses a large amount of memory. For modern kernels you
124 will need at least 8MB RAM or more for booting.
125
126config INITRAMFS_COMPRESSION_LZMA
127 bool "LZMA"
128 depends on RD_LZMA
129 help
130 The most recent compression algorithm.
131 Its ratio is best, decompression speed is between the other
132 two. Compression is slowest. The initramfs size is about 33%
133 smaller with LZMA in comparison to gzip.
134
135endchoice
diff --git a/usr/Makefile b/usr/Makefile
index 201f27f8cbaf..b84894b3929d 100644
--- a/usr/Makefile
+++ b/usr/Makefile
@@ -6,13 +6,25 @@ klibcdirs:;
6PHONY += klibcdirs 6PHONY += klibcdirs
7 7
8 8
9# No compression
10suffix_$(CONFIG_INITRAMFS_COMPRESSION_NONE) =
11
12# Gzip, but no bzip2
13suffix_$(CONFIG_INITRAMFS_COMPRESSION_GZIP) = .gz
14
15# Bzip2
16suffix_$(CONFIG_INITRAMFS_COMPRESSION_BZIP2) = .bz2
17
18# Lzma
19suffix_$(CONFIG_INITRAMFS_COMPRESSION_LZMA) = .lzma
20
9# Generate builtin.o based on initramfs_data.o 21# Generate builtin.o based on initramfs_data.o
10obj-$(CONFIG_BLK_DEV_INITRD) := initramfs_data.o 22obj-$(CONFIG_BLK_DEV_INITRD) := initramfs_data$(suffix_y).o
11 23
12# initramfs_data.o contains the initramfs_data.cpio.gz image. 24# initramfs_data.o contains the compressed initramfs_data.cpio image.
13# The image is included using .incbin, a dependency which is not 25# The image is included using .incbin, a dependency which is not
14# tracked automatically. 26# tracked automatically.
15$(obj)/initramfs_data.o: $(obj)/initramfs_data.cpio.gz FORCE 27$(obj)/initramfs_data$(suffix_y).o: $(obj)/initramfs_data.cpio$(suffix_y) FORCE
16 28
17##### 29#####
18# Generate the initramfs cpio archive 30# Generate the initramfs cpio archive
@@ -25,28 +37,28 @@ ramfs-args := \
25 $(if $(CONFIG_INITRAMFS_ROOT_UID), -u $(CONFIG_INITRAMFS_ROOT_UID)) \ 37 $(if $(CONFIG_INITRAMFS_ROOT_UID), -u $(CONFIG_INITRAMFS_ROOT_UID)) \
26 $(if $(CONFIG_INITRAMFS_ROOT_GID), -g $(CONFIG_INITRAMFS_ROOT_GID)) 38 $(if $(CONFIG_INITRAMFS_ROOT_GID), -g $(CONFIG_INITRAMFS_ROOT_GID))
27 39
28# .initramfs_data.cpio.gz.d is used to identify all files included 40# .initramfs_data.cpio.d is used to identify all files included
29# in initramfs and to detect if any files are added/removed. 41# in initramfs and to detect if any files are added/removed.
30# Removed files are identified by directory timestamp being updated 42# Removed files are identified by directory timestamp being updated
31# The dependency list is generated by gen_initramfs.sh -l 43# The dependency list is generated by gen_initramfs.sh -l
32ifneq ($(wildcard $(obj)/.initramfs_data.cpio.gz.d),) 44ifneq ($(wildcard $(obj)/.initramfs_data.cpio.d),)
33 include $(obj)/.initramfs_data.cpio.gz.d 45 include $(obj)/.initramfs_data.cpio.d
34endif 46endif
35 47
36quiet_cmd_initfs = GEN $@ 48quiet_cmd_initfs = GEN $@
37 cmd_initfs = $(initramfs) -o $@ $(ramfs-args) $(ramfs-input) 49 cmd_initfs = $(initramfs) -o $@ $(ramfs-args) $(ramfs-input)
38 50
39targets := initramfs_data.cpio.gz 51targets := initramfs_data.cpio.gz initramfs_data.cpio.bz2 initramfs_data.cpio.lzma initramfs_data.cpio
40# do not try to update files included in initramfs 52# do not try to update files included in initramfs
41$(deps_initramfs): ; 53$(deps_initramfs): ;
42 54
43$(deps_initramfs): klibcdirs 55$(deps_initramfs): klibcdirs
44# We rebuild initramfs_data.cpio.gz if: 56# We rebuild initramfs_data.cpio if:
45# 1) Any included file is newer than initramfs_data.cpio.gz 57# 1) Any included file is newer than initramfs_data.cpio
46# 2) There are changes in which files are included (added or deleted) 58# 2) There are changes in which files are included (added or deleted)
47# 3) If gen_init_cpio are newer than initramfs_data.cpio.gz 59# 3) If gen_init_cpio are newer than initramfs_data.cpio
48# 4) arguments to gen_initramfs.sh changes 60# 4) arguments to gen_initramfs.sh changes
49$(obj)/initramfs_data.cpio.gz: $(obj)/gen_init_cpio $(deps_initramfs) klibcdirs 61$(obj)/initramfs_data.cpio$(suffix_y): $(obj)/gen_init_cpio $(deps_initramfs) klibcdirs
50 $(Q)$(initramfs) -l $(ramfs-input) > $(obj)/.initramfs_data.cpio.gz.d 62 $(Q)$(initramfs) -l $(ramfs-input) > $(obj)/.initramfs_data.cpio.d
51 $(call if_changed,initfs) 63 $(call if_changed,initfs)
52 64
diff --git a/usr/initramfs_data.S b/usr/initramfs_data.S
index c2e1ad424f4a..7c6973d8d829 100644
--- a/usr/initramfs_data.S
+++ b/usr/initramfs_data.S
@@ -26,5 +26,5 @@ SECTIONS
26*/ 26*/
27 27
28.section .init.ramfs,"a" 28.section .init.ramfs,"a"
29.incbin "usr/initramfs_data.cpio.gz" 29.incbin "usr/initramfs_data.cpio"
30 30
diff --git a/usr/initramfs_data.bz2.S b/usr/initramfs_data.bz2.S
new file mode 100644
index 000000000000..bc54d090365c
--- /dev/null
+++ b/usr/initramfs_data.bz2.S
@@ -0,0 +1,29 @@
1/*
2 initramfs_data includes the compressed binary that is the
3 filesystem used for early user space.
4 Note: Older versions of "as" (prior to binutils 2.11.90.0.23
5 released on 2001-07-14) did not support .incbin.
6 If you are forced to use older binutils than that then the
7 following trick can be applied to create the resulting binary:
8
9
10 ld -m elf_i386 --format binary --oformat elf32-i386 -r \
11 -T initramfs_data.scr initramfs_data.cpio.gz -o initramfs_data.o
12 ld -m elf_i386 -r -o built-in.o initramfs_data.o
13
14 initramfs_data.scr looks like this:
15SECTIONS
16{
17 .init.ramfs : { *(.data) }
18}
19
20 The above example is for i386 - the parameters vary between architectures.
21 Eventually look up LDFLAGS_BLOB in an older version of the
22 arch/$(ARCH)/Makefile to see the flags used before .incbin was introduced.
23
24 Using .incbin has the advantage over ld that the correct flags are set
25 in the ELF header, as required by certain architectures.
26*/
27
28.section .init.ramfs,"a"
29.incbin "usr/initramfs_data.cpio.bz2"
diff --git a/usr/initramfs_data.gz.S b/usr/initramfs_data.gz.S
new file mode 100644
index 000000000000..890c8dd1d6bd
--- /dev/null
+++ b/usr/initramfs_data.gz.S
@@ -0,0 +1,29 @@
1/*
2 initramfs_data includes the compressed binary that is the
3 filesystem used for early user space.
4 Note: Older versions of "as" (prior to binutils 2.11.90.0.23
5 released on 2001-07-14) did not support .incbin.
6 If you are forced to use older binutils than that then the
7 following trick can be applied to create the resulting binary:
8
9
10 ld -m elf_i386 --format binary --oformat elf32-i386 -r \
11 -T initramfs_data.scr initramfs_data.cpio.gz -o initramfs_data.o
12 ld -m elf_i386 -r -o built-in.o initramfs_data.o
13
14 initramfs_data.scr looks like this:
15SECTIONS
16{
17 .init.ramfs : { *(.data) }
18}
19
20 The above example is for i386 - the parameters vary between architectures.
21 Eventually look up LDFLAGS_BLOB in an older version of the
22 arch/$(ARCH)/Makefile to see the flags used before .incbin was introduced.
23
24 Using .incbin has the advantage over ld that the correct flags are set
25 in the ELF header, as required by certain architectures.
26*/
27
28.section .init.ramfs,"a"
29.incbin "usr/initramfs_data.cpio.gz"
diff --git a/usr/initramfs_data.lzma.S b/usr/initramfs_data.lzma.S
new file mode 100644
index 000000000000..e11469e48562
--- /dev/null
+++ b/usr/initramfs_data.lzma.S
@@ -0,0 +1,29 @@
1/*
2 initramfs_data includes the compressed binary that is the
3 filesystem used for early user space.
4 Note: Older versions of "as" (prior to binutils 2.11.90.0.23
5 released on 2001-07-14) did not support .incbin.
6 If you are forced to use older binutils than that then the
7 following trick can be applied to create the resulting binary:
8
9
10 ld -m elf_i386 --format binary --oformat elf32-i386 -r \
11 -T initramfs_data.scr initramfs_data.cpio.gz -o initramfs_data.o
12 ld -m elf_i386 -r -o built-in.o initramfs_data.o
13
14 initramfs_data.scr looks like this:
15SECTIONS
16{
17 .init.ramfs : { *(.data) }
18}
19
20 The above example is for i386 - the parameters vary between architectures.
21 Eventually look up LDFLAGS_BLOB in an older version of the
22 arch/$(ARCH)/Makefile to see the flags used before .incbin was introduced.
23
24 Using .incbin has the advantage over ld that the correct flags are set
25 in the ELF header, as required by certain architectures.
26*/
27
28.section .init.ramfs,"a"
29.incbin "usr/initramfs_data.cpio.lzma"