aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--Documentation/SubmittingPatches2
-rw-r--r--Documentation/connector/cn_test.c2
-rw-r--r--Documentation/connector/connector.txt8
-rw-r--r--Documentation/filesystems/ext4.txt13
-rw-r--r--Documentation/filesystems/proc.txt1
-rw-r--r--Documentation/filesystems/vfat.txt2
-rw-r--r--Documentation/networking/timestamping/timestamping.c2
-rw-r--r--MAINTAINERS17
-rw-r--r--Makefile4
-rw-r--r--arch/alpha/kernel/vmlinux.lds.S1
-rw-r--r--arch/arm/mach-ns9xxx/clock.c2
-rw-r--r--arch/arm/mach-omap1/id.c2
-rw-r--r--arch/arm/mach-omap2/board-3430sdp.c10
-rw-r--r--arch/arm/mach-omap2/board-ldp.c10
-rw-r--r--arch/arm/mach-omap2/board-omap3beagle.c7
-rw-r--r--arch/arm/mach-omap2/board-omap3evm.c10
-rw-r--r--arch/arm/mach-omap2/board-omap3pandora.c10
-rw-r--r--arch/arm/mach-omap2/board-rx51-peripherals.c10
-rw-r--r--arch/arm/mach-omap2/board-zoom2.c10
-rw-r--r--arch/arm/mach-omap2/cm4xxx.c17
-rw-r--r--arch/arm/mach-omap2/devices.c65
-rw-r--r--arch/arm/mach-omap2/io.c4
-rw-r--r--arch/arm/mach-omap2/iommu2.c2
-rw-r--r--arch/arm/mach-omap2/mailbox.c33
-rw-r--r--arch/arm/mach-omap2/mux.c4
-rw-r--r--arch/arm/mach-omap2/serial.c6
-rw-r--r--arch/arm/plat-omap/gpio.c2
-rw-r--r--arch/arm/plat-omap/include/mach/keypad.h5
-rw-r--r--arch/arm/plat-omap/include/mach/mux.h2
-rw-r--r--arch/arm/plat-omap/iovmm.c5
-rw-r--r--arch/arm/plat-s3c24xx/include/plat/mci.h3
-rw-r--r--arch/blackfin/mach-bf561/coreb.c2
-rw-r--r--arch/cris/arch-v10/drivers/sync_serial.c2
-rw-r--r--arch/cris/arch-v32/drivers/mach-fs/gpio.c2
-rw-r--r--arch/ia64/Kconfig4
-rw-r--r--arch/ia64/ia32/binfmt_elf32.c4
-rw-r--r--arch/ia64/include/asm/acpi.h2
-rw-r--r--arch/ia64/include/asm/spinlock.h175
-rw-r--r--arch/ia64/include/asm/spinlock_types.h2
-rw-r--r--arch/ia64/kernel/head.S89
-rw-r--r--arch/ia64/kernel/ia64_ksyms.c20
-rw-r--r--arch/ia64/oprofile/backtrace.c20
-rw-r--r--arch/m68k/include/asm/hardirq_mm.h12
-rw-r--r--arch/mips/alchemy/common/dbdma.c8
-rw-r--r--arch/mips/basler/excite/excite_iodev.c2
-rw-r--r--arch/mips/bcm63xx/Makefile2
-rw-r--r--arch/mips/bcm63xx/boards/board_bcm963xx.c8
-rw-r--r--arch/mips/bcm63xx/dev-pcmcia.c144
-rw-r--r--arch/mips/bcm63xx/dev-uart.c41
-rw-r--r--arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_pcmcia.h13
-rw-r--r--arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_uart.h6
-rw-r--r--arch/mips/include/asm/smp.h15
-rw-r--r--arch/mips/include/asm/unaligned.h4
-rw-r--r--arch/mips/kernel/kspd.c33
-rw-r--r--arch/mips/kernel/rtlx.c15
-rw-r--r--arch/mips/kernel/smp.c14
-rw-r--r--arch/mips/kernel/smtc.c5
-rw-r--r--arch/mips/kernel/vpe.c77
-rw-r--r--arch/mips/mm/sc-mips.c5
-rw-r--r--arch/mips/oprofile/op_model_loongson2.c14
-rw-r--r--arch/mips/pci/ops-pmcmsp.c5
-rw-r--r--arch/mips/sgi-ip27/ip27-smp.c2
-rw-r--r--arch/mips/sibyte/bcm1480/irq.c4
-rw-r--r--arch/mips/sibyte/common/sb_tbprof.c33
-rw-r--r--arch/mips/sibyte/swarm/setup.c2
-rw-r--r--arch/mn10300/include/asm/uaccess.h73
-rw-r--r--arch/mn10300/unit-asb2303/include/unit/clock.h6
-rw-r--r--arch/mn10300/unit-asb2305/include/unit/clock.h6
-rw-r--r--arch/powerpc/kvm/timing.c2
-rw-r--r--arch/powerpc/platforms/cell/spufs/file.c16
-rw-r--r--arch/powerpc/platforms/pseries/dtl.c2
-rw-r--r--arch/sparc/Kconfig3
-rw-r--r--arch/sparc/kernel/smp_64.c53
-rw-r--r--arch/x86/Kconfig11
-rw-r--r--arch/x86/include/asm/checksum_32.h3
-rw-r--r--arch/x86/include/asm/cmpxchg_32.h30
-rw-r--r--arch/x86/kernel/acpi/cstate.c2
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce.c5
-rw-r--r--arch/x86/kernel/i386_ksyms_32.c8
-rw-r--r--arch/x86/lib/Makefile2
-rw-r--r--arch/x86/lib/cmpxchg8b_emu.S57
-rw-r--r--arch/x86/pci/i386.c2
-rw-r--r--arch/x86/xen/debugfs.c2
-rw-r--r--drivers/acpi/osl.c8
-rw-r--r--drivers/acpi/processor_idle.c8
-rw-r--r--drivers/acpi/video.c2
-rw-r--r--drivers/atm/ambassador.c8
-rw-r--r--drivers/atm/eni.c2
-rw-r--r--drivers/atm/firestream.c2
-rw-r--r--drivers/atm/fore200e.c2
-rw-r--r--drivers/atm/he.c14
-rw-r--r--drivers/atm/horizon.c2
-rw-r--r--drivers/atm/iphase.c2
-rw-r--r--drivers/atm/zatm.c2
-rw-r--r--drivers/block/cciss.c2
-rw-r--r--drivers/char/agp/agp.h2
-rw-r--r--drivers/char/agp/alpha-agp.c2
-rw-r--r--drivers/char/apm-emulation.c2
-rw-r--r--drivers/char/bfin-otp.c2
-rw-r--r--drivers/char/cyclades.c2
-rw-r--r--drivers/char/hw_random/omap-rng.c4
-rw-r--r--drivers/char/mem.c2
-rw-r--r--drivers/char/mspec.c2
-rw-r--r--drivers/char/pty.c47
-rw-r--r--drivers/char/serial167.c7
-rw-r--r--drivers/char/tty_io.c15
-rw-r--r--drivers/char/vt_ioctl.c6
-rw-r--r--drivers/char/xilinx_hwicap/xilinx_hwicap.c2
-rw-r--r--drivers/connector/cn_queue.c12
-rw-r--r--drivers/connector/connector.c22
-rw-r--r--drivers/gpio/gpiolib.c2
-rw-r--r--drivers/gpu/drm/drm_crtc.c1
-rw-r--r--drivers/gpu/drm/drm_crtc_helper.c88
-rw-r--r--drivers/gpu/drm/drm_edid.c46
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c235
-rw-r--r--drivers/gpu/drm/drm_modes.c3
-rw-r--r--drivers/gpu/drm/drm_vm.c8
-rw-r--r--drivers/gpu/drm/i915/intel_fb.c5
-rw-r--r--drivers/gpu/drm/radeon/.gitignore3
-rw-r--r--drivers/gpu/drm/radeon/avivod.h9
-rw-r--r--drivers/gpu/drm/radeon/r100.c197
-rw-r--r--drivers/gpu/drm/radeon/r100_track.h69
-rw-r--r--drivers/gpu/drm/radeon/r200.c79
-rw-r--r--drivers/gpu/drm/radeon/r300.c137
-rw-r--r--drivers/gpu/drm/radeon/r500_reg.h3
-rw-r--r--drivers/gpu/drm/radeon/r520.c276
-rw-r--r--drivers/gpu/drm/radeon/r520d.h187
-rw-r--r--drivers/gpu/drm/radeon/r600.c11
-rw-r--r--drivers/gpu/drm/radeon/r600_cs.c186
-rw-r--r--drivers/gpu/drm/radeon/radeon.h76
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.h80
-rw-r--r--drivers/gpu/drm/radeon/radeon_atombios.c9
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c79
-rw-r--r--drivers/gpu/drm/radeon/radeon_cs.c103
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c11
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c5
-rw-r--r--drivers/gpu/drm/radeon/radeon_fb.c26
-rw-r--r--drivers/gpu/drm/radeon/radeon_kms.c49
-rw-r--r--drivers/gpu/drm/radeon/radeon_reg.h1
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c9
-rw-r--r--drivers/gpu/drm/radeon/rs600.c20
-rw-r--r--drivers/gpu/drm/radeon/rs690.c3
-rw-r--r--drivers/gpu/drm/radeon/rv515.c364
-rw-r--r--drivers/gpu/drm/radeon/rv515d.h385
-rw-r--r--drivers/gpu/drm/radeon/rv770.c11
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_vm.c2
-rw-r--r--drivers/hwmon/fschmd.c2
-rw-r--r--drivers/ieee1394/dma.c2
-rw-r--r--drivers/infiniband/hw/ehca/ehca_uverbs.c2
-rw-r--r--drivers/infiniband/hw/ipath/ipath_file_ops.c2
-rw-r--r--drivers/infiniband/hw/ipath/ipath_mmap.c2
-rw-r--r--drivers/input/input.c2
-rw-r--r--drivers/isdn/hardware/mISDN/Kconfig1
-rw-r--r--drivers/isdn/i4l/Kconfig3
-rw-r--r--drivers/isdn/mISDN/socket.c2
-rw-r--r--drivers/lguest/lguest_user.c2
-rw-r--r--drivers/md/dm-log-userspace-transfer.c6
-rw-r--r--drivers/media/dvb/dvb-core/dmxdev.c2
-rw-r--r--drivers/media/dvb/firewire/firedtv-ci.c2
-rw-r--r--drivers/media/video/cafe_ccic.c2
-rw-r--r--drivers/media/video/et61x251/et61x251_core.c2
-rw-r--r--drivers/media/video/gspca/gspca.c2
-rw-r--r--drivers/media/video/meye.c2
-rw-r--r--drivers/media/video/sn9c102/sn9c102_core.c2
-rw-r--r--drivers/media/video/stk-webcam.c2
-rw-r--r--drivers/media/video/uvc/uvc_v4l2.c2
-rw-r--r--drivers/media/video/videobuf-dma-contig.c2
-rw-r--r--drivers/media/video/videobuf-dma-sg.c2
-rw-r--r--drivers/media/video/videobuf-vmalloc.c2
-rw-r--r--drivers/media/video/vino.c2
-rw-r--r--drivers/media/video/zc0301/zc0301_core.c2
-rw-r--r--drivers/media/video/zoran/zoran_driver.c2
-rw-r--r--drivers/misc/phantom.c2
-rw-r--r--drivers/misc/sgi-gru/grufile.c5
-rw-r--r--drivers/misc/sgi-gru/grutables.h2
-rw-r--r--drivers/mmc/core/debugfs.c2
-rw-r--r--drivers/mmc/core/sdio_cis.c65
-rw-r--r--drivers/mmc/host/Kconfig41
-rw-r--r--drivers/mmc/host/s3cmci.c608
-rw-r--r--drivers/mmc/host/s3cmci.h14
-rw-r--r--drivers/net/3c59x.c77
-rw-r--r--drivers/net/Kconfig7
-rw-r--r--drivers/net/Makefile1
-rw-r--r--drivers/net/bcm63xx_enet.c2
-rw-r--r--drivers/net/benet/be.h1
-rw-r--r--drivers/net/benet/be_cmds.c3
-rw-r--r--drivers/net/benet/be_cmds.h3
-rw-r--r--drivers/net/benet/be_main.c23
-rw-r--r--drivers/net/bonding/bond_sysfs.c1
-rw-r--r--drivers/net/cnic.c3
-rw-r--r--drivers/net/cnic_if.h4
-rw-r--r--drivers/net/e1000/e1000.h3
-rw-r--r--drivers/net/e1000/e1000_ethtool.c202
-rw-r--r--drivers/net/e1000/e1000_hw.c12914
-rw-r--r--drivers/net/e1000/e1000_hw.h3231
-rw-r--r--drivers/net/e1000/e1000_main.c825
-rw-r--r--drivers/net/e1000/e1000_param.c22
-rw-r--r--drivers/net/e1000e/netdev.c13
-rw-r--r--drivers/net/hamradio/mkiss.c4
-rw-r--r--drivers/net/igb/igb_main.c13
-rw-r--r--drivers/net/iseries_veth.c2
-rw-r--r--drivers/net/ixgbe/ixgbe_82598.c2
-rw-r--r--drivers/net/ixgbe/ixgbe_common.c232
-rw-r--r--drivers/net/ixgbe/ixgbe_ethtool.c4
-rw-r--r--drivers/net/ixgbe/ixgbe_main.c52
-rw-r--r--drivers/net/ixgbe/ixgbe_type.h9
-rw-r--r--drivers/net/ks8851_mll.c1697
-rw-r--r--drivers/net/meth.c2
-rw-r--r--drivers/net/pppol2tp.c2
-rw-r--r--drivers/net/qlge/qlge.h18
-rw-r--r--drivers/net/qlge/qlge_main.c26
-rw-r--r--drivers/net/sgiseeq.c2
-rw-r--r--drivers/net/skge.c16
-rw-r--r--drivers/net/skge.h2
-rw-r--r--drivers/net/sky2.c7
-rw-r--r--drivers/net/sky2.h2
-rw-r--r--drivers/net/tg3.h1
-rw-r--r--drivers/net/virtio_net.c2
-rw-r--r--drivers/net/wireless/Kconfig13
-rw-r--r--drivers/net/wireless/ath/ar9170/phy.c6
-rw-r--r--drivers/net/wireless/b43/pio.c60
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-1000.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-4965.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-5000.c4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-6000.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn.c185
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.c187
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.h14
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-debugfs.c8
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-tx.c6
-rw-r--r--drivers/net/wireless/iwlwifi/iwl3945-base.c31
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c3
-rw-r--r--drivers/net/wireless/rt2x00/rt73usb.c1
-rw-r--r--drivers/pcmcia/Kconfig4
-rw-r--r--drivers/pcmcia/Makefile1
-rw-r--r--drivers/pcmcia/at91_cf.c2
-rw-r--r--drivers/pcmcia/au1000_generic.c2
-rw-r--r--drivers/pcmcia/bcm63xx_pcmcia.c536
-rw-r--r--drivers/pcmcia/bcm63xx_pcmcia.h60
-rw-r--r--drivers/pcmcia/bfin_cf_pcmcia.c2
-rw-r--r--drivers/pcmcia/cs.c2
-rw-r--r--drivers/pcmcia/i82092.c2
-rw-r--r--drivers/pcmcia/i82365.c2
-rw-r--r--drivers/pcmcia/m32r_cfc.c2
-rw-r--r--drivers/pcmcia/m32r_pcc.c2
-rw-r--r--drivers/pcmcia/m8xx_pcmcia.c2
-rw-r--r--drivers/pcmcia/omap_cf.c2
-rw-r--r--drivers/pcmcia/pd6729.c2
-rw-r--r--drivers/pcmcia/pxa2xx_base.c2
-rw-r--r--drivers/pcmcia/sa1100_generic.c2
-rw-r--r--drivers/pcmcia/sa1111_generic.c2
-rw-r--r--drivers/pcmcia/tcic.c2
-rw-r--r--drivers/pcmcia/vrc4171_card.c2
-rw-r--r--drivers/pcmcia/yenta_socket.c88
-rw-r--r--drivers/platform/x86/sony-laptop.c9
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c2
-rw-r--r--drivers/s390/cio/qdio_debug.c2
-rw-r--r--drivers/s390/cio/qdio_perf.c2
-rw-r--r--drivers/scsi/sg.c45
-rw-r--r--drivers/serial/8250.c7
-rw-r--r--drivers/serial/Kconfig21
-rw-r--r--drivers/serial/Makefile1
-rw-r--r--drivers/serial/bcm63xx_uart.c890
-rw-r--r--drivers/serial/icom.c54
-rw-r--r--drivers/serial/serial_txx9.c39
-rw-r--r--drivers/spi/Makefile2
-rw-r--r--drivers/spi/spi_imx.c (renamed from drivers/spi/mxc_spi.c)383
-rw-r--r--drivers/spi/spidev.c2
-rw-r--r--drivers/staging/dst/dcore.c7
-rw-r--r--drivers/staging/pohmelfs/config.c5
-rw-r--r--drivers/uio/uio.c2
-rw-r--r--drivers/usb/class/usbtmc.c2
-rw-r--r--drivers/usb/gadget/printer.c2
-rw-r--r--drivers/usb/host/whci/debug.c6
-rw-r--r--drivers/usb/misc/rio500.c3
-rw-r--r--drivers/usb/mon/mon_bin.c2
-rw-r--r--drivers/usb/serial/usb-serial.c14
-rw-r--r--drivers/uwb/uwb-debug.c6
-rw-r--r--drivers/video/fb_defio.c2
-rw-r--r--drivers/video/fbmem.c2
-rw-r--r--drivers/video/omap/dispc.c2
-rw-r--r--drivers/video/uvesafb.c5
-rw-r--r--drivers/w1/w1_netlink.c2
-rw-r--r--fs/afs/cache.h12
-rw-r--r--fs/afs/internal.h2
-rw-r--r--fs/btrfs/acl.c6
-rw-r--r--fs/btrfs/btrfs_inode.h8
-rw-r--r--fs/btrfs/ctree.h27
-rw-r--r--fs/btrfs/disk-io.c10
-rw-r--r--fs/btrfs/extent-tree.c391
-rw-r--r--fs/btrfs/extent_io.c92
-rw-r--r--fs/btrfs/extent_io.h13
-rw-r--r--fs/btrfs/file.c37
-rw-r--r--fs/btrfs/inode.c239
-rw-r--r--fs/btrfs/ioctl.c62
-rw-r--r--fs/btrfs/ordered-data.c93
-rw-r--r--fs/btrfs/ordered-data.h4
-rw-r--r--fs/btrfs/super.c2
-rw-r--r--fs/btrfs/transaction.c10
-rw-r--r--fs/btrfs/volumes.c4
-rw-r--r--fs/btrfs/xattr.c2
-rw-r--r--fs/ext4/ext4.h54
-rw-r--r--fs/ext4/ext4_extents.h7
-rw-r--r--fs/ext4/ext4_jbd2.h6
-rw-r--r--fs/ext4/extents.c444
-rw-r--r--fs/ext4/file.c2
-rw-r--r--fs/ext4/fsync.c5
-rw-r--r--fs/ext4/inode.c574
-rw-r--r--fs/ext4/mballoc.c305
-rw-r--r--fs/ext4/mballoc.h35
-rw-r--r--fs/ext4/migrate.c2
-rw-r--r--fs/ext4/move_extent.c20
-rw-r--r--fs/ext4/namei.c3
-rw-r--r--fs/ext4/super.c99
-rw-r--r--fs/fat/fat.h2
-rw-r--r--fs/fat/inode.c18
-rw-r--r--fs/fat/misc.c8
-rw-r--r--fs/fat/namei_vfat.c15
-rw-r--r--fs/fuse/file.c2
-rw-r--r--fs/gfs2/file.c2
-rw-r--r--fs/jbd2/checkpoint.c7
-rw-r--r--fs/jbd2/commit.c59
-rw-r--r--fs/jbd2/journal.c198
-rw-r--r--fs/ncpfs/mmap.c2
-rw-r--r--fs/nfs/file.c4
-rw-r--r--fs/nfsd/nfsctl.c2
-rw-r--r--fs/nilfs2/btnode.c1
-rw-r--r--fs/nilfs2/dir.c2
-rw-r--r--fs/nilfs2/file.c4
-rw-r--r--fs/nilfs2/inode.c1
-rw-r--r--fs/nilfs2/mdt.c2
-rw-r--r--fs/nilfs2/nilfs.h4
-rw-r--r--fs/nls/nls_base.c8
-rw-r--r--fs/ocfs2/cluster/heartbeat.c2
-rw-r--r--fs/ocfs2/cluster/netdebug.c4
-rw-r--r--fs/ocfs2/dlm/dlmdebug.c8
-rw-r--r--fs/ocfs2/mmap.c2
-rw-r--r--fs/ocfs2/super.c2
-rw-r--r--fs/omfs/dir.c2
-rw-r--r--fs/omfs/file.c2
-rw-r--r--fs/omfs/omfs.h4
-rw-r--r--fs/sysfs/bin.c4
-rw-r--r--fs/ubifs/file.c2
-rw-r--r--fs/xfs/linux-2.6/xfs_file.c4
-rw-r--r--include/asm-generic/gpio.h1
-rw-r--r--include/drm/drm_crtc.h14
-rw-r--r--include/drm/drm_crtc_helper.h4
-rw-r--r--include/drm/drm_fb_helper.h24
-rw-r--r--include/linux/agp_backend.h2
-rw-r--r--include/linux/atmdev.h2
-rw-r--r--include/linux/cgroup.h2
-rw-r--r--include/linux/connector.h11
-rw-r--r--include/linux/fs.h2
-rw-r--r--include/linux/hugetlb.h2
-rw-r--r--include/linux/if_tunnel.h2
-rw-r--r--include/linux/jbd2.h27
-rw-r--r--include/linux/mm_types.h2
-rw-r--r--include/linux/mroute.h4
-rw-r--r--include/linux/mroute6.h4
-rw-r--r--include/linux/net.h8
-rw-r--r--include/linux/netfilter.h4
-rw-r--r--include/linux/ramfs.h2
-rw-r--r--include/linux/res_counter.h6
-rw-r--r--include/linux/serial_core.h3
-rw-r--r--include/linux/tty_driver.h13
-rw-r--r--include/net/compat.h4
-rw-r--r--include/net/inet_connection_sock.h6
-rw-r--r--include/net/ip.h4
-rw-r--r--include/net/ipip.h7
-rw-r--r--include/net/ipv6.h4
-rw-r--r--include/net/sctp/structs.h4
-rw-r--r--include/net/sock.h12
-rw-r--r--include/net/tcp.h4
-rw-r--r--include/net/udp.h2
-rw-r--r--include/net/wext.h1
-rw-r--r--include/pcmcia/ss.h2
-rw-r--r--include/trace/events/ext4.h178
-rw-r--r--include/trace/events/jbd2.h78
-rw-r--r--ipc/shm.c4
-rw-r--r--kernel/cgroup.c15
-rw-r--r--kernel/hrtimer.c53
-rw-r--r--kernel/kprobes.c4
-rw-r--r--kernel/module.c7
-rw-r--r--kernel/perf_event.c2
-rw-r--r--kernel/rcutree_trace.c10
-rw-r--r--kernel/relay.c2
-rw-r--r--kernel/res_counter.c18
-rw-r--r--kernel/sched.c2
-rw-r--r--kernel/sched_clock.c4
-rw-r--r--kernel/time/timer_list.c2
-rw-r--r--kernel/time/timer_stats.c2
-rw-r--r--lib/vsprintf.c2
-rw-r--r--mm/Kconfig4
-rw-r--r--mm/filemap.c2
-rw-r--r--mm/filemap_xip.c2
-rw-r--r--mm/hugetlb.c2
-rw-r--r--mm/memcontrol.c127
-rw-r--r--mm/mmap.c2
-rw-r--r--mm/nommu.c2
-rw-r--r--mm/percpu.c83
-rw-r--r--mm/rmap.c4
-rw-r--r--mm/shmem.c4
-rw-r--r--net/8021q/vlan_netlink.c1
-rw-r--r--net/atm/common.c2
-rw-r--r--net/atm/common.h2
-rw-r--r--net/atm/pvc.c2
-rw-r--r--net/atm/svc.c2
-rw-r--r--net/ax25/af_ax25.c23
-rw-r--r--net/bluetooth/hci_sock.c2
-rw-r--r--net/bluetooth/l2cap.c4
-rw-r--r--net/bluetooth/rfcomm/sock.c4
-rw-r--r--net/bluetooth/sco.c2
-rw-r--r--net/bridge/br_if.c1
-rw-r--r--net/can/raw.c2
-rw-r--r--net/compat.c12
-rw-r--r--net/core/dev.c6
-rw-r--r--net/core/net-sysfs.c12
-rw-r--r--net/core/pktgen.c6
-rw-r--r--net/core/sock.c27
-rw-r--r--net/dcb/dcbnl.c15
-rw-r--r--net/dccp/dccp.h4
-rw-r--r--net/dccp/proto.c10
-rw-r--r--net/decnet/af_decnet.c6
-rw-r--r--net/ieee802154/dgram.c2
-rw-r--r--net/ieee802154/raw.c2
-rw-r--r--net/ipv4/af_inet.c1
-rw-r--r--net/ipv4/inet_connection_sock.c2
-rw-r--r--net/ipv4/ip_output.c1
-rw-r--r--net/ipv4/ip_sockglue.c6
-rw-r--r--net/ipv4/ipmr.c2
-rw-r--r--net/ipv4/raw.c6
-rw-r--r--net/ipv4/tcp.c10
-rw-r--r--net/ipv4/tcp_output.c11
-rw-r--r--net/ipv4/udp.c7
-rw-r--r--net/ipv4/udp_impl.h4
-rw-r--r--net/ipv6/ip6mr.c2
-rw-r--r--net/ipv6/ipv6_sockglue.c6
-rw-r--r--net/ipv6/ndisc.c1
-rw-r--r--net/ipv6/raw.c6
-rw-r--r--net/ipv6/sit.c60
-rw-r--r--net/ipv6/udp.c4
-rw-r--r--net/ipv6/udp_impl.h4
-rw-r--r--net/ipx/af_ipx.c2
-rw-r--r--net/irda/af_irda.c2
-rw-r--r--net/iucv/af_iucv.c2
-rw-r--r--net/llc/af_llc.c2
-rw-r--r--net/mac80211/mlme.c18
-rw-r--r--net/mac80211/tx.c5
-rw-r--r--net/netfilter/nf_sockopt.c4
-rw-r--r--net/netlink/af_netlink.c4
-rw-r--r--net/netrom/af_netrom.c2
-rw-r--r--net/packet/af_packet.c4
-rw-r--r--net/phonet/pep.c2
-rw-r--r--net/phonet/socket.c1
-rw-r--r--net/rds/af_rds.c2
-rw-r--r--net/rose/af_rose.c2
-rw-r--r--net/rxrpc/af_rxrpc.c2
-rw-r--r--net/sctp/socket.c62
-rw-r--r--net/socket.c9
-rw-r--r--net/tipc/socket.c2
-rw-r--r--net/wireless/sme.c5
-rw-r--r--net/wireless/wext-sme.c8
-rw-r--r--net/wireless/wext.c11
-rw-r--r--net/x25/af_x25.c2
-rw-r--r--samples/tracepoints/tracepoint-sample.c2
-rw-r--r--security/integrity/ima/ima_fs.c10
-rw-r--r--sound/core/pcm_native.c8
-rw-r--r--sound/usb/usx2y/us122l.c2
-rw-r--r--sound/usb/usx2y/usX2Yhwdep.c2
-rw-r--r--sound/usb/usx2y/usx2yhwdeppcm.c2
-rw-r--r--virt/kvm/kvm_main.c6
473 files changed, 17022 insertions, 14492 deletions
diff --git a/Documentation/SubmittingPatches b/Documentation/SubmittingPatches
index b7f9d3b4bbf6..72651f788f4e 100644
--- a/Documentation/SubmittingPatches
+++ b/Documentation/SubmittingPatches
@@ -232,7 +232,7 @@ your e-mail client so that it sends your patches untouched.
232When sending patches to Linus, always follow step #7. 232When sending patches to Linus, always follow step #7.
233 233
234Large changes are not appropriate for mailing lists, and some 234Large changes are not appropriate for mailing lists, and some
235maintainers. If your patch, uncompressed, exceeds 40 kB in size, 235maintainers. If your patch, uncompressed, exceeds 300 kB in size,
236it is preferred that you store your patch on an Internet-accessible 236it is preferred that you store your patch on an Internet-accessible
237server, and provide instead a URL (link) pointing to your patch. 237server, and provide instead a URL (link) pointing to your patch.
238 238
diff --git a/Documentation/connector/cn_test.c b/Documentation/connector/cn_test.c
index 1711adc33373..b07add3467f1 100644
--- a/Documentation/connector/cn_test.c
+++ b/Documentation/connector/cn_test.c
@@ -34,7 +34,7 @@ static char cn_test_name[] = "cn_test";
34static struct sock *nls; 34static struct sock *nls;
35static struct timer_list cn_test_timer; 35static struct timer_list cn_test_timer;
36 36
37static void cn_test_callback(struct cn_msg *msg) 37static void cn_test_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
38{ 38{
39 pr_info("%s: %lu: idx=%x, val=%x, seq=%u, ack=%u, len=%d: %s.\n", 39 pr_info("%s: %lu: idx=%x, val=%x, seq=%u, ack=%u, len=%d: %s.\n",
40 __func__, jiffies, msg->id.idx, msg->id.val, 40 __func__, jiffies, msg->id.idx, msg->id.val,
diff --git a/Documentation/connector/connector.txt b/Documentation/connector/connector.txt
index 81e6bf6ead57..78c9466a9aa8 100644
--- a/Documentation/connector/connector.txt
+++ b/Documentation/connector/connector.txt
@@ -23,7 +23,7 @@ handling, etc... The Connector driver allows any kernelspace agents to use
23netlink based networking for inter-process communication in a significantly 23netlink based networking for inter-process communication in a significantly
24easier way: 24easier way:
25 25
26int cn_add_callback(struct cb_id *id, char *name, void (*callback) (void *)); 26int cn_add_callback(struct cb_id *id, char *name, void (*callback) (struct cn_msg *, struct netlink_skb_parms *));
27void cn_netlink_send(struct cn_msg *msg, u32 __group, int gfp_mask); 27void cn_netlink_send(struct cn_msg *msg, u32 __group, int gfp_mask);
28 28
29struct cb_id 29struct cb_id
@@ -53,15 +53,15 @@ struct cn_msg
53Connector interfaces. 53Connector interfaces.
54/*****************************************/ 54/*****************************************/
55 55
56int cn_add_callback(struct cb_id *id, char *name, void (*callback) (void *)); 56int cn_add_callback(struct cb_id *id, char *name, void (*callback) (struct cn_msg *, struct netlink_skb_parms *));
57 57
58 Registers new callback with connector core. 58 Registers new callback with connector core.
59 59
60 struct cb_id *id - unique connector's user identifier. 60 struct cb_id *id - unique connector's user identifier.
61 It must be registered in connector.h for legal in-kernel users. 61 It must be registered in connector.h for legal in-kernel users.
62 char *name - connector's callback symbolic name. 62 char *name - connector's callback symbolic name.
63 void (*callback) (void *) - connector's callback. 63 void (*callback) (struct cn..) - connector's callback.
64 Argument must be dereferenced to struct cn_msg *. 64 cn_msg and the sender's credentials
65 65
66 66
67void cn_del_callback(struct cb_id *id); 67void cn_del_callback(struct cb_id *id);
diff --git a/Documentation/filesystems/ext4.txt b/Documentation/filesystems/ext4.txt
index 18b5ec8cea45..bf4f4b7e11b3 100644
--- a/Documentation/filesystems/ext4.txt
+++ b/Documentation/filesystems/ext4.txt
@@ -282,9 +282,16 @@ stripe=n Number of filesystem blocks that mballoc will try
282 to use for allocation size and alignment. For RAID5/6 282 to use for allocation size and alignment. For RAID5/6
283 systems this should be the number of data 283 systems this should be the number of data
284 disks * RAID chunk size in file system blocks. 284 disks * RAID chunk size in file system blocks.
285delalloc (*) Deferring block allocation until write-out time. 285
286nodelalloc Disable delayed allocation. Blocks are allocation 286delalloc (*) Defer block allocation until just before ext4
287 when data is copied from user to page cache. 287 writes out the block(s) in question. This
288 allows ext4 to better allocation decisions
289 more efficiently.
290nodelalloc Disable delayed allocation. Blocks are allocated
291 when the data is copied from userspace to the
292 page cache, either via the write(2) system call
293 or when an mmap'ed page which was previously
294 unallocated is written for the first time.
288 295
289max_batch_time=usec Maximum amount of time ext4 should wait for 296max_batch_time=usec Maximum amount of time ext4 should wait for
290 additional filesystem operations to be batch 297 additional filesystem operations to be batch
diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt
index b5aee7838a00..2c48f945546b 100644
--- a/Documentation/filesystems/proc.txt
+++ b/Documentation/filesystems/proc.txt
@@ -1113,7 +1113,6 @@ Table 1-12: Files in /proc/fs/ext4/<devname>
1113.............................................................................. 1113..............................................................................
1114 File Content 1114 File Content
1115 mb_groups details of multiblock allocator buddy cache of free blocks 1115 mb_groups details of multiblock allocator buddy cache of free blocks
1116 mb_history multiblock allocation history
1117.............................................................................. 1116..............................................................................
1118 1117
1119 1118
diff --git a/Documentation/filesystems/vfat.txt b/Documentation/filesystems/vfat.txt
index b58b84b50fa2..eed520fd0c8e 100644
--- a/Documentation/filesystems/vfat.txt
+++ b/Documentation/filesystems/vfat.txt
@@ -102,7 +102,7 @@ shortname=lower|win95|winnt|mixed
102 winnt: emulate the Windows NT rule for display/create. 102 winnt: emulate the Windows NT rule for display/create.
103 mixed: emulate the Windows NT rule for display, 103 mixed: emulate the Windows NT rule for display,
104 emulate the Windows 95 rule for create. 104 emulate the Windows 95 rule for create.
105 Default setting is `lower'. 105 Default setting is `mixed'.
106 106
107tz=UTC -- Interpret timestamps as UTC rather than local time. 107tz=UTC -- Interpret timestamps as UTC rather than local time.
108 This option disables the conversion of timestamps 108 This option disables the conversion of timestamps
diff --git a/Documentation/networking/timestamping/timestamping.c b/Documentation/networking/timestamping/timestamping.c
index 43d143104210..a7936fe8444a 100644
--- a/Documentation/networking/timestamping/timestamping.c
+++ b/Documentation/networking/timestamping/timestamping.c
@@ -381,7 +381,7 @@ int main(int argc, char **argv)
381 memset(&hwtstamp, 0, sizeof(hwtstamp)); 381 memset(&hwtstamp, 0, sizeof(hwtstamp));
382 strncpy(hwtstamp.ifr_name, interface, sizeof(hwtstamp.ifr_name)); 382 strncpy(hwtstamp.ifr_name, interface, sizeof(hwtstamp.ifr_name));
383 hwtstamp.ifr_data = (void *)&hwconfig; 383 hwtstamp.ifr_data = (void *)&hwconfig;
384 memset(&hwconfig, 0, sizeof(&hwconfig)); 384 memset(&hwconfig, 0, sizeof(hwconfig));
385 hwconfig.tx_type = 385 hwconfig.tx_type =
386 (so_timestamping_flags & SOF_TIMESTAMPING_TX_HARDWARE) ? 386 (so_timestamping_flags & SOF_TIMESTAMPING_TX_HARDWARE) ?
387 HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF; 387 HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
diff --git a/MAINTAINERS b/MAINTAINERS
index e7047c3743a8..737a9b2c532d 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -741,23 +741,36 @@ M: Dirk Opfer <dirk@opfer-online.de>
741S: Maintained 741S: Maintained
742 742
743ARM/PALMTX,PALMT5,PALMLD,PALMTE2,PALMTC SUPPORT 743ARM/PALMTX,PALMT5,PALMLD,PALMTE2,PALMTC SUPPORT
744P: Marek Vasut 744M: Marek Vasut <marek.vasut@gmail.com>
745M: marek.vasut@gmail.com
746L: linux-arm-kernel@lists.infradead.org 745L: linux-arm-kernel@lists.infradead.org
747W: http://hackndev.com 746W: http://hackndev.com
748S: Maintained 747S: Maintained
748F: arch/arm/mach-pxa/include/mach/palmtx.h
749F: arch/arm/mach-pxa/palmtx.c
750F: arch/arm/mach-pxa/include/mach/palmt5.h
751F: arch/arm/mach-pxa/palmt5.c
752F: arch/arm/mach-pxa/include/mach/palmld.h
753F: arch/arm/mach-pxa/palmld.c
754F: arch/arm/mach-pxa/include/mach/palmte2.h
755F: arch/arm/mach-pxa/palmte2.c
756F: arch/arm/mach-pxa/include/mach/palmtc.h
757F: arch/arm/mach-pxa/palmtc.c
749 758
750ARM/PALM TREO 680 SUPPORT 759ARM/PALM TREO 680 SUPPORT
751M: Tomas Cech <sleep_walker@suse.cz> 760M: Tomas Cech <sleep_walker@suse.cz>
752L: linux-arm-kernel@lists.infradead.org 761L: linux-arm-kernel@lists.infradead.org
753W: http://hackndev.com 762W: http://hackndev.com
754S: Maintained 763S: Maintained
764F: arch/arm/mach-pxa/include/mach/treo680.h
765F: arch/arm/mach-pxa/treo680.c
755 766
756ARM/PALMZ72 SUPPORT 767ARM/PALMZ72 SUPPORT
757M: Sergey Lapin <slapin@ossfans.org> 768M: Sergey Lapin <slapin@ossfans.org>
758L: linux-arm-kernel@lists.infradead.org 769L: linux-arm-kernel@lists.infradead.org
759W: http://hackndev.com 770W: http://hackndev.com
760S: Maintained 771S: Maintained
772F: arch/arm/mach-pxa/include/mach/palmz72.h
773F: arch/arm/mach-pxa/palmz72.c
761 774
762ARM/PLEB SUPPORT 775ARM/PLEB SUPPORT
763M: Peter Chubb <pleb@gelato.unsw.edu.au> 776M: Peter Chubb <pleb@gelato.unsw.edu.au>
diff --git a/Makefile b/Makefile
index f908accd332b..00444a8e304f 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
1VERSION = 2 1VERSION = 2
2PATCHLEVEL = 6 2PATCHLEVEL = 6
3SUBLEVEL = 31 3SUBLEVEL = 32
4EXTRAVERSION = 4EXTRAVERSION = -rc2
5NAME = Man-Eating Seals of Antiquity 5NAME = Man-Eating Seals of Antiquity
6 6
7# *DOCUMENTATION* 7# *DOCUMENTATION*
diff --git a/arch/alpha/kernel/vmlinux.lds.S b/arch/alpha/kernel/vmlinux.lds.S
index ecf4d488333d..003ef4c02585 100644
--- a/arch/alpha/kernel/vmlinux.lds.S
+++ b/arch/alpha/kernel/vmlinux.lds.S
@@ -1,7 +1,6 @@
1#include <asm-generic/vmlinux.lds.h> 1#include <asm-generic/vmlinux.lds.h>
2#include <asm/thread_info.h> 2#include <asm/thread_info.h>
3#include <asm/page.h> 3#include <asm/page.h>
4#include <asm/thread_info.h>
5 4
6OUTPUT_FORMAT("elf64-alpha") 5OUTPUT_FORMAT("elf64-alpha")
7OUTPUT_ARCH(alpha) 6OUTPUT_ARCH(alpha)
diff --git a/arch/arm/mach-ns9xxx/clock.c b/arch/arm/mach-ns9xxx/clock.c
index 44ed20d4a388..cf81cbc57544 100644
--- a/arch/arm/mach-ns9xxx/clock.c
+++ b/arch/arm/mach-ns9xxx/clock.c
@@ -195,7 +195,7 @@ static int clk_debugfs_open(struct inode *inode, struct file *file)
195 return single_open(file, clk_debugfs_show, NULL); 195 return single_open(file, clk_debugfs_show, NULL);
196} 196}
197 197
198static struct file_operations clk_debugfs_operations = { 198static const struct file_operations clk_debugfs_operations = {
199 .open = clk_debugfs_open, 199 .open = clk_debugfs_open,
200 .read = seq_read, 200 .read = seq_read,
201 .llseek = seq_lseek, 201 .llseek = seq_lseek,
diff --git a/arch/arm/mach-omap1/id.c b/arch/arm/mach-omap1/id.c
index 4ef26faf083e..e5dcdf764c91 100644
--- a/arch/arm/mach-omap1/id.c
+++ b/arch/arm/mach-omap1/id.c
@@ -38,7 +38,7 @@ static struct omap_id omap_ids[] __initdata = {
38 { .jtag_id = 0xb574, .die_rev = 0x2, .omap_id = 0x03310315, .type = 0x03100000}, 38 { .jtag_id = 0xb574, .die_rev = 0x2, .omap_id = 0x03310315, .type = 0x03100000},
39 { .jtag_id = 0x355f, .die_rev = 0x0, .omap_id = 0x03320000, .type = 0x07300100}, 39 { .jtag_id = 0x355f, .die_rev = 0x0, .omap_id = 0x03320000, .type = 0x07300100},
40 { .jtag_id = 0xb55f, .die_rev = 0x0, .omap_id = 0x03320000, .type = 0x07300300}, 40 { .jtag_id = 0xb55f, .die_rev = 0x0, .omap_id = 0x03320000, .type = 0x07300300},
41 { .jtag_id = 0xb55f, .die_rev = 0x0, .omap_id = 0x03320500, .type = 0x08500000}, 41 { .jtag_id = 0xb62c, .die_rev = 0x1, .omap_id = 0x03320500, .type = 0x08500000},
42 { .jtag_id = 0xb470, .die_rev = 0x0, .omap_id = 0x03310100, .type = 0x15100000}, 42 { .jtag_id = 0xb470, .die_rev = 0x0, .omap_id = 0x03310100, .type = 0x15100000},
43 { .jtag_id = 0xb576, .die_rev = 0x0, .omap_id = 0x03320000, .type = 0x16100000}, 43 { .jtag_id = 0xb576, .die_rev = 0x0, .omap_id = 0x03320000, .type = 0x16100000},
44 { .jtag_id = 0xb576, .die_rev = 0x2, .omap_id = 0x03320100, .type = 0x16110000}, 44 { .jtag_id = 0xb576, .die_rev = 0x2, .omap_id = 0x03320100, .type = 0x16110000},
diff --git a/arch/arm/mach-omap2/board-3430sdp.c b/arch/arm/mach-omap2/board-3430sdp.c
index bd57ec76dc5e..efaf053eba85 100644
--- a/arch/arm/mach-omap2/board-3430sdp.c
+++ b/arch/arm/mach-omap2/board-3430sdp.c
@@ -54,7 +54,7 @@
54 54
55#define TWL4030_MSECURE_GPIO 22 55#define TWL4030_MSECURE_GPIO 22
56 56
57static int sdp3430_keymap[] = { 57static int board_keymap[] = {
58 KEY(0, 0, KEY_LEFT), 58 KEY(0, 0, KEY_LEFT),
59 KEY(0, 1, KEY_RIGHT), 59 KEY(0, 1, KEY_RIGHT),
60 KEY(0, 2, KEY_A), 60 KEY(0, 2, KEY_A),
@@ -88,11 +88,15 @@ static int sdp3430_keymap[] = {
88 0 88 0
89}; 89};
90 90
91static struct matrix_keymap_data board_map_data = {
92 .keymap = board_keymap,
93 .keymap_size = ARRAY_SIZE(board_keymap),
94};
95
91static struct twl4030_keypad_data sdp3430_kp_data = { 96static struct twl4030_keypad_data sdp3430_kp_data = {
97 .keymap_data = &board_map_data,
92 .rows = 5, 98 .rows = 5,
93 .cols = 6, 99 .cols = 6,
94 .keymap = sdp3430_keymap,
95 .keymapsize = ARRAY_SIZE(sdp3430_keymap),
96 .rep = 1, 100 .rep = 1,
97}; 101};
98 102
diff --git a/arch/arm/mach-omap2/board-ldp.c b/arch/arm/mach-omap2/board-ldp.c
index ec6854cbdd9f..d110a7fdfbd8 100644
--- a/arch/arm/mach-omap2/board-ldp.c
+++ b/arch/arm/mach-omap2/board-ldp.c
@@ -80,7 +80,7 @@ static struct platform_device ldp_smsc911x_device = {
80 }, 80 },
81}; 81};
82 82
83static int ldp_twl4030_keymap[] = { 83static int board_keymap[] = {
84 KEY(0, 0, KEY_1), 84 KEY(0, 0, KEY_1),
85 KEY(1, 0, KEY_2), 85 KEY(1, 0, KEY_2),
86 KEY(2, 0, KEY_3), 86 KEY(2, 0, KEY_3),
@@ -101,11 +101,15 @@ static int ldp_twl4030_keymap[] = {
101 0 101 0
102}; 102};
103 103
104static struct matrix_keymap_data board_map_data = {
105 .keymap = board_keymap,
106 .keymap_size = ARRAY_SIZE(board_keymap),
107};
108
104static struct twl4030_keypad_data ldp_kp_twl4030_data = { 109static struct twl4030_keypad_data ldp_kp_twl4030_data = {
110 .keymap_data = &board_map_data,
105 .rows = 6, 111 .rows = 6,
106 .cols = 6, 112 .cols = 6,
107 .keymap = ldp_twl4030_keymap,
108 .keymapsize = ARRAY_SIZE(ldp_twl4030_keymap),
109 .rep = 1, 113 .rep = 1,
110}; 114};
111 115
diff --git a/arch/arm/mach-omap2/board-omap3beagle.c b/arch/arm/mach-omap2/board-omap3beagle.c
index 500c9956876d..70df6b4dbcd4 100644
--- a/arch/arm/mach-omap2/board-omap3beagle.c
+++ b/arch/arm/mach-omap2/board-omap3beagle.c
@@ -139,8 +139,13 @@ static struct gpio_led gpio_leds[];
139static int beagle_twl_gpio_setup(struct device *dev, 139static int beagle_twl_gpio_setup(struct device *dev,
140 unsigned gpio, unsigned ngpio) 140 unsigned gpio, unsigned ngpio)
141{ 141{
142 if (system_rev >= 0x20 && system_rev <= 0x34301000) {
143 omap_cfg_reg(AG9_34XX_GPIO23);
144 mmc[0].gpio_wp = 23;
145 } else {
146 omap_cfg_reg(AH8_34XX_GPIO29);
147 }
142 /* gpio + 0 is "mmc0_cd" (input/IRQ) */ 148 /* gpio + 0 is "mmc0_cd" (input/IRQ) */
143 omap_cfg_reg(AH8_34XX_GPIO29);
144 mmc[0].gpio_cd = gpio + 0; 149 mmc[0].gpio_cd = gpio + 0;
145 twl4030_mmc_init(mmc); 150 twl4030_mmc_init(mmc);
146 151
diff --git a/arch/arm/mach-omap2/board-omap3evm.c b/arch/arm/mach-omap2/board-omap3evm.c
index d50b9be90580..e4ec0c591216 100644
--- a/arch/arm/mach-omap2/board-omap3evm.c
+++ b/arch/arm/mach-omap2/board-omap3evm.c
@@ -159,7 +159,7 @@ static struct twl4030_usb_data omap3evm_usb_data = {
159 .usb_mode = T2_USB_MODE_ULPI, 159 .usb_mode = T2_USB_MODE_ULPI,
160}; 160};
161 161
162static int omap3evm_keymap[] = { 162static int board_keymap[] = {
163 KEY(0, 0, KEY_LEFT), 163 KEY(0, 0, KEY_LEFT),
164 KEY(0, 1, KEY_RIGHT), 164 KEY(0, 1, KEY_RIGHT),
165 KEY(0, 2, KEY_A), 165 KEY(0, 2, KEY_A),
@@ -178,11 +178,15 @@ static int omap3evm_keymap[] = {
178 KEY(3, 3, KEY_P) 178 KEY(3, 3, KEY_P)
179}; 179};
180 180
181static struct matrix_keymap_data board_map_data = {
182 .keymap = board_keymap,
183 .keymap_size = ARRAY_SIZE(board_keymap),
184};
185
181static struct twl4030_keypad_data omap3evm_kp_data = { 186static struct twl4030_keypad_data omap3evm_kp_data = {
187 .keymap_data = &board_map_data,
182 .rows = 4, 188 .rows = 4,
183 .cols = 4, 189 .cols = 4,
184 .keymap = omap3evm_keymap,
185 .keymapsize = ARRAY_SIZE(omap3evm_keymap),
186 .rep = 1, 190 .rep = 1,
187}; 191};
188 192
diff --git a/arch/arm/mach-omap2/board-omap3pandora.c b/arch/arm/mach-omap2/board-omap3pandora.c
index b43f6e36b6d9..7f6bf8772af7 100644
--- a/arch/arm/mach-omap2/board-omap3pandora.c
+++ b/arch/arm/mach-omap2/board-omap3pandora.c
@@ -133,7 +133,7 @@ static void __init pandora_keys_gpio_init(void)
133 omap_set_gpio_debounce_time(32 * 5, GPIO_DEBOUNCE_TIME); 133 omap_set_gpio_debounce_time(32 * 5, GPIO_DEBOUNCE_TIME);
134} 134}
135 135
136static int pandora_keypad_map[] = { 136static int board_keymap[] = {
137 /* col, row, code */ 137 /* col, row, code */
138 KEY(0, 0, KEY_9), 138 KEY(0, 0, KEY_9),
139 KEY(0, 1, KEY_0), 139 KEY(0, 1, KEY_0),
@@ -180,11 +180,15 @@ static int pandora_keypad_map[] = {
180 KEY(5, 2, KEY_FN), 180 KEY(5, 2, KEY_FN),
181}; 181};
182 182
183static struct matrix_keymap_data board_map_data = {
184 .keymap = board_keymap,
185 .keymap_size = ARRAY_SIZE(board_keymap),
186};
187
183static struct twl4030_keypad_data pandora_kp_data = { 188static struct twl4030_keypad_data pandora_kp_data = {
189 .keymap_data = &board_map_data,
184 .rows = 8, 190 .rows = 8,
185 .cols = 6, 191 .cols = 6,
186 .keymap = pandora_keypad_map,
187 .keymapsize = ARRAY_SIZE(pandora_keypad_map),
188 .rep = 1, 192 .rep = 1,
189}; 193};
190 194
diff --git a/arch/arm/mach-omap2/board-rx51-peripherals.c b/arch/arm/mach-omap2/board-rx51-peripherals.c
index e6e8290b7828..b45ad312c587 100644
--- a/arch/arm/mach-omap2/board-rx51-peripherals.c
+++ b/arch/arm/mach-omap2/board-rx51-peripherals.c
@@ -36,7 +36,7 @@
36#define SYSTEM_REV_B_USES_VAUX3 0x1699 36#define SYSTEM_REV_B_USES_VAUX3 0x1699
37#define SYSTEM_REV_S_USES_VAUX3 0x8 37#define SYSTEM_REV_S_USES_VAUX3 0x8
38 38
39static int rx51_keymap[] = { 39static int board_keymap[] = {
40 KEY(0, 0, KEY_Q), 40 KEY(0, 0, KEY_Q),
41 KEY(0, 1, KEY_W), 41 KEY(0, 1, KEY_W),
42 KEY(0, 2, KEY_E), 42 KEY(0, 2, KEY_E),
@@ -83,11 +83,15 @@ static int rx51_keymap[] = {
83 KEY(0xff, 5, KEY_F10), 83 KEY(0xff, 5, KEY_F10),
84}; 84};
85 85
86static struct matrix_keymap_data board_map_data = {
87 .keymap = board_keymap,
88 .keymap_size = ARRAY_SIZE(board_keymap),
89};
90
86static struct twl4030_keypad_data rx51_kp_data = { 91static struct twl4030_keypad_data rx51_kp_data = {
92 .keymap_data = &board_map_data,
87 .rows = 8, 93 .rows = 8,
88 .cols = 8, 94 .cols = 8,
89 .keymap = rx51_keymap,
90 .keymapsize = ARRAY_SIZE(rx51_keymap),
91 .rep = 1, 95 .rep = 1,
92}; 96};
93 97
diff --git a/arch/arm/mach-omap2/board-zoom2.c b/arch/arm/mach-omap2/board-zoom2.c
index 324009edbd53..b7b32208ced7 100644
--- a/arch/arm/mach-omap2/board-zoom2.c
+++ b/arch/arm/mach-omap2/board-zoom2.c
@@ -27,7 +27,7 @@
27#include "mmc-twl4030.h" 27#include "mmc-twl4030.h"
28 28
29/* Zoom2 has Qwerty keyboard*/ 29/* Zoom2 has Qwerty keyboard*/
30static int zoom2_twl4030_keymap[] = { 30static int board_keymap[] = {
31 KEY(0, 0, KEY_E), 31 KEY(0, 0, KEY_E),
32 KEY(1, 0, KEY_R), 32 KEY(1, 0, KEY_R),
33 KEY(2, 0, KEY_T), 33 KEY(2, 0, KEY_T),
@@ -82,11 +82,15 @@ static int zoom2_twl4030_keymap[] = {
82 0 82 0
83}; 83};
84 84
85static struct matrix_keymap_data board_map_data = {
86 .keymap = board_keymap,
87 .keymap_size = ARRAY_SIZE(board_keymap),
88};
89
85static struct twl4030_keypad_data zoom2_kp_twl4030_data = { 90static struct twl4030_keypad_data zoom2_kp_twl4030_data = {
91 .keymap_data = &board_map_data,
86 .rows = 8, 92 .rows = 8,
87 .cols = 8, 93 .cols = 8,
88 .keymap = zoom2_twl4030_keymap,
89 .keymapsize = ARRAY_SIZE(zoom2_twl4030_keymap),
90 .rep = 1, 94 .rep = 1,
91}; 95};
92 96
diff --git a/arch/arm/mach-omap2/cm4xxx.c b/arch/arm/mach-omap2/cm4xxx.c
index e4ebd6d53135..4af76bb1003a 100644
--- a/arch/arm/mach-omap2/cm4xxx.c
+++ b/arch/arm/mach-omap2/cm4xxx.c
@@ -22,7 +22,6 @@
22#include <asm/atomic.h> 22#include <asm/atomic.h>
23 23
24#include "cm.h" 24#include "cm.h"
25#include "cm-regbits-4xxx.h"
26 25
27/* XXX move this to cm.h */ 26/* XXX move this to cm.h */
28/* MAX_MODULE_READY_TIME: max milliseconds for module to leave idle */ 27/* MAX_MODULE_READY_TIME: max milliseconds for module to leave idle */
@@ -50,19 +49,7 @@
50 */ 49 */
51int omap4_cm_wait_idlest_ready(u32 prcm_mod, u8 prcm_dev_offs) 50int omap4_cm_wait_idlest_ready(u32 prcm_mod, u8 prcm_dev_offs)
52{ 51{
53 int i = 0; 52 /* FIXME: Add clock manager related code */
54 u8 cm_id; 53 return 0;
55 u16 prcm_mod_offs;
56 u32 mask = OMAP4_PRCM_CM_CLKCTRL_IDLEST_MASK;
57
58 cm_id = prcm_mod >> OMAP4_PRCM_MOD_CM_ID_SHIFT;
59 prcm_mod_offs = prcm_mod & OMAP4_PRCM_MOD_OFFS_MASK;
60
61 while (((omap4_cm_read_mod_reg(cm_id, prcm_mod_offs, prcm_dev_offs,
62 OMAP4_CM_CLKCTRL_DREG) & mask) != 0) &&
63 (i++ < MAX_MODULE_READY_TIME))
64 udelay(1);
65
66 return (i < MAX_MODULE_READY_TIME) ? 0 : -EBUSY;
67} 54}
68 55
diff --git a/arch/arm/mach-omap2/devices.c b/arch/arm/mach-omap2/devices.c
index bcfcfc7fdb9b..faf7a1e0c525 100644
--- a/arch/arm/mach-omap2/devices.c
+++ b/arch/arm/mach-omap2/devices.c
@@ -355,29 +355,60 @@ static struct platform_device omap2_mcspi4 = {
355}; 355};
356#endif 356#endif
357 357
358static void omap_init_mcspi(void) 358#ifdef CONFIG_ARCH_OMAP4
359static inline void omap4_mcspi_fixup(void)
359{ 360{
360 if (cpu_is_omap44xx()) { 361 omap2_mcspi1_resources[0].start = OMAP4_MCSPI1_BASE;
361 omap2_mcspi1_resources[0].start = OMAP4_MCSPI1_BASE; 362 omap2_mcspi1_resources[0].end = OMAP4_MCSPI1_BASE + 0xff;
362 omap2_mcspi1_resources[0].end = OMAP4_MCSPI1_BASE + 0xff; 363 omap2_mcspi2_resources[0].start = OMAP4_MCSPI2_BASE;
363 omap2_mcspi2_resources[0].start = OMAP4_MCSPI2_BASE; 364 omap2_mcspi2_resources[0].end = OMAP4_MCSPI2_BASE + 0xff;
364 omap2_mcspi2_resources[0].end = OMAP4_MCSPI2_BASE + 0xff; 365 omap2_mcspi3_resources[0].start = OMAP4_MCSPI3_BASE;
365 omap2_mcspi3_resources[0].start = OMAP4_MCSPI3_BASE; 366 omap2_mcspi3_resources[0].end = OMAP4_MCSPI3_BASE + 0xff;
366 omap2_mcspi3_resources[0].end = OMAP4_MCSPI3_BASE + 0xff; 367 omap2_mcspi4_resources[0].start = OMAP4_MCSPI4_BASE;
367 omap2_mcspi4_resources[0].start = OMAP4_MCSPI4_BASE; 368 omap2_mcspi4_resources[0].end = OMAP4_MCSPI4_BASE + 0xff;
368 omap2_mcspi4_resources[0].end = OMAP4_MCSPI4_BASE + 0xff; 369}
369 } 370#else
370 platform_device_register(&omap2_mcspi1); 371static inline void omap4_mcspi_fixup(void)
371 platform_device_register(&omap2_mcspi2); 372{
373}
374#endif
375
372#if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3) || \ 376#if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3) || \
373 defined(CONFIG_ARCH_OMAP4) 377 defined(CONFIG_ARCH_OMAP4)
374 if (cpu_is_omap2430() || cpu_is_omap343x() || cpu_is_omap44xx()) 378static inline void omap2_mcspi3_init(void)
375 platform_device_register(&omap2_mcspi3); 379{
380 platform_device_register(&omap2_mcspi3);
381}
382#else
383static inline void omap2_mcspi3_init(void)
384{
385}
376#endif 386#endif
387
377#if defined(CONFIG_ARCH_OMAP3) || defined(CONFIG_ARCH_OMAP4) 388#if defined(CONFIG_ARCH_OMAP3) || defined(CONFIG_ARCH_OMAP4)
378 if (cpu_is_omap343x() || cpu_is_omap44xx()) 389static inline void omap2_mcspi4_init(void)
379 platform_device_register(&omap2_mcspi4); 390{
391 platform_device_register(&omap2_mcspi4);
392}
393#else
394static inline void omap2_mcspi4_init(void)
395{
396}
380#endif 397#endif
398
399static void omap_init_mcspi(void)
400{
401 if (cpu_is_omap44xx())
402 omap4_mcspi_fixup();
403
404 platform_device_register(&omap2_mcspi1);
405 platform_device_register(&omap2_mcspi2);
406
407 if (cpu_is_omap2430() || cpu_is_omap343x() || cpu_is_omap44xx())
408 omap2_mcspi3_init();
409
410 if (cpu_is_omap343x() || cpu_is_omap44xx())
411 omap2_mcspi4_init();
381} 412}
382 413
383#else 414#else
diff --git a/arch/arm/mach-omap2/io.c b/arch/arm/mach-omap2/io.c
index 7574b6f20e8e..e3a3bad1d84f 100644
--- a/arch/arm/mach-omap2/io.c
+++ b/arch/arm/mach-omap2/io.c
@@ -294,10 +294,10 @@ void __init omap2_init_common_hw(struct omap_sdrc_params *sdrc_cs0,
294 else if (cpu_is_omap34xx()) 294 else if (cpu_is_omap34xx())
295 hwmods = omap34xx_hwmods; 295 hwmods = omap34xx_hwmods;
296 296
297 omap_hwmod_init(hwmods);
298 omap2_mux_init();
299#ifndef CONFIG_ARCH_OMAP4 /* FIXME: Remove this once the clkdev is ready */ 297#ifndef CONFIG_ARCH_OMAP4 /* FIXME: Remove this once the clkdev is ready */
300 /* The OPP tables have to be registered before a clk init */ 298 /* The OPP tables have to be registered before a clk init */
299 omap_hwmod_init(hwmods);
300 omap2_mux_init();
301 omap_pm_if_early_init(mpu_opps, dsp_opps, l3_opps); 301 omap_pm_if_early_init(mpu_opps, dsp_opps, l3_opps);
302 pwrdm_init(powerdomains_omap); 302 pwrdm_init(powerdomains_omap);
303 clkdm_init(clockdomains_omap, clkdm_pwrdm_autodeps); 303 clkdm_init(clockdomains_omap, clkdm_pwrdm_autodeps);
diff --git a/arch/arm/mach-omap2/iommu2.c b/arch/arm/mach-omap2/iommu2.c
index 2d9b5cc981cd..4a0e1cd5c1f4 100644
--- a/arch/arm/mach-omap2/iommu2.c
+++ b/arch/arm/mach-omap2/iommu2.c
@@ -79,7 +79,7 @@ static int omap2_iommu_enable(struct iommu *obj)
79 l = iommu_read_reg(obj, MMU_SYSSTATUS); 79 l = iommu_read_reg(obj, MMU_SYSSTATUS);
80 if (l & MMU_SYS_RESETDONE) 80 if (l & MMU_SYS_RESETDONE)
81 break; 81 break;
82 } while (time_after(jiffies, timeout)); 82 } while (!time_after(jiffies, timeout));
83 83
84 if (!(l & MMU_SYS_RESETDONE)) { 84 if (!(l & MMU_SYS_RESETDONE)) {
85 dev_err(obj->dev, "can't take mmu out of reset\n"); 85 dev_err(obj->dev, "can't take mmu out of reset\n");
diff --git a/arch/arm/mach-omap2/mailbox.c b/arch/arm/mach-omap2/mailbox.c
index 6f71f3730c97..c035ad3426d0 100644
--- a/arch/arm/mach-omap2/mailbox.c
+++ b/arch/arm/mach-omap2/mailbox.c
@@ -30,6 +30,14 @@
30#define MAILBOX_IRQ_NEWMSG(u) (1 << (2 * (u))) 30#define MAILBOX_IRQ_NEWMSG(u) (1 << (2 * (u)))
31#define MAILBOX_IRQ_NOTFULL(u) (1 << (2 * (u) + 1)) 31#define MAILBOX_IRQ_NOTFULL(u) (1 << (2 * (u) + 1))
32 32
33/* SYSCONFIG: register bit definition */
34#define AUTOIDLE (1 << 0)
35#define SOFTRESET (1 << 1)
36#define SMARTIDLE (2 << 3)
37
38/* SYSSTATUS: register bit definition */
39#define RESETDONE (1 << 0)
40
33#define MBOX_REG_SIZE 0x120 41#define MBOX_REG_SIZE 0x120
34#define MBOX_NR_REGS (MBOX_REG_SIZE / sizeof(u32)) 42#define MBOX_NR_REGS (MBOX_REG_SIZE / sizeof(u32))
35 43
@@ -69,21 +77,33 @@ static inline void mbox_write_reg(u32 val, size_t ofs)
69/* Mailbox H/W preparations */ 77/* Mailbox H/W preparations */
70static int omap2_mbox_startup(struct omap_mbox *mbox) 78static int omap2_mbox_startup(struct omap_mbox *mbox)
71{ 79{
72 unsigned int l; 80 u32 l;
81 unsigned long timeout;
73 82
74 mbox_ick_handle = clk_get(NULL, "mailboxes_ick"); 83 mbox_ick_handle = clk_get(NULL, "mailboxes_ick");
75 if (IS_ERR(mbox_ick_handle)) { 84 if (IS_ERR(mbox_ick_handle)) {
76 printk("Could not get mailboxes_ick\n"); 85 pr_err("Can't get mailboxes_ick\n");
77 return -ENODEV; 86 return -ENODEV;
78 } 87 }
79 clk_enable(mbox_ick_handle); 88 clk_enable(mbox_ick_handle);
80 89
90 mbox_write_reg(SOFTRESET, MAILBOX_SYSCONFIG);
91 timeout = jiffies + msecs_to_jiffies(20);
92 do {
93 l = mbox_read_reg(MAILBOX_SYSSTATUS);
94 if (l & RESETDONE)
95 break;
96 } while (!time_after(jiffies, timeout));
97
98 if (!(l & RESETDONE)) {
99 pr_err("Can't take mmu out of reset\n");
100 return -ENODEV;
101 }
102
81 l = mbox_read_reg(MAILBOX_REVISION); 103 l = mbox_read_reg(MAILBOX_REVISION);
82 pr_info("omap mailbox rev %d.%d\n", (l & 0xf0) >> 4, (l & 0x0f)); 104 pr_info("omap mailbox rev %d.%d\n", (l & 0xf0) >> 4, (l & 0x0f));
83 105
84 /* set smart-idle & autoidle */ 106 l = SMARTIDLE | AUTOIDLE;
85 l = mbox_read_reg(MAILBOX_SYSCONFIG);
86 l |= 0x00000011;
87 mbox_write_reg(l, MAILBOX_SYSCONFIG); 107 mbox_write_reg(l, MAILBOX_SYSCONFIG);
88 108
89 omap2_mbox_enable_irq(mbox, IRQ_RX); 109 omap2_mbox_enable_irq(mbox, IRQ_RX);
@@ -156,6 +176,9 @@ static void omap2_mbox_ack_irq(struct omap_mbox *mbox,
156 u32 bit = (irq == IRQ_TX) ? p->notfull_bit : p->newmsg_bit; 176 u32 bit = (irq == IRQ_TX) ? p->notfull_bit : p->newmsg_bit;
157 177
158 mbox_write_reg(bit, p->irqstatus); 178 mbox_write_reg(bit, p->irqstatus);
179
180 /* Flush posted write for irq status to avoid spurious interrupts */
181 mbox_read_reg(p->irqstatus);
159} 182}
160 183
161static int omap2_mbox_is_irq(struct omap_mbox *mbox, 184static int omap2_mbox_is_irq(struct omap_mbox *mbox,
diff --git a/arch/arm/mach-omap2/mux.c b/arch/arm/mach-omap2/mux.c
index 2daa595aaff4..b5fac32aae70 100644
--- a/arch/arm/mach-omap2/mux.c
+++ b/arch/arm/mach-omap2/mux.c
@@ -460,6 +460,8 @@ MUX_CFG_34XX("AF26_34XX_GPIO0", 0x1e0,
460 OMAP34XX_MUX_MODE4 | OMAP34XX_PIN_INPUT) 460 OMAP34XX_MUX_MODE4 | OMAP34XX_PIN_INPUT)
461MUX_CFG_34XX("AF22_34XX_GPIO9", 0xa18, 461MUX_CFG_34XX("AF22_34XX_GPIO9", 0xa18,
462 OMAP34XX_MUX_MODE4 | OMAP34XX_PIN_INPUT) 462 OMAP34XX_MUX_MODE4 | OMAP34XX_PIN_INPUT)
463MUX_CFG_34XX("AG9_34XX_GPIO23", 0x5ee,
464 OMAP34XX_MUX_MODE4 | OMAP34XX_PIN_INPUT)
463MUX_CFG_34XX("AH8_34XX_GPIO29", 0x5fa, 465MUX_CFG_34XX("AH8_34XX_GPIO29", 0x5fa,
464 OMAP34XX_MUX_MODE4 | OMAP34XX_PIN_INPUT) 466 OMAP34XX_MUX_MODE4 | OMAP34XX_PIN_INPUT)
465MUX_CFG_34XX("U8_34XX_GPIO54_OUT", 0x0b4, 467MUX_CFG_34XX("U8_34XX_GPIO54_OUT", 0x0b4,
@@ -472,6 +474,8 @@ MUX_CFG_34XX("G25_34XX_GPIO86_OUT", 0x0fc,
472 OMAP34XX_MUX_MODE4 | OMAP34XX_PIN_OUTPUT) 474 OMAP34XX_MUX_MODE4 | OMAP34XX_PIN_OUTPUT)
473MUX_CFG_34XX("AG4_34XX_GPIO134_OUT", 0x160, 475MUX_CFG_34XX("AG4_34XX_GPIO134_OUT", 0x160,
474 OMAP34XX_MUX_MODE4 | OMAP34XX_PIN_OUTPUT) 476 OMAP34XX_MUX_MODE4 | OMAP34XX_PIN_OUTPUT)
477MUX_CFG_34XX("AF4_34XX_GPIO135_OUT", 0x162,
478 OMAP34XX_MUX_MODE4 | OMAP34XX_PIN_OUTPUT)
475MUX_CFG_34XX("AE4_34XX_GPIO136_OUT", 0x164, 479MUX_CFG_34XX("AE4_34XX_GPIO136_OUT", 0x164,
476 OMAP34XX_MUX_MODE4 | OMAP34XX_PIN_OUTPUT) 480 OMAP34XX_MUX_MODE4 | OMAP34XX_PIN_OUTPUT)
477MUX_CFG_34XX("AF6_34XX_GPIO140_UP", 0x16c, 481MUX_CFG_34XX("AF6_34XX_GPIO140_UP", 0x16c,
diff --git a/arch/arm/mach-omap2/serial.c b/arch/arm/mach-omap2/serial.c
index 3a529c77daa8..ae2186892c85 100644
--- a/arch/arm/mach-omap2/serial.c
+++ b/arch/arm/mach-omap2/serial.c
@@ -110,7 +110,7 @@ static struct plat_serial8250_port serial_platform_data2[] = {
110 .uartclk = OMAP24XX_BASE_BAUD * 16, 110 .uartclk = OMAP24XX_BASE_BAUD * 16,
111 }, { 111 }, {
112#ifdef CONFIG_ARCH_OMAP4 112#ifdef CONFIG_ARCH_OMAP4
113 .membase = IO_ADDRESS(OMAP_UART4_BASE), 113 .membase = OMAP2_IO_ADDRESS(OMAP_UART4_BASE),
114 .mapbase = OMAP_UART4_BASE, 114 .mapbase = OMAP_UART4_BASE,
115 .irq = 70, 115 .irq = 70,
116 .flags = UPF_BOOT_AUTOCONF, 116 .flags = UPF_BOOT_AUTOCONF,
@@ -126,7 +126,7 @@ static struct plat_serial8250_port serial_platform_data2[] = {
126#ifdef CONFIG_ARCH_OMAP4 126#ifdef CONFIG_ARCH_OMAP4
127static struct plat_serial8250_port serial_platform_data3[] = { 127static struct plat_serial8250_port serial_platform_data3[] = {
128 { 128 {
129 .membase = IO_ADDRESS(OMAP_UART4_BASE), 129 .membase = OMAP2_IO_ADDRESS(OMAP_UART4_BASE),
130 .mapbase = OMAP_UART4_BASE, 130 .mapbase = OMAP_UART4_BASE,
131 .irq = 70, 131 .irq = 70,
132 .flags = UPF_BOOT_AUTOCONF, 132 .flags = UPF_BOOT_AUTOCONF,
@@ -579,7 +579,7 @@ static struct omap_uart_state omap_uart[OMAP_MAX_NR_PORTS] = {
579 { 579 {
580 .pdev = { 580 .pdev = {
581 .name = "serial8250", 581 .name = "serial8250",
582 .id = 3 582 .id = 3,
583 .dev = { 583 .dev = {
584 .platform_data = serial_platform_data3, 584 .platform_data = serial_platform_data3,
585 }, 585 },
diff --git a/arch/arm/plat-omap/gpio.c b/arch/arm/plat-omap/gpio.c
index 693839c89ad0..71ebd7fcfea1 100644
--- a/arch/arm/plat-omap/gpio.c
+++ b/arch/arm/plat-omap/gpio.c
@@ -250,7 +250,7 @@ static struct gpio_bank gpio_bank_730[7] = {
250 250
251#ifdef CONFIG_ARCH_OMAP850 251#ifdef CONFIG_ARCH_OMAP850
252static struct gpio_bank gpio_bank_850[7] = { 252static struct gpio_bank gpio_bank_850[7] = {
253 { OMAP1_MPUIO_BASE, INT_850_MPUIO, IH_MPUIO_BASE, METHOD_MPUIO }, 253 { OMAP1_MPUIO_VBASE, INT_850_MPUIO, IH_MPUIO_BASE, METHOD_MPUIO },
254 { OMAP850_GPIO1_BASE, INT_850_GPIO_BANK1, IH_GPIO_BASE, METHOD_GPIO_850 }, 254 { OMAP850_GPIO1_BASE, INT_850_GPIO_BANK1, IH_GPIO_BASE, METHOD_GPIO_850 },
255 { OMAP850_GPIO2_BASE, INT_850_GPIO_BANK2, IH_GPIO_BASE + 32, METHOD_GPIO_850 }, 255 { OMAP850_GPIO2_BASE, INT_850_GPIO_BANK2, IH_GPIO_BASE + 32, METHOD_GPIO_850 },
256 { OMAP850_GPIO3_BASE, INT_850_GPIO_BANK3, IH_GPIO_BASE + 64, METHOD_GPIO_850 }, 256 { OMAP850_GPIO3_BASE, INT_850_GPIO_BANK3, IH_GPIO_BASE + 64, METHOD_GPIO_850 },
diff --git a/arch/arm/plat-omap/include/mach/keypad.h b/arch/arm/plat-omap/include/mach/keypad.h
index 45ea3ae3c995..d91b9be334ff 100644
--- a/arch/arm/plat-omap/include/mach/keypad.h
+++ b/arch/arm/plat-omap/include/mach/keypad.h
@@ -10,6 +10,8 @@
10#ifndef ASMARM_ARCH_KEYPAD_H 10#ifndef ASMARM_ARCH_KEYPAD_H
11#define ASMARM_ARCH_KEYPAD_H 11#define ASMARM_ARCH_KEYPAD_H
12 12
13#include <linux/input/matrix_keypad.h>
14
13struct omap_kp_platform_data { 15struct omap_kp_platform_data {
14 int rows; 16 int rows;
15 int cols; 17 int cols;
@@ -35,9 +37,6 @@ struct omap_kp_platform_data {
35 37
36#define KEY_PERSISTENT 0x00800000 38#define KEY_PERSISTENT 0x00800000
37#define KEYNUM_MASK 0x00EFFFFF 39#define KEYNUM_MASK 0x00EFFFFF
38#define KEY(col, row, val) (((col) << 28) | ((row) << 24) | (val))
39#define PERSISTENT_KEY(col, row) (((col) << 28) | ((row) << 24) | \
40 KEY_PERSISTENT)
41 40
42#endif 41#endif
43 42
diff --git a/arch/arm/plat-omap/include/mach/mux.h b/arch/arm/plat-omap/include/mach/mux.h
index 98dfab651dfc..0f49d2d563d9 100644
--- a/arch/arm/plat-omap/include/mach/mux.h
+++ b/arch/arm/plat-omap/include/mach/mux.h
@@ -840,12 +840,14 @@ enum omap34xx_index {
840 */ 840 */
841 AF26_34XX_GPIO0, 841 AF26_34XX_GPIO0,
842 AF22_34XX_GPIO9, 842 AF22_34XX_GPIO9,
843 AG9_34XX_GPIO23,
843 AH8_34XX_GPIO29, 844 AH8_34XX_GPIO29,
844 U8_34XX_GPIO54_OUT, 845 U8_34XX_GPIO54_OUT,
845 U8_34XX_GPIO54_DOWN, 846 U8_34XX_GPIO54_DOWN,
846 L8_34XX_GPIO63, 847 L8_34XX_GPIO63,
847 G25_34XX_GPIO86_OUT, 848 G25_34XX_GPIO86_OUT,
848 AG4_34XX_GPIO134_OUT, 849 AG4_34XX_GPIO134_OUT,
850 AF4_34XX_GPIO135_OUT,
849 AE4_34XX_GPIO136_OUT, 851 AE4_34XX_GPIO136_OUT,
850 AF6_34XX_GPIO140_UP, 852 AF6_34XX_GPIO140_UP,
851 AE6_34XX_GPIO141, 853 AE6_34XX_GPIO141,
diff --git a/arch/arm/plat-omap/iovmm.c b/arch/arm/plat-omap/iovmm.c
index 6fc52fcbdc03..57f7122a0919 100644
--- a/arch/arm/plat-omap/iovmm.c
+++ b/arch/arm/plat-omap/iovmm.c
@@ -199,7 +199,8 @@ static void *vmap_sg(const struct sg_table *sgt)
199 va += bytes; 199 va += bytes;
200 } 200 }
201 201
202 flush_cache_vmap(new->addr, new->addr + total); 202 flush_cache_vmap((unsigned long)new->addr,
203 (unsigned long)(new->addr + total));
203 return new->addr; 204 return new->addr;
204 205
205err_out: 206err_out:
@@ -390,7 +391,7 @@ static void sgtable_fill_vmalloc(struct sg_table *sgt, void *_va)
390 } 391 }
391 392
392 va_end = _va + PAGE_SIZE * i; 393 va_end = _va + PAGE_SIZE * i;
393 flush_cache_vmap(_va, va_end); 394 flush_cache_vmap((unsigned long)_va, (unsigned long)va_end);
394} 395}
395 396
396static inline void sgtable_drain_vmalloc(struct sg_table *sgt) 397static inline void sgtable_drain_vmalloc(struct sg_table *sgt)
diff --git a/arch/arm/plat-s3c24xx/include/plat/mci.h b/arch/arm/plat-s3c24xx/include/plat/mci.h
index 2d0852ac3b27..c2cef6139683 100644
--- a/arch/arm/plat-s3c24xx/include/plat/mci.h
+++ b/arch/arm/plat-s3c24xx/include/plat/mci.h
@@ -2,8 +2,11 @@
2#define _ARCH_MCI_H 2#define _ARCH_MCI_H
3 3
4struct s3c24xx_mci_pdata { 4struct s3c24xx_mci_pdata {
5 unsigned int no_wprotect : 1;
6 unsigned int no_detect : 1;
5 unsigned int wprotect_invert : 1; 7 unsigned int wprotect_invert : 1;
6 unsigned int detect_invert : 1; /* set => detect active high. */ 8 unsigned int detect_invert : 1; /* set => detect active high. */
9 unsigned int use_dma : 1;
7 10
8 unsigned int gpio_detect; 11 unsigned int gpio_detect;
9 unsigned int gpio_wprotect; 12 unsigned int gpio_wprotect;
diff --git a/arch/blackfin/mach-bf561/coreb.c b/arch/blackfin/mach-bf561/coreb.c
index 93635a766f9c..1e60a92dd602 100644
--- a/arch/blackfin/mach-bf561/coreb.c
+++ b/arch/blackfin/mach-bf561/coreb.c
@@ -48,7 +48,7 @@ coreb_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned l
48 return ret; 48 return ret;
49} 49}
50 50
51static struct file_operations coreb_fops = { 51static const struct file_operations coreb_fops = {
52 .owner = THIS_MODULE, 52 .owner = THIS_MODULE,
53 .ioctl = coreb_ioctl, 53 .ioctl = coreb_ioctl,
54}; 54};
diff --git a/arch/cris/arch-v10/drivers/sync_serial.c b/arch/cris/arch-v10/drivers/sync_serial.c
index 6cc1a0319a5d..562b9a7feae7 100644
--- a/arch/cris/arch-v10/drivers/sync_serial.c
+++ b/arch/cris/arch-v10/drivers/sync_serial.c
@@ -244,7 +244,7 @@ static unsigned sync_serial_prescale_shadow;
244 244
245#define NUMBER_OF_PORTS 2 245#define NUMBER_OF_PORTS 2
246 246
247static struct file_operations sync_serial_fops = { 247static const struct file_operations sync_serial_fops = {
248 .owner = THIS_MODULE, 248 .owner = THIS_MODULE,
249 .write = sync_serial_write, 249 .write = sync_serial_write,
250 .read = sync_serial_read, 250 .read = sync_serial_read,
diff --git a/arch/cris/arch-v32/drivers/mach-fs/gpio.c b/arch/cris/arch-v32/drivers/mach-fs/gpio.c
index fe1fde893887..d89ab80498ed 100644
--- a/arch/cris/arch-v32/drivers/mach-fs/gpio.c
+++ b/arch/cris/arch-v32/drivers/mach-fs/gpio.c
@@ -855,7 +855,7 @@ gpio_leds_ioctl(unsigned int cmd, unsigned long arg)
855 return 0; 855 return 0;
856} 856}
857 857
858struct file_operations gpio_fops = { 858static const struct file_operations gpio_fops = {
859 .owner = THIS_MODULE, 859 .owner = THIS_MODULE,
860 .poll = gpio_poll, 860 .poll = gpio_poll,
861 .ioctl = gpio_ioctl, 861 .ioctl = gpio_ioctl,
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 6851e52ed5a2..1ee596cd942f 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -60,9 +60,7 @@ config IOMMU_HELPER
60 bool 60 bool
61 61
62config GENERIC_LOCKBREAK 62config GENERIC_LOCKBREAK
63 bool 63 def_bool n
64 default y
65 depends on SMP && PREEMPT
66 64
67config RWSEM_XCHGADD_ALGORITHM 65config RWSEM_XCHGADD_ALGORITHM
68 bool 66 bool
diff --git a/arch/ia64/ia32/binfmt_elf32.c b/arch/ia64/ia32/binfmt_elf32.c
index f92bdaac8976..c69552bf893e 100644
--- a/arch/ia64/ia32/binfmt_elf32.c
+++ b/arch/ia64/ia32/binfmt_elf32.c
@@ -69,11 +69,11 @@ ia32_install_gate_page (struct vm_area_struct *vma, struct vm_fault *vmf)
69} 69}
70 70
71 71
72static struct vm_operations_struct ia32_shared_page_vm_ops = { 72static const struct vm_operations_struct ia32_shared_page_vm_ops = {
73 .fault = ia32_install_shared_page 73 .fault = ia32_install_shared_page
74}; 74};
75 75
76static struct vm_operations_struct ia32_gate_page_vm_ops = { 76static const struct vm_operations_struct ia32_gate_page_vm_ops = {
77 .fault = ia32_install_gate_page 77 .fault = ia32_install_gate_page
78}; 78};
79 79
diff --git a/arch/ia64/include/asm/acpi.h b/arch/ia64/include/asm/acpi.h
index 0f82cc2934e1..91df9686a0da 100644
--- a/arch/ia64/include/asm/acpi.h
+++ b/arch/ia64/include/asm/acpi.h
@@ -89,10 +89,12 @@ ia64_acpi_release_global_lock (unsigned int *lock)
89#define ACPI_RELEASE_GLOBAL_LOCK(facs, Acq) \ 89#define ACPI_RELEASE_GLOBAL_LOCK(facs, Acq) \
90 ((Acq) = ia64_acpi_release_global_lock(&facs->global_lock)) 90 ((Acq) = ia64_acpi_release_global_lock(&facs->global_lock))
91 91
92#ifdef CONFIG_ACPI
92#define acpi_disabled 0 /* ACPI always enabled on IA64 */ 93#define acpi_disabled 0 /* ACPI always enabled on IA64 */
93#define acpi_noirq 0 /* ACPI always enabled on IA64 */ 94#define acpi_noirq 0 /* ACPI always enabled on IA64 */
94#define acpi_pci_disabled 0 /* ACPI PCI always enabled on IA64 */ 95#define acpi_pci_disabled 0 /* ACPI PCI always enabled on IA64 */
95#define acpi_strict 1 /* no ACPI spec workarounds on IA64 */ 96#define acpi_strict 1 /* no ACPI spec workarounds on IA64 */
97#endif
96#define acpi_processor_cstate_check(x) (x) /* no idle limits on IA64 :) */ 98#define acpi_processor_cstate_check(x) (x) /* no idle limits on IA64 :) */
97static inline void disable_acpi(void) { } 99static inline void disable_acpi(void) { }
98 100
diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
index 13ab71576bc7..30bb930e1111 100644
--- a/arch/ia64/include/asm/spinlock.h
+++ b/arch/ia64/include/asm/spinlock.h
@@ -19,103 +19,106 @@
19 19
20#define __raw_spin_lock_init(x) ((x)->lock = 0) 20#define __raw_spin_lock_init(x) ((x)->lock = 0)
21 21
22#ifdef ASM_SUPPORTED
23/* 22/*
24 * Try to get the lock. If we fail to get the lock, make a non-standard call to 23 * Ticket locks are conceptually two parts, one indicating the current head of
25 * ia64_spinlock_contention(). We do not use a normal call because that would force all 24 * the queue, and the other indicating the current tail. The lock is acquired
26 * callers of __raw_spin_lock() to be non-leaf routines. Instead, ia64_spinlock_contention() is 25 * by atomically noting the tail and incrementing it by one (thus adding
27 * carefully coded to touch only those registers that __raw_spin_lock() marks "clobbered". 26 * ourself to the queue and noting our position), then waiting until the head
27 * becomes equal to the the initial value of the tail.
28 *
29 * 63 32 31 0
30 * +----------------------------------------------------+
31 * | next_ticket_number | now_serving |
32 * +----------------------------------------------------+
28 */ 33 */
29 34
30#define IA64_SPINLOCK_CLOBBERS "ar.ccv", "ar.pfs", "p14", "p15", "r27", "r28", "r29", "r30", "b6", "memory" 35#define TICKET_SHIFT 32
31 36
32static inline void 37static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
33__raw_spin_lock_flags (raw_spinlock_t *lock, unsigned long flags)
34{ 38{
35 register volatile unsigned int *ptr asm ("r31") = &lock->lock; 39 int *p = (int *)&lock->lock, turn, now_serving;
36 40
37#if (__GNUC__ == 3 && __GNUC_MINOR__ < 3) 41 now_serving = *p;
38# ifdef CONFIG_ITANIUM 42 turn = ia64_fetchadd(1, p+1, acq);
39 /* don't use brl on Itanium... */ 43
40 asm volatile ("{\n\t" 44 if (turn == now_serving)
41 " mov ar.ccv = r0\n\t" 45 return;
42 " mov r28 = ip\n\t" 46
43 " mov r30 = 1;;\n\t" 47 do {
44 "}\n\t" 48 cpu_relax();
45 "cmpxchg4.acq r30 = [%1], r30, ar.ccv\n\t" 49 } while (ACCESS_ONCE(*p) != turn);
46 "movl r29 = ia64_spinlock_contention_pre3_4;;\n\t"
47 "cmp4.ne p14, p0 = r30, r0\n\t"
48 "mov b6 = r29;;\n\t"
49 "mov r27=%2\n\t"
50 "(p14) br.cond.spnt.many b6"
51 : "=r"(ptr) : "r"(ptr), "r" (flags) : IA64_SPINLOCK_CLOBBERS);
52# else
53 asm volatile ("{\n\t"
54 " mov ar.ccv = r0\n\t"
55 " mov r28 = ip\n\t"
56 " mov r30 = 1;;\n\t"
57 "}\n\t"
58 "cmpxchg4.acq r30 = [%1], r30, ar.ccv;;\n\t"
59 "cmp4.ne p14, p0 = r30, r0\n\t"
60 "mov r27=%2\n\t"
61 "(p14) brl.cond.spnt.many ia64_spinlock_contention_pre3_4;;"
62 : "=r"(ptr) : "r"(ptr), "r" (flags) : IA64_SPINLOCK_CLOBBERS);
63# endif /* CONFIG_MCKINLEY */
64#else
65# ifdef CONFIG_ITANIUM
66 /* don't use brl on Itanium... */
67 /* mis-declare, so we get the entry-point, not it's function descriptor: */
68 asm volatile ("mov r30 = 1\n\t"
69 "mov r27=%2\n\t"
70 "mov ar.ccv = r0;;\n\t"
71 "cmpxchg4.acq r30 = [%0], r30, ar.ccv\n\t"
72 "movl r29 = ia64_spinlock_contention;;\n\t"
73 "cmp4.ne p14, p0 = r30, r0\n\t"
74 "mov b6 = r29;;\n\t"
75 "(p14) br.call.spnt.many b6 = b6"
76 : "=r"(ptr) : "r"(ptr), "r" (flags) : IA64_SPINLOCK_CLOBBERS);
77# else
78 asm volatile ("mov r30 = 1\n\t"
79 "mov r27=%2\n\t"
80 "mov ar.ccv = r0;;\n\t"
81 "cmpxchg4.acq r30 = [%0], r30, ar.ccv;;\n\t"
82 "cmp4.ne p14, p0 = r30, r0\n\t"
83 "(p14) brl.call.spnt.many b6=ia64_spinlock_contention;;"
84 : "=r"(ptr) : "r"(ptr), "r" (flags) : IA64_SPINLOCK_CLOBBERS);
85# endif /* CONFIG_MCKINLEY */
86#endif
87} 50}
88 51
89#define __raw_spin_lock(lock) __raw_spin_lock_flags(lock, 0) 52static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
53{
54 long tmp = ACCESS_ONCE(lock->lock), try;
90 55
91/* Unlock by doing an ordered store and releasing the cacheline with nta */ 56 if (!(((tmp >> TICKET_SHIFT) ^ tmp) & ((1L << TICKET_SHIFT) - 1))) {
92static inline void __raw_spin_unlock(raw_spinlock_t *x) { 57 try = tmp + (1L << TICKET_SHIFT);
93 barrier(); 58
94 asm volatile ("st4.rel.nta [%0] = r0\n\t" :: "r"(x)); 59 return ia64_cmpxchg(acq, &lock->lock, tmp, try, sizeof (tmp)) == tmp;
60 }
61 return 0;
95} 62}
96 63
97#else /* !ASM_SUPPORTED */ 64static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
98#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) 65{
99# define __raw_spin_lock(x) \ 66 int *p = (int *)&lock->lock;
100do { \ 67
101 __u32 *ia64_spinlock_ptr = (__u32 *) (x); \ 68 (void)ia64_fetchadd(1, p, rel);
102 __u64 ia64_spinlock_val; \ 69}
103 ia64_spinlock_val = ia64_cmpxchg4_acq(ia64_spinlock_ptr, 1, 0); \ 70
104 if (unlikely(ia64_spinlock_val)) { \ 71static inline int __ticket_spin_is_locked(raw_spinlock_t *lock)
105 do { \ 72{
106 while (*ia64_spinlock_ptr) \ 73 long tmp = ACCESS_ONCE(lock->lock);
107 ia64_barrier(); \ 74
108 ia64_spinlock_val = ia64_cmpxchg4_acq(ia64_spinlock_ptr, 1, 0); \ 75 return !!(((tmp >> TICKET_SHIFT) ^ tmp) & ((1L << TICKET_SHIFT) - 1));
109 } while (ia64_spinlock_val); \ 76}
110 } \ 77
111} while (0) 78static inline int __ticket_spin_is_contended(raw_spinlock_t *lock)
112#define __raw_spin_unlock(x) do { barrier(); ((raw_spinlock_t *) x)->lock = 0; } while (0) 79{
113#endif /* !ASM_SUPPORTED */ 80 long tmp = ACCESS_ONCE(lock->lock);
114 81
115#define __raw_spin_is_locked(x) ((x)->lock != 0) 82 return (((tmp >> TICKET_SHIFT) - tmp) & ((1L << TICKET_SHIFT) - 1)) > 1;
116#define __raw_spin_trylock(x) (cmpxchg_acq(&(x)->lock, 0, 1) == 0) 83}
117#define __raw_spin_unlock_wait(lock) \ 84
118 do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0) 85static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
86{
87 return __ticket_spin_is_locked(lock);
88}
89
90static inline int __raw_spin_is_contended(raw_spinlock_t *lock)
91{
92 return __ticket_spin_is_contended(lock);
93}
94#define __raw_spin_is_contended __raw_spin_is_contended
95
96static __always_inline void __raw_spin_lock(raw_spinlock_t *lock)
97{
98 __ticket_spin_lock(lock);
99}
100
101static __always_inline int __raw_spin_trylock(raw_spinlock_t *lock)
102{
103 return __ticket_spin_trylock(lock);
104}
105
106static __always_inline void __raw_spin_unlock(raw_spinlock_t *lock)
107{
108 __ticket_spin_unlock(lock);
109}
110
111static __always_inline void __raw_spin_lock_flags(raw_spinlock_t *lock,
112 unsigned long flags)
113{
114 __raw_spin_lock(lock);
115}
116
117static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
118{
119 while (__raw_spin_is_locked(lock))
120 cpu_relax();
121}
119 122
120#define __raw_read_can_lock(rw) (*(volatile int *)(rw) >= 0) 123#define __raw_read_can_lock(rw) (*(volatile int *)(rw) >= 0)
121#define __raw_write_can_lock(rw) (*(volatile int *)(rw) == 0) 124#define __raw_write_can_lock(rw) (*(volatile int *)(rw) == 0)
diff --git a/arch/ia64/include/asm/spinlock_types.h b/arch/ia64/include/asm/spinlock_types.h
index 474e46f1ab4a..b61d136d9bc2 100644
--- a/arch/ia64/include/asm/spinlock_types.h
+++ b/arch/ia64/include/asm/spinlock_types.h
@@ -6,7 +6,7 @@
6#endif 6#endif
7 7
8typedef struct { 8typedef struct {
9 volatile unsigned int lock; 9 volatile unsigned long lock;
10} raw_spinlock_t; 10} raw_spinlock_t;
11 11
12#define __RAW_SPIN_LOCK_UNLOCKED { 0 } 12#define __RAW_SPIN_LOCK_UNLOCKED { 0 }
diff --git a/arch/ia64/kernel/head.S b/arch/ia64/kernel/head.S
index 1a6e44515eb4..696eff28a0c4 100644
--- a/arch/ia64/kernel/head.S
+++ b/arch/ia64/kernel/head.S
@@ -1130,95 +1130,6 @@ SET_REG(b5);
1130#endif /* CONFIG_IA64_BRL_EMU */ 1130#endif /* CONFIG_IA64_BRL_EMU */
1131 1131
1132#ifdef CONFIG_SMP 1132#ifdef CONFIG_SMP
1133 /*
1134 * This routine handles spinlock contention. It uses a non-standard calling
1135 * convention to avoid converting leaf routines into interior routines. Because
1136 * of this special convention, there are several restrictions:
1137 *
1138 * - do not use gp relative variables, this code is called from the kernel
1139 * and from modules, r1 is undefined.
1140 * - do not use stacked registers, the caller owns them.
1141 * - do not use the scratch stack space, the caller owns it.
1142 * - do not use any registers other than the ones listed below
1143 *
1144 * Inputs:
1145 * ar.pfs - saved CFM of caller
1146 * ar.ccv - 0 (and available for use)
1147 * r27 - flags from spin_lock_irqsave or 0. Must be preserved.
1148 * r28 - available for use.
1149 * r29 - available for use.
1150 * r30 - available for use.
1151 * r31 - address of lock, available for use.
1152 * b6 - return address
1153 * p14 - available for use.
1154 * p15 - used to track flag status.
1155 *
1156 * If you patch this code to use more registers, do not forget to update
1157 * the clobber lists for spin_lock() in arch/ia64/include/asm/spinlock.h.
1158 */
1159
1160#if (__GNUC__ == 3 && __GNUC_MINOR__ < 3)
1161
1162GLOBAL_ENTRY(ia64_spinlock_contention_pre3_4)
1163 .prologue
1164 .save ar.pfs, r0 // this code effectively has a zero frame size
1165 .save rp, r28
1166 .body
1167 nop 0
1168 tbit.nz p15,p0=r27,IA64_PSR_I_BIT
1169 .restore sp // pop existing prologue after next insn
1170 mov b6 = r28
1171 .prologue
1172 .save ar.pfs, r0
1173 .altrp b6
1174 .body
1175 ;;
1176(p15) ssm psr.i // reenable interrupts if they were on
1177 // DavidM says that srlz.d is slow and is not required in this case
1178.wait:
1179 // exponential backoff, kdb, lockmeter etc. go in here
1180 hint @pause
1181 ld4 r30=[r31] // don't use ld4.bias; if it's contended, we won't write the word
1182 nop 0
1183 ;;
1184 cmp4.ne p14,p0=r30,r0
1185(p14) br.cond.sptk.few .wait
1186(p15) rsm psr.i // disable interrupts if we reenabled them
1187 br.cond.sptk.few b6 // lock is now free, try to acquire
1188 .global ia64_spinlock_contention_pre3_4_end // for kernprof
1189ia64_spinlock_contention_pre3_4_end:
1190END(ia64_spinlock_contention_pre3_4)
1191
1192#else
1193
1194GLOBAL_ENTRY(ia64_spinlock_contention)
1195 .prologue
1196 .altrp b6
1197 .body
1198 tbit.nz p15,p0=r27,IA64_PSR_I_BIT
1199 ;;
1200.wait:
1201(p15) ssm psr.i // reenable interrupts if they were on
1202 // DavidM says that srlz.d is slow and is not required in this case
1203.wait2:
1204 // exponential backoff, kdb, lockmeter etc. go in here
1205 hint @pause
1206 ld4 r30=[r31] // don't use ld4.bias; if it's contended, we won't write the word
1207 ;;
1208 cmp4.ne p14,p0=r30,r0
1209 mov r30 = 1
1210(p14) br.cond.sptk.few .wait2
1211(p15) rsm psr.i // disable interrupts if we reenabled them
1212 ;;
1213 cmpxchg4.acq r30=[r31], r30, ar.ccv
1214 ;;
1215 cmp4.ne p14,p0=r0,r30
1216(p14) br.cond.sptk.few .wait
1217
1218 br.ret.sptk.many b6 // lock is now taken
1219END(ia64_spinlock_contention)
1220
1221#endif
1222 1133
1223#ifdef CONFIG_HOTPLUG_CPU 1134#ifdef CONFIG_HOTPLUG_CPU
1224GLOBAL_ENTRY(ia64_jump_to_sal) 1135GLOBAL_ENTRY(ia64_jump_to_sal)
diff --git a/arch/ia64/kernel/ia64_ksyms.c b/arch/ia64/kernel/ia64_ksyms.c
index 8ebccb589e1c..14d39e300627 100644
--- a/arch/ia64/kernel/ia64_ksyms.c
+++ b/arch/ia64/kernel/ia64_ksyms.c
@@ -84,26 +84,6 @@ EXPORT_SYMBOL(ia64_save_scratch_fpregs);
84#include <asm/unwind.h> 84#include <asm/unwind.h>
85EXPORT_SYMBOL(unw_init_running); 85EXPORT_SYMBOL(unw_init_running);
86 86
87#ifdef ASM_SUPPORTED
88# ifdef CONFIG_SMP
89# if (__GNUC__ == 3 && __GNUC_MINOR__ < 3)
90/*
91 * This is not a normal routine and we don't want a function descriptor for it, so we use
92 * a fake declaration here.
93 */
94extern char ia64_spinlock_contention_pre3_4;
95EXPORT_SYMBOL(ia64_spinlock_contention_pre3_4);
96# else
97/*
98 * This is not a normal routine and we don't want a function descriptor for it, so we use
99 * a fake declaration here.
100 */
101extern char ia64_spinlock_contention;
102EXPORT_SYMBOL(ia64_spinlock_contention);
103# endif
104# endif
105#endif
106
107#if defined(CONFIG_IA64_ESI) || defined(CONFIG_IA64_ESI_MODULE) 87#if defined(CONFIG_IA64_ESI) || defined(CONFIG_IA64_ESI_MODULE)
108extern void esi_call_phys (void); 88extern void esi_call_phys (void);
109EXPORT_SYMBOL_GPL(esi_call_phys); 89EXPORT_SYMBOL_GPL(esi_call_phys);
diff --git a/arch/ia64/oprofile/backtrace.c b/arch/ia64/oprofile/backtrace.c
index adb01566bd57..5cdd7e4a597c 100644
--- a/arch/ia64/oprofile/backtrace.c
+++ b/arch/ia64/oprofile/backtrace.c
@@ -32,24 +32,6 @@ typedef struct
32 u64 *prev_pfs_loc; /* state for WAR for old spinlock ool code */ 32 u64 *prev_pfs_loc; /* state for WAR for old spinlock ool code */
33} ia64_backtrace_t; 33} ia64_backtrace_t;
34 34
35#if (__GNUC__ == 3 && __GNUC_MINOR__ < 3)
36/*
37 * Returns non-zero if the PC is in the spinlock contention out-of-line code
38 * with non-standard calling sequence (on older compilers).
39 */
40static __inline__ int in_old_ool_spinlock_code(unsigned long pc)
41{
42 extern const char ia64_spinlock_contention_pre3_4[] __attribute__ ((weak));
43 extern const char ia64_spinlock_contention_pre3_4_end[] __attribute__ ((weak));
44 unsigned long sc_start = (unsigned long)ia64_spinlock_contention_pre3_4;
45 unsigned long sc_end = (unsigned long)ia64_spinlock_contention_pre3_4_end;
46 return (sc_start && sc_end && pc >= sc_start && pc < sc_end);
47}
48#else
49/* Newer spinlock code does a proper br.call and works fine with the unwinder */
50#define in_old_ool_spinlock_code(pc) 0
51#endif
52
53/* Returns non-zero if the PC is in the Interrupt Vector Table */ 35/* Returns non-zero if the PC is in the Interrupt Vector Table */
54static __inline__ int in_ivt_code(unsigned long pc) 36static __inline__ int in_ivt_code(unsigned long pc)
55{ 37{
@@ -80,7 +62,7 @@ static __inline__ int next_frame(ia64_backtrace_t *bt)
80 */ 62 */
81 if (bt->prev_pfs_loc && bt->regs && bt->frame.pfs_loc == bt->prev_pfs_loc) 63 if (bt->prev_pfs_loc && bt->regs && bt->frame.pfs_loc == bt->prev_pfs_loc)
82 bt->frame.pfs_loc = &bt->regs->ar_pfs; 64 bt->frame.pfs_loc = &bt->regs->ar_pfs;
83 bt->prev_pfs_loc = (in_old_ool_spinlock_code(bt->frame.ip) ? bt->frame.pfs_loc : NULL); 65 bt->prev_pfs_loc = NULL;
84 66
85 return unw_unwind(&bt->frame) == 0; 67 return unw_unwind(&bt->frame) == 0;
86} 68}
diff --git a/arch/m68k/include/asm/hardirq_mm.h b/arch/m68k/include/asm/hardirq_mm.h
index 554f65b6cd3b..394ee946015c 100644
--- a/arch/m68k/include/asm/hardirq_mm.h
+++ b/arch/m68k/include/asm/hardirq_mm.h
@@ -1,8 +1,16 @@
1#ifndef __M68K_HARDIRQ_H 1#ifndef __M68K_HARDIRQ_H
2#define __M68K_HARDIRQ_H 2#define __M68K_HARDIRQ_H
3 3
4#define HARDIRQ_BITS 8 4#include <linux/threads.h>
5#include <linux/cache.h>
6
7/* entry.S is sensitive to the offsets of these fields */
8typedef struct {
9 unsigned int __softirq_pending;
10} ____cacheline_aligned irq_cpustat_t;
5 11
6#include <asm-generic/hardirq.h> 12#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
13
14#define HARDIRQ_BITS 8
7 15
8#endif 16#endif
diff --git a/arch/mips/alchemy/common/dbdma.c b/arch/mips/alchemy/common/dbdma.c
index 3ab6d80d150d..19c1c82849ff 100644
--- a/arch/mips/alchemy/common/dbdma.c
+++ b/arch/mips/alchemy/common/dbdma.c
@@ -175,7 +175,7 @@ static dbdev_tab_t dbdev_tab[] = {
175#define DBDEV_TAB_SIZE ARRAY_SIZE(dbdev_tab) 175#define DBDEV_TAB_SIZE ARRAY_SIZE(dbdev_tab)
176 176
177#ifdef CONFIG_PM 177#ifdef CONFIG_PM
178static u32 au1xxx_dbdma_pm_regs[NUM_DBDMA_CHANS + 1][8]; 178static u32 au1xxx_dbdma_pm_regs[NUM_DBDMA_CHANS + 1][6];
179#endif 179#endif
180 180
181 181
@@ -993,14 +993,13 @@ void au1xxx_dbdma_suspend(void)
993 au1xxx_dbdma_pm_regs[0][3] = au_readl(addr + 0x0c); 993 au1xxx_dbdma_pm_regs[0][3] = au_readl(addr + 0x0c);
994 994
995 /* save channel configurations */ 995 /* save channel configurations */
996 for (i = 1, addr = DDMA_CHANNEL_BASE; i < NUM_DBDMA_CHANS; i++) { 996 for (i = 1, addr = DDMA_CHANNEL_BASE; i <= NUM_DBDMA_CHANS; i++) {
997 au1xxx_dbdma_pm_regs[i][0] = au_readl(addr + 0x00); 997 au1xxx_dbdma_pm_regs[i][0] = au_readl(addr + 0x00);
998 au1xxx_dbdma_pm_regs[i][1] = au_readl(addr + 0x04); 998 au1xxx_dbdma_pm_regs[i][1] = au_readl(addr + 0x04);
999 au1xxx_dbdma_pm_regs[i][2] = au_readl(addr + 0x08); 999 au1xxx_dbdma_pm_regs[i][2] = au_readl(addr + 0x08);
1000 au1xxx_dbdma_pm_regs[i][3] = au_readl(addr + 0x0c); 1000 au1xxx_dbdma_pm_regs[i][3] = au_readl(addr + 0x0c);
1001 au1xxx_dbdma_pm_regs[i][4] = au_readl(addr + 0x10); 1001 au1xxx_dbdma_pm_regs[i][4] = au_readl(addr + 0x10);
1002 au1xxx_dbdma_pm_regs[i][5] = au_readl(addr + 0x14); 1002 au1xxx_dbdma_pm_regs[i][5] = au_readl(addr + 0x14);
1003 au1xxx_dbdma_pm_regs[i][6] = au_readl(addr + 0x18);
1004 1003
1005 /* halt channel */ 1004 /* halt channel */
1006 au_writel(au1xxx_dbdma_pm_regs[i][0] & ~1, addr + 0x00); 1005 au_writel(au1xxx_dbdma_pm_regs[i][0] & ~1, addr + 0x00);
@@ -1027,14 +1026,13 @@ void au1xxx_dbdma_resume(void)
1027 au_writel(au1xxx_dbdma_pm_regs[0][3], addr + 0x0c); 1026 au_writel(au1xxx_dbdma_pm_regs[0][3], addr + 0x0c);
1028 1027
1029 /* restore channel configurations */ 1028 /* restore channel configurations */
1030 for (i = 1, addr = DDMA_CHANNEL_BASE; i < NUM_DBDMA_CHANS; i++) { 1029 for (i = 1, addr = DDMA_CHANNEL_BASE; i <= NUM_DBDMA_CHANS; i++) {
1031 au_writel(au1xxx_dbdma_pm_regs[i][0], addr + 0x00); 1030 au_writel(au1xxx_dbdma_pm_regs[i][0], addr + 0x00);
1032 au_writel(au1xxx_dbdma_pm_regs[i][1], addr + 0x04); 1031 au_writel(au1xxx_dbdma_pm_regs[i][1], addr + 0x04);
1033 au_writel(au1xxx_dbdma_pm_regs[i][2], addr + 0x08); 1032 au_writel(au1xxx_dbdma_pm_regs[i][2], addr + 0x08);
1034 au_writel(au1xxx_dbdma_pm_regs[i][3], addr + 0x0c); 1033 au_writel(au1xxx_dbdma_pm_regs[i][3], addr + 0x0c);
1035 au_writel(au1xxx_dbdma_pm_regs[i][4], addr + 0x10); 1034 au_writel(au1xxx_dbdma_pm_regs[i][4], addr + 0x10);
1036 au_writel(au1xxx_dbdma_pm_regs[i][5], addr + 0x14); 1035 au_writel(au1xxx_dbdma_pm_regs[i][5], addr + 0x14);
1037 au_writel(au1xxx_dbdma_pm_regs[i][6], addr + 0x18);
1038 au_sync(); 1036 au_sync();
1039 addr += 0x100; /* next channel base */ 1037 addr += 0x100; /* next channel base */
1040 } 1038 }
diff --git a/arch/mips/basler/excite/excite_iodev.c b/arch/mips/basler/excite/excite_iodev.c
index dfbfd7e2ac08..938b1d0b7652 100644
--- a/arch/mips/basler/excite/excite_iodev.c
+++ b/arch/mips/basler/excite/excite_iodev.c
@@ -112,10 +112,8 @@ static int iodev_open(struct inode *i, struct file *f)
112{ 112{
113 int ret; 113 int ret;
114 114
115 lock_kernel();
116 ret = request_irq(iodev_irq, iodev_irqhdl, IRQF_DISABLED, 115 ret = request_irq(iodev_irq, iodev_irqhdl, IRQF_DISABLED,
117 iodev_name, &miscdev); 116 iodev_name, &miscdev);
118 unlock_kernel();
119 117
120 return ret; 118 return ret;
121} 119}
diff --git a/arch/mips/bcm63xx/Makefile b/arch/mips/bcm63xx/Makefile
index aaa585cf26e3..c146d1ededed 100644
--- a/arch/mips/bcm63xx/Makefile
+++ b/arch/mips/bcm63xx/Makefile
@@ -1,5 +1,5 @@
1obj-y += clk.o cpu.o cs.o gpio.o irq.o prom.o setup.o timer.o \ 1obj-y += clk.o cpu.o cs.o gpio.o irq.o prom.o setup.o timer.o \
2 dev-dsp.o dev-enet.o 2 dev-dsp.o dev-enet.o dev-pcmcia.o dev-uart.o
3obj-$(CONFIG_EARLY_PRINTK) += early_printk.o 3obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
4 4
5obj-y += boards/ 5obj-y += boards/
diff --git a/arch/mips/bcm63xx/boards/board_bcm963xx.c b/arch/mips/bcm63xx/boards/board_bcm963xx.c
index fd77f548207a..78e155d21be6 100644
--- a/arch/mips/bcm63xx/boards/board_bcm963xx.c
+++ b/arch/mips/bcm63xx/boards/board_bcm963xx.c
@@ -20,10 +20,11 @@
20#include <bcm63xx_cpu.h> 20#include <bcm63xx_cpu.h>
21#include <bcm63xx_regs.h> 21#include <bcm63xx_regs.h>
22#include <bcm63xx_io.h> 22#include <bcm63xx_io.h>
23#include <bcm63xx_board.h>
24#include <bcm63xx_dev_pci.h> 23#include <bcm63xx_dev_pci.h>
25#include <bcm63xx_dev_enet.h> 24#include <bcm63xx_dev_enet.h>
26#include <bcm63xx_dev_dsp.h> 25#include <bcm63xx_dev_dsp.h>
26#include <bcm63xx_dev_pcmcia.h>
27#include <bcm63xx_dev_uart.h>
27#include <board_bcm963xx.h> 28#include <board_bcm963xx.h>
28 29
29#define PFX "board_bcm963xx: " 30#define PFX "board_bcm963xx: "
@@ -793,6 +794,11 @@ int __init board_register_devices(void)
793{ 794{
794 u32 val; 795 u32 val;
795 796
797 bcm63xx_uart_register();
798
799 if (board.has_pccard)
800 bcm63xx_pcmcia_register();
801
796 if (board.has_enet0 && 802 if (board.has_enet0 &&
797 !board_get_mac_address(board.enet0.mac_addr)) 803 !board_get_mac_address(board.enet0.mac_addr))
798 bcm63xx_enet_register(0, &board.enet0); 804 bcm63xx_enet_register(0, &board.enet0);
diff --git a/arch/mips/bcm63xx/dev-pcmcia.c b/arch/mips/bcm63xx/dev-pcmcia.c
new file mode 100644
index 000000000000..de4d917fd54d
--- /dev/null
+++ b/arch/mips/bcm63xx/dev-pcmcia.c
@@ -0,0 +1,144 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
7 */
8
9#include <linux/init.h>
10#include <linux/kernel.h>
11#include <asm/bootinfo.h>
12#include <linux/platform_device.h>
13#include <bcm63xx_cs.h>
14#include <bcm63xx_cpu.h>
15#include <bcm63xx_dev_pcmcia.h>
16#include <bcm63xx_io.h>
17#include <bcm63xx_regs.h>
18
19static struct resource pcmcia_resources[] = {
20 /* pcmcia registers */
21 {
22 /* start & end filled at runtime */
23 .flags = IORESOURCE_MEM,
24 },
25
26 /* pcmcia memory zone resources */
27 {
28 .start = BCM_PCMCIA_COMMON_BASE_PA,
29 .end = BCM_PCMCIA_COMMON_END_PA,
30 .flags = IORESOURCE_MEM,
31 },
32 {
33 .start = BCM_PCMCIA_ATTR_BASE_PA,
34 .end = BCM_PCMCIA_ATTR_END_PA,
35 .flags = IORESOURCE_MEM,
36 },
37 {
38 .start = BCM_PCMCIA_IO_BASE_PA,
39 .end = BCM_PCMCIA_IO_END_PA,
40 .flags = IORESOURCE_MEM,
41 },
42
43 /* PCMCIA irq */
44 {
45 /* start filled at runtime */
46 .flags = IORESOURCE_IRQ,
47 },
48
49 /* declare PCMCIA IO resource also */
50 {
51 .start = BCM_PCMCIA_IO_BASE_PA,
52 .end = BCM_PCMCIA_IO_END_PA,
53 .flags = IORESOURCE_IO,
54 },
55};
56
57static struct bcm63xx_pcmcia_platform_data pd;
58
59static struct platform_device bcm63xx_pcmcia_device = {
60 .name = "bcm63xx_pcmcia",
61 .id = 0,
62 .num_resources = ARRAY_SIZE(pcmcia_resources),
63 .resource = pcmcia_resources,
64 .dev = {
65 .platform_data = &pd,
66 },
67};
68
69static int __init config_pcmcia_cs(unsigned int cs,
70 u32 base, unsigned int size)
71{
72 int ret;
73
74 ret = bcm63xx_set_cs_status(cs, 0);
75 if (!ret)
76 ret = bcm63xx_set_cs_base(cs, base, size);
77 if (!ret)
78 ret = bcm63xx_set_cs_status(cs, 1);
79 return ret;
80}
81
82static const __initdata struct {
83 unsigned int cs;
84 unsigned int base;
85 unsigned int size;
86} pcmcia_cs[3] = {
87 {
88 .cs = MPI_CS_PCMCIA_COMMON,
89 .base = BCM_PCMCIA_COMMON_BASE_PA,
90 .size = BCM_PCMCIA_COMMON_SIZE
91 },
92 {
93 .cs = MPI_CS_PCMCIA_ATTR,
94 .base = BCM_PCMCIA_ATTR_BASE_PA,
95 .size = BCM_PCMCIA_ATTR_SIZE
96 },
97 {
98 .cs = MPI_CS_PCMCIA_IO,
99 .base = BCM_PCMCIA_IO_BASE_PA,
100 .size = BCM_PCMCIA_IO_SIZE
101 },
102};
103
104int __init bcm63xx_pcmcia_register(void)
105{
106 int ret, i;
107
108 if (!BCMCPU_IS_6348() && !BCMCPU_IS_6358())
109 return 0;
110
111 /* use correct pcmcia ready gpio depending on processor */
112 switch (bcm63xx_get_cpu_id()) {
113 case BCM6348_CPU_ID:
114 pd.ready_gpio = 22;
115 break;
116
117 case BCM6358_CPU_ID:
118 pd.ready_gpio = 18;
119 break;
120
121 default:
122 return -ENODEV;
123 }
124
125 pcmcia_resources[0].start = bcm63xx_regset_address(RSET_PCMCIA);
126 pcmcia_resources[0].end = pcmcia_resources[0].start +
127 RSET_PCMCIA_SIZE - 1;
128 pcmcia_resources[4].start = bcm63xx_get_irq_number(IRQ_PCMCIA);
129
130 /* configure pcmcia chip selects */
131 for (i = 0; i < 3; i++) {
132 ret = config_pcmcia_cs(pcmcia_cs[i].cs,
133 pcmcia_cs[i].base,
134 pcmcia_cs[i].size);
135 if (ret)
136 goto out_err;
137 }
138
139 return platform_device_register(&bcm63xx_pcmcia_device);
140
141out_err:
142 printk(KERN_ERR "unable to set pcmcia chip select\n");
143 return ret;
144}
diff --git a/arch/mips/bcm63xx/dev-uart.c b/arch/mips/bcm63xx/dev-uart.c
new file mode 100644
index 000000000000..5f3d89c4a988
--- /dev/null
+++ b/arch/mips/bcm63xx/dev-uart.c
@@ -0,0 +1,41 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
7 */
8
9#include <linux/init.h>
10#include <linux/kernel.h>
11#include <linux/platform_device.h>
12#include <bcm63xx_cpu.h>
13#include <bcm63xx_dev_uart.h>
14
15static struct resource uart_resources[] = {
16 {
17 .start = -1, /* filled at runtime */
18 .end = -1, /* filled at runtime */
19 .flags = IORESOURCE_MEM,
20 },
21 {
22 .start = -1, /* filled at runtime */
23 .flags = IORESOURCE_IRQ,
24 },
25};
26
27static struct platform_device bcm63xx_uart_device = {
28 .name = "bcm63xx_uart",
29 .id = 0,
30 .num_resources = ARRAY_SIZE(uart_resources),
31 .resource = uart_resources,
32};
33
34int __init bcm63xx_uart_register(void)
35{
36 uart_resources[0].start = bcm63xx_regset_address(RSET_UART0);
37 uart_resources[0].end = uart_resources[0].start;
38 uart_resources[0].end += RSET_UART_SIZE - 1;
39 uart_resources[1].start = bcm63xx_get_irq_number(IRQ_UART0);
40 return platform_device_register(&bcm63xx_uart_device);
41}
diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_pcmcia.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_pcmcia.h
new file mode 100644
index 000000000000..2beb3969ce3b
--- /dev/null
+++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_pcmcia.h
@@ -0,0 +1,13 @@
1#ifndef BCM63XX_DEV_PCMCIA_H_
2#define BCM63XX_DEV_PCMCIA_H_
3
4/*
5 * PCMCIA driver platform data
6 */
7struct bcm63xx_pcmcia_platform_data {
8 unsigned int ready_gpio;
9};
10
11int bcm63xx_pcmcia_register(void);
12
13#endif /* BCM63XX_DEV_PCMCIA_H_ */
diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_uart.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_uart.h
new file mode 100644
index 000000000000..bf348f573bbc
--- /dev/null
+++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_uart.h
@@ -0,0 +1,6 @@
1#ifndef BCM63XX_DEV_UART_H_
2#define BCM63XX_DEV_UART_H_
3
4int bcm63xx_uart_register(void);
5
6#endif /* BCM63XX_DEV_UART_H_ */
diff --git a/arch/mips/include/asm/smp.h b/arch/mips/include/asm/smp.h
index e15f11a09311..af42385245d5 100644
--- a/arch/mips/include/asm/smp.h
+++ b/arch/mips/include/asm/smp.h
@@ -77,7 +77,18 @@ extern void play_dead(void);
77 77
78extern asmlinkage void smp_call_function_interrupt(void); 78extern asmlinkage void smp_call_function_interrupt(void);
79 79
80extern void arch_send_call_function_single_ipi(int cpu); 80static inline void arch_send_call_function_single_ipi(int cpu)
81extern void arch_send_call_function_ipi_mask(const struct cpumask *mask); 81{
82 extern struct plat_smp_ops *mp_ops; /* private */
83
84 mp_ops->send_ipi_mask(&cpumask_of_cpu(cpu), SMP_CALL_FUNCTION);
85}
86
87static inline void arch_send_call_function_ipi_mask(const struct cpumask *mask)
88{
89 extern struct plat_smp_ops *mp_ops; /* private */
90
91 mp_ops->send_ipi_mask(mask, SMP_CALL_FUNCTION);
92}
82 93
83#endif /* __ASM_SMP_H */ 94#endif /* __ASM_SMP_H */
diff --git a/arch/mips/include/asm/unaligned.h b/arch/mips/include/asm/unaligned.h
index 792404948571..42f66c311473 100644
--- a/arch/mips/include/asm/unaligned.h
+++ b/arch/mips/include/asm/unaligned.h
@@ -12,17 +12,17 @@
12#if defined(__MIPSEB__) 12#if defined(__MIPSEB__)
13# include <linux/unaligned/be_struct.h> 13# include <linux/unaligned/be_struct.h>
14# include <linux/unaligned/le_byteshift.h> 14# include <linux/unaligned/le_byteshift.h>
15# include <linux/unaligned/generic.h>
16# define get_unaligned __get_unaligned_be 15# define get_unaligned __get_unaligned_be
17# define put_unaligned __put_unaligned_be 16# define put_unaligned __put_unaligned_be
18#elif defined(__MIPSEL__) 17#elif defined(__MIPSEL__)
19# include <linux/unaligned/le_struct.h> 18# include <linux/unaligned/le_struct.h>
20# include <linux/unaligned/be_byteshift.h> 19# include <linux/unaligned/be_byteshift.h>
21# include <linux/unaligned/generic.h>
22# define get_unaligned __get_unaligned_le 20# define get_unaligned __get_unaligned_le
23# define put_unaligned __put_unaligned_le 21# define put_unaligned __put_unaligned_le
24#else 22#else
25# error "MIPS, but neither __MIPSEB__, nor __MIPSEL__???" 23# error "MIPS, but neither __MIPSEB__, nor __MIPSEL__???"
26#endif 24#endif
27 25
26# include <linux/unaligned/generic.h>
27
28#endif /* _ASM_MIPS_UNALIGNED_H */ 28#endif /* _ASM_MIPS_UNALIGNED_H */
diff --git a/arch/mips/kernel/kspd.c b/arch/mips/kernel/kspd.c
index f2397f00db43..ad4e017ed2f3 100644
--- a/arch/mips/kernel/kspd.c
+++ b/arch/mips/kernel/kspd.c
@@ -172,13 +172,20 @@ static unsigned int translate_open_flags(int flags)
172} 172}
173 173
174 174
175static void sp_setfsuidgid( uid_t uid, gid_t gid) 175static int sp_setfsuidgid(uid_t uid, gid_t gid)
176{ 176{
177 current->cred->fsuid = uid; 177 struct cred *new;
178 current->cred->fsgid = gid;
179 178
180 key_fsuid_changed(current); 179 new = prepare_creds();
181 key_fsgid_changed(current); 180 if (!new)
181 return -ENOMEM;
182
183 new->fsuid = uid;
184 new->fsgid = gid;
185
186 commit_creds(new);
187
188 return 0;
182} 189}
183 190
184/* 191/*
@@ -196,7 +203,7 @@ void sp_work_handle_request(void)
196 mm_segment_t old_fs; 203 mm_segment_t old_fs;
197 struct timeval tv; 204 struct timeval tv;
198 struct timezone tz; 205 struct timezone tz;
199 int cmd; 206 int err, cmd;
200 207
201 char *vcwd; 208 char *vcwd;
202 int size; 209 int size;
@@ -225,8 +232,11 @@ void sp_work_handle_request(void)
225 /* Run the syscall at the privilege of the user who loaded the 232 /* Run the syscall at the privilege of the user who loaded the
226 SP program */ 233 SP program */
227 234
228 if (vpe_getuid(tclimit)) 235 if (vpe_getuid(tclimit)) {
229 sp_setfsuidgid(vpe_getuid(tclimit), vpe_getgid(tclimit)); 236 err = sp_setfsuidgid(vpe_getuid(tclimit), vpe_getgid(tclimit));
237 if (!err)
238 pr_err("Change of creds failed\n");
239 }
230 240
231 switch (sc.cmd) { 241 switch (sc.cmd) {
232 /* needs the flags argument translating from SDE kit to 242 /* needs the flags argument translating from SDE kit to
@@ -283,8 +293,11 @@ void sp_work_handle_request(void)
283 break; 293 break;
284 } /* switch */ 294 } /* switch */
285 295
286 if (vpe_getuid(tclimit)) 296 if (vpe_getuid(tclimit)) {
287 sp_setfsuidgid( 0, 0); 297 err = sp_setfsuidgid(0, 0);
298 if (!err)
299 pr_err("restoring old creds failed\n");
300 }
288 301
289 old_fs = get_fs(); 302 old_fs = get_fs();
290 set_fs(KERNEL_DS); 303 set_fs(KERNEL_DS);
diff --git a/arch/mips/kernel/rtlx.c b/arch/mips/kernel/rtlx.c
index a10ebfdc28ae..364f066cb497 100644
--- a/arch/mips/kernel/rtlx.c
+++ b/arch/mips/kernel/rtlx.c
@@ -72,8 +72,9 @@ static void rtlx_dispatch(void)
72*/ 72*/
73static irqreturn_t rtlx_interrupt(int irq, void *dev_id) 73static irqreturn_t rtlx_interrupt(int irq, void *dev_id)
74{ 74{
75 unsigned int vpeflags;
76 unsigned long flags;
75 int i; 77 int i;
76 unsigned int flags, vpeflags;
77 78
78 /* Ought not to be strictly necessary for SMTC builds */ 79 /* Ought not to be strictly necessary for SMTC builds */
79 local_irq_save(flags); 80 local_irq_save(flags);
@@ -392,20 +393,12 @@ out:
392 393
393static int file_open(struct inode *inode, struct file *filp) 394static int file_open(struct inode *inode, struct file *filp)
394{ 395{
395 int minor = iminor(inode); 396 return rtlx_open(iminor(inode), (filp->f_flags & O_NONBLOCK) ? 0 : 1);
396 int err;
397
398 lock_kernel();
399 err = rtlx_open(minor, (filp->f_flags & O_NONBLOCK) ? 0 : 1);
400 unlock_kernel();
401 return err;
402} 397}
403 398
404static int file_release(struct inode *inode, struct file *filp) 399static int file_release(struct inode *inode, struct file *filp)
405{ 400{
406 int minor = iminor(inode); 401 return rtlx_release(iminor(inode));
407
408 return rtlx_release(minor);
409} 402}
410 403
411static unsigned int file_poll(struct file *file, poll_table * wait) 404static unsigned int file_poll(struct file *file, poll_table * wait)
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index 4eb106c6a3ec..e72e6844d134 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -32,7 +32,6 @@
32#include <linux/cpumask.h> 32#include <linux/cpumask.h>
33#include <linux/cpu.h> 33#include <linux/cpu.h>
34#include <linux/err.h> 34#include <linux/err.h>
35#include <linux/smp.h>
36 35
37#include <asm/atomic.h> 36#include <asm/atomic.h>
38#include <asm/cpu.h> 37#include <asm/cpu.h>
@@ -128,19 +127,6 @@ asmlinkage __cpuinit void start_secondary(void)
128 cpu_idle(); 127 cpu_idle();
129} 128}
130 129
131void arch_send_call_function_ipi_mask(const struct cpumask *mask)
132{
133 mp_ops->send_ipi_mask(mask, SMP_CALL_FUNCTION);
134}
135
136/*
137 * We reuse the same vector for the single IPI
138 */
139void arch_send_call_function_single_ipi(int cpu)
140{
141 mp_ops->send_ipi_mask(cpumask_of_cpu(cpu), SMP_CALL_FUNCTION);
142}
143
144/* 130/*
145 * Call into both interrupt handlers, as we share the IPI for them 131 * Call into both interrupt handlers, as we share the IPI for them
146 */ 132 */
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c
index 67153a0dc267..4d181df44a40 100644
--- a/arch/mips/kernel/smtc.c
+++ b/arch/mips/kernel/smtc.c
@@ -1098,9 +1098,8 @@ static void ipi_irq_dispatch(void)
1098 1098
1099static struct irqaction irq_ipi = { 1099static struct irqaction irq_ipi = {
1100 .handler = ipi_interrupt, 1100 .handler = ipi_interrupt,
1101 .flags = IRQF_DISABLED, 1101 .flags = IRQF_DISABLED | IRQF_PERCPU,
1102 .name = "SMTC_IPI", 1102 .name = "SMTC_IPI"
1103 .flags = IRQF_PERCPU
1104}; 1103};
1105 1104
1106static void setup_cross_vpe_interrupts(unsigned int nvpe) 1105static void setup_cross_vpe_interrupts(unsigned int nvpe)
diff --git a/arch/mips/kernel/vpe.c b/arch/mips/kernel/vpe.c
index eb6c4c5b7fbe..03092ab2a296 100644
--- a/arch/mips/kernel/vpe.c
+++ b/arch/mips/kernel/vpe.c
@@ -144,14 +144,15 @@ struct tc {
144}; 144};
145 145
146struct { 146struct {
147 /* Virtual processing elements */ 147 spinlock_t vpe_list_lock;
148 struct list_head vpe_list; 148 struct list_head vpe_list; /* Virtual processing elements */
149 149 spinlock_t tc_list_lock;
150 /* Thread contexts */ 150 struct list_head tc_list; /* Thread contexts */
151 struct list_head tc_list;
152} vpecontrol = { 151} vpecontrol = {
153 .vpe_list = LIST_HEAD_INIT(vpecontrol.vpe_list), 152 .vpe_list_lock = SPIN_LOCK_UNLOCKED,
154 .tc_list = LIST_HEAD_INIT(vpecontrol.tc_list) 153 .vpe_list = LIST_HEAD_INIT(vpecontrol.vpe_list),
154 .tc_list_lock = SPIN_LOCK_UNLOCKED,
155 .tc_list = LIST_HEAD_INIT(vpecontrol.tc_list)
155}; 156};
156 157
157static void release_progmem(void *ptr); 158static void release_progmem(void *ptr);
@@ -159,28 +160,38 @@ static void release_progmem(void *ptr);
159/* get the vpe associated with this minor */ 160/* get the vpe associated with this minor */
160static struct vpe *get_vpe(int minor) 161static struct vpe *get_vpe(int minor)
161{ 162{
162 struct vpe *v; 163 struct vpe *res, *v;
163 164
164 if (!cpu_has_mipsmt) 165 if (!cpu_has_mipsmt)
165 return NULL; 166 return NULL;
166 167
168 res = NULL;
169 spin_lock(&vpecontrol.vpe_list_lock);
167 list_for_each_entry(v, &vpecontrol.vpe_list, list) { 170 list_for_each_entry(v, &vpecontrol.vpe_list, list) {
168 if (v->minor == minor) 171 if (v->minor == minor) {
169 return v; 172 res = v;
173 break;
174 }
170 } 175 }
176 spin_unlock(&vpecontrol.vpe_list_lock);
171 177
172 return NULL; 178 return res;
173} 179}
174 180
175/* get the vpe associated with this minor */ 181/* get the vpe associated with this minor */
176static struct tc *get_tc(int index) 182static struct tc *get_tc(int index)
177{ 183{
178 struct tc *t; 184 struct tc *res, *t;
179 185
186 res = NULL;
187 spin_lock(&vpecontrol.tc_list_lock);
180 list_for_each_entry(t, &vpecontrol.tc_list, list) { 188 list_for_each_entry(t, &vpecontrol.tc_list, list) {
181 if (t->index == index) 189 if (t->index == index) {
182 return t; 190 res = t;
191 break;
192 }
183 } 193 }
194 spin_unlock(&vpecontrol.tc_list_lock);
184 195
185 return NULL; 196 return NULL;
186} 197}
@@ -190,15 +201,17 @@ static struct vpe *alloc_vpe(int minor)
190{ 201{
191 struct vpe *v; 202 struct vpe *v;
192 203
193 if ((v = kzalloc(sizeof(struct vpe), GFP_KERNEL)) == NULL) { 204 if ((v = kzalloc(sizeof(struct vpe), GFP_KERNEL)) == NULL)
194 return NULL; 205 return NULL;
195 }
196 206
197 INIT_LIST_HEAD(&v->tc); 207 INIT_LIST_HEAD(&v->tc);
208 spin_lock(&vpecontrol.vpe_list_lock);
198 list_add_tail(&v->list, &vpecontrol.vpe_list); 209 list_add_tail(&v->list, &vpecontrol.vpe_list);
210 spin_unlock(&vpecontrol.vpe_list_lock);
199 211
200 INIT_LIST_HEAD(&v->notify); 212 INIT_LIST_HEAD(&v->notify);
201 v->minor = minor; 213 v->minor = minor;
214
202 return v; 215 return v;
203} 216}
204 217
@@ -212,7 +225,10 @@ static struct tc *alloc_tc(int index)
212 225
213 INIT_LIST_HEAD(&tc->tc); 226 INIT_LIST_HEAD(&tc->tc);
214 tc->index = index; 227 tc->index = index;
228
229 spin_lock(&vpecontrol.tc_list_lock);
215 list_add_tail(&tc->list, &vpecontrol.tc_list); 230 list_add_tail(&tc->list, &vpecontrol.tc_list);
231 spin_unlock(&vpecontrol.tc_list_lock);
216 232
217out: 233out:
218 return tc; 234 return tc;
@@ -227,7 +243,7 @@ static void release_vpe(struct vpe *v)
227 kfree(v); 243 kfree(v);
228} 244}
229 245
230static void dump_mtregs(void) 246static void __maybe_unused dump_mtregs(void)
231{ 247{
232 unsigned long val; 248 unsigned long val;
233 249
@@ -1048,20 +1064,19 @@ static int vpe_open(struct inode *inode, struct file *filp)
1048 enum vpe_state state; 1064 enum vpe_state state;
1049 struct vpe_notifications *not; 1065 struct vpe_notifications *not;
1050 struct vpe *v; 1066 struct vpe *v;
1051 int ret, err = 0; 1067 int ret;
1052 1068
1053 lock_kernel();
1054 if (minor != iminor(inode)) { 1069 if (minor != iminor(inode)) {
1055 /* assume only 1 device at the moment. */ 1070 /* assume only 1 device at the moment. */
1056 printk(KERN_WARNING "VPE loader: only vpe1 is supported\n"); 1071 pr_warning("VPE loader: only vpe1 is supported\n");
1057 err = -ENODEV; 1072
1058 goto out; 1073 return -ENODEV;
1059 } 1074 }
1060 1075
1061 if ((v = get_vpe(tclimit)) == NULL) { 1076 if ((v = get_vpe(tclimit)) == NULL) {
1062 printk(KERN_WARNING "VPE loader: unable to get vpe\n"); 1077 pr_warning("VPE loader: unable to get vpe\n");
1063 err = -ENODEV; 1078
1064 goto out; 1079 return -ENODEV;
1065 } 1080 }
1066 1081
1067 state = xchg(&v->state, VPE_STATE_INUSE); 1082 state = xchg(&v->state, VPE_STATE_INUSE);
@@ -1101,8 +1116,8 @@ static int vpe_open(struct inode *inode, struct file *filp)
1101 v->shared_ptr = NULL; 1116 v->shared_ptr = NULL;
1102 v->__start = 0; 1117 v->__start = 0;
1103 1118
1104out:
1105 unlock_kernel(); 1119 unlock_kernel();
1120
1106 return 0; 1121 return 0;
1107} 1122}
1108 1123
@@ -1594,14 +1609,14 @@ static void __exit vpe_module_exit(void)
1594{ 1609{
1595 struct vpe *v, *n; 1610 struct vpe *v, *n;
1596 1611
1612 device_del(&vpe_device);
1613 unregister_chrdev(major, module_name);
1614
1615 /* No locking needed here */
1597 list_for_each_entry_safe(v, n, &vpecontrol.vpe_list, list) { 1616 list_for_each_entry_safe(v, n, &vpecontrol.vpe_list, list) {
1598 if (v->state != VPE_STATE_UNUSED) { 1617 if (v->state != VPE_STATE_UNUSED)
1599 release_vpe(v); 1618 release_vpe(v);
1600 }
1601 } 1619 }
1602
1603 device_del(&vpe_device);
1604 unregister_chrdev(major, module_name);
1605} 1620}
1606 1621
1607module_init(vpe_module_init); 1622module_init(vpe_module_init);
diff --git a/arch/mips/mm/sc-mips.c b/arch/mips/mm/sc-mips.c
index b55c2d1b998f..5ab5fa8c1d82 100644
--- a/arch/mips/mm/sc-mips.c
+++ b/arch/mips/mm/sc-mips.c
@@ -32,6 +32,11 @@ static void mips_sc_wback_inv(unsigned long addr, unsigned long size)
32 */ 32 */
33static void mips_sc_inv(unsigned long addr, unsigned long size) 33static void mips_sc_inv(unsigned long addr, unsigned long size)
34{ 34{
35 unsigned long lsize = cpu_scache_line_size();
36 unsigned long almask = ~(lsize - 1);
37
38 cache_op(Hit_Writeback_Inv_SD, addr & almask);
39 cache_op(Hit_Writeback_Inv_SD, (addr + size - 1) & almask);
35 blast_inv_scache_range(addr, addr + size); 40 blast_inv_scache_range(addr, addr + size);
36} 41}
37 42
diff --git a/arch/mips/oprofile/op_model_loongson2.c b/arch/mips/oprofile/op_model_loongson2.c
index 655cb8dec340..deed1d5d4982 100644
--- a/arch/mips/oprofile/op_model_loongson2.c
+++ b/arch/mips/oprofile/op_model_loongson2.c
@@ -44,7 +44,7 @@ static struct loongson2_register_config {
44 unsigned int ctrl; 44 unsigned int ctrl;
45 unsigned long long reset_counter1; 45 unsigned long long reset_counter1;
46 unsigned long long reset_counter2; 46 unsigned long long reset_counter2;
47 int cnt1_enalbed, cnt2_enalbed; 47 int cnt1_enabled, cnt2_enabled;
48} reg; 48} reg;
49 49
50DEFINE_SPINLOCK(sample_lock); 50DEFINE_SPINLOCK(sample_lock);
@@ -81,8 +81,8 @@ static void loongson2_reg_setup(struct op_counter_config *cfg)
81 81
82 reg.ctrl = ctrl; 82 reg.ctrl = ctrl;
83 83
84 reg.cnt1_enalbed = cfg[0].enabled; 84 reg.cnt1_enabled = cfg[0].enabled;
85 reg.cnt2_enalbed = cfg[1].enabled; 85 reg.cnt2_enabled = cfg[1].enabled;
86 86
87} 87}
88 88
@@ -99,7 +99,7 @@ static void loongson2_cpu_setup(void *args)
99static void loongson2_cpu_start(void *args) 99static void loongson2_cpu_start(void *args)
100{ 100{
101 /* Start all counters on current CPU */ 101 /* Start all counters on current CPU */
102 if (reg.cnt1_enalbed || reg.cnt2_enalbed) 102 if (reg.cnt1_enabled || reg.cnt2_enabled)
103 write_c0_perfctrl(reg.ctrl); 103 write_c0_perfctrl(reg.ctrl);
104} 104}
105 105
@@ -125,7 +125,7 @@ static irqreturn_t loongson2_perfcount_handler(int irq, void *dev_id)
125 */ 125 */
126 126
127 /* Check whether the irq belongs to me */ 127 /* Check whether the irq belongs to me */
128 enabled = reg.cnt1_enalbed | reg.cnt2_enalbed; 128 enabled = reg.cnt1_enabled | reg.cnt2_enabled;
129 if (!enabled) 129 if (!enabled)
130 return IRQ_NONE; 130 return IRQ_NONE;
131 131
@@ -136,12 +136,12 @@ static irqreturn_t loongson2_perfcount_handler(int irq, void *dev_id)
136 spin_lock_irqsave(&sample_lock, flags); 136 spin_lock_irqsave(&sample_lock, flags);
137 137
138 if (counter1 & LOONGSON2_PERFCNT_OVERFLOW) { 138 if (counter1 & LOONGSON2_PERFCNT_OVERFLOW) {
139 if (reg.cnt1_enalbed) 139 if (reg.cnt1_enabled)
140 oprofile_add_sample(regs, 0); 140 oprofile_add_sample(regs, 0);
141 counter1 = reg.reset_counter1; 141 counter1 = reg.reset_counter1;
142 } 142 }
143 if (counter2 & LOONGSON2_PERFCNT_OVERFLOW) { 143 if (counter2 & LOONGSON2_PERFCNT_OVERFLOW) {
144 if (reg.cnt2_enalbed) 144 if (reg.cnt2_enabled)
145 oprofile_add_sample(regs, 1); 145 oprofile_add_sample(regs, 1);
146 counter2 = reg.reset_counter2; 146 counter2 = reg.reset_counter2;
147 } 147 }
diff --git a/arch/mips/pci/ops-pmcmsp.c b/arch/mips/pci/ops-pmcmsp.c
index 109c95ca698b..32548b5d68d6 100644
--- a/arch/mips/pci/ops-pmcmsp.c
+++ b/arch/mips/pci/ops-pmcmsp.c
@@ -385,6 +385,7 @@ int msp_pcibios_config_access(unsigned char access_type,
385 unsigned long intr; 385 unsigned long intr;
386 unsigned long value; 386 unsigned long value;
387 static char pciirqflag; 387 static char pciirqflag;
388 int ret;
388#if defined(CONFIG_PMC_MSP7120_GW) || defined(CONFIG_PMC_MSP7120_EVAL) 389#if defined(CONFIG_PMC_MSP7120_GW) || defined(CONFIG_PMC_MSP7120_EVAL)
389 unsigned int vpe_status; 390 unsigned int vpe_status;
390#endif 391#endif
@@ -402,11 +403,13 @@ int msp_pcibios_config_access(unsigned char access_type,
402 * allocation assigns an interrupt handler to the interrupt. 403 * allocation assigns an interrupt handler to the interrupt.
403 */ 404 */
404 if (pciirqflag == 0) { 405 if (pciirqflag == 0) {
405 request_irq(MSP_INT_PCI,/* Hardcoded internal MSP7120 wiring */ 406 ret = request_irq(MSP_INT_PCI,/* Hardcoded internal MSP7120 wiring */
406 bpci_interrupt, 407 bpci_interrupt,
407 IRQF_SHARED | IRQF_DISABLED, 408 IRQF_SHARED | IRQF_DISABLED,
408 "PMC MSP PCI Host", 409 "PMC MSP PCI Host",
409 preg); 410 preg);
411 if (ret != 0)
412 return ret;
410 pciirqflag = ~0; 413 pciirqflag = ~0;
411 } 414 }
412 415
diff --git a/arch/mips/sgi-ip27/ip27-smp.c b/arch/mips/sgi-ip27/ip27-smp.c
index 9aa8f2951df6..c6851df9ab74 100644
--- a/arch/mips/sgi-ip27/ip27-smp.c
+++ b/arch/mips/sgi-ip27/ip27-smp.c
@@ -165,7 +165,7 @@ static void ip27_send_ipi_single(int destid, unsigned int action)
165 REMOTE_HUB_SEND_INTR(COMPACT_TO_NASID_NODEID(cpu_to_node(destid)), irq); 165 REMOTE_HUB_SEND_INTR(COMPACT_TO_NASID_NODEID(cpu_to_node(destid)), irq);
166} 166}
167 167
168static void ip27_send_ipi(const struct cpumask *mask, unsigned int action) 168static void ip27_send_ipi_mask(const struct cpumask *mask, unsigned int action)
169{ 169{
170 unsigned int i; 170 unsigned int i;
171 171
diff --git a/arch/mips/sibyte/bcm1480/irq.c b/arch/mips/sibyte/bcm1480/irq.c
index ba59839a021e..4070268aa769 100644
--- a/arch/mips/sibyte/bcm1480/irq.c
+++ b/arch/mips/sibyte/bcm1480/irq.c
@@ -117,10 +117,6 @@ static int bcm1480_set_affinity(unsigned int irq, const struct cpumask *mask)
117 unsigned long flags; 117 unsigned long flags;
118 unsigned int irq_dirty; 118 unsigned int irq_dirty;
119 119
120 if (cpumask_weight(mask) != 1) {
121 printk("attempted to set irq affinity for irq %d to multiple CPUs\n", irq);
122 return -1;
123 }
124 i = cpumask_first(mask); 120 i = cpumask_first(mask);
125 121
126 /* Convert logical CPU to physical CPU */ 122 /* Convert logical CPU to physical CPU */
diff --git a/arch/mips/sibyte/common/sb_tbprof.c b/arch/mips/sibyte/common/sb_tbprof.c
index 637a194e5cd5..15ea778b5e66 100644
--- a/arch/mips/sibyte/common/sb_tbprof.c
+++ b/arch/mips/sibyte/common/sb_tbprof.c
@@ -403,36 +403,31 @@ static int sbprof_zbprof_stop(void)
403static int sbprof_tb_open(struct inode *inode, struct file *filp) 403static int sbprof_tb_open(struct inode *inode, struct file *filp)
404{ 404{
405 int minor; 405 int minor;
406 int err = 0;
407 406
408 lock_kernel();
409 minor = iminor(inode); 407 minor = iminor(inode);
410 if (minor != 0) { 408 if (minor != 0)
411 err = -ENODEV; 409 return -ENODEV;
412 goto out;
413 }
414 410
415 if (xchg(&sbp.open, SB_OPENING) != SB_CLOSED) { 411 if (xchg(&sbp.open, SB_OPENING) != SB_CLOSED)
416 err = -EBUSY; 412 return -EBUSY;
417 goto out;
418 }
419 413
420 memset(&sbp, 0, sizeof(struct sbprof_tb)); 414 memset(&sbp, 0, sizeof(struct sbprof_tb));
421 sbp.sbprof_tbbuf = vmalloc(MAX_TBSAMPLE_BYTES); 415 sbp.sbprof_tbbuf = vmalloc(MAX_TBSAMPLE_BYTES);
422 if (!sbp.sbprof_tbbuf) { 416 if (!sbp.sbprof_tbbuf) {
423 err = -ENOMEM; 417 sbp.open = SB_CLOSED;
424 goto out; 418 wmb();
419 return -ENOMEM;
425 } 420 }
421
426 memset(sbp.sbprof_tbbuf, 0, MAX_TBSAMPLE_BYTES); 422 memset(sbp.sbprof_tbbuf, 0, MAX_TBSAMPLE_BYTES);
427 init_waitqueue_head(&sbp.tb_sync); 423 init_waitqueue_head(&sbp.tb_sync);
428 init_waitqueue_head(&sbp.tb_read); 424 init_waitqueue_head(&sbp.tb_read);
429 mutex_init(&sbp.lock); 425 mutex_init(&sbp.lock);
430 426
431 sbp.open = SB_OPEN; 427 sbp.open = SB_OPEN;
428 wmb();
432 429
433 out: 430 return 0;
434 unlock_kernel();
435 return err;
436} 431}
437 432
438static int sbprof_tb_release(struct inode *inode, struct file *filp) 433static int sbprof_tb_release(struct inode *inode, struct file *filp)
@@ -440,7 +435,7 @@ static int sbprof_tb_release(struct inode *inode, struct file *filp)
440 int minor; 435 int minor;
441 436
442 minor = iminor(inode); 437 minor = iminor(inode);
443 if (minor != 0 || !sbp.open) 438 if (minor != 0 || sbp.open != SB_CLOSED)
444 return -ENODEV; 439 return -ENODEV;
445 440
446 mutex_lock(&sbp.lock); 441 mutex_lock(&sbp.lock);
@@ -449,7 +444,8 @@ static int sbprof_tb_release(struct inode *inode, struct file *filp)
449 sbprof_zbprof_stop(); 444 sbprof_zbprof_stop();
450 445
451 vfree(sbp.sbprof_tbbuf); 446 vfree(sbp.sbprof_tbbuf);
452 sbp.open = 0; 447 sbp.open = SB_CLOSED;
448 wmb();
453 449
454 mutex_unlock(&sbp.lock); 450 mutex_unlock(&sbp.lock);
455 451
@@ -583,7 +579,8 @@ static int __init sbprof_tb_init(void)
583 } 579 }
584 tb_dev = dev; 580 tb_dev = dev;
585 581
586 sbp.open = 0; 582 sbp.open = SB_CLOSED;
583 wmb();
587 tb_period = zbbus_mhz * 10000LL; 584 tb_period = zbbus_mhz * 10000LL;
588 pr_info(DEVNAME ": initialized - tb_period = %lld\n", 585 pr_info(DEVNAME ": initialized - tb_period = %lld\n",
589 (long long) tb_period); 586 (long long) tb_period);
diff --git a/arch/mips/sibyte/swarm/setup.c b/arch/mips/sibyte/swarm/setup.c
index 623ffc933c4c..5277aac96b0f 100644
--- a/arch/mips/sibyte/swarm/setup.c
+++ b/arch/mips/sibyte/swarm/setup.c
@@ -106,7 +106,7 @@ void read_persistent_clock(struct timespec *ts)
106 break; 106 break;
107 } 107 }
108 ts->tv_sec = sec; 108 ts->tv_sec = sec;
109 tv->tv_nsec = 0; 109 ts->tv_nsec = 0;
110} 110}
111 111
112int rtc_mips_set_time(unsigned long sec) 112int rtc_mips_set_time(unsigned long sec)
diff --git a/arch/mn10300/include/asm/uaccess.h b/arch/mn10300/include/asm/uaccess.h
index 8a3a4dd55763..167e10ff06d9 100644
--- a/arch/mn10300/include/asm/uaccess.h
+++ b/arch/mn10300/include/asm/uaccess.h
@@ -129,42 +129,47 @@ extern int fixup_exception(struct pt_regs *regs);
129struct __large_struct { unsigned long buf[100]; }; 129struct __large_struct { unsigned long buf[100]; };
130#define __m(x) (*(struct __large_struct *)(x)) 130#define __m(x) (*(struct __large_struct *)(x))
131 131
132#define __get_user_nocheck(x, ptr, size) \ 132#define __get_user_nocheck(x, ptr, size) \
133({ \ 133({ \
134 __typeof(*(ptr)) __gu_val; \ 134 unsigned long __gu_addr; \
135 unsigned long __gu_addr; \ 135 int __gu_err; \
136 int __gu_err; \ 136 __gu_addr = (unsigned long) (ptr); \
137 __gu_addr = (unsigned long) (ptr); \ 137 switch (size) { \
138 switch (size) { \ 138 case 1: { \
139 case 1: __get_user_asm("bu"); break; \ 139 unsigned char __gu_val; \
140 case 2: __get_user_asm("hu"); break; \ 140 __get_user_asm("bu"); \
141 case 4: __get_user_asm("" ); break; \ 141 (x) = *(__force __typeof__(*(ptr))*) &__gu_val; \
142 default: __get_user_unknown(); break; \ 142 break; \
143 } \ 143 } \
144 x = (__typeof__(*(ptr))) __gu_val; \ 144 case 2: { \
145 __gu_err; \ 145 unsigned short __gu_val; \
146 __get_user_asm("hu"); \
147 (x) = *(__force __typeof__(*(ptr))*) &__gu_val; \
148 break; \
149 } \
150 case 4: { \
151 unsigned int __gu_val; \
152 __get_user_asm(""); \
153 (x) = *(__force __typeof__(*(ptr))*) &__gu_val; \
154 break; \
155 } \
156 default: \
157 __get_user_unknown(); \
158 break; \
159 } \
160 __gu_err; \
146}) 161})
147 162
148#define __get_user_check(x, ptr, size) \ 163#define __get_user_check(x, ptr, size) \
149({ \ 164({ \
150 __typeof__(*(ptr)) __gu_val; \ 165 int _e; \
151 unsigned long __gu_addr; \ 166 if (likely(__access_ok((unsigned long) (ptr), (size)))) \
152 int __gu_err; \ 167 _e = __get_user_nocheck((x), (ptr), (size)); \
153 __gu_addr = (unsigned long) (ptr); \ 168 else { \
154 if (likely(__access_ok(__gu_addr,size))) { \ 169 _e = -EFAULT; \
155 switch (size) { \ 170 (x) = (__typeof__(x))0; \
156 case 1: __get_user_asm("bu"); break; \ 171 } \
157 case 2: __get_user_asm("hu"); break; \ 172 _e; \
158 case 4: __get_user_asm("" ); break; \
159 default: __get_user_unknown(); break; \
160 } \
161 } \
162 else { \
163 __gu_err = -EFAULT; \
164 __gu_val = 0; \
165 } \
166 x = (__typeof__(*(ptr))) __gu_val; \
167 __gu_err; \
168}) 173})
169 174
170#define __get_user_asm(INSN) \ 175#define __get_user_asm(INSN) \
diff --git a/arch/mn10300/unit-asb2303/include/unit/clock.h b/arch/mn10300/unit-asb2303/include/unit/clock.h
index 8b450e920af1..2a0bf79ab968 100644
--- a/arch/mn10300/unit-asb2303/include/unit/clock.h
+++ b/arch/mn10300/unit-asb2303/include/unit/clock.h
@@ -20,9 +20,9 @@ extern unsigned long mn10300_ioclk; /* IOCLK (crystal speed) in HZ */
20extern unsigned long mn10300_iobclk; 20extern unsigned long mn10300_iobclk;
21extern unsigned long mn10300_tsc_per_HZ; 21extern unsigned long mn10300_tsc_per_HZ;
22 22
23#define MN10300_IOCLK ((unsigned long)mn10300_ioclk) 23#define MN10300_IOCLK mn10300_ioclk
24/* If this processors has a another clock, uncomment the below. */ 24/* If this processors has a another clock, uncomment the below. */
25/* #define MN10300_IOBCLK ((unsigned long)mn10300_iobclk) */ 25/* #define MN10300_IOBCLK mn10300_iobclk */
26 26
27#else /* !CONFIG_MN10300_RTC */ 27#else /* !CONFIG_MN10300_RTC */
28 28
@@ -35,7 +35,7 @@ extern unsigned long mn10300_tsc_per_HZ;
35#define MN10300_TSCCLK MN10300_IOCLK 35#define MN10300_TSCCLK MN10300_IOCLK
36 36
37#ifdef CONFIG_MN10300_RTC 37#ifdef CONFIG_MN10300_RTC
38#define MN10300_TSC_PER_HZ ((unsigned long)mn10300_tsc_per_HZ) 38#define MN10300_TSC_PER_HZ mn10300_tsc_per_HZ
39#else /* !CONFIG_MN10300_RTC */ 39#else /* !CONFIG_MN10300_RTC */
40#define MN10300_TSC_PER_HZ (MN10300_TSCCLK/HZ) 40#define MN10300_TSC_PER_HZ (MN10300_TSCCLK/HZ)
41#endif /* !CONFIG_MN10300_RTC */ 41#endif /* !CONFIG_MN10300_RTC */
diff --git a/arch/mn10300/unit-asb2305/include/unit/clock.h b/arch/mn10300/unit-asb2305/include/unit/clock.h
index 7d514841ffda..67be3f2eb18e 100644
--- a/arch/mn10300/unit-asb2305/include/unit/clock.h
+++ b/arch/mn10300/unit-asb2305/include/unit/clock.h
@@ -20,9 +20,9 @@ extern unsigned long mn10300_ioclk; /* IOCLK (crystal speed) in HZ */
20extern unsigned long mn10300_iobclk; 20extern unsigned long mn10300_iobclk;
21extern unsigned long mn10300_tsc_per_HZ; 21extern unsigned long mn10300_tsc_per_HZ;
22 22
23#define MN10300_IOCLK ((unsigned long)mn10300_ioclk) 23#define MN10300_IOCLK mn10300_ioclk
24/* If this processors has a another clock, uncomment the below. */ 24/* If this processors has a another clock, uncomment the below. */
25/* #define MN10300_IOBCLK ((unsigned long)mn10300_iobclk) */ 25/* #define MN10300_IOBCLK mn10300_iobclk */
26 26
27#else /* !CONFIG_MN10300_RTC */ 27#else /* !CONFIG_MN10300_RTC */
28 28
@@ -35,7 +35,7 @@ extern unsigned long mn10300_tsc_per_HZ;
35#define MN10300_TSCCLK MN10300_IOCLK 35#define MN10300_TSCCLK MN10300_IOCLK
36 36
37#ifdef CONFIG_MN10300_RTC 37#ifdef CONFIG_MN10300_RTC
38#define MN10300_TSC_PER_HZ ((unsigned long)mn10300_tsc_per_HZ) 38#define MN10300_TSC_PER_HZ mn10300_tsc_per_HZ
39#else /* !CONFIG_MN10300_RTC */ 39#else /* !CONFIG_MN10300_RTC */
40#define MN10300_TSC_PER_HZ (MN10300_TSCCLK/HZ) 40#define MN10300_TSC_PER_HZ (MN10300_TSCCLK/HZ)
41#endif /* !CONFIG_MN10300_RTC */ 41#endif /* !CONFIG_MN10300_RTC */
diff --git a/arch/powerpc/kvm/timing.c b/arch/powerpc/kvm/timing.c
index 47ee603f558e..2aa371e30079 100644
--- a/arch/powerpc/kvm/timing.c
+++ b/arch/powerpc/kvm/timing.c
@@ -201,7 +201,7 @@ static int kvmppc_exit_timing_open(struct inode *inode, struct file *file)
201 return single_open(file, kvmppc_exit_timing_show, inode->i_private); 201 return single_open(file, kvmppc_exit_timing_show, inode->i_private);
202} 202}
203 203
204static struct file_operations kvmppc_exit_timing_fops = { 204static const struct file_operations kvmppc_exit_timing_fops = {
205 .owner = THIS_MODULE, 205 .owner = THIS_MODULE,
206 .open = kvmppc_exit_timing_open, 206 .open = kvmppc_exit_timing_open,
207 .read = seq_read, 207 .read = seq_read,
diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
index 8f079b865ad0..884e8bcec499 100644
--- a/arch/powerpc/platforms/cell/spufs/file.c
+++ b/arch/powerpc/platforms/cell/spufs/file.c
@@ -147,7 +147,7 @@ static int __fops ## _open(struct inode *inode, struct file *file) \
147 __simple_attr_check_format(__fmt, 0ull); \ 147 __simple_attr_check_format(__fmt, 0ull); \
148 return spufs_attr_open(inode, file, __get, __set, __fmt); \ 148 return spufs_attr_open(inode, file, __get, __set, __fmt); \
149} \ 149} \
150static struct file_operations __fops = { \ 150static const struct file_operations __fops = { \
151 .owner = THIS_MODULE, \ 151 .owner = THIS_MODULE, \
152 .open = __fops ## _open, \ 152 .open = __fops ## _open, \
153 .release = spufs_attr_release, \ 153 .release = spufs_attr_release, \
@@ -309,7 +309,7 @@ static int spufs_mem_mmap_access(struct vm_area_struct *vma,
309 return len; 309 return len;
310} 310}
311 311
312static struct vm_operations_struct spufs_mem_mmap_vmops = { 312static const struct vm_operations_struct spufs_mem_mmap_vmops = {
313 .fault = spufs_mem_mmap_fault, 313 .fault = spufs_mem_mmap_fault,
314 .access = spufs_mem_mmap_access, 314 .access = spufs_mem_mmap_access,
315}; 315};
@@ -436,7 +436,7 @@ static int spufs_cntl_mmap_fault(struct vm_area_struct *vma,
436 return spufs_ps_fault(vma, vmf, 0x4000, SPUFS_CNTL_MAP_SIZE); 436 return spufs_ps_fault(vma, vmf, 0x4000, SPUFS_CNTL_MAP_SIZE);
437} 437}
438 438
439static struct vm_operations_struct spufs_cntl_mmap_vmops = { 439static const struct vm_operations_struct spufs_cntl_mmap_vmops = {
440 .fault = spufs_cntl_mmap_fault, 440 .fault = spufs_cntl_mmap_fault,
441}; 441};
442 442
@@ -1143,7 +1143,7 @@ spufs_signal1_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1143#endif 1143#endif
1144} 1144}
1145 1145
1146static struct vm_operations_struct spufs_signal1_mmap_vmops = { 1146static const struct vm_operations_struct spufs_signal1_mmap_vmops = {
1147 .fault = spufs_signal1_mmap_fault, 1147 .fault = spufs_signal1_mmap_fault,
1148}; 1148};
1149 1149
@@ -1279,7 +1279,7 @@ spufs_signal2_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1279#endif 1279#endif
1280} 1280}
1281 1281
1282static struct vm_operations_struct spufs_signal2_mmap_vmops = { 1282static const struct vm_operations_struct spufs_signal2_mmap_vmops = {
1283 .fault = spufs_signal2_mmap_fault, 1283 .fault = spufs_signal2_mmap_fault,
1284}; 1284};
1285 1285
@@ -1397,7 +1397,7 @@ spufs_mss_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1397 return spufs_ps_fault(vma, vmf, 0x0000, SPUFS_MSS_MAP_SIZE); 1397 return spufs_ps_fault(vma, vmf, 0x0000, SPUFS_MSS_MAP_SIZE);
1398} 1398}
1399 1399
1400static struct vm_operations_struct spufs_mss_mmap_vmops = { 1400static const struct vm_operations_struct spufs_mss_mmap_vmops = {
1401 .fault = spufs_mss_mmap_fault, 1401 .fault = spufs_mss_mmap_fault,
1402}; 1402};
1403 1403
@@ -1458,7 +1458,7 @@ spufs_psmap_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1458 return spufs_ps_fault(vma, vmf, 0x0000, SPUFS_PS_MAP_SIZE); 1458 return spufs_ps_fault(vma, vmf, 0x0000, SPUFS_PS_MAP_SIZE);
1459} 1459}
1460 1460
1461static struct vm_operations_struct spufs_psmap_mmap_vmops = { 1461static const struct vm_operations_struct spufs_psmap_mmap_vmops = {
1462 .fault = spufs_psmap_mmap_fault, 1462 .fault = spufs_psmap_mmap_fault,
1463}; 1463};
1464 1464
@@ -1517,7 +1517,7 @@ spufs_mfc_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1517 return spufs_ps_fault(vma, vmf, 0x3000, SPUFS_MFC_MAP_SIZE); 1517 return spufs_ps_fault(vma, vmf, 0x3000, SPUFS_MFC_MAP_SIZE);
1518} 1518}
1519 1519
1520static struct vm_operations_struct spufs_mfc_mmap_vmops = { 1520static const struct vm_operations_struct spufs_mfc_mmap_vmops = {
1521 .fault = spufs_mfc_mmap_fault, 1521 .fault = spufs_mfc_mmap_fault,
1522}; 1522};
1523 1523
diff --git a/arch/powerpc/platforms/pseries/dtl.c b/arch/powerpc/platforms/pseries/dtl.c
index ab69925d579b..937a544a236d 100644
--- a/arch/powerpc/platforms/pseries/dtl.c
+++ b/arch/powerpc/platforms/pseries/dtl.c
@@ -209,7 +209,7 @@ static ssize_t dtl_file_read(struct file *filp, char __user *buf, size_t len,
209 return n_read * sizeof(struct dtl_entry); 209 return n_read * sizeof(struct dtl_entry);
210} 210}
211 211
212static struct file_operations dtl_fops = { 212static const struct file_operations dtl_fops = {
213 .open = dtl_file_open, 213 .open = dtl_file_open,
214 .release = dtl_file_release, 214 .release = dtl_file_release,
215 .read = dtl_file_read, 215 .read = dtl_file_read,
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 97fca4695e0b..ac45aab741a5 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -102,6 +102,9 @@ config HAVE_SETUP_PER_CPU_AREA
102config NEED_PER_CPU_EMBED_FIRST_CHUNK 102config NEED_PER_CPU_EMBED_FIRST_CHUNK
103 def_bool y if SPARC64 103 def_bool y if SPARC64
104 104
105config NEED_PER_CPU_PAGE_FIRST_CHUNK
106 def_bool y if SPARC64
107
105config GENERIC_HARDIRQS_NO__DO_IRQ 108config GENERIC_HARDIRQS_NO__DO_IRQ
106 bool 109 bool
107 def_bool y if SPARC64 110 def_bool y if SPARC64
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index ff68373ce6d6..aa36223497b9 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -1420,7 +1420,7 @@ static void __init pcpu_free_bootmem(void *ptr, size_t size)
1420 free_bootmem(__pa(ptr), size); 1420 free_bootmem(__pa(ptr), size);
1421} 1421}
1422 1422
1423static int pcpu_cpu_distance(unsigned int from, unsigned int to) 1423static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
1424{ 1424{
1425 if (cpu_to_node(from) == cpu_to_node(to)) 1425 if (cpu_to_node(from) == cpu_to_node(to))
1426 return LOCAL_DISTANCE; 1426 return LOCAL_DISTANCE;
@@ -1428,18 +1428,53 @@ static int pcpu_cpu_distance(unsigned int from, unsigned int to)
1428 return REMOTE_DISTANCE; 1428 return REMOTE_DISTANCE;
1429} 1429}
1430 1430
1431static void __init pcpu_populate_pte(unsigned long addr)
1432{
1433 pgd_t *pgd = pgd_offset_k(addr);
1434 pud_t *pud;
1435 pmd_t *pmd;
1436
1437 pud = pud_offset(pgd, addr);
1438 if (pud_none(*pud)) {
1439 pmd_t *new;
1440
1441 new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
1442 pud_populate(&init_mm, pud, new);
1443 }
1444
1445 pmd = pmd_offset(pud, addr);
1446 if (!pmd_present(*pmd)) {
1447 pte_t *new;
1448
1449 new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
1450 pmd_populate_kernel(&init_mm, pmd, new);
1451 }
1452}
1453
1431void __init setup_per_cpu_areas(void) 1454void __init setup_per_cpu_areas(void)
1432{ 1455{
1433 unsigned long delta; 1456 unsigned long delta;
1434 unsigned int cpu; 1457 unsigned int cpu;
1435 int rc; 1458 int rc = -EINVAL;
1436 1459
1437 rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE, 1460 if (pcpu_chosen_fc != PCPU_FC_PAGE) {
1438 PERCPU_DYNAMIC_RESERVE, 4 << 20, 1461 rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
1439 pcpu_cpu_distance, pcpu_alloc_bootmem, 1462 PERCPU_DYNAMIC_RESERVE, 4 << 20,
1440 pcpu_free_bootmem); 1463 pcpu_cpu_distance,
1441 if (rc) 1464 pcpu_alloc_bootmem,
1442 panic("failed to initialize first chunk (%d)", rc); 1465 pcpu_free_bootmem);
1466 if (rc)
1467 pr_warning("PERCPU: %s allocator failed (%d), "
1468 "falling back to page size\n",
1469 pcpu_fc_names[pcpu_chosen_fc], rc);
1470 }
1471 if (rc < 0)
1472 rc = pcpu_page_first_chunk(PERCPU_MODULE_RESERVE,
1473 pcpu_alloc_bootmem,
1474 pcpu_free_bootmem,
1475 pcpu_populate_pte);
1476 if (rc < 0)
1477 panic("cannot initialize percpu area (err=%d)", rc);
1443 1478
1444 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start; 1479 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
1445 for_each_possible_cpu(cpu) 1480 for_each_possible_cpu(cpu)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 93698794aa3a..8da93745c087 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -432,6 +432,17 @@ config X86_NUMAQ
432 of Flat Logical. You will need a new lynxer.elf file to flash your 432 of Flat Logical. You will need a new lynxer.elf file to flash your
433 firmware with - send email to <Martin.Bligh@us.ibm.com>. 433 firmware with - send email to <Martin.Bligh@us.ibm.com>.
434 434
435config X86_SUPPORTS_MEMORY_FAILURE
436 bool
437 # MCE code calls memory_failure():
438 depends on X86_MCE
439 # On 32-bit this adds too big of NODES_SHIFT and we run out of page flags:
440 depends on !X86_NUMAQ
441 # On 32-bit SPARSEMEM adds too big of SECTIONS_WIDTH:
442 depends on X86_64 || !SPARSEMEM
443 select ARCH_SUPPORTS_MEMORY_FAILURE
444 default y
445
435config X86_VISWS 446config X86_VISWS
436 bool "SGI 320/540 (Visual Workstation)" 447 bool "SGI 320/540 (Visual Workstation)"
437 depends on X86_32 && PCI && X86_MPPARSE && PCI_GODIRECT 448 depends on X86_32 && PCI && X86_MPPARSE && PCI_GODIRECT
diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
index 7c5ef8b14d92..46fc474fd819 100644
--- a/arch/x86/include/asm/checksum_32.h
+++ b/arch/x86/include/asm/checksum_32.h
@@ -161,7 +161,8 @@ static inline __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
161 "adcl $0, %0 ;\n" 161 "adcl $0, %0 ;\n"
162 : "=&r" (sum) 162 : "=&r" (sum)
163 : "r" (saddr), "r" (daddr), 163 : "r" (saddr), "r" (daddr),
164 "r" (htonl(len)), "r" (htonl(proto)), "0" (sum)); 164 "r" (htonl(len)), "r" (htonl(proto)), "0" (sum)
165 : "memory");
165 166
166 return csum_fold(sum); 167 return csum_fold(sum);
167} 168}
diff --git a/arch/x86/include/asm/cmpxchg_32.h b/arch/x86/include/asm/cmpxchg_32.h
index 82ceb788a981..ee1931be6593 100644
--- a/arch/x86/include/asm/cmpxchg_32.h
+++ b/arch/x86/include/asm/cmpxchg_32.h
@@ -312,19 +312,23 @@ static inline unsigned long cmpxchg_386(volatile void *ptr, unsigned long old,
312 312
313extern unsigned long long cmpxchg_486_u64(volatile void *, u64, u64); 313extern unsigned long long cmpxchg_486_u64(volatile void *, u64, u64);
314 314
315#define cmpxchg64(ptr, o, n) \ 315#define cmpxchg64(ptr, o, n) \
316({ \ 316({ \
317 __typeof__(*(ptr)) __ret; \ 317 __typeof__(*(ptr)) __ret; \
318 if (likely(boot_cpu_data.x86 > 4)) \ 318 __typeof__(*(ptr)) __old = (o); \
319 __ret = (__typeof__(*(ptr)))__cmpxchg64((ptr), \ 319 __typeof__(*(ptr)) __new = (n); \
320 (unsigned long long)(o), \ 320 alternative_io("call cmpxchg8b_emu", \
321 (unsigned long long)(n)); \ 321 "lock; cmpxchg8b (%%esi)" , \
322 else \ 322 X86_FEATURE_CX8, \
323 __ret = (__typeof__(*(ptr)))cmpxchg_486_u64((ptr), \ 323 "=A" (__ret), \
324 (unsigned long long)(o), \ 324 "S" ((ptr)), "0" (__old), \
325 (unsigned long long)(n)); \ 325 "b" ((unsigned int)__new), \
326 __ret; \ 326 "c" ((unsigned int)(__new>>32)) \
327}) 327 : "memory"); \
328 __ret; })
329
330
331
328#define cmpxchg64_local(ptr, o, n) \ 332#define cmpxchg64_local(ptr, o, n) \
329({ \ 333({ \
330 __typeof__(*(ptr)) __ret; \ 334 __typeof__(*(ptr)) __ret; \
diff --git a/arch/x86/kernel/acpi/cstate.c b/arch/x86/kernel/acpi/cstate.c
index 8c44c232efcb..59cdfa4686b2 100644
--- a/arch/x86/kernel/acpi/cstate.c
+++ b/arch/x86/kernel/acpi/cstate.c
@@ -48,7 +48,7 @@ void acpi_processor_power_init_bm_check(struct acpi_processor_flags *flags,
48 * P4, Core and beyond CPUs 48 * P4, Core and beyond CPUs
49 */ 49 */
50 if (c->x86_vendor == X86_VENDOR_INTEL && 50 if (c->x86_vendor == X86_VENDOR_INTEL &&
51 (c->x86 > 0x6 || (c->x86 == 6 && c->x86_model >= 14))) 51 (c->x86 > 0xf || (c->x86 == 6 && c->x86_model >= 14)))
52 flags->bm_control = 0; 52 flags->bm_control = 0;
53} 53}
54EXPORT_SYMBOL(acpi_processor_power_init_bm_check); 54EXPORT_SYMBOL(acpi_processor_power_init_bm_check);
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 4b2af86e3e8d..183c3457d2f4 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -204,10 +204,7 @@ static void print_mce_head(void)
204static void print_mce_tail(void) 204static void print_mce_tail(void)
205{ 205{
206 printk(KERN_EMERG "This is not a software problem!\n" 206 printk(KERN_EMERG "This is not a software problem!\n"
207#if (!defined(CONFIG_EDAC) || !defined(CONFIG_CPU_SUP_AMD)) 207 "Run through mcelog --ascii to decode and contact your hardware vendor\n");
208 "Run through mcelog --ascii to decode and contact your hardware vendor\n"
209#endif
210 );
211} 208}
212 209
213#define PANIC_TIMEOUT 5 /* 5 seconds */ 210#define PANIC_TIMEOUT 5 /* 5 seconds */
diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
index 43cec6bdda63..1736c5a725aa 100644
--- a/arch/x86/kernel/i386_ksyms_32.c
+++ b/arch/x86/kernel/i386_ksyms_32.c
@@ -10,6 +10,14 @@
10EXPORT_SYMBOL(mcount); 10EXPORT_SYMBOL(mcount);
11#endif 11#endif
12 12
13/*
14 * Note, this is a prototype to get at the symbol for
15 * the export, but dont use it from C code, it is used
16 * by assembly code and is not using C calling convention!
17 */
18extern void cmpxchg8b_emu(void);
19EXPORT_SYMBOL(cmpxchg8b_emu);
20
13/* Networking helper routines. */ 21/* Networking helper routines. */
14EXPORT_SYMBOL(csum_partial_copy_generic); 22EXPORT_SYMBOL(csum_partial_copy_generic);
15 23
diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile
index 9e609206fac9..3e549b8ec8c9 100644
--- a/arch/x86/lib/Makefile
+++ b/arch/x86/lib/Makefile
@@ -15,7 +15,7 @@ ifeq ($(CONFIG_X86_32),y)
15 obj-y += atomic64_32.o 15 obj-y += atomic64_32.o
16 lib-y += checksum_32.o 16 lib-y += checksum_32.o
17 lib-y += strstr_32.o 17 lib-y += strstr_32.o
18 lib-y += semaphore_32.o string_32.o 18 lib-y += semaphore_32.o string_32.o cmpxchg8b_emu.o
19 19
20 lib-$(CONFIG_X86_USE_3DNOW) += mmx_32.o 20 lib-$(CONFIG_X86_USE_3DNOW) += mmx_32.o
21else 21else
diff --git a/arch/x86/lib/cmpxchg8b_emu.S b/arch/x86/lib/cmpxchg8b_emu.S
new file mode 100644
index 000000000000..828cb710dec2
--- /dev/null
+++ b/arch/x86/lib/cmpxchg8b_emu.S
@@ -0,0 +1,57 @@
1/*
2 * This program is free software; you can redistribute it and/or
3 * modify it under the terms of the GNU General Public License
4 * as published by the Free Software Foundation; version 2
5 * of the License.
6 *
7 */
8
9#include <linux/linkage.h>
10#include <asm/alternative-asm.h>
11#include <asm/frame.h>
12#include <asm/dwarf2.h>
13
14
15.text
16
17/*
18 * Inputs:
19 * %esi : memory location to compare
20 * %eax : low 32 bits of old value
21 * %edx : high 32 bits of old value
22 * %ebx : low 32 bits of new value
23 * %ecx : high 32 bits of new value
24 */
25ENTRY(cmpxchg8b_emu)
26CFI_STARTPROC
27
28#
29# Emulate 'cmpxchg8b (%esi)' on UP except we don't
30# set the whole ZF thing (caller will just compare
31# eax:edx with the expected value)
32#
33cmpxchg8b_emu:
34 pushfl
35 cli
36
37 cmpl (%esi), %eax
38 jne not_same
39 cmpl 4(%esi), %edx
40 jne half_same
41
42 movl %ebx, (%esi)
43 movl %ecx, 4(%esi)
44
45 popfl
46 ret
47
48 not_same:
49 movl (%esi), %eax
50 half_same:
51 movl 4(%esi), %edx
52
53 popfl
54 ret
55
56CFI_ENDPROC
57ENDPROC(cmpxchg8b_emu)
diff --git a/arch/x86/pci/i386.c b/arch/x86/pci/i386.c
index 52e62e57fedd..b22d13b0c71d 100644
--- a/arch/x86/pci/i386.c
+++ b/arch/x86/pci/i386.c
@@ -266,7 +266,7 @@ void pcibios_set_master(struct pci_dev *dev)
266 pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat); 266 pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
267} 267}
268 268
269static struct vm_operations_struct pci_mmap_ops = { 269static const struct vm_operations_struct pci_mmap_ops = {
270 .access = generic_access_phys, 270 .access = generic_access_phys,
271}; 271};
272 272
diff --git a/arch/x86/xen/debugfs.c b/arch/x86/xen/debugfs.c
index b53225d2cac3..e133ce25e290 100644
--- a/arch/x86/xen/debugfs.c
+++ b/arch/x86/xen/debugfs.c
@@ -100,7 +100,7 @@ static int xen_array_release(struct inode *inode, struct file *file)
100 return 0; 100 return 0;
101} 101}
102 102
103static struct file_operations u32_array_fops = { 103static const struct file_operations u32_array_fops = {
104 .owner = THIS_MODULE, 104 .owner = THIS_MODULE,
105 .open = u32_array_open, 105 .open = u32_array_open,
106 .release= xen_array_release, 106 .release= xen_array_release,
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 5633b86e3ed1..7c1c59ea9ec6 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -1161,7 +1161,13 @@ int acpi_check_resource_conflict(struct resource *res)
1161 res_list_elem->name, 1161 res_list_elem->name,
1162 (long long) res_list_elem->start, 1162 (long long) res_list_elem->start,
1163 (long long) res_list_elem->end); 1163 (long long) res_list_elem->end);
1164 printk(KERN_INFO "ACPI: Device needs an ACPI driver\n"); 1164 if (acpi_enforce_resources == ENFORCE_RESOURCES_LAX)
1165 printk(KERN_NOTICE "ACPI: This conflict may"
1166 " cause random problems and system"
1167 " instability\n");
1168 printk(KERN_INFO "ACPI: If an ACPI driver is available"
1169 " for this device, you should use it instead of"
1170 " the native driver\n");
1165 } 1171 }
1166 if (acpi_enforce_resources == ENFORCE_RESOURCES_STRICT) 1172 if (acpi_enforce_resources == ENFORCE_RESOURCES_STRICT)
1167 return -EBUSY; 1173 return -EBUSY;
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index cc61a6220102..bbd066e7f854 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -1166,7 +1166,6 @@ int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
1166#ifdef CONFIG_ACPI_PROCFS 1166#ifdef CONFIG_ACPI_PROCFS
1167 struct proc_dir_entry *entry = NULL; 1167 struct proc_dir_entry *entry = NULL;
1168#endif 1168#endif
1169 unsigned int i;
1170 1169
1171 if (boot_option_idle_override) 1170 if (boot_option_idle_override)
1172 return 0; 1171 return 0;
@@ -1214,13 +1213,6 @@ int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
1214 acpi_processor_setup_cpuidle(pr); 1213 acpi_processor_setup_cpuidle(pr);
1215 if (cpuidle_register_device(&pr->power.dev)) 1214 if (cpuidle_register_device(&pr->power.dev))
1216 return -EIO; 1215 return -EIO;
1217
1218 printk(KERN_INFO PREFIX "CPU%d (power states:", pr->id);
1219 for (i = 1; i <= pr->power.count; i++)
1220 if (pr->power.states[i].valid)
1221 printk(" C%d[C%d]", i,
1222 pr->power.states[i].type);
1223 printk(")\n");
1224 } 1216 }
1225#ifdef CONFIG_ACPI_PROCFS 1217#ifdef CONFIG_ACPI_PROCFS
1226 /* 'power' [R] */ 1218 /* 'power' [R] */
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index a4fddb24476f..f6e54bf8dd96 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -285,7 +285,7 @@ static int acpi_video_device_brightness_open_fs(struct inode *inode,
285 struct file *file); 285 struct file *file);
286static ssize_t acpi_video_device_write_brightness(struct file *file, 286static ssize_t acpi_video_device_write_brightness(struct file *file,
287 const char __user *buffer, size_t count, loff_t *data); 287 const char __user *buffer, size_t count, loff_t *data);
288static struct file_operations acpi_video_device_brightness_fops = { 288static const struct file_operations acpi_video_device_brightness_fops = {
289 .owner = THIS_MODULE, 289 .owner = THIS_MODULE,
290 .open = acpi_video_device_brightness_open_fs, 290 .open = acpi_video_device_brightness_open_fs,
291 .read = seq_read, 291 .read = seq_read,
diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
index 703364b52170..66e181345b3a 100644
--- a/drivers/atm/ambassador.c
+++ b/drivers/atm/ambassador.c
@@ -1306,14 +1306,6 @@ static void amb_close (struct atm_vcc * atm_vcc) {
1306 return; 1306 return;
1307} 1307}
1308 1308
1309/********** Set socket options for a VC **********/
1310
1311// int amb_getsockopt (struct atm_vcc * atm_vcc, int level, int optname, void * optval, int optlen);
1312
1313/********** Set socket options for a VC **********/
1314
1315// int amb_setsockopt (struct atm_vcc * atm_vcc, int level, int optname, void * optval, int optlen);
1316
1317/********** Send **********/ 1309/********** Send **********/
1318 1310
1319static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) { 1311static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
index 5503bfc8e132..0c3026145443 100644
--- a/drivers/atm/eni.c
+++ b/drivers/atm/eni.c
@@ -2031,7 +2031,7 @@ static int eni_getsockopt(struct atm_vcc *vcc,int level,int optname,
2031 2031
2032 2032
2033static int eni_setsockopt(struct atm_vcc *vcc,int level,int optname, 2033static int eni_setsockopt(struct atm_vcc *vcc,int level,int optname,
2034 void __user *optval,int optlen) 2034 void __user *optval,unsigned int optlen)
2035{ 2035{
2036 return -EINVAL; 2036 return -EINVAL;
2037} 2037}
diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
index b119640e1ee9..cd5049af47a9 100644
--- a/drivers/atm/firestream.c
+++ b/drivers/atm/firestream.c
@@ -1244,7 +1244,7 @@ static int fs_getsockopt(struct atm_vcc *vcc,int level,int optname,
1244 1244
1245 1245
1246static int fs_setsockopt(struct atm_vcc *vcc,int level,int optname, 1246static int fs_setsockopt(struct atm_vcc *vcc,int level,int optname,
1247 void __user *optval,int optlen) 1247 void __user *optval,unsigned int optlen)
1248{ 1248{
1249 func_enter (); 1249 func_enter ();
1250 func_exit (); 1250 func_exit ();
diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
index 10f000dbe448..f766cc46b4c4 100644
--- a/drivers/atm/fore200e.c
+++ b/drivers/atm/fore200e.c
@@ -1795,7 +1795,7 @@ fore200e_getsockopt(struct atm_vcc* vcc, int level, int optname, void __user *op
1795 1795
1796 1796
1797static int 1797static int
1798fore200e_setsockopt(struct atm_vcc* vcc, int level, int optname, void __user *optval, int optlen) 1798fore200e_setsockopt(struct atm_vcc* vcc, int level, int optname, void __user *optval, unsigned int optlen)
1799{ 1799{
1800 /* struct fore200e* fore200e = FORE200E_DEV(vcc->dev); */ 1800 /* struct fore200e* fore200e = FORE200E_DEV(vcc->dev); */
1801 1801
diff --git a/drivers/atm/he.c b/drivers/atm/he.c
index 29e66d603d3c..70667033a568 100644
--- a/drivers/atm/he.c
+++ b/drivers/atm/he.c
@@ -921,9 +921,9 @@ out_free_rbpq_base:
921 he_dev->rbrq_phys); 921 he_dev->rbrq_phys);
922 i = CONFIG_RBPL_SIZE; 922 i = CONFIG_RBPL_SIZE;
923out_free_rbpl_virt: 923out_free_rbpl_virt:
924 while (--i) 924 while (i--)
925 pci_pool_free(he_dev->rbps_pool, he_dev->rbpl_virt[i].virt, 925 pci_pool_free(he_dev->rbpl_pool, he_dev->rbpl_virt[i].virt,
926 he_dev->rbps_base[i].phys); 926 he_dev->rbpl_base[i].phys);
927 kfree(he_dev->rbpl_virt); 927 kfree(he_dev->rbpl_virt);
928 928
929out_free_rbpl_base: 929out_free_rbpl_base:
@@ -933,11 +933,11 @@ out_free_rbpl_base:
933out_destroy_rbpl_pool: 933out_destroy_rbpl_pool:
934 pci_pool_destroy(he_dev->rbpl_pool); 934 pci_pool_destroy(he_dev->rbpl_pool);
935 935
936 i = CONFIG_RBPL_SIZE; 936 i = CONFIG_RBPS_SIZE;
937out_free_rbps_virt: 937out_free_rbps_virt:
938 while (--i) 938 while (i--)
939 pci_pool_free(he_dev->rbpl_pool, he_dev->rbps_virt[i].virt, 939 pci_pool_free(he_dev->rbps_pool, he_dev->rbps_virt[i].virt,
940 he_dev->rbpl_base[i].phys); 940 he_dev->rbps_base[i].phys);
941 kfree(he_dev->rbps_virt); 941 kfree(he_dev->rbps_virt);
942 942
943out_free_rbps_base: 943out_free_rbps_base:
diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
index 01ce241dbeae..4e49021e67ee 100644
--- a/drivers/atm/horizon.c
+++ b/drivers/atm/horizon.c
@@ -2590,7 +2590,7 @@ static int hrz_getsockopt (struct atm_vcc * atm_vcc, int level, int optname,
2590} 2590}
2591 2591
2592static int hrz_setsockopt (struct atm_vcc * atm_vcc, int level, int optname, 2592static int hrz_setsockopt (struct atm_vcc * atm_vcc, int level, int optname,
2593 void *optval, int optlen) { 2593 void *optval, unsigned int optlen) {
2594 hrz_dev * dev = HRZ_DEV(atm_vcc->dev); 2594 hrz_dev * dev = HRZ_DEV(atm_vcc->dev);
2595 PRINTD (DBG_FLOW|DBG_VCC, "hrz_setsockopt"); 2595 PRINTD (DBG_FLOW|DBG_VCC, "hrz_setsockopt");
2596 switch (level) { 2596 switch (level) {
diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
index 78c9736c3579..b2c1b37ab2e4 100644
--- a/drivers/atm/iphase.c
+++ b/drivers/atm/iphase.c
@@ -2862,7 +2862,7 @@ static int ia_getsockopt(struct atm_vcc *vcc, int level, int optname,
2862} 2862}
2863 2863
2864static int ia_setsockopt(struct atm_vcc *vcc, int level, int optname, 2864static int ia_setsockopt(struct atm_vcc *vcc, int level, int optname,
2865 void __user *optval, int optlen) 2865 void __user *optval, unsigned int optlen)
2866{ 2866{
2867 IF_EVENT(printk(">ia_setsockopt\n");) 2867 IF_EVENT(printk(">ia_setsockopt\n");)
2868 return -EINVAL; 2868 return -EINVAL;
diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
index 752b1ba81f7e..2e9635be048c 100644
--- a/drivers/atm/zatm.c
+++ b/drivers/atm/zatm.c
@@ -1517,7 +1517,7 @@ static int zatm_getsockopt(struct atm_vcc *vcc,int level,int optname,
1517 1517
1518 1518
1519static int zatm_setsockopt(struct atm_vcc *vcc,int level,int optname, 1519static int zatm_setsockopt(struct atm_vcc *vcc,int level,int optname,
1520 void __user *optval,int optlen) 1520 void __user *optval,unsigned int optlen)
1521{ 1521{
1522 return -EINVAL; 1522 return -EINVAL;
1523} 1523}
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 24c3e21ab263..1ece0b47b581 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -426,7 +426,7 @@ out:
426 return err; 426 return err;
427} 427}
428 428
429static struct file_operations cciss_proc_fops = { 429static const struct file_operations cciss_proc_fops = {
430 .owner = THIS_MODULE, 430 .owner = THIS_MODULE,
431 .open = cciss_seq_open, 431 .open = cciss_seq_open,
432 .read = seq_read, 432 .read = seq_read,
diff --git a/drivers/char/agp/agp.h b/drivers/char/agp/agp.h
index d6f36c004d9b..870f12cfed93 100644
--- a/drivers/char/agp/agp.h
+++ b/drivers/char/agp/agp.h
@@ -131,7 +131,7 @@ struct agp_bridge_driver {
131struct agp_bridge_data { 131struct agp_bridge_data {
132 const struct agp_version *version; 132 const struct agp_version *version;
133 const struct agp_bridge_driver *driver; 133 const struct agp_bridge_driver *driver;
134 struct vm_operations_struct *vm_ops; 134 const struct vm_operations_struct *vm_ops;
135 void *previous_size; 135 void *previous_size;
136 void *current_size; 136 void *current_size;
137 void *dev_private_data; 137 void *dev_private_data;
diff --git a/drivers/char/agp/alpha-agp.c b/drivers/char/agp/alpha-agp.c
index 5ea4da8e9954..dd84af4d4f7e 100644
--- a/drivers/char/agp/alpha-agp.c
+++ b/drivers/char/agp/alpha-agp.c
@@ -40,7 +40,7 @@ static struct aper_size_info_fixed alpha_core_agp_sizes[] =
40 { 0, 0, 0 }, /* filled in by alpha_core_agp_setup */ 40 { 0, 0, 0 }, /* filled in by alpha_core_agp_setup */
41}; 41};
42 42
43struct vm_operations_struct alpha_core_agp_vm_ops = { 43static const struct vm_operations_struct alpha_core_agp_vm_ops = {
44 .fault = alpha_core_agp_vm_fault, 44 .fault = alpha_core_agp_vm_fault,
45}; 45};
46 46
diff --git a/drivers/char/apm-emulation.c b/drivers/char/apm-emulation.c
index aaca40283be9..4f568cb9af3f 100644
--- a/drivers/char/apm-emulation.c
+++ b/drivers/char/apm-emulation.c
@@ -393,7 +393,7 @@ static int apm_open(struct inode * inode, struct file * filp)
393 return as ? 0 : -ENOMEM; 393 return as ? 0 : -ENOMEM;
394} 394}
395 395
396static struct file_operations apm_bios_fops = { 396static const struct file_operations apm_bios_fops = {
397 .owner = THIS_MODULE, 397 .owner = THIS_MODULE,
398 .read = apm_read, 398 .read = apm_read,
399 .poll = apm_poll, 399 .poll = apm_poll,
diff --git a/drivers/char/bfin-otp.c b/drivers/char/bfin-otp.c
index e3dd24bff514..836d4f0a876f 100644
--- a/drivers/char/bfin-otp.c
+++ b/drivers/char/bfin-otp.c
@@ -217,7 +217,7 @@ static long bfin_otp_ioctl(struct file *filp, unsigned cmd, unsigned long arg)
217# define bfin_otp_ioctl NULL 217# define bfin_otp_ioctl NULL
218#endif 218#endif
219 219
220static struct file_operations bfin_otp_fops = { 220static const struct file_operations bfin_otp_fops = {
221 .owner = THIS_MODULE, 221 .owner = THIS_MODULE,
222 .unlocked_ioctl = bfin_otp_ioctl, 222 .unlocked_ioctl = bfin_otp_ioctl,
223 .read = bfin_otp_read, 223 .read = bfin_otp_read,
diff --git a/drivers/char/cyclades.c b/drivers/char/cyclades.c
index df5038bbcbc2..4254457d3911 100644
--- a/drivers/char/cyclades.c
+++ b/drivers/char/cyclades.c
@@ -3354,7 +3354,7 @@ static int __init cy_detect_isa(void)
3354 continue; 3354 continue;
3355 } 3355 }
3356#ifdef MODULE 3356#ifdef MODULE
3357 if (isparam && irq[i]) 3357 if (isparam && i < NR_CARDS && irq[i])
3358 cy_isa_irq = irq[i]; 3358 cy_isa_irq = irq[i];
3359 else 3359 else
3360#endif 3360#endif
diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c
index 00dd3de1be51..06aad0831c73 100644
--- a/drivers/char/hw_random/omap-rng.c
+++ b/drivers/char/hw_random/omap-rng.c
@@ -116,7 +116,7 @@ static int __devinit omap_rng_probe(struct platform_device *pdev)
116 if (!res) 116 if (!res)
117 return -ENOENT; 117 return -ENOENT;
118 118
119 mem = request_mem_region(res->start, res->end - res->start + 1, 119 mem = request_mem_region(res->start, resource_size(res),
120 pdev->name); 120 pdev->name);
121 if (mem == NULL) { 121 if (mem == NULL) {
122 ret = -EBUSY; 122 ret = -EBUSY;
@@ -124,7 +124,7 @@ static int __devinit omap_rng_probe(struct platform_device *pdev)
124 } 124 }
125 125
126 dev_set_drvdata(&pdev->dev, mem); 126 dev_set_drvdata(&pdev->dev, mem);
127 rng_base = ioremap(res->start, res->end - res->start + 1); 127 rng_base = ioremap(res->start, resource_size(res));
128 if (!rng_base) { 128 if (!rng_base) {
129 ret = -ENOMEM; 129 ret = -ENOMEM;
130 goto err_ioremap; 130 goto err_ioremap;
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 6c8b65d069e5..a074fceb67d3 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -301,7 +301,7 @@ static inline int private_mapping_ok(struct vm_area_struct *vma)
301} 301}
302#endif 302#endif
303 303
304static struct vm_operations_struct mmap_mem_ops = { 304static const struct vm_operations_struct mmap_mem_ops = {
305#ifdef CONFIG_HAVE_IOREMAP_PROT 305#ifdef CONFIG_HAVE_IOREMAP_PROT
306 .access = generic_access_phys 306 .access = generic_access_phys
307#endif 307#endif
diff --git a/drivers/char/mspec.c b/drivers/char/mspec.c
index 30f095a8c2d4..1997270bb6f4 100644
--- a/drivers/char/mspec.c
+++ b/drivers/char/mspec.c
@@ -239,7 +239,7 @@ mspec_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
239 return VM_FAULT_NOPAGE; 239 return VM_FAULT_NOPAGE;
240} 240}
241 241
242static struct vm_operations_struct mspec_vm_ops = { 242static const struct vm_operations_struct mspec_vm_ops = {
243 .open = mspec_open, 243 .open = mspec_open,
244 .close = mspec_close, 244 .close = mspec_close,
245 .fault = mspec_fault, 245 .fault = mspec_fault,
diff --git a/drivers/char/pty.c b/drivers/char/pty.c
index 53761cefa915..e066c4fdf81b 100644
--- a/drivers/char/pty.c
+++ b/drivers/char/pty.c
@@ -261,6 +261,9 @@ done:
261 return 0; 261 return 0;
262} 262}
263 263
264/* Traditional BSD devices */
265#ifdef CONFIG_LEGACY_PTYS
266
264static int pty_install(struct tty_driver *driver, struct tty_struct *tty) 267static int pty_install(struct tty_driver *driver, struct tty_struct *tty)
265{ 268{
266 struct tty_struct *o_tty; 269 struct tty_struct *o_tty;
@@ -310,24 +313,6 @@ free_mem_out:
310 return -ENOMEM; 313 return -ENOMEM;
311} 314}
312 315
313
314static const struct tty_operations pty_ops = {
315 .install = pty_install,
316 .open = pty_open,
317 .close = pty_close,
318 .write = pty_write,
319 .write_room = pty_write_room,
320 .flush_buffer = pty_flush_buffer,
321 .chars_in_buffer = pty_chars_in_buffer,
322 .unthrottle = pty_unthrottle,
323 .set_termios = pty_set_termios,
324 .resize = pty_resize
325};
326
327/* Traditional BSD devices */
328#ifdef CONFIG_LEGACY_PTYS
329static struct tty_driver *pty_driver, *pty_slave_driver;
330
331static int pty_bsd_ioctl(struct tty_struct *tty, struct file *file, 316static int pty_bsd_ioctl(struct tty_struct *tty, struct file *file,
332 unsigned int cmd, unsigned long arg) 317 unsigned int cmd, unsigned long arg)
333{ 318{
@@ -341,7 +326,12 @@ static int pty_bsd_ioctl(struct tty_struct *tty, struct file *file,
341static int legacy_count = CONFIG_LEGACY_PTY_COUNT; 326static int legacy_count = CONFIG_LEGACY_PTY_COUNT;
342module_param(legacy_count, int, 0); 327module_param(legacy_count, int, 0);
343 328
344static const struct tty_operations pty_ops_bsd = { 329/*
330 * The master side of a pty can do TIOCSPTLCK and thus
331 * has pty_bsd_ioctl.
332 */
333static const struct tty_operations master_pty_ops_bsd = {
334 .install = pty_install,
345 .open = pty_open, 335 .open = pty_open,
346 .close = pty_close, 336 .close = pty_close,
347 .write = pty_write, 337 .write = pty_write,
@@ -354,8 +344,23 @@ static const struct tty_operations pty_ops_bsd = {
354 .resize = pty_resize 344 .resize = pty_resize
355}; 345};
356 346
347static const struct tty_operations slave_pty_ops_bsd = {
348 .install = pty_install,
349 .open = pty_open,
350 .close = pty_close,
351 .write = pty_write,
352 .write_room = pty_write_room,
353 .flush_buffer = pty_flush_buffer,
354 .chars_in_buffer = pty_chars_in_buffer,
355 .unthrottle = pty_unthrottle,
356 .set_termios = pty_set_termios,
357 .resize = pty_resize
358};
359
357static void __init legacy_pty_init(void) 360static void __init legacy_pty_init(void)
358{ 361{
362 struct tty_driver *pty_driver, *pty_slave_driver;
363
359 if (legacy_count <= 0) 364 if (legacy_count <= 0)
360 return; 365 return;
361 366
@@ -383,7 +388,7 @@ static void __init legacy_pty_init(void)
383 pty_driver->init_termios.c_ospeed = 38400; 388 pty_driver->init_termios.c_ospeed = 38400;
384 pty_driver->flags = TTY_DRIVER_RESET_TERMIOS | TTY_DRIVER_REAL_RAW; 389 pty_driver->flags = TTY_DRIVER_RESET_TERMIOS | TTY_DRIVER_REAL_RAW;
385 pty_driver->other = pty_slave_driver; 390 pty_driver->other = pty_slave_driver;
386 tty_set_operations(pty_driver, &pty_ops); 391 tty_set_operations(pty_driver, &master_pty_ops_bsd);
387 392
388 pty_slave_driver->owner = THIS_MODULE; 393 pty_slave_driver->owner = THIS_MODULE;
389 pty_slave_driver->driver_name = "pty_slave"; 394 pty_slave_driver->driver_name = "pty_slave";
@@ -399,7 +404,7 @@ static void __init legacy_pty_init(void)
399 pty_slave_driver->flags = TTY_DRIVER_RESET_TERMIOS | 404 pty_slave_driver->flags = TTY_DRIVER_RESET_TERMIOS |
400 TTY_DRIVER_REAL_RAW; 405 TTY_DRIVER_REAL_RAW;
401 pty_slave_driver->other = pty_driver; 406 pty_slave_driver->other = pty_driver;
402 tty_set_operations(pty_slave_driver, &pty_ops); 407 tty_set_operations(pty_slave_driver, &slave_pty_ops_bsd);
403 408
404 if (tty_register_driver(pty_driver)) 409 if (tty_register_driver(pty_driver))
405 panic("Couldn't register pty driver"); 410 panic("Couldn't register pty driver");
diff --git a/drivers/char/serial167.c b/drivers/char/serial167.c
index 5942a9d674c0..452370af95de 100644
--- a/drivers/char/serial167.c
+++ b/drivers/char/serial167.c
@@ -220,8 +220,7 @@ static inline int serial_paranoia_check(struct cyclades_port *info, char *name,
220 return 1; 220 return 1;
221 } 221 }
222 222
223 if ((long)info < (long)(&cy_port[0]) 223 if (info < &cy_port[0] || info >= &cy_port[NR_PORTS]) {
224 || (long)(&cy_port[NR_PORTS]) < (long)info) {
225 printk("Warning: cyclades_port out of range for (%s) in %s\n", 224 printk("Warning: cyclades_port out of range for (%s) in %s\n",
226 name, routine); 225 name, routine);
227 return 1; 226 return 1;
@@ -520,15 +519,13 @@ static irqreturn_t cd2401_tx_interrupt(int irq, void *dev_id)
520 panic("TxInt on debug port!!!"); 519 panic("TxInt on debug port!!!");
521 } 520 }
522#endif 521#endif
523
524 info = &cy_port[channel];
525
526 /* validate the port number (as configured and open) */ 522 /* validate the port number (as configured and open) */
527 if ((channel < 0) || (NR_PORTS <= channel)) { 523 if ((channel < 0) || (NR_PORTS <= channel)) {
528 base_addr[CyIER] &= ~(CyTxMpty | CyTxRdy); 524 base_addr[CyIER] &= ~(CyTxMpty | CyTxRdy);
529 base_addr[CyTEOIR] = CyNOTRANS; 525 base_addr[CyTEOIR] = CyNOTRANS;
530 return IRQ_HANDLED; 526 return IRQ_HANDLED;
531 } 527 }
528 info = &cy_port[channel];
532 info->last_active = jiffies; 529 info->last_active = jiffies;
533 if (info->tty == 0) { 530 if (info->tty == 0) {
534 base_addr[CyIER] &= ~(CyTxMpty | CyTxRdy); 531 base_addr[CyIER] &= ~(CyTxMpty | CyTxRdy);
diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
index ea18a129b0b5..59499ee0fe6a 100644
--- a/drivers/char/tty_io.c
+++ b/drivers/char/tty_io.c
@@ -1389,7 +1389,7 @@ EXPORT_SYMBOL(tty_shutdown);
1389 * of ttys that the driver keeps. 1389 * of ttys that the driver keeps.
1390 * 1390 *
1391 * This method gets called from a work queue so that the driver private 1391 * This method gets called from a work queue so that the driver private
1392 * shutdown ops can sleep (needed for USB at least) 1392 * cleanup ops can sleep (needed for USB at least)
1393 */ 1393 */
1394static void release_one_tty(struct work_struct *work) 1394static void release_one_tty(struct work_struct *work)
1395{ 1395{
@@ -1397,10 +1397,9 @@ static void release_one_tty(struct work_struct *work)
1397 container_of(work, struct tty_struct, hangup_work); 1397 container_of(work, struct tty_struct, hangup_work);
1398 struct tty_driver *driver = tty->driver; 1398 struct tty_driver *driver = tty->driver;
1399 1399
1400 if (tty->ops->shutdown) 1400 if (tty->ops->cleanup)
1401 tty->ops->shutdown(tty); 1401 tty->ops->cleanup(tty);
1402 else 1402
1403 tty_shutdown(tty);
1404 tty->magic = 0; 1403 tty->magic = 0;
1405 tty_driver_kref_put(driver); 1404 tty_driver_kref_put(driver);
1406 module_put(driver->owner); 1405 module_put(driver->owner);
@@ -1415,6 +1414,12 @@ static void release_one_tty(struct work_struct *work)
1415static void queue_release_one_tty(struct kref *kref) 1414static void queue_release_one_tty(struct kref *kref)
1416{ 1415{
1417 struct tty_struct *tty = container_of(kref, struct tty_struct, kref); 1416 struct tty_struct *tty = container_of(kref, struct tty_struct, kref);
1417
1418 if (tty->ops->shutdown)
1419 tty->ops->shutdown(tty);
1420 else
1421 tty_shutdown(tty);
1422
1418 /* The hangup queue is now free so we can reuse it rather than 1423 /* The hangup queue is now free so we can reuse it rather than
1419 waste a chunk of memory for each port */ 1424 waste a chunk of memory for each port */
1420 INIT_WORK(&tty->hangup_work, release_one_tty); 1425 INIT_WORK(&tty->hangup_work, release_one_tty);
diff --git a/drivers/char/vt_ioctl.c b/drivers/char/vt_ioctl.c
index 29c651ab0d78..6b36ee56e6fe 100644
--- a/drivers/char/vt_ioctl.c
+++ b/drivers/char/vt_ioctl.c
@@ -981,8 +981,10 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
981 goto eperm; 981 goto eperm;
982 982
983 if (copy_from_user(&vsa, (struct vt_setactivate __user *)arg, 983 if (copy_from_user(&vsa, (struct vt_setactivate __user *)arg,
984 sizeof(struct vt_setactivate))) 984 sizeof(struct vt_setactivate))) {
985 return -EFAULT; 985 ret = -EFAULT;
986 goto out;
987 }
986 if (vsa.console == 0 || vsa.console > MAX_NR_CONSOLES) 988 if (vsa.console == 0 || vsa.console > MAX_NR_CONSOLES)
987 ret = -ENXIO; 989 ret = -ENXIO;
988 else { 990 else {
diff --git a/drivers/char/xilinx_hwicap/xilinx_hwicap.c b/drivers/char/xilinx_hwicap/xilinx_hwicap.c
index f40ab699860f..4846d50199f3 100644
--- a/drivers/char/xilinx_hwicap/xilinx_hwicap.c
+++ b/drivers/char/xilinx_hwicap/xilinx_hwicap.c
@@ -559,7 +559,7 @@ static int hwicap_release(struct inode *inode, struct file *file)
559 return status; 559 return status;
560} 560}
561 561
562static struct file_operations hwicap_fops = { 562static const struct file_operations hwicap_fops = {
563 .owner = THIS_MODULE, 563 .owner = THIS_MODULE,
564 .write = hwicap_write, 564 .write = hwicap_write,
565 .read = hwicap_read, 565 .read = hwicap_read,
diff --git a/drivers/connector/cn_queue.c b/drivers/connector/cn_queue.c
index 4a1dfe1f4ba9..210338ea222f 100644
--- a/drivers/connector/cn_queue.c
+++ b/drivers/connector/cn_queue.c
@@ -78,18 +78,20 @@ void cn_queue_wrapper(struct work_struct *work)
78 struct cn_callback_entry *cbq = 78 struct cn_callback_entry *cbq =
79 container_of(work, struct cn_callback_entry, work); 79 container_of(work, struct cn_callback_entry, work);
80 struct cn_callback_data *d = &cbq->data; 80 struct cn_callback_data *d = &cbq->data;
81 struct cn_msg *msg = NLMSG_DATA(nlmsg_hdr(d->skb));
82 struct netlink_skb_parms *nsp = &NETLINK_CB(d->skb);
81 83
82 d->callback(d->callback_priv); 84 d->callback(msg, nsp);
83 85
84 d->destruct_data(d->ddata); 86 kfree_skb(d->skb);
85 d->ddata = NULL; 87 d->skb = NULL;
86 88
87 kfree(d->free); 89 kfree(d->free);
88} 90}
89 91
90static struct cn_callback_entry * 92static struct cn_callback_entry *
91cn_queue_alloc_callback_entry(char *name, struct cb_id *id, 93cn_queue_alloc_callback_entry(char *name, struct cb_id *id,
92 void (*callback)(struct cn_msg *)) 94 void (*callback)(struct cn_msg *, struct netlink_skb_parms *))
93{ 95{
94 struct cn_callback_entry *cbq; 96 struct cn_callback_entry *cbq;
95 97
@@ -123,7 +125,7 @@ int cn_cb_equal(struct cb_id *i1, struct cb_id *i2)
123} 125}
124 126
125int cn_queue_add_callback(struct cn_queue_dev *dev, char *name, struct cb_id *id, 127int cn_queue_add_callback(struct cn_queue_dev *dev, char *name, struct cb_id *id,
126 void (*callback)(struct cn_msg *)) 128 void (*callback)(struct cn_msg *, struct netlink_skb_parms *))
127{ 129{
128 struct cn_callback_entry *cbq, *__cbq; 130 struct cn_callback_entry *cbq, *__cbq;
129 int found = 0; 131 int found = 0;
diff --git a/drivers/connector/connector.c b/drivers/connector/connector.c
index 74f52af79563..f06024668f99 100644
--- a/drivers/connector/connector.c
+++ b/drivers/connector/connector.c
@@ -129,21 +129,19 @@ EXPORT_SYMBOL_GPL(cn_netlink_send);
129/* 129/*
130 * Callback helper - queues work and setup destructor for given data. 130 * Callback helper - queues work and setup destructor for given data.
131 */ 131 */
132static int cn_call_callback(struct cn_msg *msg, void (*destruct_data)(void *), void *data) 132static int cn_call_callback(struct sk_buff *skb)
133{ 133{
134 struct cn_callback_entry *__cbq, *__new_cbq; 134 struct cn_callback_entry *__cbq, *__new_cbq;
135 struct cn_dev *dev = &cdev; 135 struct cn_dev *dev = &cdev;
136 struct cn_msg *msg = NLMSG_DATA(nlmsg_hdr(skb));
136 int err = -ENODEV; 137 int err = -ENODEV;
137 138
138 spin_lock_bh(&dev->cbdev->queue_lock); 139 spin_lock_bh(&dev->cbdev->queue_lock);
139 list_for_each_entry(__cbq, &dev->cbdev->queue_list, callback_entry) { 140 list_for_each_entry(__cbq, &dev->cbdev->queue_list, callback_entry) {
140 if (cn_cb_equal(&__cbq->id.id, &msg->id)) { 141 if (cn_cb_equal(&__cbq->id.id, &msg->id)) {
141 if (likely(!work_pending(&__cbq->work) && 142 if (likely(!work_pending(&__cbq->work) &&
142 __cbq->data.ddata == NULL)) { 143 __cbq->data.skb == NULL)) {
143 __cbq->data.callback_priv = msg; 144 __cbq->data.skb = skb;
144
145 __cbq->data.ddata = data;
146 __cbq->data.destruct_data = destruct_data;
147 145
148 if (queue_cn_work(__cbq, &__cbq->work)) 146 if (queue_cn_work(__cbq, &__cbq->work))
149 err = 0; 147 err = 0;
@@ -156,10 +154,8 @@ static int cn_call_callback(struct cn_msg *msg, void (*destruct_data)(void *), v
156 __new_cbq = kzalloc(sizeof(struct cn_callback_entry), GFP_ATOMIC); 154 __new_cbq = kzalloc(sizeof(struct cn_callback_entry), GFP_ATOMIC);
157 if (__new_cbq) { 155 if (__new_cbq) {
158 d = &__new_cbq->data; 156 d = &__new_cbq->data;
159 d->callback_priv = msg; 157 d->skb = skb;
160 d->callback = __cbq->data.callback; 158 d->callback = __cbq->data.callback;
161 d->ddata = data;
162 d->destruct_data = destruct_data;
163 d->free = __new_cbq; 159 d->free = __new_cbq;
164 160
165 __new_cbq->pdev = __cbq->pdev; 161 __new_cbq->pdev = __cbq->pdev;
@@ -191,7 +187,6 @@ static int cn_call_callback(struct cn_msg *msg, void (*destruct_data)(void *), v
191 */ 187 */
192static void cn_rx_skb(struct sk_buff *__skb) 188static void cn_rx_skb(struct sk_buff *__skb)
193{ 189{
194 struct cn_msg *msg;
195 struct nlmsghdr *nlh; 190 struct nlmsghdr *nlh;
196 int err; 191 int err;
197 struct sk_buff *skb; 192 struct sk_buff *skb;
@@ -208,8 +203,7 @@ static void cn_rx_skb(struct sk_buff *__skb)
208 return; 203 return;
209 } 204 }
210 205
211 msg = NLMSG_DATA(nlh); 206 err = cn_call_callback(skb);
212 err = cn_call_callback(msg, (void (*)(void *))kfree_skb, skb);
213 if (err < 0) 207 if (err < 0)
214 kfree_skb(skb); 208 kfree_skb(skb);
215 } 209 }
@@ -270,7 +264,7 @@ static void cn_notify(struct cb_id *id, u32 notify_event)
270 * May sleep. 264 * May sleep.
271 */ 265 */
272int cn_add_callback(struct cb_id *id, char *name, 266int cn_add_callback(struct cb_id *id, char *name,
273 void (*callback)(struct cn_msg *)) 267 void (*callback)(struct cn_msg *, struct netlink_skb_parms *))
274{ 268{
275 int err; 269 int err;
276 struct cn_dev *dev = &cdev; 270 struct cn_dev *dev = &cdev;
@@ -352,7 +346,7 @@ static int cn_ctl_msg_equals(struct cn_ctl_msg *m1, struct cn_ctl_msg *m2)
352 * 346 *
353 * Used for notification of a request's processing. 347 * Used for notification of a request's processing.
354 */ 348 */
355static void cn_callback(struct cn_msg *msg) 349static void cn_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
356{ 350{
357 struct cn_ctl_msg *ctl; 351 struct cn_ctl_msg *ctl;
358 struct cn_ctl_entry *ent; 352 struct cn_ctl_entry *ent;
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index bb11a429394a..662ed923d9eb 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -1487,7 +1487,7 @@ static int gpiolib_open(struct inode *inode, struct file *file)
1487 return single_open(file, gpiolib_show, NULL); 1487 return single_open(file, gpiolib_show, NULL);
1488} 1488}
1489 1489
1490static struct file_operations gpiolib_operations = { 1490static const struct file_operations gpiolib_operations = {
1491 .open = gpiolib_open, 1491 .open = gpiolib_open,
1492 .read = seq_read, 1492 .read = seq_read,
1493 .llseek = seq_lseek, 1493 .llseek = seq_lseek,
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index ba728ad77f2a..8e7b0ebece0c 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -482,6 +482,7 @@ void drm_connector_cleanup(struct drm_connector *connector)
482 list_for_each_entry_safe(mode, t, &connector->user_modes, head) 482 list_for_each_entry_safe(mode, t, &connector->user_modes, head)
483 drm_mode_remove(connector, mode); 483 drm_mode_remove(connector, mode);
484 484
485 kfree(connector->fb_helper_private);
485 mutex_lock(&dev->mode_config.mutex); 486 mutex_lock(&dev->mode_config.mutex);
486 drm_mode_object_put(dev, &connector->base); 487 drm_mode_object_put(dev, &connector->base);
487 list_del(&connector->head); 488 list_del(&connector->head);
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index fe8697447f32..1fe4e1d344fd 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -32,6 +32,7 @@
32#include "drmP.h" 32#include "drmP.h"
33#include "drm_crtc.h" 33#include "drm_crtc.h"
34#include "drm_crtc_helper.h" 34#include "drm_crtc_helper.h"
35#include "drm_fb_helper.h"
35 36
36static void drm_mode_validate_flag(struct drm_connector *connector, 37static void drm_mode_validate_flag(struct drm_connector *connector,
37 int flags) 38 int flags)
@@ -90,7 +91,15 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
90 list_for_each_entry_safe(mode, t, &connector->modes, head) 91 list_for_each_entry_safe(mode, t, &connector->modes, head)
91 mode->status = MODE_UNVERIFIED; 92 mode->status = MODE_UNVERIFIED;
92 93
93 connector->status = connector->funcs->detect(connector); 94 if (connector->force) {
95 if (connector->force == DRM_FORCE_ON)
96 connector->status = connector_status_connected;
97 else
98 connector->status = connector_status_disconnected;
99 if (connector->funcs->force)
100 connector->funcs->force(connector);
101 } else
102 connector->status = connector->funcs->detect(connector);
94 103
95 if (connector->status == connector_status_disconnected) { 104 if (connector->status == connector_status_disconnected) {
96 DRM_DEBUG_KMS("%s is disconnected\n", 105 DRM_DEBUG_KMS("%s is disconnected\n",
@@ -267,6 +276,65 @@ static struct drm_display_mode *drm_has_preferred_mode(struct drm_connector *con
267 return NULL; 276 return NULL;
268} 277}
269 278
279static bool drm_has_cmdline_mode(struct drm_connector *connector)
280{
281 struct drm_fb_helper_connector *fb_help_conn = connector->fb_helper_private;
282 struct drm_fb_helper_cmdline_mode *cmdline_mode;
283
284 if (!fb_help_conn)
285 return false;
286
287 cmdline_mode = &fb_help_conn->cmdline_mode;
288 return cmdline_mode->specified;
289}
290
291static struct drm_display_mode *drm_pick_cmdline_mode(struct drm_connector *connector, int width, int height)
292{
293 struct drm_fb_helper_connector *fb_help_conn = connector->fb_helper_private;
294 struct drm_fb_helper_cmdline_mode *cmdline_mode;
295 struct drm_display_mode *mode = NULL;
296
297 if (!fb_help_conn)
298 return mode;
299
300 cmdline_mode = &fb_help_conn->cmdline_mode;
301 if (cmdline_mode->specified == false)
302 return mode;
303
304 /* attempt to find a matching mode in the list of modes
305 * we have gotten so far, if not add a CVT mode that conforms
306 */
307 if (cmdline_mode->rb || cmdline_mode->margins)
308 goto create_mode;
309
310 list_for_each_entry(mode, &connector->modes, head) {
311 /* check width/height */
312 if (mode->hdisplay != cmdline_mode->xres ||
313 mode->vdisplay != cmdline_mode->yres)
314 continue;
315
316 if (cmdline_mode->refresh_specified) {
317 if (mode->vrefresh != cmdline_mode->refresh)
318 continue;
319 }
320
321 if (cmdline_mode->interlace) {
322 if (!(mode->flags & DRM_MODE_FLAG_INTERLACE))
323 continue;
324 }
325 return mode;
326 }
327
328create_mode:
329 mode = drm_cvt_mode(connector->dev, cmdline_mode->xres,
330 cmdline_mode->yres,
331 cmdline_mode->refresh_specified ? cmdline_mode->refresh : 60,
332 cmdline_mode->rb, cmdline_mode->interlace,
333 cmdline_mode->margins);
334 list_add(&mode->head, &connector->modes);
335 return mode;
336}
337
270static bool drm_connector_enabled(struct drm_connector *connector, bool strict) 338static bool drm_connector_enabled(struct drm_connector *connector, bool strict)
271{ 339{
272 bool enable; 340 bool enable;
@@ -317,10 +385,16 @@ static bool drm_target_preferred(struct drm_device *dev,
317 continue; 385 continue;
318 } 386 }
319 387
320 DRM_DEBUG_KMS("looking for preferred mode on connector %d\n", 388 DRM_DEBUG_KMS("looking for cmdline mode on connector %d\n",
321 connector->base.id); 389 connector->base.id);
322 390
323 modes[i] = drm_has_preferred_mode(connector, width, height); 391 /* got for command line mode first */
392 modes[i] = drm_pick_cmdline_mode(connector, width, height);
393 if (!modes[i]) {
394 DRM_DEBUG_KMS("looking for preferred mode on connector %d\n",
395 connector->base.id);
396 modes[i] = drm_has_preferred_mode(connector, width, height);
397 }
324 /* No preferred modes, pick one off the list */ 398 /* No preferred modes, pick one off the list */
325 if (!modes[i] && !list_empty(&connector->modes)) { 399 if (!modes[i] && !list_empty(&connector->modes)) {
326 list_for_each_entry(modes[i], &connector->modes, head) 400 list_for_each_entry(modes[i], &connector->modes, head)
@@ -369,6 +443,8 @@ static int drm_pick_crtcs(struct drm_device *dev,
369 my_score = 1; 443 my_score = 1;
370 if (connector->status == connector_status_connected) 444 if (connector->status == connector_status_connected)
371 my_score++; 445 my_score++;
446 if (drm_has_cmdline_mode(connector))
447 my_score++;
372 if (drm_has_preferred_mode(connector, width, height)) 448 if (drm_has_preferred_mode(connector, width, height))
373 my_score++; 449 my_score++;
374 450
@@ -943,6 +1019,8 @@ bool drm_helper_initial_config(struct drm_device *dev)
943{ 1019{
944 int count = 0; 1020 int count = 0;
945 1021
1022 drm_fb_helper_parse_command_line(dev);
1023
946 count = drm_helper_probe_connector_modes(dev, 1024 count = drm_helper_probe_connector_modes(dev,
947 dev->mode_config.max_width, 1025 dev->mode_config.max_width,
948 dev->mode_config.max_height); 1026 dev->mode_config.max_height);
@@ -950,7 +1028,7 @@ bool drm_helper_initial_config(struct drm_device *dev)
950 /* 1028 /*
951 * we shouldn't end up with no modes here. 1029 * we shouldn't end up with no modes here.
952 */ 1030 */
953 WARN(!count, "Connected connector with 0 modes\n"); 1031 WARN(!count, "No connectors reported connected with modes\n");
954 1032
955 drm_setup_crtcs(dev); 1033 drm_setup_crtcs(dev);
956 1034
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 90d76bacff17..3c0d2b3aed76 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -109,7 +109,9 @@ static struct edid_quirk {
109 109
110 110
111/* Valid EDID header has these bytes */ 111/* Valid EDID header has these bytes */
112static u8 edid_header[] = { 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 }; 112static const u8 edid_header[] = {
113 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00
114};
113 115
114/** 116/**
115 * edid_is_valid - sanity check EDID data 117 * edid_is_valid - sanity check EDID data
@@ -500,6 +502,19 @@ static struct drm_display_mode *drm_find_dmt(struct drm_device *dev,
500 } 502 }
501 return mode; 503 return mode;
502} 504}
505
506/*
507 * 0 is reserved. The spec says 0x01 fill for unused timings. Some old
508 * monitors fill with ascii space (0x20) instead.
509 */
510static int
511bad_std_timing(u8 a, u8 b)
512{
513 return (a == 0x00 && b == 0x00) ||
514 (a == 0x01 && b == 0x01) ||
515 (a == 0x20 && b == 0x20);
516}
517
503/** 518/**
504 * drm_mode_std - convert standard mode info (width, height, refresh) into mode 519 * drm_mode_std - convert standard mode info (width, height, refresh) into mode
505 * @t: standard timing params 520 * @t: standard timing params
@@ -513,6 +528,7 @@ static struct drm_display_mode *drm_find_dmt(struct drm_device *dev,
513 */ 528 */
514struct drm_display_mode *drm_mode_std(struct drm_device *dev, 529struct drm_display_mode *drm_mode_std(struct drm_device *dev,
515 struct std_timing *t, 530 struct std_timing *t,
531 int revision,
516 int timing_level) 532 int timing_level)
517{ 533{
518 struct drm_display_mode *mode; 534 struct drm_display_mode *mode;
@@ -523,14 +539,20 @@ struct drm_display_mode *drm_mode_std(struct drm_device *dev,
523 unsigned vfreq = (t->vfreq_aspect & EDID_TIMING_VFREQ_MASK) 539 unsigned vfreq = (t->vfreq_aspect & EDID_TIMING_VFREQ_MASK)
524 >> EDID_TIMING_VFREQ_SHIFT; 540 >> EDID_TIMING_VFREQ_SHIFT;
525 541
542 if (bad_std_timing(t->hsize, t->vfreq_aspect))
543 return NULL;
544
526 /* According to the EDID spec, the hdisplay = hsize * 8 + 248 */ 545 /* According to the EDID spec, the hdisplay = hsize * 8 + 248 */
527 hsize = t->hsize * 8 + 248; 546 hsize = t->hsize * 8 + 248;
528 /* vrefresh_rate = vfreq + 60 */ 547 /* vrefresh_rate = vfreq + 60 */
529 vrefresh_rate = vfreq + 60; 548 vrefresh_rate = vfreq + 60;
530 /* the vdisplay is calculated based on the aspect ratio */ 549 /* the vdisplay is calculated based on the aspect ratio */
531 if (aspect_ratio == 0) 550 if (aspect_ratio == 0) {
532 vsize = (hsize * 10) / 16; 551 if (revision < 3)
533 else if (aspect_ratio == 1) 552 vsize = hsize;
553 else
554 vsize = (hsize * 10) / 16;
555 } else if (aspect_ratio == 1)
534 vsize = (hsize * 3) / 4; 556 vsize = (hsize * 3) / 4;
535 else if (aspect_ratio == 2) 557 else if (aspect_ratio == 2)
536 vsize = (hsize * 4) / 5; 558 vsize = (hsize * 4) / 5;
@@ -538,7 +560,8 @@ struct drm_display_mode *drm_mode_std(struct drm_device *dev,
538 vsize = (hsize * 9) / 16; 560 vsize = (hsize * 9) / 16;
539 /* HDTV hack */ 561 /* HDTV hack */
540 if (hsize == 1360 && vsize == 765 && vrefresh_rate == 60) { 562 if (hsize == 1360 && vsize == 765 && vrefresh_rate == 60) {
541 mode = drm_cvt_mode(dev, hsize, vsize, vrefresh_rate, 0, 0); 563 mode = drm_cvt_mode(dev, hsize, vsize, vrefresh_rate, 0, 0,
564 false);
542 mode->hdisplay = 1366; 565 mode->hdisplay = 1366;
543 mode->vsync_start = mode->vsync_start - 1; 566 mode->vsync_start = mode->vsync_start - 1;
544 mode->vsync_end = mode->vsync_end - 1; 567 mode->vsync_end = mode->vsync_end - 1;
@@ -557,7 +580,8 @@ struct drm_display_mode *drm_mode_std(struct drm_device *dev,
557 mode = drm_gtf_mode(dev, hsize, vsize, vrefresh_rate, 0, 0); 580 mode = drm_gtf_mode(dev, hsize, vsize, vrefresh_rate, 0, 0);
558 break; 581 break;
559 case LEVEL_CVT: 582 case LEVEL_CVT:
560 mode = drm_cvt_mode(dev, hsize, vsize, vrefresh_rate, 0, 0); 583 mode = drm_cvt_mode(dev, hsize, vsize, vrefresh_rate, 0, 0,
584 false);
561 break; 585 break;
562 } 586 }
563 return mode; 587 return mode;
@@ -779,7 +803,7 @@ static int add_standard_modes(struct drm_connector *connector, struct edid *edid
779 continue; 803 continue;
780 804
781 newmode = drm_mode_std(dev, &edid->standard_timings[i], 805 newmode = drm_mode_std(dev, &edid->standard_timings[i],
782 timing_level); 806 edid->revision, timing_level);
783 if (newmode) { 807 if (newmode) {
784 drm_mode_probed_add(connector, newmode); 808 drm_mode_probed_add(connector, newmode);
785 modes++; 809 modes++;
@@ -829,13 +853,13 @@ static int add_detailed_info(struct drm_connector *connector,
829 case EDID_DETAIL_MONITOR_CPDATA: 853 case EDID_DETAIL_MONITOR_CPDATA:
830 break; 854 break;
831 case EDID_DETAIL_STD_MODES: 855 case EDID_DETAIL_STD_MODES:
832 /* Five modes per detailed section */ 856 for (j = 0; j < 6; i++) {
833 for (j = 0; j < 5; i++) {
834 struct std_timing *std; 857 struct std_timing *std;
835 struct drm_display_mode *newmode; 858 struct drm_display_mode *newmode;
836 859
837 std = &data->data.timings[j]; 860 std = &data->data.timings[j];
838 newmode = drm_mode_std(dev, std, 861 newmode = drm_mode_std(dev, std,
862 edid->revision,
839 timing_level); 863 timing_level);
840 if (newmode) { 864 if (newmode) {
841 drm_mode_probed_add(connector, newmode); 865 drm_mode_probed_add(connector, newmode);
@@ -964,7 +988,9 @@ static int add_detailed_info_eedid(struct drm_connector *connector,
964 struct drm_display_mode *newmode; 988 struct drm_display_mode *newmode;
965 989
966 std = &data->data.timings[j]; 990 std = &data->data.timings[j];
967 newmode = drm_mode_std(dev, std, timing_level); 991 newmode = drm_mode_std(dev, std,
992 edid->revision,
993 timing_level);
968 if (newmode) { 994 if (newmode) {
969 drm_mode_probed_add(connector, newmode); 995 drm_mode_probed_add(connector, newmode);
970 modes++; 996 modes++;
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 2c4671314884..819ddcbfcce5 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -40,6 +40,199 @@ MODULE_LICENSE("GPL and additional rights");
40 40
41static LIST_HEAD(kernel_fb_helper_list); 41static LIST_HEAD(kernel_fb_helper_list);
42 42
43int drm_fb_helper_add_connector(struct drm_connector *connector)
44{
45 connector->fb_helper_private = kzalloc(sizeof(struct drm_fb_helper_connector), GFP_KERNEL);
46 if (!connector->fb_helper_private)
47 return -ENOMEM;
48
49 return 0;
50}
51EXPORT_SYMBOL(drm_fb_helper_add_connector);
52
53static int my_atoi(const char *name)
54{
55 int val = 0;
56
57 for (;; name++) {
58 switch (*name) {
59 case '0' ... '9':
60 val = 10*val+(*name-'0');
61 break;
62 default:
63 return val;
64 }
65 }
66}
67
68/**
69 * drm_fb_helper_connector_parse_command_line - parse command line for connector
70 * @connector - connector to parse line for
71 * @mode_option - per connector mode option
72 *
73 * This parses the connector specific then generic command lines for
74 * modes and options to configure the connector.
75 *
76 * This uses the same parameters as the fb modedb.c, except for extra
77 * <xres>x<yres>[M][R][-<bpp>][@<refresh>][i][m][eDd]
78 *
79 * enable/enable Digital/disable bit at the end
80 */
81static bool drm_fb_helper_connector_parse_command_line(struct drm_connector *connector,
82 const char *mode_option)
83{
84 const char *name;
85 unsigned int namelen;
86 int res_specified = 0, bpp_specified = 0, refresh_specified = 0;
87 unsigned int xres = 0, yres = 0, bpp = 32, refresh = 0;
88 int yres_specified = 0, cvt = 0, rb = 0, interlace = 0, margins = 0;
89 int i;
90 enum drm_connector_force force = DRM_FORCE_UNSPECIFIED;
91 struct drm_fb_helper_connector *fb_help_conn = connector->fb_helper_private;
92 struct drm_fb_helper_cmdline_mode *cmdline_mode;
93
94 if (!fb_help_conn)
95 return false;
96
97 cmdline_mode = &fb_help_conn->cmdline_mode;
98 if (!mode_option)
99 mode_option = fb_mode_option;
100
101 if (!mode_option) {
102 cmdline_mode->specified = false;
103 return false;
104 }
105
106 name = mode_option;
107 namelen = strlen(name);
108 for (i = namelen-1; i >= 0; i--) {
109 switch (name[i]) {
110 case '@':
111 namelen = i;
112 if (!refresh_specified && !bpp_specified &&
113 !yres_specified) {
114 refresh = my_atoi(&name[i+1]);
115 refresh_specified = 1;
116 if (cvt || rb)
117 cvt = 0;
118 } else
119 goto done;
120 break;
121 case '-':
122 namelen = i;
123 if (!bpp_specified && !yres_specified) {
124 bpp = my_atoi(&name[i+1]);
125 bpp_specified = 1;
126 if (cvt || rb)
127 cvt = 0;
128 } else
129 goto done;
130 break;
131 case 'x':
132 if (!yres_specified) {
133 yres = my_atoi(&name[i+1]);
134 yres_specified = 1;
135 } else
136 goto done;
137 case '0' ... '9':
138 break;
139 case 'M':
140 if (!yres_specified)
141 cvt = 1;
142 break;
143 case 'R':
144 if (!cvt)
145 rb = 1;
146 break;
147 case 'm':
148 if (!cvt)
149 margins = 1;
150 break;
151 case 'i':
152 if (!cvt)
153 interlace = 1;
154 break;
155 case 'e':
156 force = DRM_FORCE_ON;
157 break;
158 case 'D':
159 if ((connector->connector_type != DRM_MODE_CONNECTOR_DVII) ||
160 (connector->connector_type != DRM_MODE_CONNECTOR_HDMIB))
161 force = DRM_FORCE_ON;
162 else
163 force = DRM_FORCE_ON_DIGITAL;
164 break;
165 case 'd':
166 force = DRM_FORCE_OFF;
167 break;
168 default:
169 goto done;
170 }
171 }
172 if (i < 0 && yres_specified) {
173 xres = my_atoi(name);
174 res_specified = 1;
175 }
176done:
177
178 DRM_DEBUG_KMS("cmdline mode for connector %s %dx%d@%dHz%s%s%s\n",
179 drm_get_connector_name(connector), xres, yres,
180 (refresh) ? refresh : 60, (rb) ? " reduced blanking" :
181 "", (margins) ? " with margins" : "", (interlace) ?
182 " interlaced" : "");
183
184 if (force) {
185 const char *s;
186 switch (force) {
187 case DRM_FORCE_OFF: s = "OFF"; break;
188 case DRM_FORCE_ON_DIGITAL: s = "ON - dig"; break;
189 default:
190 case DRM_FORCE_ON: s = "ON"; break;
191 }
192
193 DRM_INFO("forcing %s connector %s\n",
194 drm_get_connector_name(connector), s);
195 connector->force = force;
196 }
197
198 if (res_specified) {
199 cmdline_mode->specified = true;
200 cmdline_mode->xres = xres;
201 cmdline_mode->yres = yres;
202 }
203
204 if (refresh_specified) {
205 cmdline_mode->refresh_specified = true;
206 cmdline_mode->refresh = refresh;
207 }
208
209 if (bpp_specified) {
210 cmdline_mode->bpp_specified = true;
211 cmdline_mode->bpp = bpp;
212 }
213 cmdline_mode->rb = rb ? true : false;
214 cmdline_mode->cvt = cvt ? true : false;
215 cmdline_mode->interlace = interlace ? true : false;
216
217 return true;
218}
219
220int drm_fb_helper_parse_command_line(struct drm_device *dev)
221{
222 struct drm_connector *connector;
223
224 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
225 char *option = NULL;
226
227 /* do something on return - turn off connector maybe */
228 if (fb_get_options(drm_get_connector_name(connector), &option))
229 continue;
230
231 drm_fb_helper_connector_parse_command_line(connector, option);
232 }
233 return 0;
234}
235
43bool drm_fb_helper_force_kernel_mode(void) 236bool drm_fb_helper_force_kernel_mode(void)
44{ 237{
45 int i = 0; 238 int i = 0;
@@ -87,6 +280,7 @@ void drm_fb_helper_restore(void)
87} 280}
88EXPORT_SYMBOL(drm_fb_helper_restore); 281EXPORT_SYMBOL(drm_fb_helper_restore);
89 282
283#ifdef CONFIG_MAGIC_SYSRQ
90static void drm_fb_helper_restore_work_fn(struct work_struct *ignored) 284static void drm_fb_helper_restore_work_fn(struct work_struct *ignored)
91{ 285{
92 drm_fb_helper_restore(); 286 drm_fb_helper_restore();
@@ -103,6 +297,7 @@ static struct sysrq_key_op sysrq_drm_fb_helper_restore_op = {
103 .help_msg = "force-fb(V)", 297 .help_msg = "force-fb(V)",
104 .action_msg = "Restore framebuffer console", 298 .action_msg = "Restore framebuffer console",
105}; 299};
300#endif
106 301
107static void drm_fb_helper_on(struct fb_info *info) 302static void drm_fb_helper_on(struct fb_info *info)
108{ 303{
@@ -484,6 +679,8 @@ int drm_fb_helper_single_fb_probe(struct drm_device *dev,
484 uint32_t fb_height, 679 uint32_t fb_height,
485 uint32_t surface_width, 680 uint32_t surface_width,
486 uint32_t surface_height, 681 uint32_t surface_height,
682 uint32_t surface_depth,
683 uint32_t surface_bpp,
487 struct drm_framebuffer **fb_ptr)) 684 struct drm_framebuffer **fb_ptr))
488{ 685{
489 struct drm_crtc *crtc; 686 struct drm_crtc *crtc;
@@ -497,8 +694,43 @@ int drm_fb_helper_single_fb_probe(struct drm_device *dev,
497 struct drm_framebuffer *fb; 694 struct drm_framebuffer *fb;
498 struct drm_mode_set *modeset = NULL; 695 struct drm_mode_set *modeset = NULL;
499 struct drm_fb_helper *fb_helper; 696 struct drm_fb_helper *fb_helper;
697 uint32_t surface_depth = 24, surface_bpp = 32;
500 698
501 /* first up get a count of crtcs now in use and new min/maxes width/heights */ 699 /* first up get a count of crtcs now in use and new min/maxes width/heights */
700 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
701 struct drm_fb_helper_connector *fb_help_conn = connector->fb_helper_private;
702
703 struct drm_fb_helper_cmdline_mode *cmdline_mode;
704
705 if (!fb_help_conn)
706 continue;
707
708 cmdline_mode = &fb_help_conn->cmdline_mode;
709
710 if (cmdline_mode->bpp_specified) {
711 switch (cmdline_mode->bpp) {
712 case 8:
713 surface_depth = surface_bpp = 8;
714 break;
715 case 15:
716 surface_depth = 15;
717 surface_bpp = 16;
718 break;
719 case 16:
720 surface_depth = surface_bpp = 16;
721 break;
722 case 24:
723 surface_depth = surface_bpp = 24;
724 break;
725 case 32:
726 surface_depth = 24;
727 surface_bpp = 32;
728 break;
729 }
730 break;
731 }
732 }
733
502 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 734 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
503 if (drm_helper_crtc_in_use(crtc)) { 735 if (drm_helper_crtc_in_use(crtc)) {
504 if (crtc->desired_mode) { 736 if (crtc->desired_mode) {
@@ -527,7 +759,8 @@ int drm_fb_helper_single_fb_probe(struct drm_device *dev,
527 /* do we have an fb already? */ 759 /* do we have an fb already? */
528 if (list_empty(&dev->mode_config.fb_kernel_list)) { 760 if (list_empty(&dev->mode_config.fb_kernel_list)) {
529 ret = (*fb_create)(dev, fb_width, fb_height, surface_width, 761 ret = (*fb_create)(dev, fb_width, fb_height, surface_width,
530 surface_height, &fb); 762 surface_height, surface_depth, surface_bpp,
763 &fb);
531 if (ret) 764 if (ret)
532 return -EINVAL; 765 return -EINVAL;
533 new_fb = 1; 766 new_fb = 1;
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index 49404ce1666e..51f677215f1d 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -88,7 +88,7 @@ EXPORT_SYMBOL(drm_mode_debug_printmodeline);
88#define HV_FACTOR 1000 88#define HV_FACTOR 1000
89struct drm_display_mode *drm_cvt_mode(struct drm_device *dev, int hdisplay, 89struct drm_display_mode *drm_cvt_mode(struct drm_device *dev, int hdisplay,
90 int vdisplay, int vrefresh, 90 int vdisplay, int vrefresh,
91 bool reduced, bool interlaced) 91 bool reduced, bool interlaced, bool margins)
92{ 92{
93 /* 1) top/bottom margin size (% of height) - default: 1.8, */ 93 /* 1) top/bottom margin size (% of height) - default: 1.8, */
94#define CVT_MARGIN_PERCENTAGE 18 94#define CVT_MARGIN_PERCENTAGE 18
@@ -101,7 +101,6 @@ struct drm_display_mode *drm_cvt_mode(struct drm_device *dev, int hdisplay,
101 /* Pixel Clock step (kHz) */ 101 /* Pixel Clock step (kHz) */
102#define CVT_CLOCK_STEP 250 102#define CVT_CLOCK_STEP 250
103 struct drm_display_mode *drm_mode; 103 struct drm_display_mode *drm_mode;
104 bool margins = false;
105 unsigned int vfieldrate, hperiod; 104 unsigned int vfieldrate, hperiod;
106 int hdisplay_rnd, hmargin, vdisplay_rnd, vmargin, vsync; 105 int hdisplay_rnd, hmargin, vdisplay_rnd, vmargin, vsync;
107 int interlace; 106 int interlace;
diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c
index 7e1fbe5d4779..4ac900f4647f 100644
--- a/drivers/gpu/drm/drm_vm.c
+++ b/drivers/gpu/drm/drm_vm.c
@@ -369,28 +369,28 @@ static int drm_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
369} 369}
370 370
371/** AGP virtual memory operations */ 371/** AGP virtual memory operations */
372static struct vm_operations_struct drm_vm_ops = { 372static const struct vm_operations_struct drm_vm_ops = {
373 .fault = drm_vm_fault, 373 .fault = drm_vm_fault,
374 .open = drm_vm_open, 374 .open = drm_vm_open,
375 .close = drm_vm_close, 375 .close = drm_vm_close,
376}; 376};
377 377
378/** Shared virtual memory operations */ 378/** Shared virtual memory operations */
379static struct vm_operations_struct drm_vm_shm_ops = { 379static const struct vm_operations_struct drm_vm_shm_ops = {
380 .fault = drm_vm_shm_fault, 380 .fault = drm_vm_shm_fault,
381 .open = drm_vm_open, 381 .open = drm_vm_open,
382 .close = drm_vm_shm_close, 382 .close = drm_vm_shm_close,
383}; 383};
384 384
385/** DMA virtual memory operations */ 385/** DMA virtual memory operations */
386static struct vm_operations_struct drm_vm_dma_ops = { 386static const struct vm_operations_struct drm_vm_dma_ops = {
387 .fault = drm_vm_dma_fault, 387 .fault = drm_vm_dma_fault,
388 .open = drm_vm_open, 388 .open = drm_vm_open,
389 .close = drm_vm_close, 389 .close = drm_vm_close,
390}; 390};
391 391
392/** Scatter-gather virtual memory operations */ 392/** Scatter-gather virtual memory operations */
393static struct vm_operations_struct drm_vm_sg_ops = { 393static const struct vm_operations_struct drm_vm_sg_ops = {
394 .fault = drm_vm_sg_fault, 394 .fault = drm_vm_sg_fault,
395 .open = drm_vm_open, 395 .open = drm_vm_open,
396 .close = drm_vm_close, 396 .close = drm_vm_close,
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index 7ba4a232a97f..e85d7e9eed7d 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -110,6 +110,7 @@ EXPORT_SYMBOL(intelfb_resize);
110static int intelfb_create(struct drm_device *dev, uint32_t fb_width, 110static int intelfb_create(struct drm_device *dev, uint32_t fb_width,
111 uint32_t fb_height, uint32_t surface_width, 111 uint32_t fb_height, uint32_t surface_width,
112 uint32_t surface_height, 112 uint32_t surface_height,
113 uint32_t surface_depth, uint32_t surface_bpp,
113 struct drm_framebuffer **fb_p) 114 struct drm_framebuffer **fb_p)
114{ 115{
115 struct fb_info *info; 116 struct fb_info *info;
@@ -125,9 +126,9 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width,
125 mode_cmd.width = surface_width; 126 mode_cmd.width = surface_width;
126 mode_cmd.height = surface_height; 127 mode_cmd.height = surface_height;
127 128
128 mode_cmd.bpp = 32; 129 mode_cmd.bpp = surface_bpp;
129 mode_cmd.pitch = ALIGN(mode_cmd.width * ((mode_cmd.bpp + 1) / 8), 64); 130 mode_cmd.pitch = ALIGN(mode_cmd.width * ((mode_cmd.bpp + 1) / 8), 64);
130 mode_cmd.depth = 24; 131 mode_cmd.depth = surface_depth;
131 132
132 size = mode_cmd.pitch * mode_cmd.height; 133 size = mode_cmd.pitch * mode_cmd.height;
133 size = ALIGN(size, PAGE_SIZE); 134 size = ALIGN(size, PAGE_SIZE);
diff --git a/drivers/gpu/drm/radeon/.gitignore b/drivers/gpu/drm/radeon/.gitignore
new file mode 100644
index 000000000000..403eb3a5891f
--- /dev/null
+++ b/drivers/gpu/drm/radeon/.gitignore
@@ -0,0 +1,3 @@
1mkregtable
2*_reg_safe.h
3
diff --git a/drivers/gpu/drm/radeon/avivod.h b/drivers/gpu/drm/radeon/avivod.h
index e2b92c445bab..d4e6e6e4a938 100644
--- a/drivers/gpu/drm/radeon/avivod.h
+++ b/drivers/gpu/drm/radeon/avivod.h
@@ -57,13 +57,4 @@
57#define VGA_RENDER_CONTROL 0x0300 57#define VGA_RENDER_CONTROL 0x0300
58#define VGA_VSTATUS_CNTL_MASK 0x00030000 58#define VGA_VSTATUS_CNTL_MASK 0x00030000
59 59
60/* AVIVO disable VGA rendering */
61static inline void radeon_avivo_vga_render_disable(struct radeon_device *rdev)
62{
63 u32 vga_render;
64 vga_render = RREG32(VGA_RENDER_CONTROL);
65 vga_render &= ~VGA_VSTATUS_CNTL_MASK;
66 WREG32(VGA_RENDER_CONTROL, vga_render);
67}
68
69#endif 60#endif
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index be51c5f7d0f6..e6cce24de802 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -863,13 +863,11 @@ int r100_cs_parse_packet0(struct radeon_cs_parser *p,
863void r100_cs_dump_packet(struct radeon_cs_parser *p, 863void r100_cs_dump_packet(struct radeon_cs_parser *p,
864 struct radeon_cs_packet *pkt) 864 struct radeon_cs_packet *pkt)
865{ 865{
866 struct radeon_cs_chunk *ib_chunk;
867 volatile uint32_t *ib; 866 volatile uint32_t *ib;
868 unsigned i; 867 unsigned i;
869 unsigned idx; 868 unsigned idx;
870 869
871 ib = p->ib->ptr; 870 ib = p->ib->ptr;
872 ib_chunk = &p->chunks[p->chunk_ib_idx];
873 idx = pkt->idx; 871 idx = pkt->idx;
874 for (i = 0; i <= (pkt->count + 1); i++, idx++) { 872 for (i = 0; i <= (pkt->count + 1); i++, idx++) {
875 DRM_INFO("ib[%d]=0x%08X\n", idx, ib[idx]); 873 DRM_INFO("ib[%d]=0x%08X\n", idx, ib[idx]);
@@ -896,7 +894,7 @@ int r100_cs_packet_parse(struct radeon_cs_parser *p,
896 idx, ib_chunk->length_dw); 894 idx, ib_chunk->length_dw);
897 return -EINVAL; 895 return -EINVAL;
898 } 896 }
899 header = ib_chunk->kdata[idx]; 897 header = radeon_get_ib_value(p, idx);
900 pkt->idx = idx; 898 pkt->idx = idx;
901 pkt->type = CP_PACKET_GET_TYPE(header); 899 pkt->type = CP_PACKET_GET_TYPE(header);
902 pkt->count = CP_PACKET_GET_COUNT(header); 900 pkt->count = CP_PACKET_GET_COUNT(header);
@@ -939,7 +937,6 @@ int r100_cs_packet_parse(struct radeon_cs_parser *p,
939 */ 937 */
940int r100_cs_packet_parse_vline(struct radeon_cs_parser *p) 938int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
941{ 939{
942 struct radeon_cs_chunk *ib_chunk;
943 struct drm_mode_object *obj; 940 struct drm_mode_object *obj;
944 struct drm_crtc *crtc; 941 struct drm_crtc *crtc;
945 struct radeon_crtc *radeon_crtc; 942 struct radeon_crtc *radeon_crtc;
@@ -947,8 +944,9 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
947 int crtc_id; 944 int crtc_id;
948 int r; 945 int r;
949 uint32_t header, h_idx, reg; 946 uint32_t header, h_idx, reg;
947 volatile uint32_t *ib;
950 948
951 ib_chunk = &p->chunks[p->chunk_ib_idx]; 949 ib = p->ib->ptr;
952 950
953 /* parse the wait until */ 951 /* parse the wait until */
954 r = r100_cs_packet_parse(p, &waitreloc, p->idx); 952 r = r100_cs_packet_parse(p, &waitreloc, p->idx);
@@ -963,24 +961,24 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
963 return r; 961 return r;
964 } 962 }
965 963
966 if (ib_chunk->kdata[waitreloc.idx + 1] != RADEON_WAIT_CRTC_VLINE) { 964 if (radeon_get_ib_value(p, waitreloc.idx + 1) != RADEON_WAIT_CRTC_VLINE) {
967 DRM_ERROR("vline wait had illegal wait until\n"); 965 DRM_ERROR("vline wait had illegal wait until\n");
968 r = -EINVAL; 966 r = -EINVAL;
969 return r; 967 return r;
970 } 968 }
971 969
972 /* jump over the NOP */ 970 /* jump over the NOP */
973 r = r100_cs_packet_parse(p, &p3reloc, p->idx); 971 r = r100_cs_packet_parse(p, &p3reloc, p->idx + waitreloc.count + 2);
974 if (r) 972 if (r)
975 return r; 973 return r;
976 974
977 h_idx = p->idx - 2; 975 h_idx = p->idx - 2;
978 p->idx += waitreloc.count; 976 p->idx += waitreloc.count + 2;
979 p->idx += p3reloc.count; 977 p->idx += p3reloc.count + 2;
980 978
981 header = ib_chunk->kdata[h_idx]; 979 header = radeon_get_ib_value(p, h_idx);
982 crtc_id = ib_chunk->kdata[h_idx + 5]; 980 crtc_id = radeon_get_ib_value(p, h_idx + 5);
983 reg = ib_chunk->kdata[h_idx] >> 2; 981 reg = header >> 2;
984 mutex_lock(&p->rdev->ddev->mode_config.mutex); 982 mutex_lock(&p->rdev->ddev->mode_config.mutex);
985 obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC); 983 obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
986 if (!obj) { 984 if (!obj) {
@@ -994,16 +992,16 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
994 992
995 if (!crtc->enabled) { 993 if (!crtc->enabled) {
996 /* if the CRTC isn't enabled - we need to nop out the wait until */ 994 /* if the CRTC isn't enabled - we need to nop out the wait until */
997 ib_chunk->kdata[h_idx + 2] = PACKET2(0); 995 ib[h_idx + 2] = PACKET2(0);
998 ib_chunk->kdata[h_idx + 3] = PACKET2(0); 996 ib[h_idx + 3] = PACKET2(0);
999 } else if (crtc_id == 1) { 997 } else if (crtc_id == 1) {
1000 switch (reg) { 998 switch (reg) {
1001 case AVIVO_D1MODE_VLINE_START_END: 999 case AVIVO_D1MODE_VLINE_START_END:
1002 header &= R300_CP_PACKET0_REG_MASK; 1000 header &= ~R300_CP_PACKET0_REG_MASK;
1003 header |= AVIVO_D2MODE_VLINE_START_END >> 2; 1001 header |= AVIVO_D2MODE_VLINE_START_END >> 2;
1004 break; 1002 break;
1005 case RADEON_CRTC_GUI_TRIG_VLINE: 1003 case RADEON_CRTC_GUI_TRIG_VLINE:
1006 header &= R300_CP_PACKET0_REG_MASK; 1004 header &= ~R300_CP_PACKET0_REG_MASK;
1007 header |= RADEON_CRTC2_GUI_TRIG_VLINE >> 2; 1005 header |= RADEON_CRTC2_GUI_TRIG_VLINE >> 2;
1008 break; 1006 break;
1009 default: 1007 default:
@@ -1011,8 +1009,8 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
1011 r = -EINVAL; 1009 r = -EINVAL;
1012 goto out; 1010 goto out;
1013 } 1011 }
1014 ib_chunk->kdata[h_idx] = header; 1012 ib[h_idx] = header;
1015 ib_chunk->kdata[h_idx + 3] |= RADEON_ENG_DISPLAY_SELECT_CRTC1; 1013 ib[h_idx + 3] |= RADEON_ENG_DISPLAY_SELECT_CRTC1;
1016 } 1014 }
1017out: 1015out:
1018 mutex_unlock(&p->rdev->ddev->mode_config.mutex); 1016 mutex_unlock(&p->rdev->ddev->mode_config.mutex);
@@ -1033,7 +1031,6 @@ out:
1033int r100_cs_packet_next_reloc(struct radeon_cs_parser *p, 1031int r100_cs_packet_next_reloc(struct radeon_cs_parser *p,
1034 struct radeon_cs_reloc **cs_reloc) 1032 struct radeon_cs_reloc **cs_reloc)
1035{ 1033{
1036 struct radeon_cs_chunk *ib_chunk;
1037 struct radeon_cs_chunk *relocs_chunk; 1034 struct radeon_cs_chunk *relocs_chunk;
1038 struct radeon_cs_packet p3reloc; 1035 struct radeon_cs_packet p3reloc;
1039 unsigned idx; 1036 unsigned idx;
@@ -1044,7 +1041,6 @@ int r100_cs_packet_next_reloc(struct radeon_cs_parser *p,
1044 return -EINVAL; 1041 return -EINVAL;
1045 } 1042 }
1046 *cs_reloc = NULL; 1043 *cs_reloc = NULL;
1047 ib_chunk = &p->chunks[p->chunk_ib_idx];
1048 relocs_chunk = &p->chunks[p->chunk_relocs_idx]; 1044 relocs_chunk = &p->chunks[p->chunk_relocs_idx];
1049 r = r100_cs_packet_parse(p, &p3reloc, p->idx); 1045 r = r100_cs_packet_parse(p, &p3reloc, p->idx);
1050 if (r) { 1046 if (r) {
@@ -1057,7 +1053,7 @@ int r100_cs_packet_next_reloc(struct radeon_cs_parser *p,
1057 r100_cs_dump_packet(p, &p3reloc); 1053 r100_cs_dump_packet(p, &p3reloc);
1058 return -EINVAL; 1054 return -EINVAL;
1059 } 1055 }
1060 idx = ib_chunk->kdata[p3reloc.idx + 1]; 1056 idx = radeon_get_ib_value(p, p3reloc.idx + 1);
1061 if (idx >= relocs_chunk->length_dw) { 1057 if (idx >= relocs_chunk->length_dw) {
1062 DRM_ERROR("Relocs at %d after relocations chunk end %d !\n", 1058 DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
1063 idx, relocs_chunk->length_dw); 1059 idx, relocs_chunk->length_dw);
@@ -1126,7 +1122,6 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
1126 struct radeon_cs_packet *pkt, 1122 struct radeon_cs_packet *pkt,
1127 unsigned idx, unsigned reg) 1123 unsigned idx, unsigned reg)
1128{ 1124{
1129 struct radeon_cs_chunk *ib_chunk;
1130 struct radeon_cs_reloc *reloc; 1125 struct radeon_cs_reloc *reloc;
1131 struct r100_cs_track *track; 1126 struct r100_cs_track *track;
1132 volatile uint32_t *ib; 1127 volatile uint32_t *ib;
@@ -1134,11 +1129,13 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
1134 int r; 1129 int r;
1135 int i, face; 1130 int i, face;
1136 u32 tile_flags = 0; 1131 u32 tile_flags = 0;
1132 u32 idx_value;
1137 1133
1138 ib = p->ib->ptr; 1134 ib = p->ib->ptr;
1139 ib_chunk = &p->chunks[p->chunk_ib_idx];
1140 track = (struct r100_cs_track *)p->track; 1135 track = (struct r100_cs_track *)p->track;
1141 1136
1137 idx_value = radeon_get_ib_value(p, idx);
1138
1142 switch (reg) { 1139 switch (reg) {
1143 case RADEON_CRTC_GUI_TRIG_VLINE: 1140 case RADEON_CRTC_GUI_TRIG_VLINE:
1144 r = r100_cs_packet_parse_vline(p); 1141 r = r100_cs_packet_parse_vline(p);
@@ -1166,8 +1163,8 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
1166 return r; 1163 return r;
1167 } 1164 }
1168 track->zb.robj = reloc->robj; 1165 track->zb.robj = reloc->robj;
1169 track->zb.offset = ib_chunk->kdata[idx]; 1166 track->zb.offset = idx_value;
1170 ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset); 1167 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
1171 break; 1168 break;
1172 case RADEON_RB3D_COLOROFFSET: 1169 case RADEON_RB3D_COLOROFFSET:
1173 r = r100_cs_packet_next_reloc(p, &reloc); 1170 r = r100_cs_packet_next_reloc(p, &reloc);
@@ -1178,8 +1175,8 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
1178 return r; 1175 return r;
1179 } 1176 }
1180 track->cb[0].robj = reloc->robj; 1177 track->cb[0].robj = reloc->robj;
1181 track->cb[0].offset = ib_chunk->kdata[idx]; 1178 track->cb[0].offset = idx_value;
1182 ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset); 1179 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
1183 break; 1180 break;
1184 case RADEON_PP_TXOFFSET_0: 1181 case RADEON_PP_TXOFFSET_0:
1185 case RADEON_PP_TXOFFSET_1: 1182 case RADEON_PP_TXOFFSET_1:
@@ -1192,7 +1189,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
1192 r100_cs_dump_packet(p, pkt); 1189 r100_cs_dump_packet(p, pkt);
1193 return r; 1190 return r;
1194 } 1191 }
1195 ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset); 1192 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
1196 track->textures[i].robj = reloc->robj; 1193 track->textures[i].robj = reloc->robj;
1197 break; 1194 break;
1198 case RADEON_PP_CUBIC_OFFSET_T0_0: 1195 case RADEON_PP_CUBIC_OFFSET_T0_0:
@@ -1208,8 +1205,8 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
1208 r100_cs_dump_packet(p, pkt); 1205 r100_cs_dump_packet(p, pkt);
1209 return r; 1206 return r;
1210 } 1207 }
1211 track->textures[0].cube_info[i].offset = ib_chunk->kdata[idx]; 1208 track->textures[0].cube_info[i].offset = idx_value;
1212 ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset); 1209 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
1213 track->textures[0].cube_info[i].robj = reloc->robj; 1210 track->textures[0].cube_info[i].robj = reloc->robj;
1214 break; 1211 break;
1215 case RADEON_PP_CUBIC_OFFSET_T1_0: 1212 case RADEON_PP_CUBIC_OFFSET_T1_0:
@@ -1225,8 +1222,8 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
1225 r100_cs_dump_packet(p, pkt); 1222 r100_cs_dump_packet(p, pkt);
1226 return r; 1223 return r;
1227 } 1224 }
1228 track->textures[1].cube_info[i].offset = ib_chunk->kdata[idx]; 1225 track->textures[1].cube_info[i].offset = idx_value;
1229 ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset); 1226 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
1230 track->textures[1].cube_info[i].robj = reloc->robj; 1227 track->textures[1].cube_info[i].robj = reloc->robj;
1231 break; 1228 break;
1232 case RADEON_PP_CUBIC_OFFSET_T2_0: 1229 case RADEON_PP_CUBIC_OFFSET_T2_0:
@@ -1242,12 +1239,12 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
1242 r100_cs_dump_packet(p, pkt); 1239 r100_cs_dump_packet(p, pkt);
1243 return r; 1240 return r;
1244 } 1241 }
1245 track->textures[2].cube_info[i].offset = ib_chunk->kdata[idx]; 1242 track->textures[2].cube_info[i].offset = idx_value;
1246 ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset); 1243 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
1247 track->textures[2].cube_info[i].robj = reloc->robj; 1244 track->textures[2].cube_info[i].robj = reloc->robj;
1248 break; 1245 break;
1249 case RADEON_RE_WIDTH_HEIGHT: 1246 case RADEON_RE_WIDTH_HEIGHT:
1250 track->maxy = ((ib_chunk->kdata[idx] >> 16) & 0x7FF); 1247 track->maxy = ((idx_value >> 16) & 0x7FF);
1251 break; 1248 break;
1252 case RADEON_RB3D_COLORPITCH: 1249 case RADEON_RB3D_COLORPITCH:
1253 r = r100_cs_packet_next_reloc(p, &reloc); 1250 r = r100_cs_packet_next_reloc(p, &reloc);
@@ -1263,17 +1260,17 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
1263 if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) 1260 if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
1264 tile_flags |= RADEON_COLOR_MICROTILE_ENABLE; 1261 tile_flags |= RADEON_COLOR_MICROTILE_ENABLE;
1265 1262
1266 tmp = ib_chunk->kdata[idx] & ~(0x7 << 16); 1263 tmp = idx_value & ~(0x7 << 16);
1267 tmp |= tile_flags; 1264 tmp |= tile_flags;
1268 ib[idx] = tmp; 1265 ib[idx] = tmp;
1269 1266
1270 track->cb[0].pitch = ib_chunk->kdata[idx] & RADEON_COLORPITCH_MASK; 1267 track->cb[0].pitch = idx_value & RADEON_COLORPITCH_MASK;
1271 break; 1268 break;
1272 case RADEON_RB3D_DEPTHPITCH: 1269 case RADEON_RB3D_DEPTHPITCH:
1273 track->zb.pitch = ib_chunk->kdata[idx] & RADEON_DEPTHPITCH_MASK; 1270 track->zb.pitch = idx_value & RADEON_DEPTHPITCH_MASK;
1274 break; 1271 break;
1275 case RADEON_RB3D_CNTL: 1272 case RADEON_RB3D_CNTL:
1276 switch ((ib_chunk->kdata[idx] >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f) { 1273 switch ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f) {
1277 case 7: 1274 case 7:
1278 case 8: 1275 case 8:
1279 case 9: 1276 case 9:
@@ -1291,13 +1288,13 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
1291 break; 1288 break;
1292 default: 1289 default:
1293 DRM_ERROR("Invalid color buffer format (%d) !\n", 1290 DRM_ERROR("Invalid color buffer format (%d) !\n",
1294 ((ib_chunk->kdata[idx] >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f)); 1291 ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f));
1295 return -EINVAL; 1292 return -EINVAL;
1296 } 1293 }
1297 track->z_enabled = !!(ib_chunk->kdata[idx] & RADEON_Z_ENABLE); 1294 track->z_enabled = !!(idx_value & RADEON_Z_ENABLE);
1298 break; 1295 break;
1299 case RADEON_RB3D_ZSTENCILCNTL: 1296 case RADEON_RB3D_ZSTENCILCNTL:
1300 switch (ib_chunk->kdata[idx] & 0xf) { 1297 switch (idx_value & 0xf) {
1301 case 0: 1298 case 0:
1302 track->zb.cpp = 2; 1299 track->zb.cpp = 2;
1303 break; 1300 break;
@@ -1321,44 +1318,44 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
1321 r100_cs_dump_packet(p, pkt); 1318 r100_cs_dump_packet(p, pkt);
1322 return r; 1319 return r;
1323 } 1320 }
1324 ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset); 1321 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
1325 break; 1322 break;
1326 case RADEON_PP_CNTL: 1323 case RADEON_PP_CNTL:
1327 { 1324 {
1328 uint32_t temp = ib_chunk->kdata[idx] >> 4; 1325 uint32_t temp = idx_value >> 4;
1329 for (i = 0; i < track->num_texture; i++) 1326 for (i = 0; i < track->num_texture; i++)
1330 track->textures[i].enabled = !!(temp & (1 << i)); 1327 track->textures[i].enabled = !!(temp & (1 << i));
1331 } 1328 }
1332 break; 1329 break;
1333 case RADEON_SE_VF_CNTL: 1330 case RADEON_SE_VF_CNTL:
1334 track->vap_vf_cntl = ib_chunk->kdata[idx]; 1331 track->vap_vf_cntl = idx_value;
1335 break; 1332 break;
1336 case RADEON_SE_VTX_FMT: 1333 case RADEON_SE_VTX_FMT:
1337 track->vtx_size = r100_get_vtx_size(ib_chunk->kdata[idx]); 1334 track->vtx_size = r100_get_vtx_size(idx_value);
1338 break; 1335 break;
1339 case RADEON_PP_TEX_SIZE_0: 1336 case RADEON_PP_TEX_SIZE_0:
1340 case RADEON_PP_TEX_SIZE_1: 1337 case RADEON_PP_TEX_SIZE_1:
1341 case RADEON_PP_TEX_SIZE_2: 1338 case RADEON_PP_TEX_SIZE_2:
1342 i = (reg - RADEON_PP_TEX_SIZE_0) / 8; 1339 i = (reg - RADEON_PP_TEX_SIZE_0) / 8;
1343 track->textures[i].width = (ib_chunk->kdata[idx] & RADEON_TEX_USIZE_MASK) + 1; 1340 track->textures[i].width = (idx_value & RADEON_TEX_USIZE_MASK) + 1;
1344 track->textures[i].height = ((ib_chunk->kdata[idx] & RADEON_TEX_VSIZE_MASK) >> RADEON_TEX_VSIZE_SHIFT) + 1; 1341 track->textures[i].height = ((idx_value & RADEON_TEX_VSIZE_MASK) >> RADEON_TEX_VSIZE_SHIFT) + 1;
1345 break; 1342 break;
1346 case RADEON_PP_TEX_PITCH_0: 1343 case RADEON_PP_TEX_PITCH_0:
1347 case RADEON_PP_TEX_PITCH_1: 1344 case RADEON_PP_TEX_PITCH_1:
1348 case RADEON_PP_TEX_PITCH_2: 1345 case RADEON_PP_TEX_PITCH_2:
1349 i = (reg - RADEON_PP_TEX_PITCH_0) / 8; 1346 i = (reg - RADEON_PP_TEX_PITCH_0) / 8;
1350 track->textures[i].pitch = ib_chunk->kdata[idx] + 32; 1347 track->textures[i].pitch = idx_value + 32;
1351 break; 1348 break;
1352 case RADEON_PP_TXFILTER_0: 1349 case RADEON_PP_TXFILTER_0:
1353 case RADEON_PP_TXFILTER_1: 1350 case RADEON_PP_TXFILTER_1:
1354 case RADEON_PP_TXFILTER_2: 1351 case RADEON_PP_TXFILTER_2:
1355 i = (reg - RADEON_PP_TXFILTER_0) / 24; 1352 i = (reg - RADEON_PP_TXFILTER_0) / 24;
1356 track->textures[i].num_levels = ((ib_chunk->kdata[idx] & RADEON_MAX_MIP_LEVEL_MASK) 1353 track->textures[i].num_levels = ((idx_value & RADEON_MAX_MIP_LEVEL_MASK)
1357 >> RADEON_MAX_MIP_LEVEL_SHIFT); 1354 >> RADEON_MAX_MIP_LEVEL_SHIFT);
1358 tmp = (ib_chunk->kdata[idx] >> 23) & 0x7; 1355 tmp = (idx_value >> 23) & 0x7;
1359 if (tmp == 2 || tmp == 6) 1356 if (tmp == 2 || tmp == 6)
1360 track->textures[i].roundup_w = false; 1357 track->textures[i].roundup_w = false;
1361 tmp = (ib_chunk->kdata[idx] >> 27) & 0x7; 1358 tmp = (idx_value >> 27) & 0x7;
1362 if (tmp == 2 || tmp == 6) 1359 if (tmp == 2 || tmp == 6)
1363 track->textures[i].roundup_h = false; 1360 track->textures[i].roundup_h = false;
1364 break; 1361 break;
@@ -1366,16 +1363,16 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
1366 case RADEON_PP_TXFORMAT_1: 1363 case RADEON_PP_TXFORMAT_1:
1367 case RADEON_PP_TXFORMAT_2: 1364 case RADEON_PP_TXFORMAT_2:
1368 i = (reg - RADEON_PP_TXFORMAT_0) / 24; 1365 i = (reg - RADEON_PP_TXFORMAT_0) / 24;
1369 if (ib_chunk->kdata[idx] & RADEON_TXFORMAT_NON_POWER2) { 1366 if (idx_value & RADEON_TXFORMAT_NON_POWER2) {
1370 track->textures[i].use_pitch = 1; 1367 track->textures[i].use_pitch = 1;
1371 } else { 1368 } else {
1372 track->textures[i].use_pitch = 0; 1369 track->textures[i].use_pitch = 0;
1373 track->textures[i].width = 1 << ((ib_chunk->kdata[idx] >> RADEON_TXFORMAT_WIDTH_SHIFT) & RADEON_TXFORMAT_WIDTH_MASK); 1370 track->textures[i].width = 1 << ((idx_value >> RADEON_TXFORMAT_WIDTH_SHIFT) & RADEON_TXFORMAT_WIDTH_MASK);
1374 track->textures[i].height = 1 << ((ib_chunk->kdata[idx] >> RADEON_TXFORMAT_HEIGHT_SHIFT) & RADEON_TXFORMAT_HEIGHT_MASK); 1371 track->textures[i].height = 1 << ((idx_value >> RADEON_TXFORMAT_HEIGHT_SHIFT) & RADEON_TXFORMAT_HEIGHT_MASK);
1375 } 1372 }
1376 if (ib_chunk->kdata[idx] & RADEON_TXFORMAT_CUBIC_MAP_ENABLE) 1373 if (idx_value & RADEON_TXFORMAT_CUBIC_MAP_ENABLE)
1377 track->textures[i].tex_coord_type = 2; 1374 track->textures[i].tex_coord_type = 2;
1378 switch ((ib_chunk->kdata[idx] & RADEON_TXFORMAT_FORMAT_MASK)) { 1375 switch ((idx_value & RADEON_TXFORMAT_FORMAT_MASK)) {
1379 case RADEON_TXFORMAT_I8: 1376 case RADEON_TXFORMAT_I8:
1380 case RADEON_TXFORMAT_RGB332: 1377 case RADEON_TXFORMAT_RGB332:
1381 case RADEON_TXFORMAT_Y8: 1378 case RADEON_TXFORMAT_Y8:
@@ -1402,13 +1399,13 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
1402 track->textures[i].cpp = 4; 1399 track->textures[i].cpp = 4;
1403 break; 1400 break;
1404 } 1401 }
1405 track->textures[i].cube_info[4].width = 1 << ((ib_chunk->kdata[idx] >> 16) & 0xf); 1402 track->textures[i].cube_info[4].width = 1 << ((idx_value >> 16) & 0xf);
1406 track->textures[i].cube_info[4].height = 1 << ((ib_chunk->kdata[idx] >> 20) & 0xf); 1403 track->textures[i].cube_info[4].height = 1 << ((idx_value >> 20) & 0xf);
1407 break; 1404 break;
1408 case RADEON_PP_CUBIC_FACES_0: 1405 case RADEON_PP_CUBIC_FACES_0:
1409 case RADEON_PP_CUBIC_FACES_1: 1406 case RADEON_PP_CUBIC_FACES_1:
1410 case RADEON_PP_CUBIC_FACES_2: 1407 case RADEON_PP_CUBIC_FACES_2:
1411 tmp = ib_chunk->kdata[idx]; 1408 tmp = idx_value;
1412 i = (reg - RADEON_PP_CUBIC_FACES_0) / 4; 1409 i = (reg - RADEON_PP_CUBIC_FACES_0) / 4;
1413 for (face = 0; face < 4; face++) { 1410 for (face = 0; face < 4; face++) {
1414 track->textures[i].cube_info[face].width = 1 << ((tmp >> (face * 8)) & 0xf); 1411 track->textures[i].cube_info[face].width = 1 << ((tmp >> (face * 8)) & 0xf);
@@ -1427,15 +1424,14 @@ int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p,
1427 struct radeon_cs_packet *pkt, 1424 struct radeon_cs_packet *pkt,
1428 struct radeon_object *robj) 1425 struct radeon_object *robj)
1429{ 1426{
1430 struct radeon_cs_chunk *ib_chunk;
1431 unsigned idx; 1427 unsigned idx;
1432 1428 u32 value;
1433 ib_chunk = &p->chunks[p->chunk_ib_idx];
1434 idx = pkt->idx + 1; 1429 idx = pkt->idx + 1;
1435 if ((ib_chunk->kdata[idx+2] + 1) > radeon_object_size(robj)) { 1430 value = radeon_get_ib_value(p, idx + 2);
1431 if ((value + 1) > radeon_object_size(robj)) {
1436 DRM_ERROR("[drm] Buffer too small for PACKET3 INDX_BUFFER " 1432 DRM_ERROR("[drm] Buffer too small for PACKET3 INDX_BUFFER "
1437 "(need %u have %lu) !\n", 1433 "(need %u have %lu) !\n",
1438 ib_chunk->kdata[idx+2] + 1, 1434 value + 1,
1439 radeon_object_size(robj)); 1435 radeon_object_size(robj));
1440 return -EINVAL; 1436 return -EINVAL;
1441 } 1437 }
@@ -1445,59 +1441,20 @@ int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p,
1445static int r100_packet3_check(struct radeon_cs_parser *p, 1441static int r100_packet3_check(struct radeon_cs_parser *p,
1446 struct radeon_cs_packet *pkt) 1442 struct radeon_cs_packet *pkt)
1447{ 1443{
1448 struct radeon_cs_chunk *ib_chunk;
1449 struct radeon_cs_reloc *reloc; 1444 struct radeon_cs_reloc *reloc;
1450 struct r100_cs_track *track; 1445 struct r100_cs_track *track;
1451 unsigned idx; 1446 unsigned idx;
1452 unsigned i, c;
1453 volatile uint32_t *ib; 1447 volatile uint32_t *ib;
1454 int r; 1448 int r;
1455 1449
1456 ib = p->ib->ptr; 1450 ib = p->ib->ptr;
1457 ib_chunk = &p->chunks[p->chunk_ib_idx];
1458 idx = pkt->idx + 1; 1451 idx = pkt->idx + 1;
1459 track = (struct r100_cs_track *)p->track; 1452 track = (struct r100_cs_track *)p->track;
1460 switch (pkt->opcode) { 1453 switch (pkt->opcode) {
1461 case PACKET3_3D_LOAD_VBPNTR: 1454 case PACKET3_3D_LOAD_VBPNTR:
1462 c = ib_chunk->kdata[idx++]; 1455 r = r100_packet3_load_vbpntr(p, pkt, idx);
1463 track->num_arrays = c; 1456 if (r)
1464 for (i = 0; i < (c - 1); i += 2, idx += 3) { 1457 return r;
1465 r = r100_cs_packet_next_reloc(p, &reloc);
1466 if (r) {
1467 DRM_ERROR("No reloc for packet3 %d\n",
1468 pkt->opcode);
1469 r100_cs_dump_packet(p, pkt);
1470 return r;
1471 }
1472 ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset);
1473 track->arrays[i + 0].robj = reloc->robj;
1474 track->arrays[i + 0].esize = ib_chunk->kdata[idx] >> 8;
1475 track->arrays[i + 0].esize &= 0x7F;
1476 r = r100_cs_packet_next_reloc(p, &reloc);
1477 if (r) {
1478 DRM_ERROR("No reloc for packet3 %d\n",
1479 pkt->opcode);
1480 r100_cs_dump_packet(p, pkt);
1481 return r;
1482 }
1483 ib[idx+2] = ib_chunk->kdata[idx+2] + ((u32)reloc->lobj.gpu_offset);
1484 track->arrays[i + 1].robj = reloc->robj;
1485 track->arrays[i + 1].esize = ib_chunk->kdata[idx] >> 24;
1486 track->arrays[i + 1].esize &= 0x7F;
1487 }
1488 if (c & 1) {
1489 r = r100_cs_packet_next_reloc(p, &reloc);
1490 if (r) {
1491 DRM_ERROR("No reloc for packet3 %d\n",
1492 pkt->opcode);
1493 r100_cs_dump_packet(p, pkt);
1494 return r;
1495 }
1496 ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset);
1497 track->arrays[i + 0].robj = reloc->robj;
1498 track->arrays[i + 0].esize = ib_chunk->kdata[idx] >> 8;
1499 track->arrays[i + 0].esize &= 0x7F;
1500 }
1501 break; 1458 break;
1502 case PACKET3_INDX_BUFFER: 1459 case PACKET3_INDX_BUFFER:
1503 r = r100_cs_packet_next_reloc(p, &reloc); 1460 r = r100_cs_packet_next_reloc(p, &reloc);
@@ -1506,7 +1463,7 @@ static int r100_packet3_check(struct radeon_cs_parser *p,
1506 r100_cs_dump_packet(p, pkt); 1463 r100_cs_dump_packet(p, pkt);
1507 return r; 1464 return r;
1508 } 1465 }
1509 ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset); 1466 ib[idx+1] = radeon_get_ib_value(p, idx+1) + ((u32)reloc->lobj.gpu_offset);
1510 r = r100_cs_track_check_pkt3_indx_buffer(p, pkt, reloc->robj); 1467 r = r100_cs_track_check_pkt3_indx_buffer(p, pkt, reloc->robj);
1511 if (r) { 1468 if (r) {
1512 return r; 1469 return r;
@@ -1520,27 +1477,27 @@ static int r100_packet3_check(struct radeon_cs_parser *p,
1520 r100_cs_dump_packet(p, pkt); 1477 r100_cs_dump_packet(p, pkt);
1521 return r; 1478 return r;
1522 } 1479 }
1523 ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset); 1480 ib[idx] = radeon_get_ib_value(p, idx) + ((u32)reloc->lobj.gpu_offset);
1524 track->num_arrays = 1; 1481 track->num_arrays = 1;
1525 track->vtx_size = r100_get_vtx_size(ib_chunk->kdata[idx+2]); 1482 track->vtx_size = r100_get_vtx_size(radeon_get_ib_value(p, idx + 2));
1526 1483
1527 track->arrays[0].robj = reloc->robj; 1484 track->arrays[0].robj = reloc->robj;
1528 track->arrays[0].esize = track->vtx_size; 1485 track->arrays[0].esize = track->vtx_size;
1529 1486
1530 track->max_indx = ib_chunk->kdata[idx+1]; 1487 track->max_indx = radeon_get_ib_value(p, idx+1);
1531 1488
1532 track->vap_vf_cntl = ib_chunk->kdata[idx+3]; 1489 track->vap_vf_cntl = radeon_get_ib_value(p, idx+3);
1533 track->immd_dwords = pkt->count - 1; 1490 track->immd_dwords = pkt->count - 1;
1534 r = r100_cs_track_check(p->rdev, track); 1491 r = r100_cs_track_check(p->rdev, track);
1535 if (r) 1492 if (r)
1536 return r; 1493 return r;
1537 break; 1494 break;
1538 case PACKET3_3D_DRAW_IMMD: 1495 case PACKET3_3D_DRAW_IMMD:
1539 if (((ib_chunk->kdata[idx+1] >> 4) & 0x3) != 3) { 1496 if (((radeon_get_ib_value(p, idx + 1) >> 4) & 0x3) != 3) {
1540 DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n"); 1497 DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
1541 return -EINVAL; 1498 return -EINVAL;
1542 } 1499 }
1543 track->vap_vf_cntl = ib_chunk->kdata[idx+1]; 1500 track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
1544 track->immd_dwords = pkt->count - 1; 1501 track->immd_dwords = pkt->count - 1;
1545 r = r100_cs_track_check(p->rdev, track); 1502 r = r100_cs_track_check(p->rdev, track);
1546 if (r) 1503 if (r)
@@ -1548,11 +1505,11 @@ static int r100_packet3_check(struct radeon_cs_parser *p,
1548 break; 1505 break;
1549 /* triggers drawing using in-packet vertex data */ 1506 /* triggers drawing using in-packet vertex data */
1550 case PACKET3_3D_DRAW_IMMD_2: 1507 case PACKET3_3D_DRAW_IMMD_2:
1551 if (((ib_chunk->kdata[idx] >> 4) & 0x3) != 3) { 1508 if (((radeon_get_ib_value(p, idx) >> 4) & 0x3) != 3) {
1552 DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n"); 1509 DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
1553 return -EINVAL; 1510 return -EINVAL;
1554 } 1511 }
1555 track->vap_vf_cntl = ib_chunk->kdata[idx]; 1512 track->vap_vf_cntl = radeon_get_ib_value(p, idx);
1556 track->immd_dwords = pkt->count; 1513 track->immd_dwords = pkt->count;
1557 r = r100_cs_track_check(p->rdev, track); 1514 r = r100_cs_track_check(p->rdev, track);
1558 if (r) 1515 if (r)
@@ -1560,28 +1517,28 @@ static int r100_packet3_check(struct radeon_cs_parser *p,
1560 break; 1517 break;
1561 /* triggers drawing using in-packet vertex data */ 1518 /* triggers drawing using in-packet vertex data */
1562 case PACKET3_3D_DRAW_VBUF_2: 1519 case PACKET3_3D_DRAW_VBUF_2:
1563 track->vap_vf_cntl = ib_chunk->kdata[idx]; 1520 track->vap_vf_cntl = radeon_get_ib_value(p, idx);
1564 r = r100_cs_track_check(p->rdev, track); 1521 r = r100_cs_track_check(p->rdev, track);
1565 if (r) 1522 if (r)
1566 return r; 1523 return r;
1567 break; 1524 break;
1568 /* triggers drawing of vertex buffers setup elsewhere */ 1525 /* triggers drawing of vertex buffers setup elsewhere */
1569 case PACKET3_3D_DRAW_INDX_2: 1526 case PACKET3_3D_DRAW_INDX_2:
1570 track->vap_vf_cntl = ib_chunk->kdata[idx]; 1527 track->vap_vf_cntl = radeon_get_ib_value(p, idx);
1571 r = r100_cs_track_check(p->rdev, track); 1528 r = r100_cs_track_check(p->rdev, track);
1572 if (r) 1529 if (r)
1573 return r; 1530 return r;
1574 break; 1531 break;
1575 /* triggers drawing using indices to vertex buffer */ 1532 /* triggers drawing using indices to vertex buffer */
1576 case PACKET3_3D_DRAW_VBUF: 1533 case PACKET3_3D_DRAW_VBUF:
1577 track->vap_vf_cntl = ib_chunk->kdata[idx + 1]; 1534 track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
1578 r = r100_cs_track_check(p->rdev, track); 1535 r = r100_cs_track_check(p->rdev, track);
1579 if (r) 1536 if (r)
1580 return r; 1537 return r;
1581 break; 1538 break;
1582 /* triggers drawing of vertex buffers setup elsewhere */ 1539 /* triggers drawing of vertex buffers setup elsewhere */
1583 case PACKET3_3D_DRAW_INDX: 1540 case PACKET3_3D_DRAW_INDX:
1584 track->vap_vf_cntl = ib_chunk->kdata[idx + 1]; 1541 track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
1585 r = r100_cs_track_check(p->rdev, track); 1542 r = r100_cs_track_check(p->rdev, track);
1586 if (r) 1543 if (r)
1587 return r; 1544 return r;
diff --git a/drivers/gpu/drm/radeon/r100_track.h b/drivers/gpu/drm/radeon/r100_track.h
index 70a82eda394a..0daf0d76a891 100644
--- a/drivers/gpu/drm/radeon/r100_track.h
+++ b/drivers/gpu/drm/radeon/r100_track.h
@@ -84,6 +84,8 @@ int r200_packet0_check(struct radeon_cs_parser *p,
84 struct radeon_cs_packet *pkt, 84 struct radeon_cs_packet *pkt,
85 unsigned idx, unsigned reg); 85 unsigned idx, unsigned reg);
86 86
87
88
87static inline int r100_reloc_pitch_offset(struct radeon_cs_parser *p, 89static inline int r100_reloc_pitch_offset(struct radeon_cs_parser *p,
88 struct radeon_cs_packet *pkt, 90 struct radeon_cs_packet *pkt,
89 unsigned idx, 91 unsigned idx,
@@ -93,9 +95,7 @@ static inline int r100_reloc_pitch_offset(struct radeon_cs_parser *p,
93 u32 tile_flags = 0; 95 u32 tile_flags = 0;
94 u32 tmp; 96 u32 tmp;
95 struct radeon_cs_reloc *reloc; 97 struct radeon_cs_reloc *reloc;
96 struct radeon_cs_chunk *ib_chunk; 98 u32 value;
97
98 ib_chunk = &p->chunks[p->chunk_ib_idx];
99 99
100 r = r100_cs_packet_next_reloc(p, &reloc); 100 r = r100_cs_packet_next_reloc(p, &reloc);
101 if (r) { 101 if (r) {
@@ -104,7 +104,8 @@ static inline int r100_reloc_pitch_offset(struct radeon_cs_parser *p,
104 r100_cs_dump_packet(p, pkt); 104 r100_cs_dump_packet(p, pkt);
105 return r; 105 return r;
106 } 106 }
107 tmp = ib_chunk->kdata[idx] & 0x003fffff; 107 value = radeon_get_ib_value(p, idx);
108 tmp = value & 0x003fffff;
108 tmp += (((u32)reloc->lobj.gpu_offset) >> 10); 109 tmp += (((u32)reloc->lobj.gpu_offset) >> 10);
109 110
110 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) 111 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
@@ -119,6 +120,64 @@ static inline int r100_reloc_pitch_offset(struct radeon_cs_parser *p,
119 } 120 }
120 121
121 tmp |= tile_flags; 122 tmp |= tile_flags;
122 p->ib->ptr[idx] = (ib_chunk->kdata[idx] & 0x3fc00000) | tmp; 123 p->ib->ptr[idx] = (value & 0x3fc00000) | tmp;
123 return 0; 124 return 0;
124} 125}
126
127static inline int r100_packet3_load_vbpntr(struct radeon_cs_parser *p,
128 struct radeon_cs_packet *pkt,
129 int idx)
130{
131 unsigned c, i;
132 struct radeon_cs_reloc *reloc;
133 struct r100_cs_track *track;
134 int r = 0;
135 volatile uint32_t *ib;
136 u32 idx_value;
137
138 ib = p->ib->ptr;
139 track = (struct r100_cs_track *)p->track;
140 c = radeon_get_ib_value(p, idx++) & 0x1F;
141 track->num_arrays = c;
142 for (i = 0; i < (c - 1); i+=2, idx+=3) {
143 r = r100_cs_packet_next_reloc(p, &reloc);
144 if (r) {
145 DRM_ERROR("No reloc for packet3 %d\n",
146 pkt->opcode);
147 r100_cs_dump_packet(p, pkt);
148 return r;
149 }
150 idx_value = radeon_get_ib_value(p, idx);
151 ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->lobj.gpu_offset);
152
153 track->arrays[i + 0].esize = idx_value >> 8;
154 track->arrays[i + 0].robj = reloc->robj;
155 track->arrays[i + 0].esize &= 0x7F;
156 r = r100_cs_packet_next_reloc(p, &reloc);
157 if (r) {
158 DRM_ERROR("No reloc for packet3 %d\n",
159 pkt->opcode);
160 r100_cs_dump_packet(p, pkt);
161 return r;
162 }
163 ib[idx+2] = radeon_get_ib_value(p, idx + 2) + ((u32)reloc->lobj.gpu_offset);
164 track->arrays[i + 1].robj = reloc->robj;
165 track->arrays[i + 1].esize = idx_value >> 24;
166 track->arrays[i + 1].esize &= 0x7F;
167 }
168 if (c & 1) {
169 r = r100_cs_packet_next_reloc(p, &reloc);
170 if (r) {
171 DRM_ERROR("No reloc for packet3 %d\n",
172 pkt->opcode);
173 r100_cs_dump_packet(p, pkt);
174 return r;
175 }
176 idx_value = radeon_get_ib_value(p, idx);
177 ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->lobj.gpu_offset);
178 track->arrays[i + 0].robj = reloc->robj;
179 track->arrays[i + 0].esize = idx_value >> 8;
180 track->arrays[i + 0].esize &= 0x7F;
181 }
182 return r;
183}
diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c
index 568c74bfba3d..cf7fea5ff2e5 100644
--- a/drivers/gpu/drm/radeon/r200.c
+++ b/drivers/gpu/drm/radeon/r200.c
@@ -96,7 +96,6 @@ int r200_packet0_check(struct radeon_cs_parser *p,
96 struct radeon_cs_packet *pkt, 96 struct radeon_cs_packet *pkt,
97 unsigned idx, unsigned reg) 97 unsigned idx, unsigned reg)
98{ 98{
99 struct radeon_cs_chunk *ib_chunk;
100 struct radeon_cs_reloc *reloc; 99 struct radeon_cs_reloc *reloc;
101 struct r100_cs_track *track; 100 struct r100_cs_track *track;
102 volatile uint32_t *ib; 101 volatile uint32_t *ib;
@@ -105,11 +104,11 @@ int r200_packet0_check(struct radeon_cs_parser *p,
105 int i; 104 int i;
106 int face; 105 int face;
107 u32 tile_flags = 0; 106 u32 tile_flags = 0;
107 u32 idx_value;
108 108
109 ib = p->ib->ptr; 109 ib = p->ib->ptr;
110 ib_chunk = &p->chunks[p->chunk_ib_idx];
111 track = (struct r100_cs_track *)p->track; 110 track = (struct r100_cs_track *)p->track;
112 111 idx_value = radeon_get_ib_value(p, idx);
113 switch (reg) { 112 switch (reg) {
114 case RADEON_CRTC_GUI_TRIG_VLINE: 113 case RADEON_CRTC_GUI_TRIG_VLINE:
115 r = r100_cs_packet_parse_vline(p); 114 r = r100_cs_packet_parse_vline(p);
@@ -137,8 +136,8 @@ int r200_packet0_check(struct radeon_cs_parser *p,
137 return r; 136 return r;
138 } 137 }
139 track->zb.robj = reloc->robj; 138 track->zb.robj = reloc->robj;
140 track->zb.offset = ib_chunk->kdata[idx]; 139 track->zb.offset = idx_value;
141 ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset); 140 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
142 break; 141 break;
143 case RADEON_RB3D_COLOROFFSET: 142 case RADEON_RB3D_COLOROFFSET:
144 r = r100_cs_packet_next_reloc(p, &reloc); 143 r = r100_cs_packet_next_reloc(p, &reloc);
@@ -149,8 +148,8 @@ int r200_packet0_check(struct radeon_cs_parser *p,
149 return r; 148 return r;
150 } 149 }
151 track->cb[0].robj = reloc->robj; 150 track->cb[0].robj = reloc->robj;
152 track->cb[0].offset = ib_chunk->kdata[idx]; 151 track->cb[0].offset = idx_value;
153 ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset); 152 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
154 break; 153 break;
155 case R200_PP_TXOFFSET_0: 154 case R200_PP_TXOFFSET_0:
156 case R200_PP_TXOFFSET_1: 155 case R200_PP_TXOFFSET_1:
@@ -166,7 +165,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
166 r100_cs_dump_packet(p, pkt); 165 r100_cs_dump_packet(p, pkt);
167 return r; 166 return r;
168 } 167 }
169 ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset); 168 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
170 track->textures[i].robj = reloc->robj; 169 track->textures[i].robj = reloc->robj;
171 break; 170 break;
172 case R200_PP_CUBIC_OFFSET_F1_0: 171 case R200_PP_CUBIC_OFFSET_F1_0:
@@ -208,12 +207,12 @@ int r200_packet0_check(struct radeon_cs_parser *p,
208 r100_cs_dump_packet(p, pkt); 207 r100_cs_dump_packet(p, pkt);
209 return r; 208 return r;
210 } 209 }
211 track->textures[i].cube_info[face - 1].offset = ib_chunk->kdata[idx]; 210 track->textures[i].cube_info[face - 1].offset = idx_value;
212 ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset); 211 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
213 track->textures[i].cube_info[face - 1].robj = reloc->robj; 212 track->textures[i].cube_info[face - 1].robj = reloc->robj;
214 break; 213 break;
215 case RADEON_RE_WIDTH_HEIGHT: 214 case RADEON_RE_WIDTH_HEIGHT:
216 track->maxy = ((ib_chunk->kdata[idx] >> 16) & 0x7FF); 215 track->maxy = ((idx_value >> 16) & 0x7FF);
217 break; 216 break;
218 case RADEON_RB3D_COLORPITCH: 217 case RADEON_RB3D_COLORPITCH:
219 r = r100_cs_packet_next_reloc(p, &reloc); 218 r = r100_cs_packet_next_reloc(p, &reloc);
@@ -229,17 +228,17 @@ int r200_packet0_check(struct radeon_cs_parser *p,
229 if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) 228 if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
230 tile_flags |= RADEON_COLOR_MICROTILE_ENABLE; 229 tile_flags |= RADEON_COLOR_MICROTILE_ENABLE;
231 230
232 tmp = ib_chunk->kdata[idx] & ~(0x7 << 16); 231 tmp = idx_value & ~(0x7 << 16);
233 tmp |= tile_flags; 232 tmp |= tile_flags;
234 ib[idx] = tmp; 233 ib[idx] = tmp;
235 234
236 track->cb[0].pitch = ib_chunk->kdata[idx] & RADEON_COLORPITCH_MASK; 235 track->cb[0].pitch = idx_value & RADEON_COLORPITCH_MASK;
237 break; 236 break;
238 case RADEON_RB3D_DEPTHPITCH: 237 case RADEON_RB3D_DEPTHPITCH:
239 track->zb.pitch = ib_chunk->kdata[idx] & RADEON_DEPTHPITCH_MASK; 238 track->zb.pitch = idx_value & RADEON_DEPTHPITCH_MASK;
240 break; 239 break;
241 case RADEON_RB3D_CNTL: 240 case RADEON_RB3D_CNTL:
242 switch ((ib_chunk->kdata[idx] >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f) { 241 switch ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f) {
243 case 7: 242 case 7:
244 case 8: 243 case 8:
245 case 9: 244 case 9:
@@ -257,18 +256,18 @@ int r200_packet0_check(struct radeon_cs_parser *p,
257 break; 256 break;
258 default: 257 default:
259 DRM_ERROR("Invalid color buffer format (%d) !\n", 258 DRM_ERROR("Invalid color buffer format (%d) !\n",
260 ((ib_chunk->kdata[idx] >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f)); 259 ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f));
261 return -EINVAL; 260 return -EINVAL;
262 } 261 }
263 if (ib_chunk->kdata[idx] & RADEON_DEPTHXY_OFFSET_ENABLE) { 262 if (idx_value & RADEON_DEPTHXY_OFFSET_ENABLE) {
264 DRM_ERROR("No support for depth xy offset in kms\n"); 263 DRM_ERROR("No support for depth xy offset in kms\n");
265 return -EINVAL; 264 return -EINVAL;
266 } 265 }
267 266
268 track->z_enabled = !!(ib_chunk->kdata[idx] & RADEON_Z_ENABLE); 267 track->z_enabled = !!(idx_value & RADEON_Z_ENABLE);
269 break; 268 break;
270 case RADEON_RB3D_ZSTENCILCNTL: 269 case RADEON_RB3D_ZSTENCILCNTL:
271 switch (ib_chunk->kdata[idx] & 0xf) { 270 switch (idx_value & 0xf) {
272 case 0: 271 case 0:
273 track->zb.cpp = 2; 272 track->zb.cpp = 2;
274 break; 273 break;
@@ -292,27 +291,27 @@ int r200_packet0_check(struct radeon_cs_parser *p,
292 r100_cs_dump_packet(p, pkt); 291 r100_cs_dump_packet(p, pkt);
293 return r; 292 return r;
294 } 293 }
295 ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset); 294 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
296 break; 295 break;
297 case RADEON_PP_CNTL: 296 case RADEON_PP_CNTL:
298 { 297 {
299 uint32_t temp = ib_chunk->kdata[idx] >> 4; 298 uint32_t temp = idx_value >> 4;
300 for (i = 0; i < track->num_texture; i++) 299 for (i = 0; i < track->num_texture; i++)
301 track->textures[i].enabled = !!(temp & (1 << i)); 300 track->textures[i].enabled = !!(temp & (1 << i));
302 } 301 }
303 break; 302 break;
304 case RADEON_SE_VF_CNTL: 303 case RADEON_SE_VF_CNTL:
305 track->vap_vf_cntl = ib_chunk->kdata[idx]; 304 track->vap_vf_cntl = idx_value;
306 break; 305 break;
307 case 0x210c: 306 case 0x210c:
308 /* VAP_VF_MAX_VTX_INDX */ 307 /* VAP_VF_MAX_VTX_INDX */
309 track->max_indx = ib_chunk->kdata[idx] & 0x00FFFFFFUL; 308 track->max_indx = idx_value & 0x00FFFFFFUL;
310 break; 309 break;
311 case R200_SE_VTX_FMT_0: 310 case R200_SE_VTX_FMT_0:
312 track->vtx_size = r200_get_vtx_size_0(ib_chunk->kdata[idx]); 311 track->vtx_size = r200_get_vtx_size_0(idx_value);
313 break; 312 break;
314 case R200_SE_VTX_FMT_1: 313 case R200_SE_VTX_FMT_1:
315 track->vtx_size += r200_get_vtx_size_1(ib_chunk->kdata[idx]); 314 track->vtx_size += r200_get_vtx_size_1(idx_value);
316 break; 315 break;
317 case R200_PP_TXSIZE_0: 316 case R200_PP_TXSIZE_0:
318 case R200_PP_TXSIZE_1: 317 case R200_PP_TXSIZE_1:
@@ -321,8 +320,8 @@ int r200_packet0_check(struct radeon_cs_parser *p,
321 case R200_PP_TXSIZE_4: 320 case R200_PP_TXSIZE_4:
322 case R200_PP_TXSIZE_5: 321 case R200_PP_TXSIZE_5:
323 i = (reg - R200_PP_TXSIZE_0) / 32; 322 i = (reg - R200_PP_TXSIZE_0) / 32;
324 track->textures[i].width = (ib_chunk->kdata[idx] & RADEON_TEX_USIZE_MASK) + 1; 323 track->textures[i].width = (idx_value & RADEON_TEX_USIZE_MASK) + 1;
325 track->textures[i].height = ((ib_chunk->kdata[idx] & RADEON_TEX_VSIZE_MASK) >> RADEON_TEX_VSIZE_SHIFT) + 1; 324 track->textures[i].height = ((idx_value & RADEON_TEX_VSIZE_MASK) >> RADEON_TEX_VSIZE_SHIFT) + 1;
326 break; 325 break;
327 case R200_PP_TXPITCH_0: 326 case R200_PP_TXPITCH_0:
328 case R200_PP_TXPITCH_1: 327 case R200_PP_TXPITCH_1:
@@ -331,7 +330,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
331 case R200_PP_TXPITCH_4: 330 case R200_PP_TXPITCH_4:
332 case R200_PP_TXPITCH_5: 331 case R200_PP_TXPITCH_5:
333 i = (reg - R200_PP_TXPITCH_0) / 32; 332 i = (reg - R200_PP_TXPITCH_0) / 32;
334 track->textures[i].pitch = ib_chunk->kdata[idx] + 32; 333 track->textures[i].pitch = idx_value + 32;
335 break; 334 break;
336 case R200_PP_TXFILTER_0: 335 case R200_PP_TXFILTER_0:
337 case R200_PP_TXFILTER_1: 336 case R200_PP_TXFILTER_1:
@@ -340,12 +339,12 @@ int r200_packet0_check(struct radeon_cs_parser *p,
340 case R200_PP_TXFILTER_4: 339 case R200_PP_TXFILTER_4:
341 case R200_PP_TXFILTER_5: 340 case R200_PP_TXFILTER_5:
342 i = (reg - R200_PP_TXFILTER_0) / 32; 341 i = (reg - R200_PP_TXFILTER_0) / 32;
343 track->textures[i].num_levels = ((ib_chunk->kdata[idx] & R200_MAX_MIP_LEVEL_MASK) 342 track->textures[i].num_levels = ((idx_value & R200_MAX_MIP_LEVEL_MASK)
344 >> R200_MAX_MIP_LEVEL_SHIFT); 343 >> R200_MAX_MIP_LEVEL_SHIFT);
345 tmp = (ib_chunk->kdata[idx] >> 23) & 0x7; 344 tmp = (idx_value >> 23) & 0x7;
346 if (tmp == 2 || tmp == 6) 345 if (tmp == 2 || tmp == 6)
347 track->textures[i].roundup_w = false; 346 track->textures[i].roundup_w = false;
348 tmp = (ib_chunk->kdata[idx] >> 27) & 0x7; 347 tmp = (idx_value >> 27) & 0x7;
349 if (tmp == 2 || tmp == 6) 348 if (tmp == 2 || tmp == 6)
350 track->textures[i].roundup_h = false; 349 track->textures[i].roundup_h = false;
351 break; 350 break;
@@ -364,8 +363,8 @@ int r200_packet0_check(struct radeon_cs_parser *p,
364 case R200_PP_TXFORMAT_X_4: 363 case R200_PP_TXFORMAT_X_4:
365 case R200_PP_TXFORMAT_X_5: 364 case R200_PP_TXFORMAT_X_5:
366 i = (reg - R200_PP_TXFORMAT_X_0) / 32; 365 i = (reg - R200_PP_TXFORMAT_X_0) / 32;
367 track->textures[i].txdepth = ib_chunk->kdata[idx] & 0x7; 366 track->textures[i].txdepth = idx_value & 0x7;
368 tmp = (ib_chunk->kdata[idx] >> 16) & 0x3; 367 tmp = (idx_value >> 16) & 0x3;
369 /* 2D, 3D, CUBE */ 368 /* 2D, 3D, CUBE */
370 switch (tmp) { 369 switch (tmp) {
371 case 0: 370 case 0:
@@ -389,14 +388,14 @@ int r200_packet0_check(struct radeon_cs_parser *p,
389 case R200_PP_TXFORMAT_4: 388 case R200_PP_TXFORMAT_4:
390 case R200_PP_TXFORMAT_5: 389 case R200_PP_TXFORMAT_5:
391 i = (reg - R200_PP_TXFORMAT_0) / 32; 390 i = (reg - R200_PP_TXFORMAT_0) / 32;
392 if (ib_chunk->kdata[idx] & R200_TXFORMAT_NON_POWER2) { 391 if (idx_value & R200_TXFORMAT_NON_POWER2) {
393 track->textures[i].use_pitch = 1; 392 track->textures[i].use_pitch = 1;
394 } else { 393 } else {
395 track->textures[i].use_pitch = 0; 394 track->textures[i].use_pitch = 0;
396 track->textures[i].width = 1 << ((ib_chunk->kdata[idx] >> RADEON_TXFORMAT_WIDTH_SHIFT) & RADEON_TXFORMAT_WIDTH_MASK); 395 track->textures[i].width = 1 << ((idx_value >> RADEON_TXFORMAT_WIDTH_SHIFT) & RADEON_TXFORMAT_WIDTH_MASK);
397 track->textures[i].height = 1 << ((ib_chunk->kdata[idx] >> RADEON_TXFORMAT_HEIGHT_SHIFT) & RADEON_TXFORMAT_HEIGHT_MASK); 396 track->textures[i].height = 1 << ((idx_value >> RADEON_TXFORMAT_HEIGHT_SHIFT) & RADEON_TXFORMAT_HEIGHT_MASK);
398 } 397 }
399 switch ((ib_chunk->kdata[idx] & RADEON_TXFORMAT_FORMAT_MASK)) { 398 switch ((idx_value & RADEON_TXFORMAT_FORMAT_MASK)) {
400 case R200_TXFORMAT_I8: 399 case R200_TXFORMAT_I8:
401 case R200_TXFORMAT_RGB332: 400 case R200_TXFORMAT_RGB332:
402 case R200_TXFORMAT_Y8: 401 case R200_TXFORMAT_Y8:
@@ -424,8 +423,8 @@ int r200_packet0_check(struct radeon_cs_parser *p,
424 track->textures[i].cpp = 4; 423 track->textures[i].cpp = 4;
425 break; 424 break;
426 } 425 }
427 track->textures[i].cube_info[4].width = 1 << ((ib_chunk->kdata[idx] >> 16) & 0xf); 426 track->textures[i].cube_info[4].width = 1 << ((idx_value >> 16) & 0xf);
428 track->textures[i].cube_info[4].height = 1 << ((ib_chunk->kdata[idx] >> 20) & 0xf); 427 track->textures[i].cube_info[4].height = 1 << ((idx_value >> 20) & 0xf);
429 break; 428 break;
430 case R200_PP_CUBIC_FACES_0: 429 case R200_PP_CUBIC_FACES_0:
431 case R200_PP_CUBIC_FACES_1: 430 case R200_PP_CUBIC_FACES_1:
@@ -433,7 +432,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
433 case R200_PP_CUBIC_FACES_3: 432 case R200_PP_CUBIC_FACES_3:
434 case R200_PP_CUBIC_FACES_4: 433 case R200_PP_CUBIC_FACES_4:
435 case R200_PP_CUBIC_FACES_5: 434 case R200_PP_CUBIC_FACES_5:
436 tmp = ib_chunk->kdata[idx]; 435 tmp = idx_value;
437 i = (reg - R200_PP_CUBIC_FACES_0) / 32; 436 i = (reg - R200_PP_CUBIC_FACES_0) / 32;
438 for (face = 0; face < 4; face++) { 437 for (face = 0; face < 4; face++) {
439 track->textures[i].cube_info[face].width = 1 << ((tmp >> (face * 8)) & 0xf); 438 track->textures[i].cube_info[face].width = 1 << ((tmp >> (face * 8)) & 0xf);
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index bb151ecdf8fc..1ebea8cc8c93 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -697,17 +697,18 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
697 struct radeon_cs_packet *pkt, 697 struct radeon_cs_packet *pkt,
698 unsigned idx, unsigned reg) 698 unsigned idx, unsigned reg)
699{ 699{
700 struct radeon_cs_chunk *ib_chunk;
701 struct radeon_cs_reloc *reloc; 700 struct radeon_cs_reloc *reloc;
702 struct r100_cs_track *track; 701 struct r100_cs_track *track;
703 volatile uint32_t *ib; 702 volatile uint32_t *ib;
704 uint32_t tmp, tile_flags = 0; 703 uint32_t tmp, tile_flags = 0;
705 unsigned i; 704 unsigned i;
706 int r; 705 int r;
706 u32 idx_value;
707 707
708 ib = p->ib->ptr; 708 ib = p->ib->ptr;
709 ib_chunk = &p->chunks[p->chunk_ib_idx];
710 track = (struct r100_cs_track *)p->track; 709 track = (struct r100_cs_track *)p->track;
710 idx_value = radeon_get_ib_value(p, idx);
711
711 switch(reg) { 712 switch(reg) {
712 case AVIVO_D1MODE_VLINE_START_END: 713 case AVIVO_D1MODE_VLINE_START_END:
713 case RADEON_CRTC_GUI_TRIG_VLINE: 714 case RADEON_CRTC_GUI_TRIG_VLINE:
@@ -738,8 +739,8 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
738 return r; 739 return r;
739 } 740 }
740 track->cb[i].robj = reloc->robj; 741 track->cb[i].robj = reloc->robj;
741 track->cb[i].offset = ib_chunk->kdata[idx]; 742 track->cb[i].offset = idx_value;
742 ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset); 743 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
743 break; 744 break;
744 case R300_ZB_DEPTHOFFSET: 745 case R300_ZB_DEPTHOFFSET:
745 r = r100_cs_packet_next_reloc(p, &reloc); 746 r = r100_cs_packet_next_reloc(p, &reloc);
@@ -750,8 +751,8 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
750 return r; 751 return r;
751 } 752 }
752 track->zb.robj = reloc->robj; 753 track->zb.robj = reloc->robj;
753 track->zb.offset = ib_chunk->kdata[idx]; 754 track->zb.offset = idx_value;
754 ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset); 755 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
755 break; 756 break;
756 case R300_TX_OFFSET_0: 757 case R300_TX_OFFSET_0:
757 case R300_TX_OFFSET_0+4: 758 case R300_TX_OFFSET_0+4:
@@ -777,32 +778,32 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
777 r100_cs_dump_packet(p, pkt); 778 r100_cs_dump_packet(p, pkt);
778 return r; 779 return r;
779 } 780 }
780 ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset); 781 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
781 track->textures[i].robj = reloc->robj; 782 track->textures[i].robj = reloc->robj;
782 break; 783 break;
783 /* Tracked registers */ 784 /* Tracked registers */
784 case 0x2084: 785 case 0x2084:
785 /* VAP_VF_CNTL */ 786 /* VAP_VF_CNTL */
786 track->vap_vf_cntl = ib_chunk->kdata[idx]; 787 track->vap_vf_cntl = idx_value;
787 break; 788 break;
788 case 0x20B4: 789 case 0x20B4:
789 /* VAP_VTX_SIZE */ 790 /* VAP_VTX_SIZE */
790 track->vtx_size = ib_chunk->kdata[idx] & 0x7F; 791 track->vtx_size = idx_value & 0x7F;
791 break; 792 break;
792 case 0x2134: 793 case 0x2134:
793 /* VAP_VF_MAX_VTX_INDX */ 794 /* VAP_VF_MAX_VTX_INDX */
794 track->max_indx = ib_chunk->kdata[idx] & 0x00FFFFFFUL; 795 track->max_indx = idx_value & 0x00FFFFFFUL;
795 break; 796 break;
796 case 0x43E4: 797 case 0x43E4:
797 /* SC_SCISSOR1 */ 798 /* SC_SCISSOR1 */
798 track->maxy = ((ib_chunk->kdata[idx] >> 13) & 0x1FFF) + 1; 799 track->maxy = ((idx_value >> 13) & 0x1FFF) + 1;
799 if (p->rdev->family < CHIP_RV515) { 800 if (p->rdev->family < CHIP_RV515) {
800 track->maxy -= 1440; 801 track->maxy -= 1440;
801 } 802 }
802 break; 803 break;
803 case 0x4E00: 804 case 0x4E00:
804 /* RB3D_CCTL */ 805 /* RB3D_CCTL */
805 track->num_cb = ((ib_chunk->kdata[idx] >> 5) & 0x3) + 1; 806 track->num_cb = ((idx_value >> 5) & 0x3) + 1;
806 break; 807 break;
807 case 0x4E38: 808 case 0x4E38:
808 case 0x4E3C: 809 case 0x4E3C:
@@ -825,13 +826,13 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
825 if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) 826 if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
826 tile_flags |= R300_COLOR_MICROTILE_ENABLE; 827 tile_flags |= R300_COLOR_MICROTILE_ENABLE;
827 828
828 tmp = ib_chunk->kdata[idx] & ~(0x7 << 16); 829 tmp = idx_value & ~(0x7 << 16);
829 tmp |= tile_flags; 830 tmp |= tile_flags;
830 ib[idx] = tmp; 831 ib[idx] = tmp;
831 832
832 i = (reg - 0x4E38) >> 2; 833 i = (reg - 0x4E38) >> 2;
833 track->cb[i].pitch = ib_chunk->kdata[idx] & 0x3FFE; 834 track->cb[i].pitch = idx_value & 0x3FFE;
834 switch (((ib_chunk->kdata[idx] >> 21) & 0xF)) { 835 switch (((idx_value >> 21) & 0xF)) {
835 case 9: 836 case 9:
836 case 11: 837 case 11:
837 case 12: 838 case 12:
@@ -854,13 +855,13 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
854 break; 855 break;
855 default: 856 default:
856 DRM_ERROR("Invalid color buffer format (%d) !\n", 857 DRM_ERROR("Invalid color buffer format (%d) !\n",
857 ((ib_chunk->kdata[idx] >> 21) & 0xF)); 858 ((idx_value >> 21) & 0xF));
858 return -EINVAL; 859 return -EINVAL;
859 } 860 }
860 break; 861 break;
861 case 0x4F00: 862 case 0x4F00:
862 /* ZB_CNTL */ 863 /* ZB_CNTL */
863 if (ib_chunk->kdata[idx] & 2) { 864 if (idx_value & 2) {
864 track->z_enabled = true; 865 track->z_enabled = true;
865 } else { 866 } else {
866 track->z_enabled = false; 867 track->z_enabled = false;
@@ -868,7 +869,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
868 break; 869 break;
869 case 0x4F10: 870 case 0x4F10:
870 /* ZB_FORMAT */ 871 /* ZB_FORMAT */
871 switch ((ib_chunk->kdata[idx] & 0xF)) { 872 switch ((idx_value & 0xF)) {
872 case 0: 873 case 0:
873 case 1: 874 case 1:
874 track->zb.cpp = 2; 875 track->zb.cpp = 2;
@@ -878,7 +879,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
878 break; 879 break;
879 default: 880 default:
880 DRM_ERROR("Invalid z buffer format (%d) !\n", 881 DRM_ERROR("Invalid z buffer format (%d) !\n",
881 (ib_chunk->kdata[idx] & 0xF)); 882 (idx_value & 0xF));
882 return -EINVAL; 883 return -EINVAL;
883 } 884 }
884 break; 885 break;
@@ -897,17 +898,17 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
897 if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) 898 if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
898 tile_flags |= R300_DEPTHMICROTILE_TILED;; 899 tile_flags |= R300_DEPTHMICROTILE_TILED;;
899 900
900 tmp = ib_chunk->kdata[idx] & ~(0x7 << 16); 901 tmp = idx_value & ~(0x7 << 16);
901 tmp |= tile_flags; 902 tmp |= tile_flags;
902 ib[idx] = tmp; 903 ib[idx] = tmp;
903 904
904 track->zb.pitch = ib_chunk->kdata[idx] & 0x3FFC; 905 track->zb.pitch = idx_value & 0x3FFC;
905 break; 906 break;
906 case 0x4104: 907 case 0x4104:
907 for (i = 0; i < 16; i++) { 908 for (i = 0; i < 16; i++) {
908 bool enabled; 909 bool enabled;
909 910
910 enabled = !!(ib_chunk->kdata[idx] & (1 << i)); 911 enabled = !!(idx_value & (1 << i));
911 track->textures[i].enabled = enabled; 912 track->textures[i].enabled = enabled;
912 } 913 }
913 break; 914 break;
@@ -929,9 +930,9 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
929 case 0x44FC: 930 case 0x44FC:
930 /* TX_FORMAT1_[0-15] */ 931 /* TX_FORMAT1_[0-15] */
931 i = (reg - 0x44C0) >> 2; 932 i = (reg - 0x44C0) >> 2;
932 tmp = (ib_chunk->kdata[idx] >> 25) & 0x3; 933 tmp = (idx_value >> 25) & 0x3;
933 track->textures[i].tex_coord_type = tmp; 934 track->textures[i].tex_coord_type = tmp;
934 switch ((ib_chunk->kdata[idx] & 0x1F)) { 935 switch ((idx_value & 0x1F)) {
935 case R300_TX_FORMAT_X8: 936 case R300_TX_FORMAT_X8:
936 case R300_TX_FORMAT_Y4X4: 937 case R300_TX_FORMAT_Y4X4:
937 case R300_TX_FORMAT_Z3Y3X2: 938 case R300_TX_FORMAT_Z3Y3X2:
@@ -971,7 +972,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
971 break; 972 break;
972 default: 973 default:
973 DRM_ERROR("Invalid texture format %u\n", 974 DRM_ERROR("Invalid texture format %u\n",
974 (ib_chunk->kdata[idx] & 0x1F)); 975 (idx_value & 0x1F));
975 return -EINVAL; 976 return -EINVAL;
976 break; 977 break;
977 } 978 }
@@ -994,11 +995,11 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
994 case 0x443C: 995 case 0x443C:
995 /* TX_FILTER0_[0-15] */ 996 /* TX_FILTER0_[0-15] */
996 i = (reg - 0x4400) >> 2; 997 i = (reg - 0x4400) >> 2;
997 tmp = ib_chunk->kdata[idx] & 0x7; 998 tmp = idx_value & 0x7;
998 if (tmp == 2 || tmp == 4 || tmp == 6) { 999 if (tmp == 2 || tmp == 4 || tmp == 6) {
999 track->textures[i].roundup_w = false; 1000 track->textures[i].roundup_w = false;
1000 } 1001 }
1001 tmp = (ib_chunk->kdata[idx] >> 3) & 0x7; 1002 tmp = (idx_value >> 3) & 0x7;
1002 if (tmp == 2 || tmp == 4 || tmp == 6) { 1003 if (tmp == 2 || tmp == 4 || tmp == 6) {
1003 track->textures[i].roundup_h = false; 1004 track->textures[i].roundup_h = false;
1004 } 1005 }
@@ -1021,12 +1022,12 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
1021 case 0x453C: 1022 case 0x453C:
1022 /* TX_FORMAT2_[0-15] */ 1023 /* TX_FORMAT2_[0-15] */
1023 i = (reg - 0x4500) >> 2; 1024 i = (reg - 0x4500) >> 2;
1024 tmp = ib_chunk->kdata[idx] & 0x3FFF; 1025 tmp = idx_value & 0x3FFF;
1025 track->textures[i].pitch = tmp + 1; 1026 track->textures[i].pitch = tmp + 1;
1026 if (p->rdev->family >= CHIP_RV515) { 1027 if (p->rdev->family >= CHIP_RV515) {
1027 tmp = ((ib_chunk->kdata[idx] >> 15) & 1) << 11; 1028 tmp = ((idx_value >> 15) & 1) << 11;
1028 track->textures[i].width_11 = tmp; 1029 track->textures[i].width_11 = tmp;
1029 tmp = ((ib_chunk->kdata[idx] >> 16) & 1) << 11; 1030 tmp = ((idx_value >> 16) & 1) << 11;
1030 track->textures[i].height_11 = tmp; 1031 track->textures[i].height_11 = tmp;
1031 } 1032 }
1032 break; 1033 break;
@@ -1048,15 +1049,15 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
1048 case 0x44BC: 1049 case 0x44BC:
1049 /* TX_FORMAT0_[0-15] */ 1050 /* TX_FORMAT0_[0-15] */
1050 i = (reg - 0x4480) >> 2; 1051 i = (reg - 0x4480) >> 2;
1051 tmp = ib_chunk->kdata[idx] & 0x7FF; 1052 tmp = idx_value & 0x7FF;
1052 track->textures[i].width = tmp + 1; 1053 track->textures[i].width = tmp + 1;
1053 tmp = (ib_chunk->kdata[idx] >> 11) & 0x7FF; 1054 tmp = (idx_value >> 11) & 0x7FF;
1054 track->textures[i].height = tmp + 1; 1055 track->textures[i].height = tmp + 1;
1055 tmp = (ib_chunk->kdata[idx] >> 26) & 0xF; 1056 tmp = (idx_value >> 26) & 0xF;
1056 track->textures[i].num_levels = tmp; 1057 track->textures[i].num_levels = tmp;
1057 tmp = ib_chunk->kdata[idx] & (1 << 31); 1058 tmp = idx_value & (1 << 31);
1058 track->textures[i].use_pitch = !!tmp; 1059 track->textures[i].use_pitch = !!tmp;
1059 tmp = (ib_chunk->kdata[idx] >> 22) & 0xF; 1060 tmp = (idx_value >> 22) & 0xF;
1060 track->textures[i].txdepth = tmp; 1061 track->textures[i].txdepth = tmp;
1061 break; 1062 break;
1062 case R300_ZB_ZPASS_ADDR: 1063 case R300_ZB_ZPASS_ADDR:
@@ -1067,7 +1068,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
1067 r100_cs_dump_packet(p, pkt); 1068 r100_cs_dump_packet(p, pkt);
1068 return r; 1069 return r;
1069 } 1070 }
1070 ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset); 1071 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
1071 break; 1072 break;
1072 case 0x4be8: 1073 case 0x4be8:
1073 /* valid register only on RV530 */ 1074 /* valid register only on RV530 */
@@ -1085,60 +1086,20 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
1085static int r300_packet3_check(struct radeon_cs_parser *p, 1086static int r300_packet3_check(struct radeon_cs_parser *p,
1086 struct radeon_cs_packet *pkt) 1087 struct radeon_cs_packet *pkt)
1087{ 1088{
1088 struct radeon_cs_chunk *ib_chunk;
1089
1090 struct radeon_cs_reloc *reloc; 1089 struct radeon_cs_reloc *reloc;
1091 struct r100_cs_track *track; 1090 struct r100_cs_track *track;
1092 volatile uint32_t *ib; 1091 volatile uint32_t *ib;
1093 unsigned idx; 1092 unsigned idx;
1094 unsigned i, c;
1095 int r; 1093 int r;
1096 1094
1097 ib = p->ib->ptr; 1095 ib = p->ib->ptr;
1098 ib_chunk = &p->chunks[p->chunk_ib_idx];
1099 idx = pkt->idx + 1; 1096 idx = pkt->idx + 1;
1100 track = (struct r100_cs_track *)p->track; 1097 track = (struct r100_cs_track *)p->track;
1101 switch(pkt->opcode) { 1098 switch(pkt->opcode) {
1102 case PACKET3_3D_LOAD_VBPNTR: 1099 case PACKET3_3D_LOAD_VBPNTR:
1103 c = ib_chunk->kdata[idx++] & 0x1F; 1100 r = r100_packet3_load_vbpntr(p, pkt, idx);
1104 track->num_arrays = c; 1101 if (r)
1105 for (i = 0; i < (c - 1); i+=2, idx+=3) { 1102 return r;
1106 r = r100_cs_packet_next_reloc(p, &reloc);
1107 if (r) {
1108 DRM_ERROR("No reloc for packet3 %d\n",
1109 pkt->opcode);
1110 r100_cs_dump_packet(p, pkt);
1111 return r;
1112 }
1113 ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset);
1114 track->arrays[i + 0].robj = reloc->robj;
1115 track->arrays[i + 0].esize = ib_chunk->kdata[idx] >> 8;
1116 track->arrays[i + 0].esize &= 0x7F;
1117 r = r100_cs_packet_next_reloc(p, &reloc);
1118 if (r) {
1119 DRM_ERROR("No reloc for packet3 %d\n",
1120 pkt->opcode);
1121 r100_cs_dump_packet(p, pkt);
1122 return r;
1123 }
1124 ib[idx+2] = ib_chunk->kdata[idx+2] + ((u32)reloc->lobj.gpu_offset);
1125 track->arrays[i + 1].robj = reloc->robj;
1126 track->arrays[i + 1].esize = ib_chunk->kdata[idx] >> 24;
1127 track->arrays[i + 1].esize &= 0x7F;
1128 }
1129 if (c & 1) {
1130 r = r100_cs_packet_next_reloc(p, &reloc);
1131 if (r) {
1132 DRM_ERROR("No reloc for packet3 %d\n",
1133 pkt->opcode);
1134 r100_cs_dump_packet(p, pkt);
1135 return r;
1136 }
1137 ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset);
1138 track->arrays[i + 0].robj = reloc->robj;
1139 track->arrays[i + 0].esize = ib_chunk->kdata[idx] >> 8;
1140 track->arrays[i + 0].esize &= 0x7F;
1141 }
1142 break; 1103 break;
1143 case PACKET3_INDX_BUFFER: 1104 case PACKET3_INDX_BUFFER:
1144 r = r100_cs_packet_next_reloc(p, &reloc); 1105 r = r100_cs_packet_next_reloc(p, &reloc);
@@ -1147,7 +1108,7 @@ static int r300_packet3_check(struct radeon_cs_parser *p,
1147 r100_cs_dump_packet(p, pkt); 1108 r100_cs_dump_packet(p, pkt);
1148 return r; 1109 return r;
1149 } 1110 }
1150 ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset); 1111 ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->lobj.gpu_offset);
1151 r = r100_cs_track_check_pkt3_indx_buffer(p, pkt, reloc->robj); 1112 r = r100_cs_track_check_pkt3_indx_buffer(p, pkt, reloc->robj);
1152 if (r) { 1113 if (r) {
1153 return r; 1114 return r;
@@ -1158,11 +1119,11 @@ static int r300_packet3_check(struct radeon_cs_parser *p,
1158 /* Number of dwords is vtx_size * (num_vertices - 1) 1119 /* Number of dwords is vtx_size * (num_vertices - 1)
1159 * PRIM_WALK must be equal to 3 vertex data in embedded 1120 * PRIM_WALK must be equal to 3 vertex data in embedded
1160 * in cmd stream */ 1121 * in cmd stream */
1161 if (((ib_chunk->kdata[idx+1] >> 4) & 0x3) != 3) { 1122 if (((radeon_get_ib_value(p, idx + 1) >> 4) & 0x3) != 3) {
1162 DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n"); 1123 DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
1163 return -EINVAL; 1124 return -EINVAL;
1164 } 1125 }
1165 track->vap_vf_cntl = ib_chunk->kdata[idx+1]; 1126 track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
1166 track->immd_dwords = pkt->count - 1; 1127 track->immd_dwords = pkt->count - 1;
1167 r = r100_cs_track_check(p->rdev, track); 1128 r = r100_cs_track_check(p->rdev, track);
1168 if (r) { 1129 if (r) {
@@ -1173,11 +1134,11 @@ static int r300_packet3_check(struct radeon_cs_parser *p,
1173 /* Number of dwords is vtx_size * (num_vertices - 1) 1134 /* Number of dwords is vtx_size * (num_vertices - 1)
1174 * PRIM_WALK must be equal to 3 vertex data in embedded 1135 * PRIM_WALK must be equal to 3 vertex data in embedded
1175 * in cmd stream */ 1136 * in cmd stream */
1176 if (((ib_chunk->kdata[idx] >> 4) & 0x3) != 3) { 1137 if (((radeon_get_ib_value(p, idx) >> 4) & 0x3) != 3) {
1177 DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n"); 1138 DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
1178 return -EINVAL; 1139 return -EINVAL;
1179 } 1140 }
1180 track->vap_vf_cntl = ib_chunk->kdata[idx]; 1141 track->vap_vf_cntl = radeon_get_ib_value(p, idx);
1181 track->immd_dwords = pkt->count; 1142 track->immd_dwords = pkt->count;
1182 r = r100_cs_track_check(p->rdev, track); 1143 r = r100_cs_track_check(p->rdev, track);
1183 if (r) { 1144 if (r) {
@@ -1185,28 +1146,28 @@ static int r300_packet3_check(struct radeon_cs_parser *p,
1185 } 1146 }
1186 break; 1147 break;
1187 case PACKET3_3D_DRAW_VBUF: 1148 case PACKET3_3D_DRAW_VBUF:
1188 track->vap_vf_cntl = ib_chunk->kdata[idx + 1]; 1149 track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
1189 r = r100_cs_track_check(p->rdev, track); 1150 r = r100_cs_track_check(p->rdev, track);
1190 if (r) { 1151 if (r) {
1191 return r; 1152 return r;
1192 } 1153 }
1193 break; 1154 break;
1194 case PACKET3_3D_DRAW_VBUF_2: 1155 case PACKET3_3D_DRAW_VBUF_2:
1195 track->vap_vf_cntl = ib_chunk->kdata[idx]; 1156 track->vap_vf_cntl = radeon_get_ib_value(p, idx);
1196 r = r100_cs_track_check(p->rdev, track); 1157 r = r100_cs_track_check(p->rdev, track);
1197 if (r) { 1158 if (r) {
1198 return r; 1159 return r;
1199 } 1160 }
1200 break; 1161 break;
1201 case PACKET3_3D_DRAW_INDX: 1162 case PACKET3_3D_DRAW_INDX:
1202 track->vap_vf_cntl = ib_chunk->kdata[idx + 1]; 1163 track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
1203 r = r100_cs_track_check(p->rdev, track); 1164 r = r100_cs_track_check(p->rdev, track);
1204 if (r) { 1165 if (r) {
1205 return r; 1166 return r;
1206 } 1167 }
1207 break; 1168 break;
1208 case PACKET3_3D_DRAW_INDX_2: 1169 case PACKET3_3D_DRAW_INDX_2:
1209 track->vap_vf_cntl = ib_chunk->kdata[idx]; 1170 track->vap_vf_cntl = radeon_get_ib_value(p, idx);
1210 r = r100_cs_track_check(p->rdev, track); 1171 r = r100_cs_track_check(p->rdev, track);
1211 if (r) { 1172 if (r) {
1212 return r; 1173 return r;
diff --git a/drivers/gpu/drm/radeon/r500_reg.h b/drivers/gpu/drm/radeon/r500_reg.h
index e1d5e0331e19..868add6e166d 100644
--- a/drivers/gpu/drm/radeon/r500_reg.h
+++ b/drivers/gpu/drm/radeon/r500_reg.h
@@ -445,6 +445,8 @@
445#define AVIVO_D1MODE_VBLANK_STATUS 0x6534 445#define AVIVO_D1MODE_VBLANK_STATUS 0x6534
446# define AVIVO_VBLANK_ACK (1 << 4) 446# define AVIVO_VBLANK_ACK (1 << 4)
447#define AVIVO_D1MODE_VLINE_START_END 0x6538 447#define AVIVO_D1MODE_VLINE_START_END 0x6538
448#define AVIVO_D1MODE_VLINE_STATUS 0x653c
449# define AVIVO_D1MODE_VLINE_STAT (1 << 12)
448#define AVIVO_DxMODE_INT_MASK 0x6540 450#define AVIVO_DxMODE_INT_MASK 0x6540
449# define AVIVO_D1MODE_INT_MASK (1 << 0) 451# define AVIVO_D1MODE_INT_MASK (1 << 0)
450# define AVIVO_D2MODE_INT_MASK (1 << 8) 452# define AVIVO_D2MODE_INT_MASK (1 << 8)
@@ -502,6 +504,7 @@
502 504
503#define AVIVO_D2MODE_VBLANK_STATUS 0x6d34 505#define AVIVO_D2MODE_VBLANK_STATUS 0x6d34
504#define AVIVO_D2MODE_VLINE_START_END 0x6d38 506#define AVIVO_D2MODE_VLINE_START_END 0x6d38
507#define AVIVO_D2MODE_VLINE_STATUS 0x6d3c
505#define AVIVO_D2MODE_VIEWPORT_START 0x6d80 508#define AVIVO_D2MODE_VIEWPORT_START 0x6d80
506#define AVIVO_D2MODE_VIEWPORT_SIZE 0x6d84 509#define AVIVO_D2MODE_VIEWPORT_SIZE 0x6d84
507#define AVIVO_D2MODE_EXT_OVERSCAN_LEFT_RIGHT 0x6d88 510#define AVIVO_D2MODE_EXT_OVERSCAN_LEFT_RIGHT 0x6d88
diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c
index d4b0b9d2e39b..0bf13fccdaf2 100644
--- a/drivers/gpu/drm/radeon/r520.c
+++ b/drivers/gpu/drm/radeon/r520.c
@@ -26,108 +26,13 @@
26 * Jerome Glisse 26 * Jerome Glisse
27 */ 27 */
28#include "drmP.h" 28#include "drmP.h"
29#include "radeon_reg.h"
30#include "radeon.h" 29#include "radeon.h"
30#include "atom.h"
31#include "r520d.h"
31 32
32/* r520,rv530,rv560,rv570,r580 depends on : */ 33/* This files gather functions specifics to: r520,rv530,rv560,rv570,r580 */
33void r100_hdp_reset(struct radeon_device *rdev);
34void r420_pipes_init(struct radeon_device *rdev);
35void rs600_mc_disable_clients(struct radeon_device *rdev);
36void rs600_disable_vga(struct radeon_device *rdev);
37int rv515_debugfs_pipes_info_init(struct radeon_device *rdev);
38int rv515_debugfs_ga_info_init(struct radeon_device *rdev);
39 34
40/* This files gather functions specifics to: 35static int r520_mc_wait_for_idle(struct radeon_device *rdev)
41 * r520,rv530,rv560,rv570,r580
42 *
43 * Some of these functions might be used by newer ASICs.
44 */
45void r520_gpu_init(struct radeon_device *rdev);
46int r520_mc_wait_for_idle(struct radeon_device *rdev);
47
48
49/*
50 * MC
51 */
52int r520_mc_init(struct radeon_device *rdev)
53{
54 uint32_t tmp;
55 int r;
56
57 if (r100_debugfs_rbbm_init(rdev)) {
58 DRM_ERROR("Failed to register debugfs file for RBBM !\n");
59 }
60 if (rv515_debugfs_pipes_info_init(rdev)) {
61 DRM_ERROR("Failed to register debugfs file for pipes !\n");
62 }
63 if (rv515_debugfs_ga_info_init(rdev)) {
64 DRM_ERROR("Failed to register debugfs file for pipes !\n");
65 }
66
67 r520_gpu_init(rdev);
68 rv370_pcie_gart_disable(rdev);
69
70 /* Setup GPU memory space */
71 rdev->mc.vram_location = 0xFFFFFFFFUL;
72 rdev->mc.gtt_location = 0xFFFFFFFFUL;
73 if (rdev->flags & RADEON_IS_AGP) {
74 r = radeon_agp_init(rdev);
75 if (r) {
76 printk(KERN_WARNING "[drm] Disabling AGP\n");
77 rdev->flags &= ~RADEON_IS_AGP;
78 rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
79 } else {
80 rdev->mc.gtt_location = rdev->mc.agp_base;
81 }
82 }
83 r = radeon_mc_setup(rdev);
84 if (r) {
85 return r;
86 }
87
88 /* Program GPU memory space */
89 rs600_mc_disable_clients(rdev);
90 if (r520_mc_wait_for_idle(rdev)) {
91 printk(KERN_WARNING "Failed to wait MC idle while "
92 "programming pipes. Bad things might happen.\n");
93 }
94 /* Write VRAM size in case we are limiting it */
95 WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size);
96 tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1;
97 tmp = REG_SET(R520_MC_FB_TOP, tmp >> 16);
98 tmp |= REG_SET(R520_MC_FB_START, rdev->mc.vram_location >> 16);
99 WREG32_MC(R520_MC_FB_LOCATION, tmp);
100 WREG32(RS690_HDP_FB_LOCATION, rdev->mc.vram_location >> 16);
101 WREG32(0x310, rdev->mc.vram_location);
102 if (rdev->flags & RADEON_IS_AGP) {
103 tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
104 tmp = REG_SET(R520_MC_AGP_TOP, tmp >> 16);
105 tmp |= REG_SET(R520_MC_AGP_START, rdev->mc.gtt_location >> 16);
106 WREG32_MC(R520_MC_AGP_LOCATION, tmp);
107 WREG32_MC(R520_MC_AGP_BASE, rdev->mc.agp_base);
108 WREG32_MC(R520_MC_AGP_BASE_2, 0);
109 } else {
110 WREG32_MC(R520_MC_AGP_LOCATION, 0x0FFFFFFF);
111 WREG32_MC(R520_MC_AGP_BASE, 0);
112 WREG32_MC(R520_MC_AGP_BASE_2, 0);
113 }
114 return 0;
115}
116
117void r520_mc_fini(struct radeon_device *rdev)
118{
119}
120
121
122/*
123 * Global GPU functions
124 */
125void r520_errata(struct radeon_device *rdev)
126{
127 rdev->pll_errata = 0;
128}
129
130int r520_mc_wait_for_idle(struct radeon_device *rdev)
131{ 36{
132 unsigned i; 37 unsigned i;
133 uint32_t tmp; 38 uint32_t tmp;
@@ -143,12 +48,12 @@ int r520_mc_wait_for_idle(struct radeon_device *rdev)
143 return -1; 48 return -1;
144} 49}
145 50
146void r520_gpu_init(struct radeon_device *rdev) 51static void r520_gpu_init(struct radeon_device *rdev)
147{ 52{
148 unsigned pipe_select_current, gb_pipe_select, tmp; 53 unsigned pipe_select_current, gb_pipe_select, tmp;
149 54
150 r100_hdp_reset(rdev); 55 r100_hdp_reset(rdev);
151 rs600_disable_vga(rdev); 56 rv515_vga_render_disable(rdev);
152 /* 57 /*
153 * DST_PIPE_CONFIG 0x170C 58 * DST_PIPE_CONFIG 0x170C
154 * GB_TILE_CONFIG 0x4018 59 * GB_TILE_CONFIG 0x4018
@@ -186,10 +91,6 @@ void r520_gpu_init(struct radeon_device *rdev)
186 } 91 }
187} 92}
188 93
189
190/*
191 * VRAM info
192 */
193static void r520_vram_get_type(struct radeon_device *rdev) 94static void r520_vram_get_type(struct radeon_device *rdev)
194{ 95{
195 uint32_t tmp; 96 uint32_t tmp;
@@ -233,7 +134,168 @@ void r520_vram_info(struct radeon_device *rdev)
233 rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a); 134 rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
234} 135}
235 136
236void r520_bandwidth_update(struct radeon_device *rdev) 137void r520_mc_program(struct radeon_device *rdev)
138{
139 struct rv515_mc_save save;
140
141 /* Stops all mc clients */
142 rv515_mc_stop(rdev, &save);
143
144 /* Wait for mc idle */
145 if (r520_mc_wait_for_idle(rdev))
146 dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n");
147 /* Write VRAM size in case we are limiting it */
148 WREG32(R_0000F8_CONFIG_MEMSIZE, rdev->mc.real_vram_size);
149 /* Program MC, should be a 32bits limited address space */
150 WREG32_MC(R_000004_MC_FB_LOCATION,
151 S_000004_MC_FB_START(rdev->mc.vram_start >> 16) |
152 S_000004_MC_FB_TOP(rdev->mc.vram_end >> 16));
153 WREG32(R_000134_HDP_FB_LOCATION,
154 S_000134_HDP_FB_START(rdev->mc.vram_start >> 16));
155 if (rdev->flags & RADEON_IS_AGP) {
156 WREG32_MC(R_000005_MC_AGP_LOCATION,
157 S_000005_MC_AGP_START(rdev->mc.gtt_start >> 16) |
158 S_000005_MC_AGP_TOP(rdev->mc.gtt_end >> 16));
159 WREG32_MC(R_000006_AGP_BASE, lower_32_bits(rdev->mc.agp_base));
160 WREG32_MC(R_000007_AGP_BASE_2,
161 S_000007_AGP_BASE_ADDR_2(upper_32_bits(rdev->mc.agp_base)));
162 } else {
163 WREG32_MC(R_000005_MC_AGP_LOCATION, 0xFFFFFFFF);
164 WREG32_MC(R_000006_AGP_BASE, 0);
165 WREG32_MC(R_000007_AGP_BASE_2, 0);
166 }
167
168 rv515_mc_resume(rdev, &save);
169}
170
171static int r520_startup(struct radeon_device *rdev)
172{
173 int r;
174
175 r520_mc_program(rdev);
176 /* Resume clock */
177 rv515_clock_startup(rdev);
178 /* Initialize GPU configuration (# pipes, ...) */
179 r520_gpu_init(rdev);
180 /* Initialize GART (initialize after TTM so we can allocate
181 * memory through TTM but finalize after TTM) */
182 if (rdev->flags & RADEON_IS_PCIE) {
183 r = rv370_pcie_gart_enable(rdev);
184 if (r)
185 return r;
186 }
187 /* Enable IRQ */
188 rdev->irq.sw_int = true;
189 r100_irq_set(rdev);
190 /* 1M ring buffer */
191 r = r100_cp_init(rdev, 1024 * 1024);
192 if (r) {
193 dev_err(rdev->dev, "failled initializing CP (%d).\n", r);
194 return r;
195 }
196 r = r100_wb_init(rdev);
197 if (r)
198 dev_err(rdev->dev, "failled initializing WB (%d).\n", r);
199 r = r100_ib_init(rdev);
200 if (r) {
201 dev_err(rdev->dev, "failled initializing IB (%d).\n", r);
202 return r;
203 }
204 return 0;
205}
206
207int r520_resume(struct radeon_device *rdev)
237{ 208{
238 rv515_bandwidth_avivo_update(rdev); 209 /* Make sur GART are not working */
210 if (rdev->flags & RADEON_IS_PCIE)
211 rv370_pcie_gart_disable(rdev);
212 /* Resume clock before doing reset */
213 rv515_clock_startup(rdev);
214 /* Reset gpu before posting otherwise ATOM will enter infinite loop */
215 if (radeon_gpu_reset(rdev)) {
216 dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
217 RREG32(R_000E40_RBBM_STATUS),
218 RREG32(R_0007C0_CP_STAT));
219 }
220 /* post */
221 atom_asic_init(rdev->mode_info.atom_context);
222 /* Resume clock after posting */
223 rv515_clock_startup(rdev);
224 return r520_startup(rdev);
225}
226
227int r520_init(struct radeon_device *rdev)
228{
229 int r;
230
231 rdev->new_init_path = true;
232 /* Initialize scratch registers */
233 radeon_scratch_init(rdev);
234 /* Initialize surface registers */
235 radeon_surface_init(rdev);
236 /* TODO: disable VGA need to use VGA request */
237 /* BIOS*/
238 if (!radeon_get_bios(rdev)) {
239 if (ASIC_IS_AVIVO(rdev))
240 return -EINVAL;
241 }
242 if (rdev->is_atom_bios) {
243 r = radeon_atombios_init(rdev);
244 if (r)
245 return r;
246 } else {
247 dev_err(rdev->dev, "Expecting atombios for RV515 GPU\n");
248 return -EINVAL;
249 }
250 /* Reset gpu before posting otherwise ATOM will enter infinite loop */
251 if (radeon_gpu_reset(rdev)) {
252 dev_warn(rdev->dev,
253 "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
254 RREG32(R_000E40_RBBM_STATUS),
255 RREG32(R_0007C0_CP_STAT));
256 }
257 /* check if cards are posted or not */
258 if (!radeon_card_posted(rdev) && rdev->bios) {
259 DRM_INFO("GPU not posted. posting now...\n");
260 atom_asic_init(rdev->mode_info.atom_context);
261 }
262 /* Initialize clocks */
263 radeon_get_clock_info(rdev->ddev);
264 /* Get vram informations */
265 r520_vram_info(rdev);
266 /* Initialize memory controller (also test AGP) */
267 r = r420_mc_init(rdev);
268 if (r)
269 return r;
270 rv515_debugfs(rdev);
271 /* Fence driver */
272 r = radeon_fence_driver_init(rdev);
273 if (r)
274 return r;
275 r = radeon_irq_kms_init(rdev);
276 if (r)
277 return r;
278 /* Memory manager */
279 r = radeon_object_init(rdev);
280 if (r)
281 return r;
282 r = rv370_pcie_gart_init(rdev);
283 if (r)
284 return r;
285 rv515_set_safe_registers(rdev);
286 rdev->accel_working = true;
287 r = r520_startup(rdev);
288 if (r) {
289 /* Somethings want wront with the accel init stop accel */
290 dev_err(rdev->dev, "Disabling GPU acceleration\n");
291 rv515_suspend(rdev);
292 r100_cp_fini(rdev);
293 r100_wb_fini(rdev);
294 r100_ib_fini(rdev);
295 rv370_pcie_gart_fini(rdev);
296 radeon_agp_fini(rdev);
297 radeon_irq_kms_fini(rdev);
298 rdev->accel_working = false;
299 }
300 return 0;
239} 301}
diff --git a/drivers/gpu/drm/radeon/r520d.h b/drivers/gpu/drm/radeon/r520d.h
new file mode 100644
index 000000000000..61af61f644bc
--- /dev/null
+++ b/drivers/gpu/drm/radeon/r520d.h
@@ -0,0 +1,187 @@
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
28#ifndef __R520D_H__
29#define __R520D_H__
30
31/* Registers */
32#define R_0000F8_CONFIG_MEMSIZE 0x0000F8
33#define S_0000F8_CONFIG_MEMSIZE(x) (((x) & 0xFFFFFFFF) << 0)
34#define G_0000F8_CONFIG_MEMSIZE(x) (((x) >> 0) & 0xFFFFFFFF)
35#define C_0000F8_CONFIG_MEMSIZE 0x00000000
36#define R_000134_HDP_FB_LOCATION 0x000134
37#define S_000134_HDP_FB_START(x) (((x) & 0xFFFF) << 0)
38#define G_000134_HDP_FB_START(x) (((x) >> 0) & 0xFFFF)
39#define C_000134_HDP_FB_START 0xFFFF0000
40#define R_0007C0_CP_STAT 0x0007C0
41#define S_0007C0_MRU_BUSY(x) (((x) & 0x1) << 0)
42#define G_0007C0_MRU_BUSY(x) (((x) >> 0) & 0x1)
43#define C_0007C0_MRU_BUSY 0xFFFFFFFE
44#define S_0007C0_MWU_BUSY(x) (((x) & 0x1) << 1)
45#define G_0007C0_MWU_BUSY(x) (((x) >> 1) & 0x1)
46#define C_0007C0_MWU_BUSY 0xFFFFFFFD
47#define S_0007C0_RSIU_BUSY(x) (((x) & 0x1) << 2)
48#define G_0007C0_RSIU_BUSY(x) (((x) >> 2) & 0x1)
49#define C_0007C0_RSIU_BUSY 0xFFFFFFFB
50#define S_0007C0_RCIU_BUSY(x) (((x) & 0x1) << 3)
51#define G_0007C0_RCIU_BUSY(x) (((x) >> 3) & 0x1)
52#define C_0007C0_RCIU_BUSY 0xFFFFFFF7
53#define S_0007C0_CSF_PRIMARY_BUSY(x) (((x) & 0x1) << 9)
54#define G_0007C0_CSF_PRIMARY_BUSY(x) (((x) >> 9) & 0x1)
55#define C_0007C0_CSF_PRIMARY_BUSY 0xFFFFFDFF
56#define S_0007C0_CSF_INDIRECT_BUSY(x) (((x) & 0x1) << 10)
57#define G_0007C0_CSF_INDIRECT_BUSY(x) (((x) >> 10) & 0x1)
58#define C_0007C0_CSF_INDIRECT_BUSY 0xFFFFFBFF
59#define S_0007C0_CSQ_PRIMARY_BUSY(x) (((x) & 0x1) << 11)
60#define G_0007C0_CSQ_PRIMARY_BUSY(x) (((x) >> 11) & 0x1)
61#define C_0007C0_CSQ_PRIMARY_BUSY 0xFFFFF7FF
62#define S_0007C0_CSQ_INDIRECT_BUSY(x) (((x) & 0x1) << 12)
63#define G_0007C0_CSQ_INDIRECT_BUSY(x) (((x) >> 12) & 0x1)
64#define C_0007C0_CSQ_INDIRECT_BUSY 0xFFFFEFFF
65#define S_0007C0_CSI_BUSY(x) (((x) & 0x1) << 13)
66#define G_0007C0_CSI_BUSY(x) (((x) >> 13) & 0x1)
67#define C_0007C0_CSI_BUSY 0xFFFFDFFF
68#define S_0007C0_CSF_INDIRECT2_BUSY(x) (((x) & 0x1) << 14)
69#define G_0007C0_CSF_INDIRECT2_BUSY(x) (((x) >> 14) & 0x1)
70#define C_0007C0_CSF_INDIRECT2_BUSY 0xFFFFBFFF
71#define S_0007C0_CSQ_INDIRECT2_BUSY(x) (((x) & 0x1) << 15)
72#define G_0007C0_CSQ_INDIRECT2_BUSY(x) (((x) >> 15) & 0x1)
73#define C_0007C0_CSQ_INDIRECT2_BUSY 0xFFFF7FFF
74#define S_0007C0_GUIDMA_BUSY(x) (((x) & 0x1) << 28)
75#define G_0007C0_GUIDMA_BUSY(x) (((x) >> 28) & 0x1)
76#define C_0007C0_GUIDMA_BUSY 0xEFFFFFFF
77#define S_0007C0_VIDDMA_BUSY(x) (((x) & 0x1) << 29)
78#define G_0007C0_VIDDMA_BUSY(x) (((x) >> 29) & 0x1)
79#define C_0007C0_VIDDMA_BUSY 0xDFFFFFFF
80#define S_0007C0_CMDSTRM_BUSY(x) (((x) & 0x1) << 30)
81#define G_0007C0_CMDSTRM_BUSY(x) (((x) >> 30) & 0x1)
82#define C_0007C0_CMDSTRM_BUSY 0xBFFFFFFF
83#define S_0007C0_CP_BUSY(x) (((x) & 0x1) << 31)
84#define G_0007C0_CP_BUSY(x) (((x) >> 31) & 0x1)
85#define C_0007C0_CP_BUSY 0x7FFFFFFF
86#define R_000E40_RBBM_STATUS 0x000E40
87#define S_000E40_CMDFIFO_AVAIL(x) (((x) & 0x7F) << 0)
88#define G_000E40_CMDFIFO_AVAIL(x) (((x) >> 0) & 0x7F)
89#define C_000E40_CMDFIFO_AVAIL 0xFFFFFF80
90#define S_000E40_HIRQ_ON_RBB(x) (((x) & 0x1) << 8)
91#define G_000E40_HIRQ_ON_RBB(x) (((x) >> 8) & 0x1)
92#define C_000E40_HIRQ_ON_RBB 0xFFFFFEFF
93#define S_000E40_CPRQ_ON_RBB(x) (((x) & 0x1) << 9)
94#define G_000E40_CPRQ_ON_RBB(x) (((x) >> 9) & 0x1)
95#define C_000E40_CPRQ_ON_RBB 0xFFFFFDFF
96#define S_000E40_CFRQ_ON_RBB(x) (((x) & 0x1) << 10)
97#define G_000E40_CFRQ_ON_RBB(x) (((x) >> 10) & 0x1)
98#define C_000E40_CFRQ_ON_RBB 0xFFFFFBFF
99#define S_000E40_HIRQ_IN_RTBUF(x) (((x) & 0x1) << 11)
100#define G_000E40_HIRQ_IN_RTBUF(x) (((x) >> 11) & 0x1)
101#define C_000E40_HIRQ_IN_RTBUF 0xFFFFF7FF
102#define S_000E40_CPRQ_IN_RTBUF(x) (((x) & 0x1) << 12)
103#define G_000E40_CPRQ_IN_RTBUF(x) (((x) >> 12) & 0x1)
104#define C_000E40_CPRQ_IN_RTBUF 0xFFFFEFFF
105#define S_000E40_CFRQ_IN_RTBUF(x) (((x) & 0x1) << 13)
106#define G_000E40_CFRQ_IN_RTBUF(x) (((x) >> 13) & 0x1)
107#define C_000E40_CFRQ_IN_RTBUF 0xFFFFDFFF
108#define S_000E40_CF_PIPE_BUSY(x) (((x) & 0x1) << 14)
109#define G_000E40_CF_PIPE_BUSY(x) (((x) >> 14) & 0x1)
110#define C_000E40_CF_PIPE_BUSY 0xFFFFBFFF
111#define S_000E40_ENG_EV_BUSY(x) (((x) & 0x1) << 15)
112#define G_000E40_ENG_EV_BUSY(x) (((x) >> 15) & 0x1)
113#define C_000E40_ENG_EV_BUSY 0xFFFF7FFF
114#define S_000E40_CP_CMDSTRM_BUSY(x) (((x) & 0x1) << 16)
115#define G_000E40_CP_CMDSTRM_BUSY(x) (((x) >> 16) & 0x1)
116#define C_000E40_CP_CMDSTRM_BUSY 0xFFFEFFFF
117#define S_000E40_E2_BUSY(x) (((x) & 0x1) << 17)
118#define G_000E40_E2_BUSY(x) (((x) >> 17) & 0x1)
119#define C_000E40_E2_BUSY 0xFFFDFFFF
120#define S_000E40_RB2D_BUSY(x) (((x) & 0x1) << 18)
121#define G_000E40_RB2D_BUSY(x) (((x) >> 18) & 0x1)
122#define C_000E40_RB2D_BUSY 0xFFFBFFFF
123#define S_000E40_RB3D_BUSY(x) (((x) & 0x1) << 19)
124#define G_000E40_RB3D_BUSY(x) (((x) >> 19) & 0x1)
125#define C_000E40_RB3D_BUSY 0xFFF7FFFF
126#define S_000E40_VAP_BUSY(x) (((x) & 0x1) << 20)
127#define G_000E40_VAP_BUSY(x) (((x) >> 20) & 0x1)
128#define C_000E40_VAP_BUSY 0xFFEFFFFF
129#define S_000E40_RE_BUSY(x) (((x) & 0x1) << 21)
130#define G_000E40_RE_BUSY(x) (((x) >> 21) & 0x1)
131#define C_000E40_RE_BUSY 0xFFDFFFFF
132#define S_000E40_TAM_BUSY(x) (((x) & 0x1) << 22)
133#define G_000E40_TAM_BUSY(x) (((x) >> 22) & 0x1)
134#define C_000E40_TAM_BUSY 0xFFBFFFFF
135#define S_000E40_TDM_BUSY(x) (((x) & 0x1) << 23)
136#define G_000E40_TDM_BUSY(x) (((x) >> 23) & 0x1)
137#define C_000E40_TDM_BUSY 0xFF7FFFFF
138#define S_000E40_PB_BUSY(x) (((x) & 0x1) << 24)
139#define G_000E40_PB_BUSY(x) (((x) >> 24) & 0x1)
140#define C_000E40_PB_BUSY 0xFEFFFFFF
141#define S_000E40_TIM_BUSY(x) (((x) & 0x1) << 25)
142#define G_000E40_TIM_BUSY(x) (((x) >> 25) & 0x1)
143#define C_000E40_TIM_BUSY 0xFDFFFFFF
144#define S_000E40_GA_BUSY(x) (((x) & 0x1) << 26)
145#define G_000E40_GA_BUSY(x) (((x) >> 26) & 0x1)
146#define C_000E40_GA_BUSY 0xFBFFFFFF
147#define S_000E40_CBA2D_BUSY(x) (((x) & 0x1) << 27)
148#define G_000E40_CBA2D_BUSY(x) (((x) >> 27) & 0x1)
149#define C_000E40_CBA2D_BUSY 0xF7FFFFFF
150#define S_000E40_RBBM_HIBUSY(x) (((x) & 0x1) << 28)
151#define G_000E40_RBBM_HIBUSY(x) (((x) >> 28) & 0x1)
152#define C_000E40_RBBM_HIBUSY 0xEFFFFFFF
153#define S_000E40_SKID_CFBUSY(x) (((x) & 0x1) << 29)
154#define G_000E40_SKID_CFBUSY(x) (((x) >> 29) & 0x1)
155#define C_000E40_SKID_CFBUSY 0xDFFFFFFF
156#define S_000E40_VAP_VF_BUSY(x) (((x) & 0x1) << 30)
157#define G_000E40_VAP_VF_BUSY(x) (((x) >> 30) & 0x1)
158#define C_000E40_VAP_VF_BUSY 0xBFFFFFFF
159#define S_000E40_GUI_ACTIVE(x) (((x) & 0x1) << 31)
160#define G_000E40_GUI_ACTIVE(x) (((x) >> 31) & 0x1)
161#define C_000E40_GUI_ACTIVE 0x7FFFFFFF
162
163
164#define R_000004_MC_FB_LOCATION 0x000004
165#define S_000004_MC_FB_START(x) (((x) & 0xFFFF) << 0)
166#define G_000004_MC_FB_START(x) (((x) >> 0) & 0xFFFF)
167#define C_000004_MC_FB_START 0xFFFF0000
168#define S_000004_MC_FB_TOP(x) (((x) & 0xFFFF) << 16)
169#define G_000004_MC_FB_TOP(x) (((x) >> 16) & 0xFFFF)
170#define C_000004_MC_FB_TOP 0x0000FFFF
171#define R_000005_MC_AGP_LOCATION 0x000005
172#define S_000005_MC_AGP_START(x) (((x) & 0xFFFF) << 0)
173#define G_000005_MC_AGP_START(x) (((x) >> 0) & 0xFFFF)
174#define C_000005_MC_AGP_START 0xFFFF0000
175#define S_000005_MC_AGP_TOP(x) (((x) & 0xFFFF) << 16)
176#define G_000005_MC_AGP_TOP(x) (((x) >> 16) & 0xFFFF)
177#define C_000005_MC_AGP_TOP 0x0000FFFF
178#define R_000006_AGP_BASE 0x000006
179#define S_000006_AGP_BASE_ADDR(x) (((x) & 0xFFFFFFFF) << 0)
180#define G_000006_AGP_BASE_ADDR(x) (((x) >> 0) & 0xFFFFFFFF)
181#define C_000006_AGP_BASE_ADDR 0x00000000
182#define R_000007_AGP_BASE_2 0x000007
183#define S_000007_AGP_BASE_ADDR_2(x) (((x) & 0xF) << 0)
184#define G_000007_AGP_BASE_ADDR_2(x) (((x) >> 0) & 0xF)
185#define C_000007_AGP_BASE_ADDR_2 0xFFFFFFF0
186
187#endif
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index eab31c1d6df1..2e4e60edbff4 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -33,8 +33,8 @@
33#include "radeon.h" 33#include "radeon.h"
34#include "radeon_mode.h" 34#include "radeon_mode.h"
35#include "r600d.h" 35#include "r600d.h"
36#include "avivod.h"
37#include "atom.h" 36#include "atom.h"
37#include "avivod.h"
38 38
39#define PFP_UCODE_SIZE 576 39#define PFP_UCODE_SIZE 576
40#define PM4_UCODE_SIZE 1792 40#define PM4_UCODE_SIZE 1792
@@ -342,7 +342,7 @@ static void r600_mc_resume(struct radeon_device *rdev)
342 342
343 /* we need to own VRAM, so turn off the VGA renderer here 343 /* we need to own VRAM, so turn off the VGA renderer here
344 * to stop it overwriting our objects */ 344 * to stop it overwriting our objects */
345 radeon_avivo_vga_render_disable(rdev); 345 rv515_vga_render_disable(rdev);
346} 346}
347 347
348int r600_mc_init(struct radeon_device *rdev) 348int r600_mc_init(struct radeon_device *rdev)
@@ -380,6 +380,13 @@ int r600_mc_init(struct radeon_device *rdev)
380 /* Setup GPU memory space */ 380 /* Setup GPU memory space */
381 rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE); 381 rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
382 rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE); 382 rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
383
384 if (rdev->mc.mc_vram_size > rdev->mc.aper_size)
385 rdev->mc.mc_vram_size = rdev->mc.aper_size;
386
387 if (rdev->mc.real_vram_size > rdev->mc.aper_size)
388 rdev->mc.real_vram_size = rdev->mc.aper_size;
389
383 if (rdev->flags & RADEON_IS_AGP) { 390 if (rdev->flags & RADEON_IS_AGP) {
384 r = radeon_agp_init(rdev); 391 r = radeon_agp_init(rdev);
385 if (r) 392 if (r)
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index 33b89cd8743e..d28970db6a2d 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -28,7 +28,6 @@
28#include "drmP.h" 28#include "drmP.h"
29#include "radeon.h" 29#include "radeon.h"
30#include "r600d.h" 30#include "r600d.h"
31#include "avivod.h"
32 31
33static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p, 32static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p,
34 struct radeon_cs_reloc **cs_reloc); 33 struct radeon_cs_reloc **cs_reloc);
@@ -57,7 +56,7 @@ int r600_cs_packet_parse(struct radeon_cs_parser *p,
57 idx, ib_chunk->length_dw); 56 idx, ib_chunk->length_dw);
58 return -EINVAL; 57 return -EINVAL;
59 } 58 }
60 header = ib_chunk->kdata[idx]; 59 header = radeon_get_ib_value(p, idx);
61 pkt->idx = idx; 60 pkt->idx = idx;
62 pkt->type = CP_PACKET_GET_TYPE(header); 61 pkt->type = CP_PACKET_GET_TYPE(header);
63 pkt->count = CP_PACKET_GET_COUNT(header); 62 pkt->count = CP_PACKET_GET_COUNT(header);
@@ -98,7 +97,6 @@ int r600_cs_packet_parse(struct radeon_cs_parser *p,
98static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p, 97static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p,
99 struct radeon_cs_reloc **cs_reloc) 98 struct radeon_cs_reloc **cs_reloc)
100{ 99{
101 struct radeon_cs_chunk *ib_chunk;
102 struct radeon_cs_chunk *relocs_chunk; 100 struct radeon_cs_chunk *relocs_chunk;
103 struct radeon_cs_packet p3reloc; 101 struct radeon_cs_packet p3reloc;
104 unsigned idx; 102 unsigned idx;
@@ -109,7 +107,6 @@ static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p,
109 return -EINVAL; 107 return -EINVAL;
110 } 108 }
111 *cs_reloc = NULL; 109 *cs_reloc = NULL;
112 ib_chunk = &p->chunks[p->chunk_ib_idx];
113 relocs_chunk = &p->chunks[p->chunk_relocs_idx]; 110 relocs_chunk = &p->chunks[p->chunk_relocs_idx];
114 r = r600_cs_packet_parse(p, &p3reloc, p->idx); 111 r = r600_cs_packet_parse(p, &p3reloc, p->idx);
115 if (r) { 112 if (r) {
@@ -121,7 +118,7 @@ static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p,
121 p3reloc.idx); 118 p3reloc.idx);
122 return -EINVAL; 119 return -EINVAL;
123 } 120 }
124 idx = ib_chunk->kdata[p3reloc.idx + 1]; 121 idx = radeon_get_ib_value(p, p3reloc.idx + 1);
125 if (idx >= relocs_chunk->length_dw) { 122 if (idx >= relocs_chunk->length_dw) {
126 DRM_ERROR("Relocs at %d after relocations chunk end %d !\n", 123 DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
127 idx, relocs_chunk->length_dw); 124 idx, relocs_chunk->length_dw);
@@ -146,7 +143,6 @@ static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p,
146static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p, 143static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p,
147 struct radeon_cs_reloc **cs_reloc) 144 struct radeon_cs_reloc **cs_reloc)
148{ 145{
149 struct radeon_cs_chunk *ib_chunk;
150 struct radeon_cs_chunk *relocs_chunk; 146 struct radeon_cs_chunk *relocs_chunk;
151 struct radeon_cs_packet p3reloc; 147 struct radeon_cs_packet p3reloc;
152 unsigned idx; 148 unsigned idx;
@@ -157,7 +153,6 @@ static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p,
157 return -EINVAL; 153 return -EINVAL;
158 } 154 }
159 *cs_reloc = NULL; 155 *cs_reloc = NULL;
160 ib_chunk = &p->chunks[p->chunk_ib_idx];
161 relocs_chunk = &p->chunks[p->chunk_relocs_idx]; 156 relocs_chunk = &p->chunks[p->chunk_relocs_idx];
162 r = r600_cs_packet_parse(p, &p3reloc, p->idx); 157 r = r600_cs_packet_parse(p, &p3reloc, p->idx);
163 if (r) { 158 if (r) {
@@ -169,7 +164,7 @@ static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p,
169 p3reloc.idx); 164 p3reloc.idx);
170 return -EINVAL; 165 return -EINVAL;
171 } 166 }
172 idx = ib_chunk->kdata[p3reloc.idx + 1]; 167 idx = radeon_get_ib_value(p, p3reloc.idx + 1);
173 if (idx >= relocs_chunk->length_dw) { 168 if (idx >= relocs_chunk->length_dw) {
174 DRM_ERROR("Relocs at %d after relocations chunk end %d !\n", 169 DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
175 idx, relocs_chunk->length_dw); 170 idx, relocs_chunk->length_dw);
@@ -181,13 +176,136 @@ static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p,
181 return 0; 176 return 0;
182} 177}
183 178
179/**
180 * r600_cs_packet_next_vline() - parse userspace VLINE packet
181 * @parser: parser structure holding parsing context.
182 *
183 * Userspace sends a special sequence for VLINE waits.
184 * PACKET0 - VLINE_START_END + value
185 * PACKET3 - WAIT_REG_MEM poll vline status reg
186 * RELOC (P3) - crtc_id in reloc.
187 *
188 * This function parses this and relocates the VLINE START END
189 * and WAIT_REG_MEM packets to the correct crtc.
190 * It also detects a switched off crtc and nulls out the
191 * wait in that case.
192 */
193static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p)
194{
195 struct drm_mode_object *obj;
196 struct drm_crtc *crtc;
197 struct radeon_crtc *radeon_crtc;
198 struct radeon_cs_packet p3reloc, wait_reg_mem;
199 int crtc_id;
200 int r;
201 uint32_t header, h_idx, reg, wait_reg_mem_info;
202 volatile uint32_t *ib;
203
204 ib = p->ib->ptr;
205
206 /* parse the WAIT_REG_MEM */
207 r = r600_cs_packet_parse(p, &wait_reg_mem, p->idx);
208 if (r)
209 return r;
210
211 /* check its a WAIT_REG_MEM */
212 if (wait_reg_mem.type != PACKET_TYPE3 ||
213 wait_reg_mem.opcode != PACKET3_WAIT_REG_MEM) {
214 DRM_ERROR("vline wait missing WAIT_REG_MEM segment\n");
215 r = -EINVAL;
216 return r;
217 }
218
219 wait_reg_mem_info = radeon_get_ib_value(p, wait_reg_mem.idx + 1);
220 /* bit 4 is reg (0) or mem (1) */
221 if (wait_reg_mem_info & 0x10) {
222 DRM_ERROR("vline WAIT_REG_MEM waiting on MEM rather than REG\n");
223 r = -EINVAL;
224 return r;
225 }
226 /* waiting for value to be equal */
227 if ((wait_reg_mem_info & 0x7) != 0x3) {
228 DRM_ERROR("vline WAIT_REG_MEM function not equal\n");
229 r = -EINVAL;
230 return r;
231 }
232 if ((radeon_get_ib_value(p, wait_reg_mem.idx + 2) << 2) != AVIVO_D1MODE_VLINE_STATUS) {
233 DRM_ERROR("vline WAIT_REG_MEM bad reg\n");
234 r = -EINVAL;
235 return r;
236 }
237
238 if (radeon_get_ib_value(p, wait_reg_mem.idx + 5) != AVIVO_D1MODE_VLINE_STAT) {
239 DRM_ERROR("vline WAIT_REG_MEM bad bit mask\n");
240 r = -EINVAL;
241 return r;
242 }
243
244 /* jump over the NOP */
245 r = r600_cs_packet_parse(p, &p3reloc, p->idx + wait_reg_mem.count + 2);
246 if (r)
247 return r;
248
249 h_idx = p->idx - 2;
250 p->idx += wait_reg_mem.count + 2;
251 p->idx += p3reloc.count + 2;
252
253 header = radeon_get_ib_value(p, h_idx);
254 crtc_id = radeon_get_ib_value(p, h_idx + 2 + 7 + 1);
255 reg = header >> 2;
256 mutex_lock(&p->rdev->ddev->mode_config.mutex);
257 obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
258 if (!obj) {
259 DRM_ERROR("cannot find crtc %d\n", crtc_id);
260 r = -EINVAL;
261 goto out;
262 }
263 crtc = obj_to_crtc(obj);
264 radeon_crtc = to_radeon_crtc(crtc);
265 crtc_id = radeon_crtc->crtc_id;
266
267 if (!crtc->enabled) {
268 /* if the CRTC isn't enabled - we need to nop out the WAIT_REG_MEM */
269 ib[h_idx + 2] = PACKET2(0);
270 ib[h_idx + 3] = PACKET2(0);
271 ib[h_idx + 4] = PACKET2(0);
272 ib[h_idx + 5] = PACKET2(0);
273 ib[h_idx + 6] = PACKET2(0);
274 ib[h_idx + 7] = PACKET2(0);
275 ib[h_idx + 8] = PACKET2(0);
276 } else if (crtc_id == 1) {
277 switch (reg) {
278 case AVIVO_D1MODE_VLINE_START_END:
279 header &= ~R600_CP_PACKET0_REG_MASK;
280 header |= AVIVO_D2MODE_VLINE_START_END >> 2;
281 break;
282 default:
283 DRM_ERROR("unknown crtc reloc\n");
284 r = -EINVAL;
285 goto out;
286 }
287 ib[h_idx] = header;
288 ib[h_idx + 4] = AVIVO_D2MODE_VLINE_STATUS >> 2;
289 }
290out:
291 mutex_unlock(&p->rdev->ddev->mode_config.mutex);
292 return r;
293}
294
184static int r600_packet0_check(struct radeon_cs_parser *p, 295static int r600_packet0_check(struct radeon_cs_parser *p,
185 struct radeon_cs_packet *pkt, 296 struct radeon_cs_packet *pkt,
186 unsigned idx, unsigned reg) 297 unsigned idx, unsigned reg)
187{ 298{
299 int r;
300
188 switch (reg) { 301 switch (reg) {
189 case AVIVO_D1MODE_VLINE_START_END: 302 case AVIVO_D1MODE_VLINE_START_END:
190 case AVIVO_D2MODE_VLINE_START_END: 303 r = r600_cs_packet_parse_vline(p);
304 if (r) {
305 DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
306 idx, reg);
307 return r;
308 }
191 break; 309 break;
192 default: 310 default:
193 printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n", 311 printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
@@ -218,17 +336,18 @@ static int r600_cs_parse_packet0(struct radeon_cs_parser *p,
218static int r600_packet3_check(struct radeon_cs_parser *p, 336static int r600_packet3_check(struct radeon_cs_parser *p,
219 struct radeon_cs_packet *pkt) 337 struct radeon_cs_packet *pkt)
220{ 338{
221 struct radeon_cs_chunk *ib_chunk;
222 struct radeon_cs_reloc *reloc; 339 struct radeon_cs_reloc *reloc;
223 volatile u32 *ib; 340 volatile u32 *ib;
224 unsigned idx; 341 unsigned idx;
225 unsigned i; 342 unsigned i;
226 unsigned start_reg, end_reg, reg; 343 unsigned start_reg, end_reg, reg;
227 int r; 344 int r;
345 u32 idx_value;
228 346
229 ib = p->ib->ptr; 347 ib = p->ib->ptr;
230 ib_chunk = &p->chunks[p->chunk_ib_idx];
231 idx = pkt->idx + 1; 348 idx = pkt->idx + 1;
349 idx_value = radeon_get_ib_value(p, idx);
350
232 switch (pkt->opcode) { 351 switch (pkt->opcode) {
233 case PACKET3_START_3D_CMDBUF: 352 case PACKET3_START_3D_CMDBUF:
234 if (p->family >= CHIP_RV770 || pkt->count) { 353 if (p->family >= CHIP_RV770 || pkt->count) {
@@ -259,8 +378,8 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
259 DRM_ERROR("bad DRAW_INDEX\n"); 378 DRM_ERROR("bad DRAW_INDEX\n");
260 return -EINVAL; 379 return -EINVAL;
261 } 380 }
262 ib[idx+0] += (u32)(reloc->lobj.gpu_offset & 0xffffffff); 381 ib[idx+0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff);
263 ib[idx+1] = upper_32_bits(reloc->lobj.gpu_offset) & 0xff; 382 ib[idx+1] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
264 break; 383 break;
265 case PACKET3_DRAW_INDEX_AUTO: 384 case PACKET3_DRAW_INDEX_AUTO:
266 if (pkt->count != 1) { 385 if (pkt->count != 1) {
@@ -281,14 +400,14 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
281 return -EINVAL; 400 return -EINVAL;
282 } 401 }
283 /* bit 4 is reg (0) or mem (1) */ 402 /* bit 4 is reg (0) or mem (1) */
284 if (ib_chunk->kdata[idx+0] & 0x10) { 403 if (idx_value & 0x10) {
285 r = r600_cs_packet_next_reloc(p, &reloc); 404 r = r600_cs_packet_next_reloc(p, &reloc);
286 if (r) { 405 if (r) {
287 DRM_ERROR("bad WAIT_REG_MEM\n"); 406 DRM_ERROR("bad WAIT_REG_MEM\n");
288 return -EINVAL; 407 return -EINVAL;
289 } 408 }
290 ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff); 409 ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
291 ib[idx+2] = upper_32_bits(reloc->lobj.gpu_offset) & 0xff; 410 ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
292 } 411 }
293 break; 412 break;
294 case PACKET3_SURFACE_SYNC: 413 case PACKET3_SURFACE_SYNC:
@@ -297,8 +416,8 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
297 return -EINVAL; 416 return -EINVAL;
298 } 417 }
299 /* 0xffffffff/0x0 is flush all cache flag */ 418 /* 0xffffffff/0x0 is flush all cache flag */
300 if (ib_chunk->kdata[idx+1] != 0xffffffff || 419 if (radeon_get_ib_value(p, idx + 1) != 0xffffffff ||
301 ib_chunk->kdata[idx+2] != 0) { 420 radeon_get_ib_value(p, idx + 2) != 0) {
302 r = r600_cs_packet_next_reloc(p, &reloc); 421 r = r600_cs_packet_next_reloc(p, &reloc);
303 if (r) { 422 if (r) {
304 DRM_ERROR("bad SURFACE_SYNC\n"); 423 DRM_ERROR("bad SURFACE_SYNC\n");
@@ -319,7 +438,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
319 return -EINVAL; 438 return -EINVAL;
320 } 439 }
321 ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff); 440 ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
322 ib[idx+2] |= upper_32_bits(reloc->lobj.gpu_offset) & 0xff; 441 ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
323 } 442 }
324 break; 443 break;
325 case PACKET3_EVENT_WRITE_EOP: 444 case PACKET3_EVENT_WRITE_EOP:
@@ -333,10 +452,10 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
333 return -EINVAL; 452 return -EINVAL;
334 } 453 }
335 ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff); 454 ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
336 ib[idx+2] |= upper_32_bits(reloc->lobj.gpu_offset) & 0xff; 455 ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
337 break; 456 break;
338 case PACKET3_SET_CONFIG_REG: 457 case PACKET3_SET_CONFIG_REG:
339 start_reg = (ib[idx+0] << 2) + PACKET3_SET_CONFIG_REG_OFFSET; 458 start_reg = (idx_value << 2) + PACKET3_SET_CONFIG_REG_OFFSET;
340 end_reg = 4 * pkt->count + start_reg - 4; 459 end_reg = 4 * pkt->count + start_reg - 4;
341 if ((start_reg < PACKET3_SET_CONFIG_REG_OFFSET) || 460 if ((start_reg < PACKET3_SET_CONFIG_REG_OFFSET) ||
342 (start_reg >= PACKET3_SET_CONFIG_REG_END) || 461 (start_reg >= PACKET3_SET_CONFIG_REG_END) ||
@@ -356,7 +475,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
356 } 475 }
357 break; 476 break;
358 case PACKET3_SET_CONTEXT_REG: 477 case PACKET3_SET_CONTEXT_REG:
359 start_reg = (ib[idx+0] << 2) + PACKET3_SET_CONTEXT_REG_OFFSET; 478 start_reg = (idx_value << 2) + PACKET3_SET_CONTEXT_REG_OFFSET;
360 end_reg = 4 * pkt->count + start_reg - 4; 479 end_reg = 4 * pkt->count + start_reg - 4;
361 if ((start_reg < PACKET3_SET_CONTEXT_REG_OFFSET) || 480 if ((start_reg < PACKET3_SET_CONTEXT_REG_OFFSET) ||
362 (start_reg >= PACKET3_SET_CONTEXT_REG_END) || 481 (start_reg >= PACKET3_SET_CONTEXT_REG_END) ||
@@ -421,7 +540,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
421 DRM_ERROR("bad SET_RESOURCE\n"); 540 DRM_ERROR("bad SET_RESOURCE\n");
422 return -EINVAL; 541 return -EINVAL;
423 } 542 }
424 start_reg = (ib[idx+0] << 2) + PACKET3_SET_RESOURCE_OFFSET; 543 start_reg = (idx_value << 2) + PACKET3_SET_RESOURCE_OFFSET;
425 end_reg = 4 * pkt->count + start_reg - 4; 544 end_reg = 4 * pkt->count + start_reg - 4;
426 if ((start_reg < PACKET3_SET_RESOURCE_OFFSET) || 545 if ((start_reg < PACKET3_SET_RESOURCE_OFFSET) ||
427 (start_reg >= PACKET3_SET_RESOURCE_END) || 546 (start_reg >= PACKET3_SET_RESOURCE_END) ||
@@ -430,7 +549,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
430 return -EINVAL; 549 return -EINVAL;
431 } 550 }
432 for (i = 0; i < (pkt->count / 7); i++) { 551 for (i = 0; i < (pkt->count / 7); i++) {
433 switch (G__SQ_VTX_CONSTANT_TYPE(ib[idx+(i*7)+6+1])) { 552 switch (G__SQ_VTX_CONSTANT_TYPE(radeon_get_ib_value(p, idx+(i*7)+6+1))) {
434 case SQ_TEX_VTX_VALID_TEXTURE: 553 case SQ_TEX_VTX_VALID_TEXTURE:
435 /* tex base */ 554 /* tex base */
436 r = r600_cs_packet_next_reloc(p, &reloc); 555 r = r600_cs_packet_next_reloc(p, &reloc);
@@ -455,7 +574,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
455 return -EINVAL; 574 return -EINVAL;
456 } 575 }
457 ib[idx+1+(i*7)+0] += (u32)((reloc->lobj.gpu_offset) & 0xffffffff); 576 ib[idx+1+(i*7)+0] += (u32)((reloc->lobj.gpu_offset) & 0xffffffff);
458 ib[idx+1+(i*7)+2] |= upper_32_bits(reloc->lobj.gpu_offset) & 0xff; 577 ib[idx+1+(i*7)+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
459 break; 578 break;
460 case SQ_TEX_VTX_INVALID_TEXTURE: 579 case SQ_TEX_VTX_INVALID_TEXTURE:
461 case SQ_TEX_VTX_INVALID_BUFFER: 580 case SQ_TEX_VTX_INVALID_BUFFER:
@@ -466,7 +585,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
466 } 585 }
467 break; 586 break;
468 case PACKET3_SET_ALU_CONST: 587 case PACKET3_SET_ALU_CONST:
469 start_reg = (ib[idx+0] << 2) + PACKET3_SET_ALU_CONST_OFFSET; 588 start_reg = (idx_value << 2) + PACKET3_SET_ALU_CONST_OFFSET;
470 end_reg = 4 * pkt->count + start_reg - 4; 589 end_reg = 4 * pkt->count + start_reg - 4;
471 if ((start_reg < PACKET3_SET_ALU_CONST_OFFSET) || 590 if ((start_reg < PACKET3_SET_ALU_CONST_OFFSET) ||
472 (start_reg >= PACKET3_SET_ALU_CONST_END) || 591 (start_reg >= PACKET3_SET_ALU_CONST_END) ||
@@ -476,7 +595,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
476 } 595 }
477 break; 596 break;
478 case PACKET3_SET_BOOL_CONST: 597 case PACKET3_SET_BOOL_CONST:
479 start_reg = (ib[idx+0] << 2) + PACKET3_SET_BOOL_CONST_OFFSET; 598 start_reg = (idx_value << 2) + PACKET3_SET_BOOL_CONST_OFFSET;
480 end_reg = 4 * pkt->count + start_reg - 4; 599 end_reg = 4 * pkt->count + start_reg - 4;
481 if ((start_reg < PACKET3_SET_BOOL_CONST_OFFSET) || 600 if ((start_reg < PACKET3_SET_BOOL_CONST_OFFSET) ||
482 (start_reg >= PACKET3_SET_BOOL_CONST_END) || 601 (start_reg >= PACKET3_SET_BOOL_CONST_END) ||
@@ -486,7 +605,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
486 } 605 }
487 break; 606 break;
488 case PACKET3_SET_LOOP_CONST: 607 case PACKET3_SET_LOOP_CONST:
489 start_reg = (ib[idx+0] << 2) + PACKET3_SET_LOOP_CONST_OFFSET; 608 start_reg = (idx_value << 2) + PACKET3_SET_LOOP_CONST_OFFSET;
490 end_reg = 4 * pkt->count + start_reg - 4; 609 end_reg = 4 * pkt->count + start_reg - 4;
491 if ((start_reg < PACKET3_SET_LOOP_CONST_OFFSET) || 610 if ((start_reg < PACKET3_SET_LOOP_CONST_OFFSET) ||
492 (start_reg >= PACKET3_SET_LOOP_CONST_END) || 611 (start_reg >= PACKET3_SET_LOOP_CONST_END) ||
@@ -496,7 +615,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
496 } 615 }
497 break; 616 break;
498 case PACKET3_SET_CTL_CONST: 617 case PACKET3_SET_CTL_CONST:
499 start_reg = (ib[idx+0] << 2) + PACKET3_SET_CTL_CONST_OFFSET; 618 start_reg = (idx_value << 2) + PACKET3_SET_CTL_CONST_OFFSET;
500 end_reg = 4 * pkt->count + start_reg - 4; 619 end_reg = 4 * pkt->count + start_reg - 4;
501 if ((start_reg < PACKET3_SET_CTL_CONST_OFFSET) || 620 if ((start_reg < PACKET3_SET_CTL_CONST_OFFSET) ||
502 (start_reg >= PACKET3_SET_CTL_CONST_END) || 621 (start_reg >= PACKET3_SET_CTL_CONST_END) ||
@@ -510,7 +629,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
510 DRM_ERROR("bad SET_SAMPLER\n"); 629 DRM_ERROR("bad SET_SAMPLER\n");
511 return -EINVAL; 630 return -EINVAL;
512 } 631 }
513 start_reg = (ib[idx+0] << 2) + PACKET3_SET_SAMPLER_OFFSET; 632 start_reg = (idx_value << 2) + PACKET3_SET_SAMPLER_OFFSET;
514 end_reg = 4 * pkt->count + start_reg - 4; 633 end_reg = 4 * pkt->count + start_reg - 4;
515 if ((start_reg < PACKET3_SET_SAMPLER_OFFSET) || 634 if ((start_reg < PACKET3_SET_SAMPLER_OFFSET) ||
516 (start_reg >= PACKET3_SET_SAMPLER_END) || 635 (start_reg >= PACKET3_SET_SAMPLER_END) ||
@@ -602,6 +721,8 @@ static void r600_cs_parser_fini(struct radeon_cs_parser *parser, int error)
602 kfree(parser->relocs); 721 kfree(parser->relocs);
603 for (i = 0; i < parser->nchunks; i++) { 722 for (i = 0; i < parser->nchunks; i++) {
604 kfree(parser->chunks[i].kdata); 723 kfree(parser->chunks[i].kdata);
724 kfree(parser->chunks[i].kpage[0]);
725 kfree(parser->chunks[i].kpage[1]);
605 } 726 }
606 kfree(parser->chunks); 727 kfree(parser->chunks);
607 kfree(parser->chunks_array); 728 kfree(parser->chunks_array);
@@ -639,7 +760,6 @@ int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp,
639 * uncached). */ 760 * uncached). */
640 ib_chunk = &parser.chunks[parser.chunk_ib_idx]; 761 ib_chunk = &parser.chunks[parser.chunk_ib_idx];
641 parser.ib->length_dw = ib_chunk->length_dw; 762 parser.ib->length_dw = ib_chunk->length_dw;
642 memcpy((void *)parser.ib->ptr, ib_chunk->kdata, ib_chunk->length_dw*4);
643 *l = parser.ib->length_dw; 763 *l = parser.ib->length_dw;
644 r = r600_cs_parse(&parser); 764 r = r600_cs_parse(&parser);
645 if (r) { 765 if (r) {
@@ -647,6 +767,12 @@ int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp,
647 r600_cs_parser_fini(&parser, r); 767 r600_cs_parser_fini(&parser, r);
648 return r; 768 return r;
649 } 769 }
770 r = radeon_cs_finish_pages(&parser);
771 if (r) {
772 DRM_ERROR("Invalid command stream !\n");
773 r600_cs_parser_fini(&parser, r);
774 return r;
775 }
650 r600_cs_parser_fini(&parser, r); 776 r600_cs_parser_fini(&parser, r);
651 return r; 777 return r;
652} 778}
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 6311b1362594..950b346e343f 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -44,6 +44,24 @@
44 * - TESTING, TESTING, TESTING 44 * - TESTING, TESTING, TESTING
45 */ 45 */
46 46
47/* Initialization path:
48 * We expect that acceleration initialization might fail for various
49 * reasons even thought we work hard to make it works on most
50 * configurations. In order to still have a working userspace in such
51 * situation the init path must succeed up to the memory controller
52 * initialization point. Failure before this point are considered as
53 * fatal error. Here is the init callchain :
54 * radeon_device_init perform common structure, mutex initialization
55 * asic_init setup the GPU memory layout and perform all
56 * one time initialization (failure in this
57 * function are considered fatal)
58 * asic_startup setup the GPU acceleration, in order to
59 * follow guideline the first thing this
60 * function should do is setting the GPU
61 * memory controller (only MC setup failure
62 * are considered as fatal)
63 */
64
47#include <asm/atomic.h> 65#include <asm/atomic.h>
48#include <linux/wait.h> 66#include <linux/wait.h>
49#include <linux/list.h> 67#include <linux/list.h>
@@ -342,7 +360,7 @@ struct radeon_ib {
342 unsigned long idx; 360 unsigned long idx;
343 uint64_t gpu_addr; 361 uint64_t gpu_addr;
344 struct radeon_fence *fence; 362 struct radeon_fence *fence;
345 volatile uint32_t *ptr; 363 uint32_t *ptr;
346 uint32_t length_dw; 364 uint32_t length_dw;
347}; 365};
348 366
@@ -415,7 +433,12 @@ struct radeon_cs_reloc {
415struct radeon_cs_chunk { 433struct radeon_cs_chunk {
416 uint32_t chunk_id; 434 uint32_t chunk_id;
417 uint32_t length_dw; 435 uint32_t length_dw;
436 int kpage_idx[2];
437 uint32_t *kpage[2];
418 uint32_t *kdata; 438 uint32_t *kdata;
439 void __user *user_ptr;
440 int last_copied_page;
441 int last_page_index;
419}; 442};
420 443
421struct radeon_cs_parser { 444struct radeon_cs_parser {
@@ -438,8 +461,38 @@ struct radeon_cs_parser {
438 struct radeon_ib *ib; 461 struct radeon_ib *ib;
439 void *track; 462 void *track;
440 unsigned family; 463 unsigned family;
464 int parser_error;
441}; 465};
442 466
467extern int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx);
468extern int radeon_cs_finish_pages(struct radeon_cs_parser *p);
469
470
471static inline u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
472{
473 struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
474 u32 pg_idx, pg_offset;
475 u32 idx_value = 0;
476 int new_page;
477
478 pg_idx = (idx * 4) / PAGE_SIZE;
479 pg_offset = (idx * 4) % PAGE_SIZE;
480
481 if (ibc->kpage_idx[0] == pg_idx)
482 return ibc->kpage[0][pg_offset/4];
483 if (ibc->kpage_idx[1] == pg_idx)
484 return ibc->kpage[1][pg_offset/4];
485
486 new_page = radeon_cs_update_pages(p, pg_idx);
487 if (new_page < 0) {
488 p->parser_error = new_page;
489 return 0;
490 }
491
492 idx_value = ibc->kpage[new_page][pg_offset/4];
493 return idx_value;
494}
495
443struct radeon_cs_packet { 496struct radeon_cs_packet {
444 unsigned idx; 497 unsigned idx;
445 unsigned type; 498 unsigned type;
@@ -943,6 +996,7 @@ extern void radeon_clocks_fini(struct radeon_device *rdev);
943extern void radeon_scratch_init(struct radeon_device *rdev); 996extern void radeon_scratch_init(struct radeon_device *rdev);
944extern void radeon_surface_init(struct radeon_device *rdev); 997extern void radeon_surface_init(struct radeon_device *rdev);
945extern int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data); 998extern int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data);
999extern void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable);
946 1000
947/* r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 */ 1001/* r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 */
948struct r100_mc_save { 1002struct r100_mc_save {
@@ -974,6 +1028,9 @@ extern void r100_vram_init_sizes(struct radeon_device *rdev);
974extern void r100_wb_disable(struct radeon_device *rdev); 1028extern void r100_wb_disable(struct radeon_device *rdev);
975extern void r100_wb_fini(struct radeon_device *rdev); 1029extern void r100_wb_fini(struct radeon_device *rdev);
976extern int r100_wb_init(struct radeon_device *rdev); 1030extern int r100_wb_init(struct radeon_device *rdev);
1031extern void r100_hdp_reset(struct radeon_device *rdev);
1032extern int r100_rb2d_reset(struct radeon_device *rdev);
1033extern int r100_cp_reset(struct radeon_device *rdev);
977 1034
978/* r300,r350,rv350,rv370,rv380 */ 1035/* r300,r350,rv350,rv370,rv380 */
979extern void r300_set_reg_safe(struct radeon_device *rdev); 1036extern void r300_set_reg_safe(struct radeon_device *rdev);
@@ -985,12 +1042,29 @@ extern int rv370_pcie_gart_enable(struct radeon_device *rdev);
985extern void rv370_pcie_gart_disable(struct radeon_device *rdev); 1042extern void rv370_pcie_gart_disable(struct radeon_device *rdev);
986 1043
987/* r420,r423,rv410 */ 1044/* r420,r423,rv410 */
1045extern int r420_mc_init(struct radeon_device *rdev);
988extern u32 r420_mc_rreg(struct radeon_device *rdev, u32 reg); 1046extern u32 r420_mc_rreg(struct radeon_device *rdev, u32 reg);
989extern void r420_mc_wreg(struct radeon_device *rdev, u32 reg, u32 v); 1047extern void r420_mc_wreg(struct radeon_device *rdev, u32 reg, u32 v);
990extern int r420_debugfs_pipes_info_init(struct radeon_device *rdev); 1048extern int r420_debugfs_pipes_info_init(struct radeon_device *rdev);
1049extern void r420_pipes_init(struct radeon_device *rdev);
991 1050
992/* rv515 */ 1051/* rv515 */
1052struct rv515_mc_save {
1053 u32 d1vga_control;
1054 u32 d2vga_control;
1055 u32 vga_render_control;
1056 u32 vga_hdp_control;
1057 u32 d1crtc_control;
1058 u32 d2crtc_control;
1059};
993extern void rv515_bandwidth_avivo_update(struct radeon_device *rdev); 1060extern void rv515_bandwidth_avivo_update(struct radeon_device *rdev);
1061extern void rv515_vga_render_disable(struct radeon_device *rdev);
1062extern void rv515_set_safe_registers(struct radeon_device *rdev);
1063extern void rv515_mc_stop(struct radeon_device *rdev, struct rv515_mc_save *save);
1064extern void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save);
1065extern void rv515_clock_startup(struct radeon_device *rdev);
1066extern void rv515_debugfs(struct radeon_device *rdev);
1067extern int rv515_suspend(struct radeon_device *rdev);
994 1068
995/* rs690, rs740 */ 1069/* rs690, rs740 */
996extern void rs690_line_buffer_adjust(struct radeon_device *rdev, 1070extern void rs690_line_buffer_adjust(struct radeon_device *rdev,
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 8968f78fa1e3..c8a4e7b5663d 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -420,41 +420,43 @@ static struct radeon_asic rs690_asic = {
420 * rv515 420 * rv515
421 */ 421 */
422int rv515_init(struct radeon_device *rdev); 422int rv515_init(struct radeon_device *rdev);
423void rv515_errata(struct radeon_device *rdev); 423void rv515_fini(struct radeon_device *rdev);
424void rv515_vram_info(struct radeon_device *rdev);
425int rv515_gpu_reset(struct radeon_device *rdev); 424int rv515_gpu_reset(struct radeon_device *rdev);
426int rv515_mc_init(struct radeon_device *rdev);
427void rv515_mc_fini(struct radeon_device *rdev);
428uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg); 425uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg);
429void rv515_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); 426void rv515_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
430void rv515_ring_start(struct radeon_device *rdev); 427void rv515_ring_start(struct radeon_device *rdev);
431uint32_t rv515_pcie_rreg(struct radeon_device *rdev, uint32_t reg); 428uint32_t rv515_pcie_rreg(struct radeon_device *rdev, uint32_t reg);
432void rv515_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); 429void rv515_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
433void rv515_bandwidth_update(struct radeon_device *rdev); 430void rv515_bandwidth_update(struct radeon_device *rdev);
431int rv515_resume(struct radeon_device *rdev);
432int rv515_suspend(struct radeon_device *rdev);
434static struct radeon_asic rv515_asic = { 433static struct radeon_asic rv515_asic = {
435 .init = &rv515_init, 434 .init = &rv515_init,
436 .errata = &rv515_errata, 435 .fini = &rv515_fini,
437 .vram_info = &rv515_vram_info, 436 .suspend = &rv515_suspend,
437 .resume = &rv515_resume,
438 .errata = NULL,
439 .vram_info = NULL,
438 .vga_set_state = &r100_vga_set_state, 440 .vga_set_state = &r100_vga_set_state,
439 .gpu_reset = &rv515_gpu_reset, 441 .gpu_reset = &rv515_gpu_reset,
440 .mc_init = &rv515_mc_init, 442 .mc_init = NULL,
441 .mc_fini = &rv515_mc_fini, 443 .mc_fini = NULL,
442 .wb_init = &r100_wb_init, 444 .wb_init = NULL,
443 .wb_fini = &r100_wb_fini, 445 .wb_fini = NULL,
444 .gart_init = &rv370_pcie_gart_init, 446 .gart_init = &rv370_pcie_gart_init,
445 .gart_fini = &rv370_pcie_gart_fini, 447 .gart_fini = &rv370_pcie_gart_fini,
446 .gart_enable = &rv370_pcie_gart_enable, 448 .gart_enable = NULL,
447 .gart_disable = &rv370_pcie_gart_disable, 449 .gart_disable = NULL,
448 .gart_tlb_flush = &rv370_pcie_gart_tlb_flush, 450 .gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
449 .gart_set_page = &rv370_pcie_gart_set_page, 451 .gart_set_page = &rv370_pcie_gart_set_page,
450 .cp_init = &r100_cp_init, 452 .cp_init = NULL,
451 .cp_fini = &r100_cp_fini, 453 .cp_fini = NULL,
452 .cp_disable = &r100_cp_disable, 454 .cp_disable = NULL,
453 .cp_commit = &r100_cp_commit, 455 .cp_commit = &r100_cp_commit,
454 .ring_start = &rv515_ring_start, 456 .ring_start = &rv515_ring_start,
455 .ring_test = &r100_ring_test, 457 .ring_test = &r100_ring_test,
456 .ring_ib_execute = &r100_ring_ib_execute, 458 .ring_ib_execute = &r100_ring_ib_execute,
457 .ib_test = &r100_ib_test, 459 .ib_test = NULL,
458 .irq_set = &rs600_irq_set, 460 .irq_set = &rs600_irq_set,
459 .irq_process = &rs600_irq_process, 461 .irq_process = &rs600_irq_process,
460 .get_vblank_counter = &rs600_get_vblank_counter, 462 .get_vblank_counter = &rs600_get_vblank_counter,
@@ -476,35 +478,35 @@ static struct radeon_asic rv515_asic = {
476/* 478/*
477 * r520,rv530,rv560,rv570,r580 479 * r520,rv530,rv560,rv570,r580
478 */ 480 */
479void r520_errata(struct radeon_device *rdev); 481int r520_init(struct radeon_device *rdev);
480void r520_vram_info(struct radeon_device *rdev); 482int r520_resume(struct radeon_device *rdev);
481int r520_mc_init(struct radeon_device *rdev);
482void r520_mc_fini(struct radeon_device *rdev);
483void r520_bandwidth_update(struct radeon_device *rdev);
484static struct radeon_asic r520_asic = { 483static struct radeon_asic r520_asic = {
485 .init = &rv515_init, 484 .init = &r520_init,
486 .errata = &r520_errata, 485 .fini = &rv515_fini,
487 .vram_info = &r520_vram_info, 486 .suspend = &rv515_suspend,
487 .resume = &r520_resume,
488 .errata = NULL,
489 .vram_info = NULL,
488 .vga_set_state = &r100_vga_set_state, 490 .vga_set_state = &r100_vga_set_state,
489 .gpu_reset = &rv515_gpu_reset, 491 .gpu_reset = &rv515_gpu_reset,
490 .mc_init = &r520_mc_init, 492 .mc_init = NULL,
491 .mc_fini = &r520_mc_fini, 493 .mc_fini = NULL,
492 .wb_init = &r100_wb_init, 494 .wb_init = NULL,
493 .wb_fini = &r100_wb_fini, 495 .wb_fini = NULL,
494 .gart_init = &rv370_pcie_gart_init, 496 .gart_init = NULL,
495 .gart_fini = &rv370_pcie_gart_fini, 497 .gart_fini = NULL,
496 .gart_enable = &rv370_pcie_gart_enable, 498 .gart_enable = NULL,
497 .gart_disable = &rv370_pcie_gart_disable, 499 .gart_disable = NULL,
498 .gart_tlb_flush = &rv370_pcie_gart_tlb_flush, 500 .gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
499 .gart_set_page = &rv370_pcie_gart_set_page, 501 .gart_set_page = &rv370_pcie_gart_set_page,
500 .cp_init = &r100_cp_init, 502 .cp_init = NULL,
501 .cp_fini = &r100_cp_fini, 503 .cp_fini = NULL,
502 .cp_disable = &r100_cp_disable, 504 .cp_disable = NULL,
503 .cp_commit = &r100_cp_commit, 505 .cp_commit = &r100_cp_commit,
504 .ring_start = &rv515_ring_start, 506 .ring_start = &rv515_ring_start,
505 .ring_test = &r100_ring_test, 507 .ring_test = &r100_ring_test,
506 .ring_ib_execute = &r100_ring_ib_execute, 508 .ring_ib_execute = &r100_ring_ib_execute,
507 .ib_test = &r100_ib_test, 509 .ib_test = NULL,
508 .irq_set = &rs600_irq_set, 510 .irq_set = &rs600_irq_set,
509 .irq_process = &rs600_irq_process, 511 .irq_process = &rs600_irq_process,
510 .get_vblank_counter = &rs600_get_vblank_counter, 512 .get_vblank_counter = &rs600_get_vblank_counter,
@@ -519,7 +521,7 @@ static struct radeon_asic r520_asic = {
519 .set_clock_gating = &radeon_atom_set_clock_gating, 521 .set_clock_gating = &radeon_atom_set_clock_gating,
520 .set_surface_reg = r100_set_surface_reg, 522 .set_surface_reg = r100_set_surface_reg,
521 .clear_surface_reg = r100_clear_surface_reg, 523 .clear_surface_reg = r100_clear_surface_reg,
522 .bandwidth_update = &r520_bandwidth_update, 524 .bandwidth_update = &rv515_bandwidth_update,
523}; 525};
524 526
525/* 527/*
@@ -596,7 +598,7 @@ static struct radeon_asic r600_asic = {
596 .set_clock_gating = &radeon_atom_set_clock_gating, 598 .set_clock_gating = &radeon_atom_set_clock_gating,
597 .set_surface_reg = r600_set_surface_reg, 599 .set_surface_reg = r600_set_surface_reg,
598 .clear_surface_reg = r600_clear_surface_reg, 600 .clear_surface_reg = r600_clear_surface_reg,
599 .bandwidth_update = &r520_bandwidth_update, 601 .bandwidth_update = &rv515_bandwidth_update,
600}; 602};
601 603
602/* 604/*
@@ -646,7 +648,7 @@ static struct radeon_asic rv770_asic = {
646 .set_clock_gating = &radeon_atom_set_clock_gating, 648 .set_clock_gating = &radeon_atom_set_clock_gating,
647 .set_surface_reg = r600_set_surface_reg, 649 .set_surface_reg = r600_set_surface_reg,
648 .clear_surface_reg = r600_clear_surface_reg, 650 .clear_surface_reg = r600_clear_surface_reg,
649 .bandwidth_update = &r520_bandwidth_update, 651 .bandwidth_update = &rv515_bandwidth_update,
650}; 652};
651 653
652#endif 654#endif
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 743742128307..5b6c08cee40e 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -272,12 +272,9 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
272 (le16_to_cpu(path->usConnObjectId) & 272 (le16_to_cpu(path->usConnObjectId) &
273 OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT; 273 OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT;
274 274
275 if ((le16_to_cpu(path->usDeviceTag) == 275 /* TODO CV support */
276 ATOM_DEVICE_TV1_SUPPORT) 276 if (le16_to_cpu(path->usDeviceTag) ==
277 || (le16_to_cpu(path->usDeviceTag) == 277 ATOM_DEVICE_CV_SUPPORT)
278 ATOM_DEVICE_TV2_SUPPORT)
279 || (le16_to_cpu(path->usDeviceTag) ==
280 ATOM_DEVICE_CV_SUPPORT))
281 continue; 278 continue;
282 279
283 if ((rdev->family == CHIP_RS780) && 280 if ((rdev->family == CHIP_RS780) &&
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index af1d551f1a8f..e376be47a4a0 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -26,6 +26,7 @@
26#include "drmP.h" 26#include "drmP.h"
27#include "drm_edid.h" 27#include "drm_edid.h"
28#include "drm_crtc_helper.h" 28#include "drm_crtc_helper.h"
29#include "drm_fb_helper.h"
29#include "radeon_drm.h" 30#include "radeon_drm.h"
30#include "radeon.h" 31#include "radeon.h"
31#include "atom.h" 32#include "atom.h"
@@ -245,7 +246,7 @@ static void radeon_add_common_modes(struct drm_encoder *encoder, struct drm_conn
245 if (common_modes[i].w < 320 || common_modes[i].h < 200) 246 if (common_modes[i].w < 320 || common_modes[i].h < 200)
246 continue; 247 continue;
247 248
248 mode = drm_cvt_mode(dev, common_modes[i].w, common_modes[i].h, 60, false, false); 249 mode = drm_cvt_mode(dev, common_modes[i].w, common_modes[i].h, 60, false, false, false);
249 drm_mode_probed_add(connector, mode); 250 drm_mode_probed_add(connector, mode);
250 } 251 }
251} 252}
@@ -559,7 +560,7 @@ static int radeon_tv_get_modes(struct drm_connector *connector)
559 radeon_add_common_modes(encoder, connector); 560 radeon_add_common_modes(encoder, connector);
560 else { 561 else {
561 /* only 800x600 is supported right now on pre-avivo chips */ 562 /* only 800x600 is supported right now on pre-avivo chips */
562 tv_mode = drm_cvt_mode(dev, 800, 600, 60, false, false); 563 tv_mode = drm_cvt_mode(dev, 800, 600, 60, false, false, false);
563 tv_mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED; 564 tv_mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
564 drm_mode_probed_add(connector, tv_mode); 565 drm_mode_probed_add(connector, tv_mode);
565 } 566 }
@@ -743,6 +744,15 @@ struct drm_encoder *radeon_dvi_encoder(struct drm_connector *connector)
743 return NULL; 744 return NULL;
744} 745}
745 746
747static void radeon_dvi_force(struct drm_connector *connector)
748{
749 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
750 if (connector->force == DRM_FORCE_ON)
751 radeon_connector->use_digital = false;
752 if (connector->force == DRM_FORCE_ON_DIGITAL)
753 radeon_connector->use_digital = true;
754}
755
746struct drm_connector_helper_funcs radeon_dvi_connector_helper_funcs = { 756struct drm_connector_helper_funcs radeon_dvi_connector_helper_funcs = {
747 .get_modes = radeon_dvi_get_modes, 757 .get_modes = radeon_dvi_get_modes,
748 .mode_valid = radeon_vga_mode_valid, 758 .mode_valid = radeon_vga_mode_valid,
@@ -755,6 +765,7 @@ struct drm_connector_funcs radeon_dvi_connector_funcs = {
755 .fill_modes = drm_helper_probe_single_connector_modes, 765 .fill_modes = drm_helper_probe_single_connector_modes,
756 .set_property = radeon_connector_set_property, 766 .set_property = radeon_connector_set_property,
757 .destroy = radeon_connector_destroy, 767 .destroy = radeon_connector_destroy,
768 .force = radeon_dvi_force,
758}; 769};
759 770
760void 771void
@@ -771,6 +782,7 @@ radeon_add_atom_connector(struct drm_device *dev,
771 struct radeon_connector *radeon_connector; 782 struct radeon_connector *radeon_connector;
772 struct radeon_connector_atom_dig *radeon_dig_connector; 783 struct radeon_connector_atom_dig *radeon_dig_connector;
773 uint32_t subpixel_order = SubPixelNone; 784 uint32_t subpixel_order = SubPixelNone;
785 int ret;
774 786
775 /* fixme - tv/cv/din */ 787 /* fixme - tv/cv/din */
776 if (connector_type == DRM_MODE_CONNECTOR_Unknown) 788 if (connector_type == DRM_MODE_CONNECTOR_Unknown)
@@ -796,24 +808,30 @@ radeon_add_atom_connector(struct drm_device *dev,
796 switch (connector_type) { 808 switch (connector_type) {
797 case DRM_MODE_CONNECTOR_VGA: 809 case DRM_MODE_CONNECTOR_VGA:
798 drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type); 810 drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
799 drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs); 811 ret = drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
812 if (ret)
813 goto failed;
800 if (i2c_bus->valid) { 814 if (i2c_bus->valid) {
801 radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "VGA"); 815 radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "VGA");
802 if (!radeon_connector->ddc_bus) 816 if (!radeon_connector->ddc_bus)
803 goto failed; 817 goto failed;
804 } 818 }
819 radeon_connector->dac_load_detect = true;
805 drm_connector_attach_property(&radeon_connector->base, 820 drm_connector_attach_property(&radeon_connector->base,
806 rdev->mode_info.load_detect_property, 821 rdev->mode_info.load_detect_property,
807 1); 822 1);
808 break; 823 break;
809 case DRM_MODE_CONNECTOR_DVIA: 824 case DRM_MODE_CONNECTOR_DVIA:
810 drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type); 825 drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
811 drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs); 826 ret = drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
827 if (ret)
828 goto failed;
812 if (i2c_bus->valid) { 829 if (i2c_bus->valid) {
813 radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DVI"); 830 radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DVI");
814 if (!radeon_connector->ddc_bus) 831 if (!radeon_connector->ddc_bus)
815 goto failed; 832 goto failed;
816 } 833 }
834 radeon_connector->dac_load_detect = true;
817 drm_connector_attach_property(&radeon_connector->base, 835 drm_connector_attach_property(&radeon_connector->base,
818 rdev->mode_info.load_detect_property, 836 rdev->mode_info.load_detect_property,
819 1); 837 1);
@@ -827,7 +845,9 @@ radeon_add_atom_connector(struct drm_device *dev,
827 radeon_dig_connector->igp_lane_info = igp_lane_info; 845 radeon_dig_connector->igp_lane_info = igp_lane_info;
828 radeon_connector->con_priv = radeon_dig_connector; 846 radeon_connector->con_priv = radeon_dig_connector;
829 drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type); 847 drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type);
830 drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs); 848 ret = drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
849 if (ret)
850 goto failed;
831 if (i2c_bus->valid) { 851 if (i2c_bus->valid) {
832 radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DVI"); 852 radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DVI");
833 if (!radeon_connector->ddc_bus) 853 if (!radeon_connector->ddc_bus)
@@ -837,6 +857,7 @@ radeon_add_atom_connector(struct drm_device *dev,
837 drm_connector_attach_property(&radeon_connector->base, 857 drm_connector_attach_property(&radeon_connector->base,
838 rdev->mode_info.coherent_mode_property, 858 rdev->mode_info.coherent_mode_property,
839 1); 859 1);
860 radeon_connector->dac_load_detect = true;
840 drm_connector_attach_property(&radeon_connector->base, 861 drm_connector_attach_property(&radeon_connector->base,
841 rdev->mode_info.load_detect_property, 862 rdev->mode_info.load_detect_property,
842 1); 863 1);
@@ -850,7 +871,9 @@ radeon_add_atom_connector(struct drm_device *dev,
850 radeon_dig_connector->igp_lane_info = igp_lane_info; 871 radeon_dig_connector->igp_lane_info = igp_lane_info;
851 radeon_connector->con_priv = radeon_dig_connector; 872 radeon_connector->con_priv = radeon_dig_connector;
852 drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type); 873 drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type);
853 drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs); 874 ret = drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
875 if (ret)
876 goto failed;
854 if (i2c_bus->valid) { 877 if (i2c_bus->valid) {
855 radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "HDMI"); 878 radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "HDMI");
856 if (!radeon_connector->ddc_bus) 879 if (!radeon_connector->ddc_bus)
@@ -869,7 +892,9 @@ radeon_add_atom_connector(struct drm_device *dev,
869 radeon_dig_connector->igp_lane_info = igp_lane_info; 892 radeon_dig_connector->igp_lane_info = igp_lane_info;
870 radeon_connector->con_priv = radeon_dig_connector; 893 radeon_connector->con_priv = radeon_dig_connector;
871 drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type); 894 drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type);
872 drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs); 895 ret = drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
896 if (ret)
897 goto failed;
873 if (i2c_bus->valid) { 898 if (i2c_bus->valid) {
874 radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DP"); 899 radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DP");
875 if (!radeon_connector->ddc_bus) 900 if (!radeon_connector->ddc_bus)
@@ -882,11 +907,14 @@ radeon_add_atom_connector(struct drm_device *dev,
882 case DRM_MODE_CONNECTOR_9PinDIN: 907 case DRM_MODE_CONNECTOR_9PinDIN:
883 if (radeon_tv == 1) { 908 if (radeon_tv == 1) {
884 drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type); 909 drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type);
885 drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs); 910 ret = drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs);
911 if (ret)
912 goto failed;
913 radeon_connector->dac_load_detect = true;
914 drm_connector_attach_property(&radeon_connector->base,
915 rdev->mode_info.load_detect_property,
916 1);
886 } 917 }
887 drm_connector_attach_property(&radeon_connector->base,
888 rdev->mode_info.load_detect_property,
889 1);
890 break; 918 break;
891 case DRM_MODE_CONNECTOR_LVDS: 919 case DRM_MODE_CONNECTOR_LVDS:
892 radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL); 920 radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL);
@@ -896,7 +924,9 @@ radeon_add_atom_connector(struct drm_device *dev,
896 radeon_dig_connector->igp_lane_info = igp_lane_info; 924 radeon_dig_connector->igp_lane_info = igp_lane_info;
897 radeon_connector->con_priv = radeon_dig_connector; 925 radeon_connector->con_priv = radeon_dig_connector;
898 drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type); 926 drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type);
899 drm_connector_helper_add(&radeon_connector->base, &radeon_lvds_connector_helper_funcs); 927 ret = drm_connector_helper_add(&radeon_connector->base, &radeon_lvds_connector_helper_funcs);
928 if (ret)
929 goto failed;
900 if (i2c_bus->valid) { 930 if (i2c_bus->valid) {
901 radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "LVDS"); 931 radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "LVDS");
902 if (!radeon_connector->ddc_bus) 932 if (!radeon_connector->ddc_bus)
@@ -932,6 +962,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
932 struct drm_connector *connector; 962 struct drm_connector *connector;
933 struct radeon_connector *radeon_connector; 963 struct radeon_connector *radeon_connector;
934 uint32_t subpixel_order = SubPixelNone; 964 uint32_t subpixel_order = SubPixelNone;
965 int ret;
935 966
936 /* fixme - tv/cv/din */ 967 /* fixme - tv/cv/din */
937 if (connector_type == DRM_MODE_CONNECTOR_Unknown) 968 if (connector_type == DRM_MODE_CONNECTOR_Unknown)
@@ -957,24 +988,30 @@ radeon_add_legacy_connector(struct drm_device *dev,
957 switch (connector_type) { 988 switch (connector_type) {
958 case DRM_MODE_CONNECTOR_VGA: 989 case DRM_MODE_CONNECTOR_VGA:
959 drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type); 990 drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
960 drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs); 991 ret = drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
992 if (ret)
993 goto failed;
961 if (i2c_bus->valid) { 994 if (i2c_bus->valid) {
962 radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "VGA"); 995 radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "VGA");
963 if (!radeon_connector->ddc_bus) 996 if (!radeon_connector->ddc_bus)
964 goto failed; 997 goto failed;
965 } 998 }
999 radeon_connector->dac_load_detect = true;
966 drm_connector_attach_property(&radeon_connector->base, 1000 drm_connector_attach_property(&radeon_connector->base,
967 rdev->mode_info.load_detect_property, 1001 rdev->mode_info.load_detect_property,
968 1); 1002 1);
969 break; 1003 break;
970 case DRM_MODE_CONNECTOR_DVIA: 1004 case DRM_MODE_CONNECTOR_DVIA:
971 drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type); 1005 drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
972 drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs); 1006 ret = drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
1007 if (ret)
1008 goto failed;
973 if (i2c_bus->valid) { 1009 if (i2c_bus->valid) {
974 radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DVI"); 1010 radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DVI");
975 if (!radeon_connector->ddc_bus) 1011 if (!radeon_connector->ddc_bus)
976 goto failed; 1012 goto failed;
977 } 1013 }
1014 radeon_connector->dac_load_detect = true;
978 drm_connector_attach_property(&radeon_connector->base, 1015 drm_connector_attach_property(&radeon_connector->base,
979 rdev->mode_info.load_detect_property, 1016 rdev->mode_info.load_detect_property,
980 1); 1017 1);
@@ -982,11 +1019,14 @@ radeon_add_legacy_connector(struct drm_device *dev,
982 case DRM_MODE_CONNECTOR_DVII: 1019 case DRM_MODE_CONNECTOR_DVII:
983 case DRM_MODE_CONNECTOR_DVID: 1020 case DRM_MODE_CONNECTOR_DVID:
984 drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type); 1021 drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type);
985 drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs); 1022 ret = drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
1023 if (ret)
1024 goto failed;
986 if (i2c_bus->valid) { 1025 if (i2c_bus->valid) {
987 radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DVI"); 1026 radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DVI");
988 if (!radeon_connector->ddc_bus) 1027 if (!radeon_connector->ddc_bus)
989 goto failed; 1028 goto failed;
1029 radeon_connector->dac_load_detect = true;
990 drm_connector_attach_property(&radeon_connector->base, 1030 drm_connector_attach_property(&radeon_connector->base,
991 rdev->mode_info.load_detect_property, 1031 rdev->mode_info.load_detect_property,
992 1); 1032 1);
@@ -998,7 +1038,10 @@ radeon_add_legacy_connector(struct drm_device *dev,
998 case DRM_MODE_CONNECTOR_9PinDIN: 1038 case DRM_MODE_CONNECTOR_9PinDIN:
999 if (radeon_tv == 1) { 1039 if (radeon_tv == 1) {
1000 drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type); 1040 drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type);
1001 drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs); 1041 ret = drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs);
1042 if (ret)
1043 goto failed;
1044 radeon_connector->dac_load_detect = true;
1002 drm_connector_attach_property(&radeon_connector->base, 1045 drm_connector_attach_property(&radeon_connector->base,
1003 rdev->mode_info.load_detect_property, 1046 rdev->mode_info.load_detect_property,
1004 1); 1047 1);
@@ -1006,7 +1049,9 @@ radeon_add_legacy_connector(struct drm_device *dev,
1006 break; 1049 break;
1007 case DRM_MODE_CONNECTOR_LVDS: 1050 case DRM_MODE_CONNECTOR_LVDS:
1008 drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type); 1051 drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type);
1009 drm_connector_helper_add(&radeon_connector->base, &radeon_lvds_connector_helper_funcs); 1052 ret = drm_connector_helper_add(&radeon_connector->base, &radeon_lvds_connector_helper_funcs);
1053 if (ret)
1054 goto failed;
1010 if (i2c_bus->valid) { 1055 if (i2c_bus->valid) {
1011 radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "LVDS"); 1056 radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "LVDS");
1012 if (!radeon_connector->ddc_bus) 1057 if (!radeon_connector->ddc_bus)
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 12f5990c2d2a..5ab2cf96a264 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -142,15 +142,31 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
142 } 142 }
143 143
144 p->chunks[i].length_dw = user_chunk.length_dw; 144 p->chunks[i].length_dw = user_chunk.length_dw;
145 cdata = (uint32_t *)(unsigned long)user_chunk.chunk_data; 145 p->chunks[i].user_ptr = (void __user *)(unsigned long)user_chunk.chunk_data;
146 146
147 size = p->chunks[i].length_dw * sizeof(uint32_t); 147 cdata = (uint32_t *)(unsigned long)user_chunk.chunk_data;
148 p->chunks[i].kdata = kmalloc(size, GFP_KERNEL); 148 if (p->chunks[i].chunk_id != RADEON_CHUNK_ID_IB) {
149 if (p->chunks[i].kdata == NULL) { 149 size = p->chunks[i].length_dw * sizeof(uint32_t);
150 return -ENOMEM; 150 p->chunks[i].kdata = kmalloc(size, GFP_KERNEL);
151 } 151 if (p->chunks[i].kdata == NULL) {
152 if (DRM_COPY_FROM_USER(p->chunks[i].kdata, cdata, size)) { 152 return -ENOMEM;
153 return -EFAULT; 153 }
154 if (DRM_COPY_FROM_USER(p->chunks[i].kdata,
155 p->chunks[i].user_ptr, size)) {
156 return -EFAULT;
157 }
158 } else {
159 p->chunks[i].kpage[0] = kmalloc(PAGE_SIZE, GFP_KERNEL);
160 p->chunks[i].kpage[1] = kmalloc(PAGE_SIZE, GFP_KERNEL);
161 if (p->chunks[i].kpage[0] == NULL || p->chunks[i].kpage[1] == NULL) {
162 kfree(p->chunks[i].kpage[0]);
163 kfree(p->chunks[i].kpage[1]);
164 return -ENOMEM;
165 }
166 p->chunks[i].kpage_idx[0] = -1;
167 p->chunks[i].kpage_idx[1] = -1;
168 p->chunks[i].last_copied_page = -1;
169 p->chunks[i].last_page_index = ((p->chunks[i].length_dw * 4) - 1) / PAGE_SIZE;
154 } 170 }
155 } 171 }
156 if (p->chunks[p->chunk_ib_idx].length_dw > (16 * 1024)) { 172 if (p->chunks[p->chunk_ib_idx].length_dw > (16 * 1024)) {
@@ -190,6 +206,8 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error)
190 kfree(parser->relocs_ptr); 206 kfree(parser->relocs_ptr);
191 for (i = 0; i < parser->nchunks; i++) { 207 for (i = 0; i < parser->nchunks; i++) {
192 kfree(parser->chunks[i].kdata); 208 kfree(parser->chunks[i].kdata);
209 kfree(parser->chunks[i].kpage[0]);
210 kfree(parser->chunks[i].kpage[1]);
193 } 211 }
194 kfree(parser->chunks); 212 kfree(parser->chunks);
195 kfree(parser->chunks_array); 213 kfree(parser->chunks_array);
@@ -238,8 +256,14 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
238 * uncached). */ 256 * uncached). */
239 ib_chunk = &parser.chunks[parser.chunk_ib_idx]; 257 ib_chunk = &parser.chunks[parser.chunk_ib_idx];
240 parser.ib->length_dw = ib_chunk->length_dw; 258 parser.ib->length_dw = ib_chunk->length_dw;
241 memcpy((void *)parser.ib->ptr, ib_chunk->kdata, ib_chunk->length_dw*4);
242 r = radeon_cs_parse(&parser); 259 r = radeon_cs_parse(&parser);
260 if (r || parser.parser_error) {
261 DRM_ERROR("Invalid command stream !\n");
262 radeon_cs_parser_fini(&parser, r);
263 mutex_unlock(&rdev->cs_mutex);
264 return r;
265 }
266 r = radeon_cs_finish_pages(&parser);
243 if (r) { 267 if (r) {
244 DRM_ERROR("Invalid command stream !\n"); 268 DRM_ERROR("Invalid command stream !\n");
245 radeon_cs_parser_fini(&parser, r); 269 radeon_cs_parser_fini(&parser, r);
@@ -254,3 +278,64 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
254 mutex_unlock(&rdev->cs_mutex); 278 mutex_unlock(&rdev->cs_mutex);
255 return r; 279 return r;
256} 280}
281
282int radeon_cs_finish_pages(struct radeon_cs_parser *p)
283{
284 struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
285 int i;
286 int size = PAGE_SIZE;
287
288 for (i = ibc->last_copied_page + 1; i <= ibc->last_page_index; i++) {
289 if (i == ibc->last_page_index) {
290 size = (ibc->length_dw * 4) % PAGE_SIZE;
291 if (size == 0)
292 size = PAGE_SIZE;
293 }
294
295 if (DRM_COPY_FROM_USER(p->ib->ptr + (i * (PAGE_SIZE/4)),
296 ibc->user_ptr + (i * PAGE_SIZE),
297 size))
298 return -EFAULT;
299 }
300 return 0;
301}
302
303int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx)
304{
305 int new_page;
306 struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
307 int i;
308 int size = PAGE_SIZE;
309
310 for (i = ibc->last_copied_page + 1; i < pg_idx; i++) {
311 if (DRM_COPY_FROM_USER(p->ib->ptr + (i * (PAGE_SIZE/4)),
312 ibc->user_ptr + (i * PAGE_SIZE),
313 PAGE_SIZE)) {
314 p->parser_error = -EFAULT;
315 return 0;
316 }
317 }
318
319 new_page = ibc->kpage_idx[0] < ibc->kpage_idx[1] ? 0 : 1;
320
321 if (pg_idx == ibc->last_page_index) {
322 size = (ibc->length_dw * 4) % PAGE_SIZE;
323 if (size == 0)
324 size = PAGE_SIZE;
325 }
326
327 if (DRM_COPY_FROM_USER(ibc->kpage[new_page],
328 ibc->user_ptr + (pg_idx * PAGE_SIZE),
329 size)) {
330 p->parser_error = -EFAULT;
331 return 0;
332 }
333
334 /* copy to IB here */
335 memcpy((void *)(p->ib->ptr+(pg_idx*(PAGE_SIZE/4))), ibc->kpage[new_page], size);
336
337 ibc->last_copied_page = pg_idx;
338 ibc->kpage_idx[new_page] = pg_idx;
339
340 return new_page;
341}
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index daf5db780956..ec835d56d30a 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -532,10 +532,13 @@ int radeon_device_init(struct radeon_device *rdev,
532 532
533 if (radeon_agpmode == -1) { 533 if (radeon_agpmode == -1) {
534 rdev->flags &= ~RADEON_IS_AGP; 534 rdev->flags &= ~RADEON_IS_AGP;
535 if (rdev->family >= CHIP_RV515 || 535 if (rdev->family >= CHIP_R600) {
536 rdev->family == CHIP_RV380 || 536 DRM_INFO("Forcing AGP to PCIE mode\n");
537 rdev->family == CHIP_RV410 || 537 rdev->flags |= RADEON_IS_PCIE;
538 rdev->family == CHIP_R423) { 538 } else if (rdev->family >= CHIP_RV515 ||
539 rdev->family == CHIP_RV380 ||
540 rdev->family == CHIP_RV410 ||
541 rdev->family == CHIP_R423) {
539 DRM_INFO("Forcing AGP to PCIE mode\n"); 542 DRM_INFO("Forcing AGP to PCIE mode\n");
540 rdev->flags |= RADEON_IS_PCIE; 543 rdev->flags |= RADEON_IS_PCIE;
541 rdev->asic->gart_init = &rv370_pcie_gart_init; 544 rdev->asic->gart_init = &rv370_pcie_gart_init;
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 50fce498910c..7f50fb864af8 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -62,9 +62,6 @@ void radeon_driver_irq_preinstall_kms(struct drm_device *dev);
62int radeon_driver_irq_postinstall_kms(struct drm_device *dev); 62int radeon_driver_irq_postinstall_kms(struct drm_device *dev);
63void radeon_driver_irq_uninstall_kms(struct drm_device *dev); 63void radeon_driver_irq_uninstall_kms(struct drm_device *dev);
64irqreturn_t radeon_driver_irq_handler_kms(DRM_IRQ_ARGS); 64irqreturn_t radeon_driver_irq_handler_kms(DRM_IRQ_ARGS);
65int radeon_master_create_kms(struct drm_device *dev, struct drm_master *master);
66void radeon_master_destroy_kms(struct drm_device *dev,
67 struct drm_master *master);
68int radeon_dma_ioctl_kms(struct drm_device *dev, void *data, 65int radeon_dma_ioctl_kms(struct drm_device *dev, void *data,
69 struct drm_file *file_priv); 66 struct drm_file *file_priv);
70int radeon_gem_object_init(struct drm_gem_object *obj); 67int radeon_gem_object_init(struct drm_gem_object *obj);
@@ -260,8 +257,6 @@ static struct drm_driver kms_driver = {
260 .get_vblank_counter = radeon_get_vblank_counter_kms, 257 .get_vblank_counter = radeon_get_vblank_counter_kms,
261 .enable_vblank = radeon_enable_vblank_kms, 258 .enable_vblank = radeon_enable_vblank_kms,
262 .disable_vblank = radeon_disable_vblank_kms, 259 .disable_vblank = radeon_disable_vblank_kms,
263 .master_create = radeon_master_create_kms,
264 .master_destroy = radeon_master_destroy_kms,
265#if defined(CONFIG_DEBUG_FS) 260#if defined(CONFIG_DEBUG_FS)
266 .debugfs_init = radeon_debugfs_init, 261 .debugfs_init = radeon_debugfs_init,
267 .debugfs_cleanup = radeon_debugfs_cleanup, 262 .debugfs_cleanup = radeon_debugfs_cleanup,
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index 944e4fa78db5..1ba704eedefb 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -128,6 +128,7 @@ static struct drm_fb_helper_funcs radeon_fb_helper_funcs = {
128int radeonfb_create(struct drm_device *dev, 128int radeonfb_create(struct drm_device *dev,
129 uint32_t fb_width, uint32_t fb_height, 129 uint32_t fb_width, uint32_t fb_height,
130 uint32_t surface_width, uint32_t surface_height, 130 uint32_t surface_width, uint32_t surface_height,
131 uint32_t surface_depth, uint32_t surface_bpp,
131 struct drm_framebuffer **fb_p) 132 struct drm_framebuffer **fb_p)
132{ 133{
133 struct radeon_device *rdev = dev->dev_private; 134 struct radeon_device *rdev = dev->dev_private;
@@ -148,10 +149,10 @@ int radeonfb_create(struct drm_device *dev,
148 149
149 mode_cmd.width = surface_width; 150 mode_cmd.width = surface_width;
150 mode_cmd.height = surface_height; 151 mode_cmd.height = surface_height;
151 mode_cmd.bpp = 32; 152 mode_cmd.bpp = surface_bpp;
152 /* need to align pitch with crtc limits */ 153 /* need to align pitch with crtc limits */
153 mode_cmd.pitch = radeon_align_pitch(rdev, mode_cmd.width, mode_cmd.bpp, fb_tiled) * ((mode_cmd.bpp + 1) / 8); 154 mode_cmd.pitch = radeon_align_pitch(rdev, mode_cmd.width, mode_cmd.bpp, fb_tiled) * ((mode_cmd.bpp + 1) / 8);
154 mode_cmd.depth = 24; 155 mode_cmd.depth = surface_depth;
155 156
156 size = mode_cmd.pitch * mode_cmd.height; 157 size = mode_cmd.pitch * mode_cmd.height;
157 aligned_size = ALIGN(size, PAGE_SIZE); 158 aligned_size = ALIGN(size, PAGE_SIZE);
@@ -290,13 +291,26 @@ out:
290 return ret; 291 return ret;
291} 292}
292 293
294static char *mode_option;
295int radeon_parse_options(char *options)
296{
297 char *this_opt;
298
299 if (!options || !*options)
300 return 0;
301
302 while ((this_opt = strsep(&options, ",")) != NULL) {
303 if (!*this_opt)
304 continue;
305 mode_option = this_opt;
306 }
307 return 0;
308}
309
293int radeonfb_probe(struct drm_device *dev) 310int radeonfb_probe(struct drm_device *dev)
294{ 311{
295 int ret; 312 return drm_fb_helper_single_fb_probe(dev, &radeonfb_create);
296 ret = drm_fb_helper_single_fb_probe(dev, &radeonfb_create);
297 return ret;
298} 313}
299EXPORT_SYMBOL(radeonfb_probe);
300 314
301int radeonfb_remove(struct drm_device *dev, struct drm_framebuffer *fb) 315int radeonfb_remove(struct drm_device *dev, struct drm_framebuffer *fb)
302{ 316{
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 709bd892b3a9..ba128621057a 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -201,55 +201,6 @@ void radeon_disable_vblank_kms(struct drm_device *dev, int crtc)
201 201
202 202
203/* 203/*
204 * For multiple master (like multiple X).
205 */
206struct drm_radeon_master_private {
207 drm_local_map_t *sarea;
208 drm_radeon_sarea_t *sarea_priv;
209};
210
211int radeon_master_create_kms(struct drm_device *dev, struct drm_master *master)
212{
213 struct drm_radeon_master_private *master_priv;
214 unsigned long sareapage;
215 int ret;
216
217 master_priv = kzalloc(sizeof(*master_priv), GFP_KERNEL);
218 if (master_priv == NULL) {
219 return -ENOMEM;
220 }
221 /* prebuild the SAREA */
222 sareapage = max_t(unsigned long, SAREA_MAX, PAGE_SIZE);
223 ret = drm_addmap(dev, 0, sareapage, _DRM_SHM,
224 _DRM_CONTAINS_LOCK,
225 &master_priv->sarea);
226 if (ret) {
227 DRM_ERROR("SAREA setup failed\n");
228 return ret;
229 }
230 master_priv->sarea_priv = master_priv->sarea->handle + sizeof(struct drm_sarea);
231 master_priv->sarea_priv->pfCurrentPage = 0;
232 master->driver_priv = master_priv;
233 return 0;
234}
235
236void radeon_master_destroy_kms(struct drm_device *dev,
237 struct drm_master *master)
238{
239 struct drm_radeon_master_private *master_priv = master->driver_priv;
240
241 if (master_priv == NULL) {
242 return;
243 }
244 if (master_priv->sarea) {
245 drm_rmmap_locked(dev, master_priv->sarea);
246 }
247 kfree(master_priv);
248 master->driver_priv = NULL;
249}
250
251
252/*
253 * IOCTL. 204 * IOCTL.
254 */ 205 */
255int radeon_dma_ioctl_kms(struct drm_device *dev, void *data, 206int radeon_dma_ioctl_kms(struct drm_device *dev, void *data,
diff --git a/drivers/gpu/drm/radeon/radeon_reg.h b/drivers/gpu/drm/radeon/radeon_reg.h
index 21da871a793c..bfa1ab9c93e1 100644
--- a/drivers/gpu/drm/radeon/radeon_reg.h
+++ b/drivers/gpu/drm/radeon/radeon_reg.h
@@ -3333,6 +3333,7 @@
3333# define RADEON_CP_PACKET_MAX_DWORDS (1 << 12) 3333# define RADEON_CP_PACKET_MAX_DWORDS (1 << 12)
3334# define RADEON_CP_PACKET0_REG_MASK 0x000007ff 3334# define RADEON_CP_PACKET0_REG_MASK 0x000007ff
3335# define R300_CP_PACKET0_REG_MASK 0x00001fff 3335# define R300_CP_PACKET0_REG_MASK 0x00001fff
3336# define R600_CP_PACKET0_REG_MASK 0x0000ffff
3336# define RADEON_CP_PACKET1_REG0_MASK 0x000007ff 3337# define RADEON_CP_PACKET1_REG0_MASK 0x000007ff
3337# define RADEON_CP_PACKET1_REG1_MASK 0x003ff800 3338# define RADEON_CP_PACKET1_REG1_MASK 0x003ff800
3338 3339
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index acd889c94549..765bd184b6fc 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -530,7 +530,7 @@ void radeon_ttm_fini(struct radeon_device *rdev)
530} 530}
531 531
532static struct vm_operations_struct radeon_ttm_vm_ops; 532static struct vm_operations_struct radeon_ttm_vm_ops;
533static struct vm_operations_struct *ttm_vm_ops = NULL; 533static const struct vm_operations_struct *ttm_vm_ops = NULL;
534 534
535static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) 535static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
536{ 536{
@@ -689,9 +689,6 @@ struct ttm_backend *radeon_ttm_backend_create(struct radeon_device *rdev)
689 689
690#define RADEON_DEBUGFS_MEM_TYPES 2 690#define RADEON_DEBUGFS_MEM_TYPES 2
691 691
692static struct drm_info_list radeon_mem_types_list[RADEON_DEBUGFS_MEM_TYPES];
693static char radeon_mem_types_names[RADEON_DEBUGFS_MEM_TYPES][32];
694
695#if defined(CONFIG_DEBUG_FS) 692#if defined(CONFIG_DEBUG_FS)
696static int radeon_mm_dump_table(struct seq_file *m, void *data) 693static int radeon_mm_dump_table(struct seq_file *m, void *data)
697{ 694{
@@ -711,9 +708,11 @@ static int radeon_mm_dump_table(struct seq_file *m, void *data)
711 708
712static int radeon_ttm_debugfs_init(struct radeon_device *rdev) 709static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
713{ 710{
711#if defined(CONFIG_DEBUG_FS)
712 static struct drm_info_list radeon_mem_types_list[RADEON_DEBUGFS_MEM_TYPES];
713 static char radeon_mem_types_names[RADEON_DEBUGFS_MEM_TYPES][32];
714 unsigned i; 714 unsigned i;
715 715
716#if defined(CONFIG_DEBUG_FS)
717 for (i = 0; i < RADEON_DEBUGFS_MEM_TYPES; i++) { 716 for (i = 0; i < RADEON_DEBUGFS_MEM_TYPES; i++) {
718 if (i == 0) 717 if (i == 0)
719 sprintf(radeon_mem_types_names[i], "radeon_vram_mm"); 718 sprintf(radeon_mem_types_names[i], "radeon_vram_mm");
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 0e791e26def3..4a4fe1cb131c 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -28,7 +28,6 @@
28#include "drmP.h" 28#include "drmP.h"
29#include "radeon_reg.h" 29#include "radeon_reg.h"
30#include "radeon.h" 30#include "radeon.h"
31#include "avivod.h"
32 31
33#include "rs600_reg_safe.h" 32#include "rs600_reg_safe.h"
34 33
@@ -45,7 +44,6 @@ void r420_pipes_init(struct radeon_device *rdev);
45 */ 44 */
46void rs600_gpu_init(struct radeon_device *rdev); 45void rs600_gpu_init(struct radeon_device *rdev);
47int rs600_mc_wait_for_idle(struct radeon_device *rdev); 46int rs600_mc_wait_for_idle(struct radeon_device *rdev);
48void rs600_disable_vga(struct radeon_device *rdev);
49 47
50 48
51/* 49/*
@@ -198,7 +196,7 @@ void rs600_mc_disable_clients(struct radeon_device *rdev)
198 "programming pipes. Bad things might happen.\n"); 196 "programming pipes. Bad things might happen.\n");
199 } 197 }
200 198
201 radeon_avivo_vga_render_disable(rdev); 199 rv515_vga_render_disable(rdev);
202 200
203 tmp = RREG32(AVIVO_D1VGA_CONTROL); 201 tmp = RREG32(AVIVO_D1VGA_CONTROL);
204 WREG32(AVIVO_D1VGA_CONTROL, tmp & ~AVIVO_DVGA_CONTROL_MODE_ENABLE); 202 WREG32(AVIVO_D1VGA_CONTROL, tmp & ~AVIVO_DVGA_CONTROL_MODE_ENABLE);
@@ -346,20 +344,6 @@ u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc)
346/* 344/*
347 * Global GPU functions 345 * Global GPU functions
348 */ 346 */
349void rs600_disable_vga(struct radeon_device *rdev)
350{
351 unsigned tmp;
352
353 WREG32(0x330, 0);
354 WREG32(0x338, 0);
355 tmp = RREG32(0x300);
356 tmp &= ~(3 << 16);
357 WREG32(0x300, tmp);
358 WREG32(0x308, (1 << 8));
359 WREG32(0x310, rdev->mc.vram_location);
360 WREG32(0x594, 0);
361}
362
363int rs600_mc_wait_for_idle(struct radeon_device *rdev) 347int rs600_mc_wait_for_idle(struct radeon_device *rdev)
364{ 348{
365 unsigned i; 349 unsigned i;
@@ -385,7 +369,7 @@ void rs600_gpu_init(struct radeon_device *rdev)
385{ 369{
386 /* FIXME: HDP same place on rs600 ? */ 370 /* FIXME: HDP same place on rs600 ? */
387 r100_hdp_reset(rdev); 371 r100_hdp_reset(rdev);
388 rs600_disable_vga(rdev); 372 rv515_vga_render_disable(rdev);
389 /* FIXME: is this correct ? */ 373 /* FIXME: is this correct ? */
390 r420_pipes_init(rdev); 374 r420_pipes_init(rdev);
391 if (rs600_mc_wait_for_idle(rdev)) { 375 if (rs600_mc_wait_for_idle(rdev)) {
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index 0f585ca8276d..7a0098ddf977 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -40,7 +40,6 @@ void rs400_gart_disable(struct radeon_device *rdev);
40int rs400_gart_enable(struct radeon_device *rdev); 40int rs400_gart_enable(struct radeon_device *rdev);
41void rs400_gart_adjust_size(struct radeon_device *rdev); 41void rs400_gart_adjust_size(struct radeon_device *rdev);
42void rs600_mc_disable_clients(struct radeon_device *rdev); 42void rs600_mc_disable_clients(struct radeon_device *rdev);
43void rs600_disable_vga(struct radeon_device *rdev);
44 43
45/* This files gather functions specifics to : 44/* This files gather functions specifics to :
46 * rs690,rs740 45 * rs690,rs740
@@ -125,7 +124,7 @@ void rs690_gpu_init(struct radeon_device *rdev)
125{ 124{
126 /* FIXME: HDP same place on rs690 ? */ 125 /* FIXME: HDP same place on rs690 ? */
127 r100_hdp_reset(rdev); 126 r100_hdp_reset(rdev);
128 rs600_disable_vga(rdev); 127 rv515_vga_render_disable(rdev);
129 /* FIXME: is this correct ? */ 128 /* FIXME: is this correct ? */
130 r420_pipes_init(rdev); 129 r420_pipes_init(rdev);
131 if (rs690_mc_wait_for_idle(rdev)) { 130 if (rs690_mc_wait_for_idle(rdev)) {
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index fd799748e7d8..e53b5ca7a253 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -29,37 +29,17 @@
29#include "drmP.h" 29#include "drmP.h"
30#include "rv515d.h" 30#include "rv515d.h"
31#include "radeon.h" 31#include "radeon.h"
32 32#include "atom.h"
33#include "rv515_reg_safe.h" 33#include "rv515_reg_safe.h"
34/* rv515 depends on : */ 34
35void r100_hdp_reset(struct radeon_device *rdev); 35/* This files gather functions specifics to: rv515 */
36int r100_cp_reset(struct radeon_device *rdev);
37int r100_rb2d_reset(struct radeon_device *rdev);
38int r100_gui_wait_for_idle(struct radeon_device *rdev);
39int r100_cp_init(struct radeon_device *rdev, unsigned ring_size);
40void r420_pipes_init(struct radeon_device *rdev);
41void rs600_mc_disable_clients(struct radeon_device *rdev);
42void rs600_disable_vga(struct radeon_device *rdev);
43
44/* This files gather functions specifics to:
45 * rv515
46 *
47 * Some of these functions might be used by newer ASICs.
48 */
49int rv515_debugfs_pipes_info_init(struct radeon_device *rdev); 36int rv515_debugfs_pipes_info_init(struct radeon_device *rdev);
50int rv515_debugfs_ga_info_init(struct radeon_device *rdev); 37int rv515_debugfs_ga_info_init(struct radeon_device *rdev);
51void rv515_gpu_init(struct radeon_device *rdev); 38void rv515_gpu_init(struct radeon_device *rdev);
52int rv515_mc_wait_for_idle(struct radeon_device *rdev); 39int rv515_mc_wait_for_idle(struct radeon_device *rdev);
53 40
54 41void rv515_debugfs(struct radeon_device *rdev)
55/*
56 * MC
57 */
58int rv515_mc_init(struct radeon_device *rdev)
59{ 42{
60 uint32_t tmp;
61 int r;
62
63 if (r100_debugfs_rbbm_init(rdev)) { 43 if (r100_debugfs_rbbm_init(rdev)) {
64 DRM_ERROR("Failed to register debugfs file for RBBM !\n"); 44 DRM_ERROR("Failed to register debugfs file for RBBM !\n");
65 } 45 }
@@ -69,67 +49,8 @@ int rv515_mc_init(struct radeon_device *rdev)
69 if (rv515_debugfs_ga_info_init(rdev)) { 49 if (rv515_debugfs_ga_info_init(rdev)) {
70 DRM_ERROR("Failed to register debugfs file for pipes !\n"); 50 DRM_ERROR("Failed to register debugfs file for pipes !\n");
71 } 51 }
72
73 rv515_gpu_init(rdev);
74 rv370_pcie_gart_disable(rdev);
75
76 /* Setup GPU memory space */
77 rdev->mc.vram_location = 0xFFFFFFFFUL;
78 rdev->mc.gtt_location = 0xFFFFFFFFUL;
79 if (rdev->flags & RADEON_IS_AGP) {
80 r = radeon_agp_init(rdev);
81 if (r) {
82 printk(KERN_WARNING "[drm] Disabling AGP\n");
83 rdev->flags &= ~RADEON_IS_AGP;
84 rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
85 } else {
86 rdev->mc.gtt_location = rdev->mc.agp_base;
87 }
88 }
89 r = radeon_mc_setup(rdev);
90 if (r) {
91 return r;
92 }
93
94 /* Program GPU memory space */
95 rs600_mc_disable_clients(rdev);
96 if (rv515_mc_wait_for_idle(rdev)) {
97 printk(KERN_WARNING "Failed to wait MC idle while "
98 "programming pipes. Bad things might happen.\n");
99 }
100 /* Write VRAM size in case we are limiting it */
101 WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size);
102 tmp = REG_SET(MC_FB_START, rdev->mc.vram_location >> 16);
103 WREG32(0x134, tmp);
104 tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1;
105 tmp = REG_SET(MC_FB_TOP, tmp >> 16);
106 tmp |= REG_SET(MC_FB_START, rdev->mc.vram_location >> 16);
107 WREG32_MC(MC_FB_LOCATION, tmp);
108 WREG32(HDP_FB_LOCATION, rdev->mc.vram_location >> 16);
109 WREG32(0x310, rdev->mc.vram_location);
110 if (rdev->flags & RADEON_IS_AGP) {
111 tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
112 tmp = REG_SET(MC_AGP_TOP, tmp >> 16);
113 tmp |= REG_SET(MC_AGP_START, rdev->mc.gtt_location >> 16);
114 WREG32_MC(MC_AGP_LOCATION, tmp);
115 WREG32_MC(MC_AGP_BASE, rdev->mc.agp_base);
116 WREG32_MC(MC_AGP_BASE_2, 0);
117 } else {
118 WREG32_MC(MC_AGP_LOCATION, 0x0FFFFFFF);
119 WREG32_MC(MC_AGP_BASE, 0);
120 WREG32_MC(MC_AGP_BASE_2, 0);
121 }
122 return 0;
123}
124
125void rv515_mc_fini(struct radeon_device *rdev)
126{
127} 52}
128 53
129
130/*
131 * Global GPU functions
132 */
133void rv515_ring_start(struct radeon_device *rdev) 54void rv515_ring_start(struct radeon_device *rdev)
134{ 55{
135 int r; 56 int r;
@@ -198,11 +119,6 @@ void rv515_ring_start(struct radeon_device *rdev)
198 radeon_ring_unlock_commit(rdev); 119 radeon_ring_unlock_commit(rdev);
199} 120}
200 121
201void rv515_errata(struct radeon_device *rdev)
202{
203 rdev->pll_errata = 0;
204}
205
206int rv515_mc_wait_for_idle(struct radeon_device *rdev) 122int rv515_mc_wait_for_idle(struct radeon_device *rdev)
207{ 123{
208 unsigned i; 124 unsigned i;
@@ -219,6 +135,12 @@ int rv515_mc_wait_for_idle(struct radeon_device *rdev)
219 return -1; 135 return -1;
220} 136}
221 137
138void rv515_vga_render_disable(struct radeon_device *rdev)
139{
140 WREG32(R_000300_VGA_RENDER_CONTROL,
141 RREG32(R_000300_VGA_RENDER_CONTROL) & C_000300_VGA_VSTATUS_CNTL);
142}
143
222void rv515_gpu_init(struct radeon_device *rdev) 144void rv515_gpu_init(struct radeon_device *rdev)
223{ 145{
224 unsigned pipe_select_current, gb_pipe_select, tmp; 146 unsigned pipe_select_current, gb_pipe_select, tmp;
@@ -231,7 +153,7 @@ void rv515_gpu_init(struct radeon_device *rdev)
231 "reseting GPU. Bad things might happen.\n"); 153 "reseting GPU. Bad things might happen.\n");
232 } 154 }
233 155
234 rs600_disable_vga(rdev); 156 rv515_vga_render_disable(rdev);
235 157
236 r420_pipes_init(rdev); 158 r420_pipes_init(rdev);
237 gb_pipe_select = RREG32(0x402C); 159 gb_pipe_select = RREG32(0x402C);
@@ -335,10 +257,6 @@ int rv515_gpu_reset(struct radeon_device *rdev)
335 return 0; 257 return 0;
336} 258}
337 259
338
339/*
340 * VRAM info
341 */
342static void rv515_vram_get_type(struct radeon_device *rdev) 260static void rv515_vram_get_type(struct radeon_device *rdev)
343{ 261{
344 uint32_t tmp; 262 uint32_t tmp;
@@ -374,10 +292,6 @@ void rv515_vram_info(struct radeon_device *rdev)
374 rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a); 292 rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
375} 293}
376 294
377
378/*
379 * Indirect registers accessor
380 */
381uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg) 295uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg)
382{ 296{
383 uint32_t r; 297 uint32_t r;
@@ -395,9 +309,6 @@ void rv515_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
395 WREG32(MC_IND_INDEX, 0); 309 WREG32(MC_IND_INDEX, 0);
396} 310}
397 311
398/*
399 * Debugfs info
400 */
401#if defined(CONFIG_DEBUG_FS) 312#if defined(CONFIG_DEBUG_FS)
402static int rv515_debugfs_pipes_info(struct seq_file *m, void *data) 313static int rv515_debugfs_pipes_info(struct seq_file *m, void *data)
403{ 314{
@@ -459,13 +370,258 @@ int rv515_debugfs_ga_info_init(struct radeon_device *rdev)
459#endif 370#endif
460} 371}
461 372
462/* 373void rv515_mc_stop(struct radeon_device *rdev, struct rv515_mc_save *save)
463 * Asic initialization 374{
464 */ 375 save->d1vga_control = RREG32(R_000330_D1VGA_CONTROL);
465int rv515_init(struct radeon_device *rdev) 376 save->d2vga_control = RREG32(R_000338_D2VGA_CONTROL);
377 save->vga_render_control = RREG32(R_000300_VGA_RENDER_CONTROL);
378 save->vga_hdp_control = RREG32(R_000328_VGA_HDP_CONTROL);
379 save->d1crtc_control = RREG32(R_006080_D1CRTC_CONTROL);
380 save->d2crtc_control = RREG32(R_006880_D2CRTC_CONTROL);
381
382 /* Stop all video */
383 WREG32(R_000330_D1VGA_CONTROL, 0);
384 WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 0);
385 WREG32(R_000300_VGA_RENDER_CONTROL, 0);
386 WREG32(R_0060E8_D1CRTC_UPDATE_LOCK, 1);
387 WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 1);
388 WREG32(R_006080_D1CRTC_CONTROL, 0);
389 WREG32(R_006880_D2CRTC_CONTROL, 0);
390 WREG32(R_0060E8_D1CRTC_UPDATE_LOCK, 0);
391 WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 0);
392}
393
394void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save)
395{
396 WREG32(R_006110_D1GRPH_PRIMARY_SURFACE_ADDRESS, rdev->mc.vram_start);
397 WREG32(R_006118_D1GRPH_SECONDARY_SURFACE_ADDRESS, rdev->mc.vram_start);
398 WREG32(R_006910_D2GRPH_PRIMARY_SURFACE_ADDRESS, rdev->mc.vram_start);
399 WREG32(R_006918_D2GRPH_SECONDARY_SURFACE_ADDRESS, rdev->mc.vram_start);
400 WREG32(R_000310_VGA_MEMORY_BASE_ADDRESS, rdev->mc.vram_start);
401 /* Unlock host access */
402 WREG32(R_000328_VGA_HDP_CONTROL, save->vga_hdp_control);
403 mdelay(1);
404 /* Restore video state */
405 WREG32(R_0060E8_D1CRTC_UPDATE_LOCK, 1);
406 WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 1);
407 WREG32(R_006080_D1CRTC_CONTROL, save->d1crtc_control);
408 WREG32(R_006880_D2CRTC_CONTROL, save->d2crtc_control);
409 WREG32(R_0060E8_D1CRTC_UPDATE_LOCK, 0);
410 WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 0);
411 WREG32(R_000330_D1VGA_CONTROL, save->d1vga_control);
412 WREG32(R_000338_D2VGA_CONTROL, save->d2vga_control);
413 WREG32(R_000300_VGA_RENDER_CONTROL, save->vga_render_control);
414}
415
416void rv515_mc_program(struct radeon_device *rdev)
417{
418 struct rv515_mc_save save;
419
420 /* Stops all mc clients */
421 rv515_mc_stop(rdev, &save);
422
423 /* Wait for mc idle */
424 if (rv515_mc_wait_for_idle(rdev))
425 dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n");
426 /* Write VRAM size in case we are limiting it */
427 WREG32(R_0000F8_CONFIG_MEMSIZE, rdev->mc.real_vram_size);
428 /* Program MC, should be a 32bits limited address space */
429 WREG32_MC(R_000001_MC_FB_LOCATION,
430 S_000001_MC_FB_START(rdev->mc.vram_start >> 16) |
431 S_000001_MC_FB_TOP(rdev->mc.vram_end >> 16));
432 WREG32(R_000134_HDP_FB_LOCATION,
433 S_000134_HDP_FB_START(rdev->mc.vram_start >> 16));
434 if (rdev->flags & RADEON_IS_AGP) {
435 WREG32_MC(R_000002_MC_AGP_LOCATION,
436 S_000002_MC_AGP_START(rdev->mc.gtt_start >> 16) |
437 S_000002_MC_AGP_TOP(rdev->mc.gtt_end >> 16));
438 WREG32_MC(R_000003_MC_AGP_BASE, lower_32_bits(rdev->mc.agp_base));
439 WREG32_MC(R_000004_MC_AGP_BASE_2,
440 S_000004_AGP_BASE_ADDR_2(upper_32_bits(rdev->mc.agp_base)));
441 } else {
442 WREG32_MC(R_000002_MC_AGP_LOCATION, 0xFFFFFFFF);
443 WREG32_MC(R_000003_MC_AGP_BASE, 0);
444 WREG32_MC(R_000004_MC_AGP_BASE_2, 0);
445 }
446
447 rv515_mc_resume(rdev, &save);
448}
449
450void rv515_clock_startup(struct radeon_device *rdev)
451{
452 if (radeon_dynclks != -1 && radeon_dynclks)
453 radeon_atom_set_clock_gating(rdev, 1);
454 /* We need to force on some of the block */
455 WREG32_PLL(R_00000F_CP_DYN_CNTL,
456 RREG32_PLL(R_00000F_CP_DYN_CNTL) | S_00000F_CP_FORCEON(1));
457 WREG32_PLL(R_000011_E2_DYN_CNTL,
458 RREG32_PLL(R_000011_E2_DYN_CNTL) | S_000011_E2_FORCEON(1));
459 WREG32_PLL(R_000013_IDCT_DYN_CNTL,
460 RREG32_PLL(R_000013_IDCT_DYN_CNTL) | S_000013_IDCT_FORCEON(1));
461}
462
463static int rv515_startup(struct radeon_device *rdev)
464{
465 int r;
466
467 rv515_mc_program(rdev);
468 /* Resume clock */
469 rv515_clock_startup(rdev);
470 /* Initialize GPU configuration (# pipes, ...) */
471 rv515_gpu_init(rdev);
472 /* Initialize GART (initialize after TTM so we can allocate
473 * memory through TTM but finalize after TTM) */
474 if (rdev->flags & RADEON_IS_PCIE) {
475 r = rv370_pcie_gart_enable(rdev);
476 if (r)
477 return r;
478 }
479 /* Enable IRQ */
480 rdev->irq.sw_int = true;
481 r100_irq_set(rdev);
482 /* 1M ring buffer */
483 r = r100_cp_init(rdev, 1024 * 1024);
484 if (r) {
485 dev_err(rdev->dev, "failled initializing CP (%d).\n", r);
486 return r;
487 }
488 r = r100_wb_init(rdev);
489 if (r)
490 dev_err(rdev->dev, "failled initializing WB (%d).\n", r);
491 r = r100_ib_init(rdev);
492 if (r) {
493 dev_err(rdev->dev, "failled initializing IB (%d).\n", r);
494 return r;
495 }
496 return 0;
497}
498
499int rv515_resume(struct radeon_device *rdev)
500{
501 /* Make sur GART are not working */
502 if (rdev->flags & RADEON_IS_PCIE)
503 rv370_pcie_gart_disable(rdev);
504 /* Resume clock before doing reset */
505 rv515_clock_startup(rdev);
506 /* Reset gpu before posting otherwise ATOM will enter infinite loop */
507 if (radeon_gpu_reset(rdev)) {
508 dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
509 RREG32(R_000E40_RBBM_STATUS),
510 RREG32(R_0007C0_CP_STAT));
511 }
512 /* post */
513 atom_asic_init(rdev->mode_info.atom_context);
514 /* Resume clock after posting */
515 rv515_clock_startup(rdev);
516 return rv515_startup(rdev);
517}
518
519int rv515_suspend(struct radeon_device *rdev)
520{
521 r100_cp_disable(rdev);
522 r100_wb_disable(rdev);
523 r100_irq_disable(rdev);
524 if (rdev->flags & RADEON_IS_PCIE)
525 rv370_pcie_gart_disable(rdev);
526 return 0;
527}
528
529void rv515_set_safe_registers(struct radeon_device *rdev)
466{ 530{
467 rdev->config.r300.reg_safe_bm = rv515_reg_safe_bm; 531 rdev->config.r300.reg_safe_bm = rv515_reg_safe_bm;
468 rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(rv515_reg_safe_bm); 532 rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(rv515_reg_safe_bm);
533}
534
535void rv515_fini(struct radeon_device *rdev)
536{
537 rv515_suspend(rdev);
538 r100_cp_fini(rdev);
539 r100_wb_fini(rdev);
540 r100_ib_fini(rdev);
541 radeon_gem_fini(rdev);
542 rv370_pcie_gart_fini(rdev);
543 radeon_agp_fini(rdev);
544 radeon_irq_kms_fini(rdev);
545 radeon_fence_driver_fini(rdev);
546 radeon_object_fini(rdev);
547 radeon_atombios_fini(rdev);
548 kfree(rdev->bios);
549 rdev->bios = NULL;
550}
551
552int rv515_init(struct radeon_device *rdev)
553{
554 int r;
555
556 rdev->new_init_path = true;
557 /* Initialize scratch registers */
558 radeon_scratch_init(rdev);
559 /* Initialize surface registers */
560 radeon_surface_init(rdev);
561 /* TODO: disable VGA need to use VGA request */
562 /* BIOS*/
563 if (!radeon_get_bios(rdev)) {
564 if (ASIC_IS_AVIVO(rdev))
565 return -EINVAL;
566 }
567 if (rdev->is_atom_bios) {
568 r = radeon_atombios_init(rdev);
569 if (r)
570 return r;
571 } else {
572 dev_err(rdev->dev, "Expecting atombios for RV515 GPU\n");
573 return -EINVAL;
574 }
575 /* Reset gpu before posting otherwise ATOM will enter infinite loop */
576 if (radeon_gpu_reset(rdev)) {
577 dev_warn(rdev->dev,
578 "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
579 RREG32(R_000E40_RBBM_STATUS),
580 RREG32(R_0007C0_CP_STAT));
581 }
582 /* check if cards are posted or not */
583 if (!radeon_card_posted(rdev) && rdev->bios) {
584 DRM_INFO("GPU not posted. posting now...\n");
585 atom_asic_init(rdev->mode_info.atom_context);
586 }
587 /* Initialize clocks */
588 radeon_get_clock_info(rdev->ddev);
589 /* Get vram informations */
590 rv515_vram_info(rdev);
591 /* Initialize memory controller (also test AGP) */
592 r = r420_mc_init(rdev);
593 if (r)
594 return r;
595 rv515_debugfs(rdev);
596 /* Fence driver */
597 r = radeon_fence_driver_init(rdev);
598 if (r)
599 return r;
600 r = radeon_irq_kms_init(rdev);
601 if (r)
602 return r;
603 /* Memory manager */
604 r = radeon_object_init(rdev);
605 if (r)
606 return r;
607 r = rv370_pcie_gart_init(rdev);
608 if (r)
609 return r;
610 rv515_set_safe_registers(rdev);
611 rdev->accel_working = true;
612 r = rv515_startup(rdev);
613 if (r) {
614 /* Somethings want wront with the accel init stop accel */
615 dev_err(rdev->dev, "Disabling GPU acceleration\n");
616 rv515_suspend(rdev);
617 r100_cp_fini(rdev);
618 r100_wb_fini(rdev);
619 r100_ib_fini(rdev);
620 rv370_pcie_gart_fini(rdev);
621 radeon_agp_fini(rdev);
622 radeon_irq_kms_fini(rdev);
623 rdev->accel_working = false;
624 }
469 return 0; 625 return 0;
470} 626}
471 627
diff --git a/drivers/gpu/drm/radeon/rv515d.h b/drivers/gpu/drm/radeon/rv515d.h
index a65e17ec1c08..fc216e49384d 100644
--- a/drivers/gpu/drm/radeon/rv515d.h
+++ b/drivers/gpu/drm/radeon/rv515d.h
@@ -216,5 +216,388 @@
216#define CP_PACKET0_GET_ONE_REG_WR(h) (((h) >> 15) & 1) 216#define CP_PACKET0_GET_ONE_REG_WR(h) (((h) >> 15) & 1)
217#define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF) 217#define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)
218 218
219#endif 219/* Registers */
220#define R_0000F8_CONFIG_MEMSIZE 0x0000F8
221#define S_0000F8_CONFIG_MEMSIZE(x) (((x) & 0xFFFFFFFF) << 0)
222#define G_0000F8_CONFIG_MEMSIZE(x) (((x) >> 0) & 0xFFFFFFFF)
223#define C_0000F8_CONFIG_MEMSIZE 0x00000000
224#define R_000134_HDP_FB_LOCATION 0x000134
225#define S_000134_HDP_FB_START(x) (((x) & 0xFFFF) << 0)
226#define G_000134_HDP_FB_START(x) (((x) >> 0) & 0xFFFF)
227#define C_000134_HDP_FB_START 0xFFFF0000
228#define R_000300_VGA_RENDER_CONTROL 0x000300
229#define S_000300_VGA_BLINK_RATE(x) (((x) & 0x1F) << 0)
230#define G_000300_VGA_BLINK_RATE(x) (((x) >> 0) & 0x1F)
231#define C_000300_VGA_BLINK_RATE 0xFFFFFFE0
232#define S_000300_VGA_BLINK_MODE(x) (((x) & 0x3) << 5)
233#define G_000300_VGA_BLINK_MODE(x) (((x) >> 5) & 0x3)
234#define C_000300_VGA_BLINK_MODE 0xFFFFFF9F
235#define S_000300_VGA_CURSOR_BLINK_INVERT(x) (((x) & 0x1) << 7)
236#define G_000300_VGA_CURSOR_BLINK_INVERT(x) (((x) >> 7) & 0x1)
237#define C_000300_VGA_CURSOR_BLINK_INVERT 0xFFFFFF7F
238#define S_000300_VGA_EXTD_ADDR_COUNT_ENABLE(x) (((x) & 0x1) << 8)
239#define G_000300_VGA_EXTD_ADDR_COUNT_ENABLE(x) (((x) >> 8) & 0x1)
240#define C_000300_VGA_EXTD_ADDR_COUNT_ENABLE 0xFFFFFEFF
241#define S_000300_VGA_VSTATUS_CNTL(x) (((x) & 0x3) << 16)
242#define G_000300_VGA_VSTATUS_CNTL(x) (((x) >> 16) & 0x3)
243#define C_000300_VGA_VSTATUS_CNTL 0xFFFCFFFF
244#define S_000300_VGA_LOCK_8DOT(x) (((x) & 0x1) << 24)
245#define G_000300_VGA_LOCK_8DOT(x) (((x) >> 24) & 0x1)
246#define C_000300_VGA_LOCK_8DOT 0xFEFFFFFF
247#define S_000300_VGAREG_LINECMP_COMPATIBILITY_SEL(x) (((x) & 0x1) << 25)
248#define G_000300_VGAREG_LINECMP_COMPATIBILITY_SEL(x) (((x) >> 25) & 0x1)
249#define C_000300_VGAREG_LINECMP_COMPATIBILITY_SEL 0xFDFFFFFF
250#define R_000310_VGA_MEMORY_BASE_ADDRESS 0x000310
251#define S_000310_VGA_MEMORY_BASE_ADDRESS(x) (((x) & 0xFFFFFFFF) << 0)
252#define G_000310_VGA_MEMORY_BASE_ADDRESS(x) (((x) >> 0) & 0xFFFFFFFF)
253#define C_000310_VGA_MEMORY_BASE_ADDRESS 0x00000000
254#define R_000328_VGA_HDP_CONTROL 0x000328
255#define S_000328_VGA_MEM_PAGE_SELECT_EN(x) (((x) & 0x1) << 0)
256#define G_000328_VGA_MEM_PAGE_SELECT_EN(x) (((x) >> 0) & 0x1)
257#define C_000328_VGA_MEM_PAGE_SELECT_EN 0xFFFFFFFE
258#define S_000328_VGA_RBBM_LOCK_DISABLE(x) (((x) & 0x1) << 8)
259#define G_000328_VGA_RBBM_LOCK_DISABLE(x) (((x) >> 8) & 0x1)
260#define C_000328_VGA_RBBM_LOCK_DISABLE 0xFFFFFEFF
261#define S_000328_VGA_SOFT_RESET(x) (((x) & 0x1) << 16)
262#define G_000328_VGA_SOFT_RESET(x) (((x) >> 16) & 0x1)
263#define C_000328_VGA_SOFT_RESET 0xFFFEFFFF
264#define S_000328_VGA_TEST_RESET_CONTROL(x) (((x) & 0x1) << 24)
265#define G_000328_VGA_TEST_RESET_CONTROL(x) (((x) >> 24) & 0x1)
266#define C_000328_VGA_TEST_RESET_CONTROL 0xFEFFFFFF
267#define R_000330_D1VGA_CONTROL 0x000330
268#define S_000330_D1VGA_MODE_ENABLE(x) (((x) & 0x1) << 0)
269#define G_000330_D1VGA_MODE_ENABLE(x) (((x) >> 0) & 0x1)
270#define C_000330_D1VGA_MODE_ENABLE 0xFFFFFFFE
271#define S_000330_D1VGA_TIMING_SELECT(x) (((x) & 0x1) << 8)
272#define G_000330_D1VGA_TIMING_SELECT(x) (((x) >> 8) & 0x1)
273#define C_000330_D1VGA_TIMING_SELECT 0xFFFFFEFF
274#define S_000330_D1VGA_SYNC_POLARITY_SELECT(x) (((x) & 0x1) << 9)
275#define G_000330_D1VGA_SYNC_POLARITY_SELECT(x) (((x) >> 9) & 0x1)
276#define C_000330_D1VGA_SYNC_POLARITY_SELECT 0xFFFFFDFF
277#define S_000330_D1VGA_OVERSCAN_TIMING_SELECT(x) (((x) & 0x1) << 10)
278#define G_000330_D1VGA_OVERSCAN_TIMING_SELECT(x) (((x) >> 10) & 0x1)
279#define C_000330_D1VGA_OVERSCAN_TIMING_SELECT 0xFFFFFBFF
280#define S_000330_D1VGA_OVERSCAN_COLOR_EN(x) (((x) & 0x1) << 16)
281#define G_000330_D1VGA_OVERSCAN_COLOR_EN(x) (((x) >> 16) & 0x1)
282#define C_000330_D1VGA_OVERSCAN_COLOR_EN 0xFFFEFFFF
283#define S_000330_D1VGA_ROTATE(x) (((x) & 0x3) << 24)
284#define G_000330_D1VGA_ROTATE(x) (((x) >> 24) & 0x3)
285#define C_000330_D1VGA_ROTATE 0xFCFFFFFF
286#define R_000338_D2VGA_CONTROL 0x000338
287#define S_000338_D2VGA_MODE_ENABLE(x) (((x) & 0x1) << 0)
288#define G_000338_D2VGA_MODE_ENABLE(x) (((x) >> 0) & 0x1)
289#define C_000338_D2VGA_MODE_ENABLE 0xFFFFFFFE
290#define S_000338_D2VGA_TIMING_SELECT(x) (((x) & 0x1) << 8)
291#define G_000338_D2VGA_TIMING_SELECT(x) (((x) >> 8) & 0x1)
292#define C_000338_D2VGA_TIMING_SELECT 0xFFFFFEFF
293#define S_000338_D2VGA_SYNC_POLARITY_SELECT(x) (((x) & 0x1) << 9)
294#define G_000338_D2VGA_SYNC_POLARITY_SELECT(x) (((x) >> 9) & 0x1)
295#define C_000338_D2VGA_SYNC_POLARITY_SELECT 0xFFFFFDFF
296#define S_000338_D2VGA_OVERSCAN_TIMING_SELECT(x) (((x) & 0x1) << 10)
297#define G_000338_D2VGA_OVERSCAN_TIMING_SELECT(x) (((x) >> 10) & 0x1)
298#define C_000338_D2VGA_OVERSCAN_TIMING_SELECT 0xFFFFFBFF
299#define S_000338_D2VGA_OVERSCAN_COLOR_EN(x) (((x) & 0x1) << 16)
300#define G_000338_D2VGA_OVERSCAN_COLOR_EN(x) (((x) >> 16) & 0x1)
301#define C_000338_D2VGA_OVERSCAN_COLOR_EN 0xFFFEFFFF
302#define S_000338_D2VGA_ROTATE(x) (((x) & 0x3) << 24)
303#define G_000338_D2VGA_ROTATE(x) (((x) >> 24) & 0x3)
304#define C_000338_D2VGA_ROTATE 0xFCFFFFFF
305#define R_0007C0_CP_STAT 0x0007C0
306#define S_0007C0_MRU_BUSY(x) (((x) & 0x1) << 0)
307#define G_0007C0_MRU_BUSY(x) (((x) >> 0) & 0x1)
308#define C_0007C0_MRU_BUSY 0xFFFFFFFE
309#define S_0007C0_MWU_BUSY(x) (((x) & 0x1) << 1)
310#define G_0007C0_MWU_BUSY(x) (((x) >> 1) & 0x1)
311#define C_0007C0_MWU_BUSY 0xFFFFFFFD
312#define S_0007C0_RSIU_BUSY(x) (((x) & 0x1) << 2)
313#define G_0007C0_RSIU_BUSY(x) (((x) >> 2) & 0x1)
314#define C_0007C0_RSIU_BUSY 0xFFFFFFFB
315#define S_0007C0_RCIU_BUSY(x) (((x) & 0x1) << 3)
316#define G_0007C0_RCIU_BUSY(x) (((x) >> 3) & 0x1)
317#define C_0007C0_RCIU_BUSY 0xFFFFFFF7
318#define S_0007C0_CSF_PRIMARY_BUSY(x) (((x) & 0x1) << 9)
319#define G_0007C0_CSF_PRIMARY_BUSY(x) (((x) >> 9) & 0x1)
320#define C_0007C0_CSF_PRIMARY_BUSY 0xFFFFFDFF
321#define S_0007C0_CSF_INDIRECT_BUSY(x) (((x) & 0x1) << 10)
322#define G_0007C0_CSF_INDIRECT_BUSY(x) (((x) >> 10) & 0x1)
323#define C_0007C0_CSF_INDIRECT_BUSY 0xFFFFFBFF
324#define S_0007C0_CSQ_PRIMARY_BUSY(x) (((x) & 0x1) << 11)
325#define G_0007C0_CSQ_PRIMARY_BUSY(x) (((x) >> 11) & 0x1)
326#define C_0007C0_CSQ_PRIMARY_BUSY 0xFFFFF7FF
327#define S_0007C0_CSQ_INDIRECT_BUSY(x) (((x) & 0x1) << 12)
328#define G_0007C0_CSQ_INDIRECT_BUSY(x) (((x) >> 12) & 0x1)
329#define C_0007C0_CSQ_INDIRECT_BUSY 0xFFFFEFFF
330#define S_0007C0_CSI_BUSY(x) (((x) & 0x1) << 13)
331#define G_0007C0_CSI_BUSY(x) (((x) >> 13) & 0x1)
332#define C_0007C0_CSI_BUSY 0xFFFFDFFF
333#define S_0007C0_CSF_INDIRECT2_BUSY(x) (((x) & 0x1) << 14)
334#define G_0007C0_CSF_INDIRECT2_BUSY(x) (((x) >> 14) & 0x1)
335#define C_0007C0_CSF_INDIRECT2_BUSY 0xFFFFBFFF
336#define S_0007C0_CSQ_INDIRECT2_BUSY(x) (((x) & 0x1) << 15)
337#define G_0007C0_CSQ_INDIRECT2_BUSY(x) (((x) >> 15) & 0x1)
338#define C_0007C0_CSQ_INDIRECT2_BUSY 0xFFFF7FFF
339#define S_0007C0_GUIDMA_BUSY(x) (((x) & 0x1) << 28)
340#define G_0007C0_GUIDMA_BUSY(x) (((x) >> 28) & 0x1)
341#define C_0007C0_GUIDMA_BUSY 0xEFFFFFFF
342#define S_0007C0_VIDDMA_BUSY(x) (((x) & 0x1) << 29)
343#define G_0007C0_VIDDMA_BUSY(x) (((x) >> 29) & 0x1)
344#define C_0007C0_VIDDMA_BUSY 0xDFFFFFFF
345#define S_0007C0_CMDSTRM_BUSY(x) (((x) & 0x1) << 30)
346#define G_0007C0_CMDSTRM_BUSY(x) (((x) >> 30) & 0x1)
347#define C_0007C0_CMDSTRM_BUSY 0xBFFFFFFF
348#define S_0007C0_CP_BUSY(x) (((x) & 0x1) << 31)
349#define G_0007C0_CP_BUSY(x) (((x) >> 31) & 0x1)
350#define C_0007C0_CP_BUSY 0x7FFFFFFF
351#define R_000E40_RBBM_STATUS 0x000E40
352#define S_000E40_CMDFIFO_AVAIL(x) (((x) & 0x7F) << 0)
353#define G_000E40_CMDFIFO_AVAIL(x) (((x) >> 0) & 0x7F)
354#define C_000E40_CMDFIFO_AVAIL 0xFFFFFF80
355#define S_000E40_HIRQ_ON_RBB(x) (((x) & 0x1) << 8)
356#define G_000E40_HIRQ_ON_RBB(x) (((x) >> 8) & 0x1)
357#define C_000E40_HIRQ_ON_RBB 0xFFFFFEFF
358#define S_000E40_CPRQ_ON_RBB(x) (((x) & 0x1) << 9)
359#define G_000E40_CPRQ_ON_RBB(x) (((x) >> 9) & 0x1)
360#define C_000E40_CPRQ_ON_RBB 0xFFFFFDFF
361#define S_000E40_CFRQ_ON_RBB(x) (((x) & 0x1) << 10)
362#define G_000E40_CFRQ_ON_RBB(x) (((x) >> 10) & 0x1)
363#define C_000E40_CFRQ_ON_RBB 0xFFFFFBFF
364#define S_000E40_HIRQ_IN_RTBUF(x) (((x) & 0x1) << 11)
365#define G_000E40_HIRQ_IN_RTBUF(x) (((x) >> 11) & 0x1)
366#define C_000E40_HIRQ_IN_RTBUF 0xFFFFF7FF
367#define S_000E40_CPRQ_IN_RTBUF(x) (((x) & 0x1) << 12)
368#define G_000E40_CPRQ_IN_RTBUF(x) (((x) >> 12) & 0x1)
369#define C_000E40_CPRQ_IN_RTBUF 0xFFFFEFFF
370#define S_000E40_CFRQ_IN_RTBUF(x) (((x) & 0x1) << 13)
371#define G_000E40_CFRQ_IN_RTBUF(x) (((x) >> 13) & 0x1)
372#define C_000E40_CFRQ_IN_RTBUF 0xFFFFDFFF
373#define S_000E40_CF_PIPE_BUSY(x) (((x) & 0x1) << 14)
374#define G_000E40_CF_PIPE_BUSY(x) (((x) >> 14) & 0x1)
375#define C_000E40_CF_PIPE_BUSY 0xFFFFBFFF
376#define S_000E40_ENG_EV_BUSY(x) (((x) & 0x1) << 15)
377#define G_000E40_ENG_EV_BUSY(x) (((x) >> 15) & 0x1)
378#define C_000E40_ENG_EV_BUSY 0xFFFF7FFF
379#define S_000E40_CP_CMDSTRM_BUSY(x) (((x) & 0x1) << 16)
380#define G_000E40_CP_CMDSTRM_BUSY(x) (((x) >> 16) & 0x1)
381#define C_000E40_CP_CMDSTRM_BUSY 0xFFFEFFFF
382#define S_000E40_E2_BUSY(x) (((x) & 0x1) << 17)
383#define G_000E40_E2_BUSY(x) (((x) >> 17) & 0x1)
384#define C_000E40_E2_BUSY 0xFFFDFFFF
385#define S_000E40_RB2D_BUSY(x) (((x) & 0x1) << 18)
386#define G_000E40_RB2D_BUSY(x) (((x) >> 18) & 0x1)
387#define C_000E40_RB2D_BUSY 0xFFFBFFFF
388#define S_000E40_RB3D_BUSY(x) (((x) & 0x1) << 19)
389#define G_000E40_RB3D_BUSY(x) (((x) >> 19) & 0x1)
390#define C_000E40_RB3D_BUSY 0xFFF7FFFF
391#define S_000E40_VAP_BUSY(x) (((x) & 0x1) << 20)
392#define G_000E40_VAP_BUSY(x) (((x) >> 20) & 0x1)
393#define C_000E40_VAP_BUSY 0xFFEFFFFF
394#define S_000E40_RE_BUSY(x) (((x) & 0x1) << 21)
395#define G_000E40_RE_BUSY(x) (((x) >> 21) & 0x1)
396#define C_000E40_RE_BUSY 0xFFDFFFFF
397#define S_000E40_TAM_BUSY(x) (((x) & 0x1) << 22)
398#define G_000E40_TAM_BUSY(x) (((x) >> 22) & 0x1)
399#define C_000E40_TAM_BUSY 0xFFBFFFFF
400#define S_000E40_TDM_BUSY(x) (((x) & 0x1) << 23)
401#define G_000E40_TDM_BUSY(x) (((x) >> 23) & 0x1)
402#define C_000E40_TDM_BUSY 0xFF7FFFFF
403#define S_000E40_PB_BUSY(x) (((x) & 0x1) << 24)
404#define G_000E40_PB_BUSY(x) (((x) >> 24) & 0x1)
405#define C_000E40_PB_BUSY 0xFEFFFFFF
406#define S_000E40_TIM_BUSY(x) (((x) & 0x1) << 25)
407#define G_000E40_TIM_BUSY(x) (((x) >> 25) & 0x1)
408#define C_000E40_TIM_BUSY 0xFDFFFFFF
409#define S_000E40_GA_BUSY(x) (((x) & 0x1) << 26)
410#define G_000E40_GA_BUSY(x) (((x) >> 26) & 0x1)
411#define C_000E40_GA_BUSY 0xFBFFFFFF
412#define S_000E40_CBA2D_BUSY(x) (((x) & 0x1) << 27)
413#define G_000E40_CBA2D_BUSY(x) (((x) >> 27) & 0x1)
414#define C_000E40_CBA2D_BUSY 0xF7FFFFFF
415#define S_000E40_RBBM_HIBUSY(x) (((x) & 0x1) << 28)
416#define G_000E40_RBBM_HIBUSY(x) (((x) >> 28) & 0x1)
417#define C_000E40_RBBM_HIBUSY 0xEFFFFFFF
418#define S_000E40_SKID_CFBUSY(x) (((x) & 0x1) << 29)
419#define G_000E40_SKID_CFBUSY(x) (((x) >> 29) & 0x1)
420#define C_000E40_SKID_CFBUSY 0xDFFFFFFF
421#define S_000E40_VAP_VF_BUSY(x) (((x) & 0x1) << 30)
422#define G_000E40_VAP_VF_BUSY(x) (((x) >> 30) & 0x1)
423#define C_000E40_VAP_VF_BUSY 0xBFFFFFFF
424#define S_000E40_GUI_ACTIVE(x) (((x) & 0x1) << 31)
425#define G_000E40_GUI_ACTIVE(x) (((x) >> 31) & 0x1)
426#define C_000E40_GUI_ACTIVE 0x7FFFFFFF
427#define R_006080_D1CRTC_CONTROL 0x006080
428#define S_006080_D1CRTC_MASTER_EN(x) (((x) & 0x1) << 0)
429#define G_006080_D1CRTC_MASTER_EN(x) (((x) >> 0) & 0x1)
430#define C_006080_D1CRTC_MASTER_EN 0xFFFFFFFE
431#define S_006080_D1CRTC_SYNC_RESET_SEL(x) (((x) & 0x1) << 4)
432#define G_006080_D1CRTC_SYNC_RESET_SEL(x) (((x) >> 4) & 0x1)
433#define C_006080_D1CRTC_SYNC_RESET_SEL 0xFFFFFFEF
434#define S_006080_D1CRTC_DISABLE_POINT_CNTL(x) (((x) & 0x3) << 8)
435#define G_006080_D1CRTC_DISABLE_POINT_CNTL(x) (((x) >> 8) & 0x3)
436#define C_006080_D1CRTC_DISABLE_POINT_CNTL 0xFFFFFCFF
437#define S_006080_D1CRTC_CURRENT_MASTER_EN_STATE(x) (((x) & 0x1) << 16)
438#define G_006080_D1CRTC_CURRENT_MASTER_EN_STATE(x) (((x) >> 16) & 0x1)
439#define C_006080_D1CRTC_CURRENT_MASTER_EN_STATE 0xFFFEFFFF
440#define S_006080_D1CRTC_DISP_READ_REQUEST_DISABLE(x) (((x) & 0x1) << 24)
441#define G_006080_D1CRTC_DISP_READ_REQUEST_DISABLE(x) (((x) >> 24) & 0x1)
442#define C_006080_D1CRTC_DISP_READ_REQUEST_DISABLE 0xFEFFFFFF
443#define R_0060E8_D1CRTC_UPDATE_LOCK 0x0060E8
444#define S_0060E8_D1CRTC_UPDATE_LOCK(x) (((x) & 0x1) << 0)
445#define G_0060E8_D1CRTC_UPDATE_LOCK(x) (((x) >> 0) & 0x1)
446#define C_0060E8_D1CRTC_UPDATE_LOCK 0xFFFFFFFE
447#define R_006110_D1GRPH_PRIMARY_SURFACE_ADDRESS 0x006110
448#define S_006110_D1GRPH_PRIMARY_SURFACE_ADDRESS(x) (((x) & 0xFFFFFFFF) << 0)
449#define G_006110_D1GRPH_PRIMARY_SURFACE_ADDRESS(x) (((x) >> 0) & 0xFFFFFFFF)
450#define C_006110_D1GRPH_PRIMARY_SURFACE_ADDRESS 0x00000000
451#define R_006118_D1GRPH_SECONDARY_SURFACE_ADDRESS 0x006118
452#define S_006118_D1GRPH_SECONDARY_SURFACE_ADDRESS(x) (((x) & 0xFFFFFFFF) << 0)
453#define G_006118_D1GRPH_SECONDARY_SURFACE_ADDRESS(x) (((x) >> 0) & 0xFFFFFFFF)
454#define C_006118_D1GRPH_SECONDARY_SURFACE_ADDRESS 0x00000000
455#define R_006880_D2CRTC_CONTROL 0x006880
456#define S_006880_D2CRTC_MASTER_EN(x) (((x) & 0x1) << 0)
457#define G_006880_D2CRTC_MASTER_EN(x) (((x) >> 0) & 0x1)
458#define C_006880_D2CRTC_MASTER_EN 0xFFFFFFFE
459#define S_006880_D2CRTC_SYNC_RESET_SEL(x) (((x) & 0x1) << 4)
460#define G_006880_D2CRTC_SYNC_RESET_SEL(x) (((x) >> 4) & 0x1)
461#define C_006880_D2CRTC_SYNC_RESET_SEL 0xFFFFFFEF
462#define S_006880_D2CRTC_DISABLE_POINT_CNTL(x) (((x) & 0x3) << 8)
463#define G_006880_D2CRTC_DISABLE_POINT_CNTL(x) (((x) >> 8) & 0x3)
464#define C_006880_D2CRTC_DISABLE_POINT_CNTL 0xFFFFFCFF
465#define S_006880_D2CRTC_CURRENT_MASTER_EN_STATE(x) (((x) & 0x1) << 16)
466#define G_006880_D2CRTC_CURRENT_MASTER_EN_STATE(x) (((x) >> 16) & 0x1)
467#define C_006880_D2CRTC_CURRENT_MASTER_EN_STATE 0xFFFEFFFF
468#define S_006880_D2CRTC_DISP_READ_REQUEST_DISABLE(x) (((x) & 0x1) << 24)
469#define G_006880_D2CRTC_DISP_READ_REQUEST_DISABLE(x) (((x) >> 24) & 0x1)
470#define C_006880_D2CRTC_DISP_READ_REQUEST_DISABLE 0xFEFFFFFF
471#define R_0068E8_D2CRTC_UPDATE_LOCK 0x0068E8
472#define S_0068E8_D2CRTC_UPDATE_LOCK(x) (((x) & 0x1) << 0)
473#define G_0068E8_D2CRTC_UPDATE_LOCK(x) (((x) >> 0) & 0x1)
474#define C_0068E8_D2CRTC_UPDATE_LOCK 0xFFFFFFFE
475#define R_006910_D2GRPH_PRIMARY_SURFACE_ADDRESS 0x006910
476#define S_006910_D2GRPH_PRIMARY_SURFACE_ADDRESS(x) (((x) & 0xFFFFFFFF) << 0)
477#define G_006910_D2GRPH_PRIMARY_SURFACE_ADDRESS(x) (((x) >> 0) & 0xFFFFFFFF)
478#define C_006910_D2GRPH_PRIMARY_SURFACE_ADDRESS 0x00000000
479#define R_006918_D2GRPH_SECONDARY_SURFACE_ADDRESS 0x006918
480#define S_006918_D2GRPH_SECONDARY_SURFACE_ADDRESS(x) (((x) & 0xFFFFFFFF) << 0)
481#define G_006918_D2GRPH_SECONDARY_SURFACE_ADDRESS(x) (((x) >> 0) & 0xFFFFFFFF)
482#define C_006918_D2GRPH_SECONDARY_SURFACE_ADDRESS 0x00000000
483
484
485#define R_000001_MC_FB_LOCATION 0x000001
486#define S_000001_MC_FB_START(x) (((x) & 0xFFFF) << 0)
487#define G_000001_MC_FB_START(x) (((x) >> 0) & 0xFFFF)
488#define C_000001_MC_FB_START 0xFFFF0000
489#define S_000001_MC_FB_TOP(x) (((x) & 0xFFFF) << 16)
490#define G_000001_MC_FB_TOP(x) (((x) >> 16) & 0xFFFF)
491#define C_000001_MC_FB_TOP 0x0000FFFF
492#define R_000002_MC_AGP_LOCATION 0x000002
493#define S_000002_MC_AGP_START(x) (((x) & 0xFFFF) << 0)
494#define G_000002_MC_AGP_START(x) (((x) >> 0) & 0xFFFF)
495#define C_000002_MC_AGP_START 0xFFFF0000
496#define S_000002_MC_AGP_TOP(x) (((x) & 0xFFFF) << 16)
497#define G_000002_MC_AGP_TOP(x) (((x) >> 16) & 0xFFFF)
498#define C_000002_MC_AGP_TOP 0x0000FFFF
499#define R_000003_MC_AGP_BASE 0x000003
500#define S_000003_AGP_BASE_ADDR(x) (((x) & 0xFFFFFFFF) << 0)
501#define G_000003_AGP_BASE_ADDR(x) (((x) >> 0) & 0xFFFFFFFF)
502#define C_000003_AGP_BASE_ADDR 0x00000000
503#define R_000004_MC_AGP_BASE_2 0x000004
504#define S_000004_AGP_BASE_ADDR_2(x) (((x) & 0xF) << 0)
505#define G_000004_AGP_BASE_ADDR_2(x) (((x) >> 0) & 0xF)
506#define C_000004_AGP_BASE_ADDR_2 0xFFFFFFF0
220 507
508
509#define R_00000F_CP_DYN_CNTL 0x00000F
510#define S_00000F_CP_FORCEON(x) (((x) & 0x1) << 0)
511#define G_00000F_CP_FORCEON(x) (((x) >> 0) & 0x1)
512#define C_00000F_CP_FORCEON 0xFFFFFFFE
513#define S_00000F_CP_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 1)
514#define G_00000F_CP_MAX_DYN_STOP_LAT(x) (((x) >> 1) & 0x1)
515#define C_00000F_CP_MAX_DYN_STOP_LAT 0xFFFFFFFD
516#define S_00000F_CP_CLOCK_STATUS(x) (((x) & 0x1) << 2)
517#define G_00000F_CP_CLOCK_STATUS(x) (((x) >> 2) & 0x1)
518#define C_00000F_CP_CLOCK_STATUS 0xFFFFFFFB
519#define S_00000F_CP_PROG_SHUTOFF(x) (((x) & 0x1) << 3)
520#define G_00000F_CP_PROG_SHUTOFF(x) (((x) >> 3) & 0x1)
521#define C_00000F_CP_PROG_SHUTOFF 0xFFFFFFF7
522#define S_00000F_CP_PROG_DELAY_VALUE(x) (((x) & 0xFF) << 4)
523#define G_00000F_CP_PROG_DELAY_VALUE(x) (((x) >> 4) & 0xFF)
524#define C_00000F_CP_PROG_DELAY_VALUE 0xFFFFF00F
525#define S_00000F_CP_LOWER_POWER_IDLE(x) (((x) & 0xFF) << 12)
526#define G_00000F_CP_LOWER_POWER_IDLE(x) (((x) >> 12) & 0xFF)
527#define C_00000F_CP_LOWER_POWER_IDLE 0xFFF00FFF
528#define S_00000F_CP_LOWER_POWER_IGNORE(x) (((x) & 0x1) << 20)
529#define G_00000F_CP_LOWER_POWER_IGNORE(x) (((x) >> 20) & 0x1)
530#define C_00000F_CP_LOWER_POWER_IGNORE 0xFFEFFFFF
531#define S_00000F_CP_NORMAL_POWER_IGNORE(x) (((x) & 0x1) << 21)
532#define G_00000F_CP_NORMAL_POWER_IGNORE(x) (((x) >> 21) & 0x1)
533#define C_00000F_CP_NORMAL_POWER_IGNORE 0xFFDFFFFF
534#define S_00000F_SPARE(x) (((x) & 0x3) << 22)
535#define G_00000F_SPARE(x) (((x) >> 22) & 0x3)
536#define C_00000F_SPARE 0xFF3FFFFF
537#define S_00000F_CP_NORMAL_POWER_BUSY(x) (((x) & 0xFF) << 24)
538#define G_00000F_CP_NORMAL_POWER_BUSY(x) (((x) >> 24) & 0xFF)
539#define C_00000F_CP_NORMAL_POWER_BUSY 0x00FFFFFF
540#define R_000011_E2_DYN_CNTL 0x000011
541#define S_000011_E2_FORCEON(x) (((x) & 0x1) << 0)
542#define G_000011_E2_FORCEON(x) (((x) >> 0) & 0x1)
543#define C_000011_E2_FORCEON 0xFFFFFFFE
544#define S_000011_E2_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 1)
545#define G_000011_E2_MAX_DYN_STOP_LAT(x) (((x) >> 1) & 0x1)
546#define C_000011_E2_MAX_DYN_STOP_LAT 0xFFFFFFFD
547#define S_000011_E2_CLOCK_STATUS(x) (((x) & 0x1) << 2)
548#define G_000011_E2_CLOCK_STATUS(x) (((x) >> 2) & 0x1)
549#define C_000011_E2_CLOCK_STATUS 0xFFFFFFFB
550#define S_000011_E2_PROG_SHUTOFF(x) (((x) & 0x1) << 3)
551#define G_000011_E2_PROG_SHUTOFF(x) (((x) >> 3) & 0x1)
552#define C_000011_E2_PROG_SHUTOFF 0xFFFFFFF7
553#define S_000011_E2_PROG_DELAY_VALUE(x) (((x) & 0xFF) << 4)
554#define G_000011_E2_PROG_DELAY_VALUE(x) (((x) >> 4) & 0xFF)
555#define C_000011_E2_PROG_DELAY_VALUE 0xFFFFF00F
556#define S_000011_E2_LOWER_POWER_IDLE(x) (((x) & 0xFF) << 12)
557#define G_000011_E2_LOWER_POWER_IDLE(x) (((x) >> 12) & 0xFF)
558#define C_000011_E2_LOWER_POWER_IDLE 0xFFF00FFF
559#define S_000011_E2_LOWER_POWER_IGNORE(x) (((x) & 0x1) << 20)
560#define G_000011_E2_LOWER_POWER_IGNORE(x) (((x) >> 20) & 0x1)
561#define C_000011_E2_LOWER_POWER_IGNORE 0xFFEFFFFF
562#define S_000011_E2_NORMAL_POWER_IGNORE(x) (((x) & 0x1) << 21)
563#define G_000011_E2_NORMAL_POWER_IGNORE(x) (((x) >> 21) & 0x1)
564#define C_000011_E2_NORMAL_POWER_IGNORE 0xFFDFFFFF
565#define S_000011_SPARE(x) (((x) & 0x3) << 22)
566#define G_000011_SPARE(x) (((x) >> 22) & 0x3)
567#define C_000011_SPARE 0xFF3FFFFF
568#define S_000011_E2_NORMAL_POWER_BUSY(x) (((x) & 0xFF) << 24)
569#define G_000011_E2_NORMAL_POWER_BUSY(x) (((x) >> 24) & 0xFF)
570#define C_000011_E2_NORMAL_POWER_BUSY 0x00FFFFFF
571#define R_000013_IDCT_DYN_CNTL 0x000013
572#define S_000013_IDCT_FORCEON(x) (((x) & 0x1) << 0)
573#define G_000013_IDCT_FORCEON(x) (((x) >> 0) & 0x1)
574#define C_000013_IDCT_FORCEON 0xFFFFFFFE
575#define S_000013_IDCT_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 1)
576#define G_000013_IDCT_MAX_DYN_STOP_LAT(x) (((x) >> 1) & 0x1)
577#define C_000013_IDCT_MAX_DYN_STOP_LAT 0xFFFFFFFD
578#define S_000013_IDCT_CLOCK_STATUS(x) (((x) & 0x1) << 2)
579#define G_000013_IDCT_CLOCK_STATUS(x) (((x) >> 2) & 0x1)
580#define C_000013_IDCT_CLOCK_STATUS 0xFFFFFFFB
581#define S_000013_IDCT_PROG_SHUTOFF(x) (((x) & 0x1) << 3)
582#define G_000013_IDCT_PROG_SHUTOFF(x) (((x) >> 3) & 0x1)
583#define C_000013_IDCT_PROG_SHUTOFF 0xFFFFFFF7
584#define S_000013_IDCT_PROG_DELAY_VALUE(x) (((x) & 0xFF) << 4)
585#define G_000013_IDCT_PROG_DELAY_VALUE(x) (((x) >> 4) & 0xFF)
586#define C_000013_IDCT_PROG_DELAY_VALUE 0xFFFFF00F
587#define S_000013_IDCT_LOWER_POWER_IDLE(x) (((x) & 0xFF) << 12)
588#define G_000013_IDCT_LOWER_POWER_IDLE(x) (((x) >> 12) & 0xFF)
589#define C_000013_IDCT_LOWER_POWER_IDLE 0xFFF00FFF
590#define S_000013_IDCT_LOWER_POWER_IGNORE(x) (((x) & 0x1) << 20)
591#define G_000013_IDCT_LOWER_POWER_IGNORE(x) (((x) >> 20) & 0x1)
592#define C_000013_IDCT_LOWER_POWER_IGNORE 0xFFEFFFFF
593#define S_000013_IDCT_NORMAL_POWER_IGNORE(x) (((x) & 0x1) << 21)
594#define G_000013_IDCT_NORMAL_POWER_IGNORE(x) (((x) >> 21) & 0x1)
595#define C_000013_IDCT_NORMAL_POWER_IGNORE 0xFFDFFFFF
596#define S_000013_SPARE(x) (((x) & 0x3) << 22)
597#define G_000013_SPARE(x) (((x) >> 22) & 0x3)
598#define C_000013_SPARE 0xFF3FFFFF
599#define S_000013_IDCT_NORMAL_POWER_BUSY(x) (((x) & 0xFF) << 24)
600#define G_000013_IDCT_NORMAL_POWER_BUSY(x) (((x) >> 24) & 0xFF)
601#define C_000013_IDCT_NORMAL_POWER_BUSY 0x00FFFFFF
602
603#endif
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index b574c73a5109..e0b97d161397 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -31,8 +31,8 @@
31#include "radeon.h" 31#include "radeon.h"
32#include "radeon_drm.h" 32#include "radeon_drm.h"
33#include "rv770d.h" 33#include "rv770d.h"
34#include "avivod.h"
35#include "atom.h" 34#include "atom.h"
35#include "avivod.h"
36 36
37#define R700_PFP_UCODE_SIZE 848 37#define R700_PFP_UCODE_SIZE 848
38#define R700_PM4_UCODE_SIZE 1360 38#define R700_PM4_UCODE_SIZE 1360
@@ -231,7 +231,7 @@ static void rv770_mc_resume(struct radeon_device *rdev)
231 231
232 /* we need to own VRAM, so turn off the VGA renderer here 232 /* we need to own VRAM, so turn off the VGA renderer here
233 * to stop it overwriting our objects */ 233 * to stop it overwriting our objects */
234 radeon_avivo_vga_render_disable(rdev); 234 rv515_vga_render_disable(rdev);
235} 235}
236 236
237 237
@@ -801,6 +801,13 @@ int rv770_mc_init(struct radeon_device *rdev)
801 /* Setup GPU memory space */ 801 /* Setup GPU memory space */
802 rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE); 802 rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
803 rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE); 803 rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
804
805 if (rdev->mc.mc_vram_size > rdev->mc.aper_size)
806 rdev->mc.mc_vram_size = rdev->mc.aper_size;
807
808 if (rdev->mc.real_vram_size > rdev->mc.aper_size)
809 rdev->mc.real_vram_size = rdev->mc.aper_size;
810
804 if (rdev->flags & RADEON_IS_AGP) { 811 if (rdev->flags & RADEON_IS_AGP) {
805 r = radeon_agp_init(rdev); 812 r = radeon_agp_init(rdev);
806 if (r) 813 if (r)
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 33de7637c0c6..1c040d040338 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -228,7 +228,7 @@ static void ttm_bo_vm_close(struct vm_area_struct *vma)
228 vma->vm_private_data = NULL; 228 vma->vm_private_data = NULL;
229} 229}
230 230
231static struct vm_operations_struct ttm_bo_vm_ops = { 231static const struct vm_operations_struct ttm_bo_vm_ops = {
232 .fault = ttm_bo_vm_fault, 232 .fault = ttm_bo_vm_fault,
233 .open = ttm_bo_vm_open, 233 .open = ttm_bo_vm_open,
234 .close = ttm_bo_vm_close 234 .close = ttm_bo_vm_close
diff --git a/drivers/hwmon/fschmd.c b/drivers/hwmon/fschmd.c
index ea955edde87e..2a7a85a6dc36 100644
--- a/drivers/hwmon/fschmd.c
+++ b/drivers/hwmon/fschmd.c
@@ -915,7 +915,7 @@ static int watchdog_ioctl(struct inode *inode, struct file *filp,
915 return ret; 915 return ret;
916} 916}
917 917
918static struct file_operations watchdog_fops = { 918static const struct file_operations watchdog_fops = {
919 .owner = THIS_MODULE, 919 .owner = THIS_MODULE,
920 .llseek = no_llseek, 920 .llseek = no_llseek,
921 .open = watchdog_open, 921 .open = watchdog_open,
diff --git a/drivers/ieee1394/dma.c b/drivers/ieee1394/dma.c
index 1aba8c13fe8f..8e7e3344c4b3 100644
--- a/drivers/ieee1394/dma.c
+++ b/drivers/ieee1394/dma.c
@@ -247,7 +247,7 @@ static int dma_region_pagefault(struct vm_area_struct *vma,
247 return 0; 247 return 0;
248} 248}
249 249
250static struct vm_operations_struct dma_region_vm_ops = { 250static const struct vm_operations_struct dma_region_vm_ops = {
251 .fault = dma_region_pagefault, 251 .fault = dma_region_pagefault,
252}; 252};
253 253
diff --git a/drivers/infiniband/hw/ehca/ehca_uverbs.c b/drivers/infiniband/hw/ehca/ehca_uverbs.c
index 3cb688d29131..f1565cae8ec6 100644
--- a/drivers/infiniband/hw/ehca/ehca_uverbs.c
+++ b/drivers/infiniband/hw/ehca/ehca_uverbs.c
@@ -95,7 +95,7 @@ static void ehca_mm_close(struct vm_area_struct *vma)
95 vma->vm_start, vma->vm_end, *count); 95 vma->vm_start, vma->vm_end, *count);
96} 96}
97 97
98static struct vm_operations_struct vm_ops = { 98static const struct vm_operations_struct vm_ops = {
99 .open = ehca_mm_open, 99 .open = ehca_mm_open,
100 .close = ehca_mm_close, 100 .close = ehca_mm_close,
101}; 101};
diff --git a/drivers/infiniband/hw/ipath/ipath_file_ops.c b/drivers/infiniband/hw/ipath/ipath_file_ops.c
index 38a287006612..40dbe54056c7 100644
--- a/drivers/infiniband/hw/ipath/ipath_file_ops.c
+++ b/drivers/infiniband/hw/ipath/ipath_file_ops.c
@@ -1151,7 +1151,7 @@ static int ipath_file_vma_fault(struct vm_area_struct *vma,
1151 return 0; 1151 return 0;
1152} 1152}
1153 1153
1154static struct vm_operations_struct ipath_file_vm_ops = { 1154static const struct vm_operations_struct ipath_file_vm_ops = {
1155 .fault = ipath_file_vma_fault, 1155 .fault = ipath_file_vma_fault,
1156}; 1156};
1157 1157
diff --git a/drivers/infiniband/hw/ipath/ipath_mmap.c b/drivers/infiniband/hw/ipath/ipath_mmap.c
index fa830e22002f..b28865faf435 100644
--- a/drivers/infiniband/hw/ipath/ipath_mmap.c
+++ b/drivers/infiniband/hw/ipath/ipath_mmap.c
@@ -74,7 +74,7 @@ static void ipath_vma_close(struct vm_area_struct *vma)
74 kref_put(&ip->ref, ipath_release_mmap_info); 74 kref_put(&ip->ref, ipath_release_mmap_info);
75} 75}
76 76
77static struct vm_operations_struct ipath_vm_ops = { 77static const struct vm_operations_struct ipath_vm_ops = {
78 .open = ipath_vma_open, 78 .open = ipath_vma_open,
79 .close = ipath_vma_close, 79 .close = ipath_vma_close,
80}; 80};
diff --git a/drivers/input/input.c b/drivers/input/input.c
index e828aab7dace..16ec33f27c5d 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -1273,6 +1273,7 @@ static int input_dev_uevent(struct device *device, struct kobj_uevent_env *env)
1273 } \ 1273 } \
1274 } while (0) 1274 } while (0)
1275 1275
1276#ifdef CONFIG_PM
1276static void input_dev_reset(struct input_dev *dev, bool activate) 1277static void input_dev_reset(struct input_dev *dev, bool activate)
1277{ 1278{
1278 if (!dev->event) 1279 if (!dev->event)
@@ -1287,7 +1288,6 @@ static void input_dev_reset(struct input_dev *dev, bool activate)
1287 } 1288 }
1288} 1289}
1289 1290
1290#ifdef CONFIG_PM
1291static int input_dev_suspend(struct device *dev) 1291static int input_dev_suspend(struct device *dev)
1292{ 1292{
1293 struct input_dev *input_dev = to_input_dev(dev); 1293 struct input_dev *input_dev = to_input_dev(dev);
diff --git a/drivers/isdn/hardware/mISDN/Kconfig b/drivers/isdn/hardware/mISDN/Kconfig
index bde55d7287fa..eadc1cd34a20 100644
--- a/drivers/isdn/hardware/mISDN/Kconfig
+++ b/drivers/isdn/hardware/mISDN/Kconfig
@@ -78,6 +78,7 @@ config MISDN_NETJET
78 depends on PCI 78 depends on PCI
79 select MISDN_IPAC 79 select MISDN_IPAC
80 select ISDN_HDLC 80 select ISDN_HDLC
81 select ISDN_I4L
81 help 82 help
82 Enable support for Traverse Technologies NETJet PCI cards. 83 Enable support for Traverse Technologies NETJet PCI cards.
83 84
diff --git a/drivers/isdn/i4l/Kconfig b/drivers/isdn/i4l/Kconfig
index dd744ffd240b..07c4e49f9e77 100644
--- a/drivers/isdn/i4l/Kconfig
+++ b/drivers/isdn/i4l/Kconfig
@@ -141,8 +141,7 @@ endmenu
141endif 141endif
142 142
143config ISDN_HDLC 143config ISDN_HDLC
144 tristate 144 tristate
145 depends on HISAX_ST5481
146 select CRC_CCITT 145 select CRC_CCITT
147 select BITREVERSE 146 select BITREVERSE
148 147
diff --git a/drivers/isdn/mISDN/socket.c b/drivers/isdn/mISDN/socket.c
index c36f52137456..feb0fa45b664 100644
--- a/drivers/isdn/mISDN/socket.c
+++ b/drivers/isdn/mISDN/socket.c
@@ -415,7 +415,7 @@ data_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
415} 415}
416 416
417static int data_sock_setsockopt(struct socket *sock, int level, int optname, 417static int data_sock_setsockopt(struct socket *sock, int level, int optname,
418 char __user *optval, int len) 418 char __user *optval, unsigned int len)
419{ 419{
420 struct sock *sk = sock->sk; 420 struct sock *sk = sock->sk;
421 int err = 0, opt = 0; 421 int err = 0, opt = 0;
diff --git a/drivers/lguest/lguest_user.c b/drivers/lguest/lguest_user.c
index b4d3f7ca554f..bd1632388e4a 100644
--- a/drivers/lguest/lguest_user.c
+++ b/drivers/lguest/lguest_user.c
@@ -508,7 +508,7 @@ static int close(struct inode *inode, struct file *file)
508 * uses: reading and writing a character device called /dev/lguest. All the 508 * uses: reading and writing a character device called /dev/lguest. All the
509 * work happens in the read(), write() and close() routines: 509 * work happens in the read(), write() and close() routines:
510 */ 510 */
511static struct file_operations lguest_fops = { 511static const struct file_operations lguest_fops = {
512 .owner = THIS_MODULE, 512 .owner = THIS_MODULE,
513 .release = close, 513 .release = close,
514 .write = write, 514 .write = write,
diff --git a/drivers/md/dm-log-userspace-transfer.c b/drivers/md/dm-log-userspace-transfer.c
index ba0edad2d048..54abf9e303b7 100644
--- a/drivers/md/dm-log-userspace-transfer.c
+++ b/drivers/md/dm-log-userspace-transfer.c
@@ -129,11 +129,13 @@ static int fill_pkg(struct cn_msg *msg, struct dm_ulog_request *tfr)
129 * This is the connector callback that delivers data 129 * This is the connector callback that delivers data
130 * that was sent from userspace. 130 * that was sent from userspace.
131 */ 131 */
132static void cn_ulog_callback(void *data) 132static void cn_ulog_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
133{ 133{
134 struct cn_msg *msg = (struct cn_msg *)data;
135 struct dm_ulog_request *tfr = (struct dm_ulog_request *)(msg + 1); 134 struct dm_ulog_request *tfr = (struct dm_ulog_request *)(msg + 1);
136 135
136 if (!cap_raised(nsp->eff_cap, CAP_SYS_ADMIN))
137 return;
138
137 spin_lock(&receiving_list_lock); 139 spin_lock(&receiving_list_lock);
138 if (msg->len == 0) 140 if (msg->len == 0)
139 fill_pkg(msg, NULL); 141 fill_pkg(msg, NULL);
diff --git a/drivers/media/dvb/dvb-core/dmxdev.c b/drivers/media/dvb/dvb-core/dmxdev.c
index 3750ff48cba1..516414983593 100644
--- a/drivers/media/dvb/dvb-core/dmxdev.c
+++ b/drivers/media/dvb/dvb-core/dmxdev.c
@@ -1203,7 +1203,7 @@ static unsigned int dvb_dvr_poll(struct file *file, poll_table *wait)
1203 return mask; 1203 return mask;
1204} 1204}
1205 1205
1206static struct file_operations dvb_dvr_fops = { 1206static const struct file_operations dvb_dvr_fops = {
1207 .owner = THIS_MODULE, 1207 .owner = THIS_MODULE,
1208 .read = dvb_dvr_read, 1208 .read = dvb_dvr_read,
1209 .write = dvb_dvr_write, 1209 .write = dvb_dvr_write,
diff --git a/drivers/media/dvb/firewire/firedtv-ci.c b/drivers/media/dvb/firewire/firedtv-ci.c
index eeb80d0ea3ff..853e04b7cb36 100644
--- a/drivers/media/dvb/firewire/firedtv-ci.c
+++ b/drivers/media/dvb/firewire/firedtv-ci.c
@@ -215,7 +215,7 @@ static unsigned int fdtv_ca_io_poll(struct file *file, poll_table *wait)
215 return POLLIN; 215 return POLLIN;
216} 216}
217 217
218static struct file_operations fdtv_ca_fops = { 218static const struct file_operations fdtv_ca_fops = {
219 .owner = THIS_MODULE, 219 .owner = THIS_MODULE,
220 .ioctl = dvb_generic_ioctl, 220 .ioctl = dvb_generic_ioctl,
221 .open = dvb_generic_open, 221 .open = dvb_generic_open,
diff --git a/drivers/media/video/cafe_ccic.c b/drivers/media/video/cafe_ccic.c
index 657c481d255c..10230cb3d210 100644
--- a/drivers/media/video/cafe_ccic.c
+++ b/drivers/media/video/cafe_ccic.c
@@ -1325,7 +1325,7 @@ static void cafe_v4l_vm_close(struct vm_area_struct *vma)
1325 mutex_unlock(&sbuf->cam->s_mutex); 1325 mutex_unlock(&sbuf->cam->s_mutex);
1326} 1326}
1327 1327
1328static struct vm_operations_struct cafe_v4l_vm_ops = { 1328static const struct vm_operations_struct cafe_v4l_vm_ops = {
1329 .open = cafe_v4l_vm_open, 1329 .open = cafe_v4l_vm_open,
1330 .close = cafe_v4l_vm_close 1330 .close = cafe_v4l_vm_close
1331}; 1331};
diff --git a/drivers/media/video/et61x251/et61x251_core.c b/drivers/media/video/et61x251/et61x251_core.c
index 74092f436be6..88987a57cf7b 100644
--- a/drivers/media/video/et61x251/et61x251_core.c
+++ b/drivers/media/video/et61x251/et61x251_core.c
@@ -1496,7 +1496,7 @@ static void et61x251_vm_close(struct vm_area_struct* vma)
1496} 1496}
1497 1497
1498 1498
1499static struct vm_operations_struct et61x251_vm_ops = { 1499static const struct vm_operations_struct et61x251_vm_ops = {
1500 .open = et61x251_vm_open, 1500 .open = et61x251_vm_open,
1501 .close = et61x251_vm_close, 1501 .close = et61x251_vm_close,
1502}; 1502};
diff --git a/drivers/media/video/gspca/gspca.c b/drivers/media/video/gspca/gspca.c
index cf6540da1e42..23d3fb776918 100644
--- a/drivers/media/video/gspca/gspca.c
+++ b/drivers/media/video/gspca/gspca.c
@@ -99,7 +99,7 @@ static void gspca_vm_close(struct vm_area_struct *vma)
99 frame->v4l2_buf.flags &= ~V4L2_BUF_FLAG_MAPPED; 99 frame->v4l2_buf.flags &= ~V4L2_BUF_FLAG_MAPPED;
100} 100}
101 101
102static struct vm_operations_struct gspca_vm_ops = { 102static const struct vm_operations_struct gspca_vm_ops = {
103 .open = gspca_vm_open, 103 .open = gspca_vm_open,
104 .close = gspca_vm_close, 104 .close = gspca_vm_close,
105}; 105};
diff --git a/drivers/media/video/meye.c b/drivers/media/video/meye.c
index d0765bed79c9..4b1bc05a462c 100644
--- a/drivers/media/video/meye.c
+++ b/drivers/media/video/meye.c
@@ -1589,7 +1589,7 @@ static void meye_vm_close(struct vm_area_struct *vma)
1589 meye.vma_use_count[idx]--; 1589 meye.vma_use_count[idx]--;
1590} 1590}
1591 1591
1592static struct vm_operations_struct meye_vm_ops = { 1592static const struct vm_operations_struct meye_vm_ops = {
1593 .open = meye_vm_open, 1593 .open = meye_vm_open,
1594 .close = meye_vm_close, 1594 .close = meye_vm_close,
1595}; 1595};
diff --git a/drivers/media/video/sn9c102/sn9c102_core.c b/drivers/media/video/sn9c102/sn9c102_core.c
index 9d84c94e8a40..4a7711c3e745 100644
--- a/drivers/media/video/sn9c102/sn9c102_core.c
+++ b/drivers/media/video/sn9c102/sn9c102_core.c
@@ -2077,7 +2077,7 @@ static void sn9c102_vm_close(struct vm_area_struct* vma)
2077} 2077}
2078 2078
2079 2079
2080static struct vm_operations_struct sn9c102_vm_ops = { 2080static const struct vm_operations_struct sn9c102_vm_ops = {
2081 .open = sn9c102_vm_open, 2081 .open = sn9c102_vm_open,
2082 .close = sn9c102_vm_close, 2082 .close = sn9c102_vm_close,
2083}; 2083};
diff --git a/drivers/media/video/stk-webcam.c b/drivers/media/video/stk-webcam.c
index 0b996ea4134e..6b41865f42bd 100644
--- a/drivers/media/video/stk-webcam.c
+++ b/drivers/media/video/stk-webcam.c
@@ -790,7 +790,7 @@ static void stk_v4l_vm_close(struct vm_area_struct *vma)
790 if (sbuf->mapcount == 0) 790 if (sbuf->mapcount == 0)
791 sbuf->v4lbuf.flags &= ~V4L2_BUF_FLAG_MAPPED; 791 sbuf->v4lbuf.flags &= ~V4L2_BUF_FLAG_MAPPED;
792} 792}
793static struct vm_operations_struct stk_v4l_vm_ops = { 793static const struct vm_operations_struct stk_v4l_vm_ops = {
794 .open = stk_v4l_vm_open, 794 .open = stk_v4l_vm_open,
795 .close = stk_v4l_vm_close 795 .close = stk_v4l_vm_close
796}; 796};
diff --git a/drivers/media/video/uvc/uvc_v4l2.c b/drivers/media/video/uvc/uvc_v4l2.c
index 9e7351569b5d..a2bdd806efab 100644
--- a/drivers/media/video/uvc/uvc_v4l2.c
+++ b/drivers/media/video/uvc/uvc_v4l2.c
@@ -1069,7 +1069,7 @@ static void uvc_vm_close(struct vm_area_struct *vma)
1069 buffer->vma_use_count--; 1069 buffer->vma_use_count--;
1070} 1070}
1071 1071
1072static struct vm_operations_struct uvc_vm_ops = { 1072static const struct vm_operations_struct uvc_vm_ops = {
1073 .open = uvc_vm_open, 1073 .open = uvc_vm_open,
1074 .close = uvc_vm_close, 1074 .close = uvc_vm_close,
1075}; 1075};
diff --git a/drivers/media/video/videobuf-dma-contig.c b/drivers/media/video/videobuf-dma-contig.c
index d09ce83a9429..635ffc7b0391 100644
--- a/drivers/media/video/videobuf-dma-contig.c
+++ b/drivers/media/video/videobuf-dma-contig.c
@@ -105,7 +105,7 @@ static void videobuf_vm_close(struct vm_area_struct *vma)
105 } 105 }
106} 106}
107 107
108static struct vm_operations_struct videobuf_vm_ops = { 108static const struct vm_operations_struct videobuf_vm_ops = {
109 .open = videobuf_vm_open, 109 .open = videobuf_vm_open,
110 .close = videobuf_vm_close, 110 .close = videobuf_vm_close,
111}; 111};
diff --git a/drivers/media/video/videobuf-dma-sg.c b/drivers/media/video/videobuf-dma-sg.c
index a8dd22ace3fb..53cdd67cebe1 100644
--- a/drivers/media/video/videobuf-dma-sg.c
+++ b/drivers/media/video/videobuf-dma-sg.c
@@ -394,7 +394,7 @@ videobuf_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
394 return 0; 394 return 0;
395} 395}
396 396
397static struct vm_operations_struct videobuf_vm_ops = 397static const struct vm_operations_struct videobuf_vm_ops =
398{ 398{
399 .open = videobuf_vm_open, 399 .open = videobuf_vm_open,
400 .close = videobuf_vm_close, 400 .close = videobuf_vm_close,
diff --git a/drivers/media/video/videobuf-vmalloc.c b/drivers/media/video/videobuf-vmalloc.c
index 30ae30f99ccc..35f3900c5633 100644
--- a/drivers/media/video/videobuf-vmalloc.c
+++ b/drivers/media/video/videobuf-vmalloc.c
@@ -116,7 +116,7 @@ static void videobuf_vm_close(struct vm_area_struct *vma)
116 return; 116 return;
117} 117}
118 118
119static struct vm_operations_struct videobuf_vm_ops = 119static const struct vm_operations_struct videobuf_vm_ops =
120{ 120{
121 .open = videobuf_vm_open, 121 .open = videobuf_vm_open,
122 .close = videobuf_vm_close, 122 .close = videobuf_vm_close,
diff --git a/drivers/media/video/vino.c b/drivers/media/video/vino.c
index cd6a3446ab7e..b034a81d2b1c 100644
--- a/drivers/media/video/vino.c
+++ b/drivers/media/video/vino.c
@@ -3857,7 +3857,7 @@ static void vino_vm_close(struct vm_area_struct *vma)
3857 dprintk("vino_vm_close(): count = %d\n", fb->map_count); 3857 dprintk("vino_vm_close(): count = %d\n", fb->map_count);
3858} 3858}
3859 3859
3860static struct vm_operations_struct vino_vm_ops = { 3860static const struct vm_operations_struct vino_vm_ops = {
3861 .open = vino_vm_open, 3861 .open = vino_vm_open,
3862 .close = vino_vm_close, 3862 .close = vino_vm_close,
3863}; 3863};
diff --git a/drivers/media/video/zc0301/zc0301_core.c b/drivers/media/video/zc0301/zc0301_core.c
index b3c6436b33ba..312a71336fd0 100644
--- a/drivers/media/video/zc0301/zc0301_core.c
+++ b/drivers/media/video/zc0301/zc0301_core.c
@@ -935,7 +935,7 @@ static void zc0301_vm_close(struct vm_area_struct* vma)
935} 935}
936 936
937 937
938static struct vm_operations_struct zc0301_vm_ops = { 938static const struct vm_operations_struct zc0301_vm_ops = {
939 .open = zc0301_vm_open, 939 .open = zc0301_vm_open,
940 .close = zc0301_vm_close, 940 .close = zc0301_vm_close,
941}; 941};
diff --git a/drivers/media/video/zoran/zoran_driver.c b/drivers/media/video/zoran/zoran_driver.c
index bcdefb1bcb3d..47137deafcfd 100644
--- a/drivers/media/video/zoran/zoran_driver.c
+++ b/drivers/media/video/zoran/zoran_driver.c
@@ -3172,7 +3172,7 @@ zoran_vm_close (struct vm_area_struct *vma)
3172 mutex_unlock(&zr->resource_lock); 3172 mutex_unlock(&zr->resource_lock);
3173} 3173}
3174 3174
3175static struct vm_operations_struct zoran_vm_ops = { 3175static const struct vm_operations_struct zoran_vm_ops = {
3176 .open = zoran_vm_open, 3176 .open = zoran_vm_open,
3177 .close = zoran_vm_close, 3177 .close = zoran_vm_close,
3178}; 3178};
diff --git a/drivers/misc/phantom.c b/drivers/misc/phantom.c
index fa57b67593ae..90a95ce8dc34 100644
--- a/drivers/misc/phantom.c
+++ b/drivers/misc/phantom.c
@@ -271,7 +271,7 @@ static unsigned int phantom_poll(struct file *file, poll_table *wait)
271 return mask; 271 return mask;
272} 272}
273 273
274static struct file_operations phantom_file_ops = { 274static const struct file_operations phantom_file_ops = {
275 .open = phantom_open, 275 .open = phantom_open,
276 .release = phantom_release, 276 .release = phantom_release,
277 .unlocked_ioctl = phantom_ioctl, 277 .unlocked_ioctl = phantom_ioctl,
diff --git a/drivers/misc/sgi-gru/grufile.c b/drivers/misc/sgi-gru/grufile.c
index aed609832bc2..41c8fe2a928c 100644
--- a/drivers/misc/sgi-gru/grufile.c
+++ b/drivers/misc/sgi-gru/grufile.c
@@ -53,7 +53,6 @@ struct gru_stats_s gru_stats;
53/* Guaranteed user available resources on each node */ 53/* Guaranteed user available resources on each node */
54static int max_user_cbrs, max_user_dsr_bytes; 54static int max_user_cbrs, max_user_dsr_bytes;
55 55
56static struct file_operations gru_fops;
57static struct miscdevice gru_miscdev; 56static struct miscdevice gru_miscdev;
58 57
59 58
@@ -426,7 +425,7 @@ static void __exit gru_exit(void)
426 gru_proc_exit(); 425 gru_proc_exit();
427} 426}
428 427
429static struct file_operations gru_fops = { 428static const struct file_operations gru_fops = {
430 .owner = THIS_MODULE, 429 .owner = THIS_MODULE,
431 .unlocked_ioctl = gru_file_unlocked_ioctl, 430 .unlocked_ioctl = gru_file_unlocked_ioctl,
432 .mmap = gru_file_mmap, 431 .mmap = gru_file_mmap,
@@ -438,7 +437,7 @@ static struct miscdevice gru_miscdev = {
438 .fops = &gru_fops, 437 .fops = &gru_fops,
439}; 438};
440 439
441struct vm_operations_struct gru_vm_ops = { 440const struct vm_operations_struct gru_vm_ops = {
442 .close = gru_vma_close, 441 .close = gru_vma_close,
443 .fault = gru_fault, 442 .fault = gru_fault,
444}; 443};
diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
index 34ab3d453919..46990bcfa536 100644
--- a/drivers/misc/sgi-gru/grutables.h
+++ b/drivers/misc/sgi-gru/grutables.h
@@ -624,7 +624,7 @@ static inline int is_kernel_context(struct gru_thread_state *gts)
624 */ 624 */
625struct gru_unload_context_req; 625struct gru_unload_context_req;
626 626
627extern struct vm_operations_struct gru_vm_ops; 627extern const struct vm_operations_struct gru_vm_ops;
628extern struct device *grudev; 628extern struct device *grudev;
629 629
630extern struct gru_vma_data *gru_alloc_vma_data(struct vm_area_struct *vma, 630extern struct gru_vma_data *gru_alloc_vma_data(struct vm_area_struct *vma,
diff --git a/drivers/mmc/core/debugfs.c b/drivers/mmc/core/debugfs.c
index 610dbd1fcc82..96d10f40fb23 100644
--- a/drivers/mmc/core/debugfs.c
+++ b/drivers/mmc/core/debugfs.c
@@ -240,7 +240,7 @@ static int mmc_ext_csd_release(struct inode *inode, struct file *file)
240 return 0; 240 return 0;
241} 241}
242 242
243static struct file_operations mmc_dbg_ext_csd_fops = { 243static const struct file_operations mmc_dbg_ext_csd_fops = {
244 .open = mmc_ext_csd_open, 244 .open = mmc_ext_csd_open,
245 .read = mmc_ext_csd_read, 245 .read = mmc_ext_csd_read,
246 .release = mmc_ext_csd_release, 246 .release = mmc_ext_csd_release,
diff --git a/drivers/mmc/core/sdio_cis.c b/drivers/mmc/core/sdio_cis.c
index 6636354b48ce..e1035c895808 100644
--- a/drivers/mmc/core/sdio_cis.c
+++ b/drivers/mmc/core/sdio_cis.c
@@ -98,6 +98,22 @@ static const unsigned char speed_val[16] =
98static const unsigned int speed_unit[8] = 98static const unsigned int speed_unit[8] =
99 { 10000, 100000, 1000000, 10000000, 0, 0, 0, 0 }; 99 { 10000, 100000, 1000000, 10000000, 0, 0, 0, 0 };
100 100
101/* FUNCE tuples with these types get passed to SDIO drivers */
102static const unsigned char funce_type_whitelist[] = {
103 4 /* CISTPL_FUNCE_LAN_NODE_ID used in Broadcom cards */
104};
105
106static int cistpl_funce_whitelisted(unsigned char type)
107{
108 int i;
109
110 for (i = 0; i < ARRAY_SIZE(funce_type_whitelist); i++) {
111 if (funce_type_whitelist[i] == type)
112 return 1;
113 }
114 return 0;
115}
116
101static int cistpl_funce_common(struct mmc_card *card, 117static int cistpl_funce_common(struct mmc_card *card,
102 const unsigned char *buf, unsigned size) 118 const unsigned char *buf, unsigned size)
103{ 119{
@@ -120,6 +136,10 @@ static int cistpl_funce_func(struct sdio_func *func,
120 unsigned vsn; 136 unsigned vsn;
121 unsigned min_size; 137 unsigned min_size;
122 138
139 /* let SDIO drivers take care of whitelisted FUNCE tuples */
140 if (cistpl_funce_whitelisted(buf[0]))
141 return -EILSEQ;
142
123 vsn = func->card->cccr.sdio_vsn; 143 vsn = func->card->cccr.sdio_vsn;
124 min_size = (vsn == SDIO_SDIO_REV_1_00) ? 28 : 42; 144 min_size = (vsn == SDIO_SDIO_REV_1_00) ? 28 : 42;
125 145
@@ -154,13 +174,12 @@ static int cistpl_funce(struct mmc_card *card, struct sdio_func *func,
154 else 174 else
155 ret = cistpl_funce_common(card, buf, size); 175 ret = cistpl_funce_common(card, buf, size);
156 176
157 if (ret) { 177 if (ret && ret != -EILSEQ) {
158 printk(KERN_ERR "%s: bad CISTPL_FUNCE size %u " 178 printk(KERN_ERR "%s: bad CISTPL_FUNCE size %u "
159 "type %u\n", mmc_hostname(card->host), size, buf[0]); 179 "type %u\n", mmc_hostname(card->host), size, buf[0]);
160 return ret;
161 } 180 }
162 181
163 return 0; 182 return ret;
164} 183}
165 184
166typedef int (tpl_parse_t)(struct mmc_card *, struct sdio_func *, 185typedef int (tpl_parse_t)(struct mmc_card *, struct sdio_func *,
@@ -253,21 +272,12 @@ static int sdio_read_cis(struct mmc_card *card, struct sdio_func *func)
253 for (i = 0; i < ARRAY_SIZE(cis_tpl_list); i++) 272 for (i = 0; i < ARRAY_SIZE(cis_tpl_list); i++)
254 if (cis_tpl_list[i].code == tpl_code) 273 if (cis_tpl_list[i].code == tpl_code)
255 break; 274 break;
256 if (i >= ARRAY_SIZE(cis_tpl_list)) { 275 if (i < ARRAY_SIZE(cis_tpl_list)) {
257 /* this tuple is unknown to the core */
258 this->next = NULL;
259 this->code = tpl_code;
260 this->size = tpl_link;
261 *prev = this;
262 prev = &this->next;
263 printk(KERN_DEBUG
264 "%s: queuing CIS tuple 0x%02x length %u\n",
265 mmc_hostname(card->host), tpl_code, tpl_link);
266 } else {
267 const struct cis_tpl *tpl = cis_tpl_list + i; 276 const struct cis_tpl *tpl = cis_tpl_list + i;
268 if (tpl_link < tpl->min_size) { 277 if (tpl_link < tpl->min_size) {
269 printk(KERN_ERR 278 printk(KERN_ERR
270 "%s: bad CIS tuple 0x%02x (length = %u, expected >= %u)\n", 279 "%s: bad CIS tuple 0x%02x"
280 " (length = %u, expected >= %u)\n",
271 mmc_hostname(card->host), 281 mmc_hostname(card->host),
272 tpl_code, tpl_link, tpl->min_size); 282 tpl_code, tpl_link, tpl->min_size);
273 ret = -EINVAL; 283 ret = -EINVAL;
@@ -275,7 +285,30 @@ static int sdio_read_cis(struct mmc_card *card, struct sdio_func *func)
275 ret = tpl->parse(card, func, 285 ret = tpl->parse(card, func,
276 this->data, tpl_link); 286 this->data, tpl_link);
277 } 287 }
278 kfree(this); 288 /*
289 * We don't need the tuple anymore if it was
290 * successfully parsed by the SDIO core or if it is
291 * not going to be parsed by SDIO drivers.
292 */
293 if (!ret || ret != -EILSEQ)
294 kfree(this);
295 } else {
296 /* unknown tuple */
297 ret = -EILSEQ;
298 }
299
300 if (ret == -EILSEQ) {
301 /* this tuple is unknown to the core or whitelisted */
302 this->next = NULL;
303 this->code = tpl_code;
304 this->size = tpl_link;
305 *prev = this;
306 prev = &this->next;
307 printk(KERN_DEBUG
308 "%s: queuing CIS tuple 0x%02x length %u\n",
309 mmc_hostname(card->host), tpl_code, tpl_link);
310 /* keep on analyzing tuples */
311 ret = 0;
279 } 312 }
280 313
281 ptr += tpl_link; 314 ptr += tpl_link;
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 7cb057f3f883..432ae8358c86 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -276,6 +276,47 @@ config MMC_S3C
276 276
277 If unsure, say N. 277 If unsure, say N.
278 278
279config MMC_S3C_HW_SDIO_IRQ
280 bool "Hardware support for SDIO IRQ"
281 depends on MMC_S3C
282 help
283 Enable the hardware support for SDIO interrupts instead of using
284 the generic polling code.
285
286choice
287 prompt "Samsung S3C SD/MMC transfer code"
288 depends on MMC_S3C
289
290config MMC_S3C_PIO
291 bool "Use PIO transfers only"
292 help
293 Use PIO to transfer data between memory and the hardware.
294
295 PIO is slower than DMA as it requires CPU instructions to
296 move the data. This has been the traditional default for
297 the S3C MCI driver.
298
299config MMC_S3C_DMA
300 bool "Use DMA transfers only (EXPERIMENTAL)"
301 depends on EXPERIMENTAL
302 help
303 Use DMA to transfer data between memory and the hardare.
304
305 Currently, the DMA support in this driver seems to not be
306 working properly and needs to be debugged before this
307 option is useful.
308
309config MMC_S3C_PIODMA
310 bool "Support for both PIO and DMA (EXPERIMENTAL)"
311 help
312 Compile both the PIO and DMA transfer routines into the
313 driver and let the platform select at run-time which one
314 is best.
315
316 See notes for the DMA option.
317
318endchoice
319
279config MMC_SDRICOH_CS 320config MMC_SDRICOH_CS
280 tristate "MMC/SD driver for Ricoh Bay1Controllers (EXPERIMENTAL)" 321 tristate "MMC/SD driver for Ricoh Bay1Controllers (EXPERIMENTAL)"
281 depends on EXPERIMENTAL && PCI && PCMCIA 322 depends on EXPERIMENTAL && PCI && PCMCIA
diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c
index 8c08cd7efa7f..99b74a351020 100644
--- a/drivers/mmc/host/s3cmci.c
+++ b/drivers/mmc/host/s3cmci.c
@@ -17,6 +17,8 @@
17#include <linux/mmc/host.h> 17#include <linux/mmc/host.h>
18#include <linux/platform_device.h> 18#include <linux/platform_device.h>
19#include <linux/cpufreq.h> 19#include <linux/cpufreq.h>
20#include <linux/debugfs.h>
21#include <linux/seq_file.h>
20#include <linux/gpio.h> 22#include <linux/gpio.h>
21#include <linux/irq.h> 23#include <linux/irq.h>
22#include <linux/io.h> 24#include <linux/io.h>
@@ -58,8 +60,6 @@ static const int dbgmap_debug = dbg_err | dbg_debug;
58 dev_dbg(&host->pdev->dev, args); \ 60 dev_dbg(&host->pdev->dev, args); \
59 } while (0) 61 } while (0)
60 62
61#define RESSIZE(ressource) (((ressource)->end - (ressource)->start)+1)
62
63static struct s3c2410_dma_client s3cmci_dma_client = { 63static struct s3c2410_dma_client s3cmci_dma_client = {
64 .name = "s3c-mci", 64 .name = "s3c-mci",
65}; 65};
@@ -164,6 +164,40 @@ static void dbg_dumpregs(struct s3cmci_host *host, char *prefix) { }
164 164
165#endif /* CONFIG_MMC_DEBUG */ 165#endif /* CONFIG_MMC_DEBUG */
166 166
167/**
168 * s3cmci_host_usedma - return whether the host is using dma or pio
169 * @host: The host state
170 *
171 * Return true if the host is using DMA to transfer data, else false
172 * to use PIO mode. Will return static data depending on the driver
173 * configuration.
174 */
175static inline bool s3cmci_host_usedma(struct s3cmci_host *host)
176{
177#ifdef CONFIG_MMC_S3C_PIO
178 return false;
179#elif defined(CONFIG_MMC_S3C_DMA)
180 return true;
181#else
182 return host->dodma;
183#endif
184}
185
186/**
187 * s3cmci_host_canpio - return true if host has pio code available
188 *
189 * Return true if the driver has been compiled with the PIO support code
190 * available.
191 */
192static inline bool s3cmci_host_canpio(void)
193{
194#ifdef CONFIG_MMC_S3C_PIO
195 return true;
196#else
197 return false;
198#endif
199}
200
167static inline u32 enable_imask(struct s3cmci_host *host, u32 imask) 201static inline u32 enable_imask(struct s3cmci_host *host, u32 imask)
168{ 202{
169 u32 newmask; 203 u32 newmask;
@@ -190,7 +224,33 @@ static inline u32 disable_imask(struct s3cmci_host *host, u32 imask)
190 224
191static inline void clear_imask(struct s3cmci_host *host) 225static inline void clear_imask(struct s3cmci_host *host)
192{ 226{
193 writel(0, host->base + host->sdiimsk); 227 u32 mask = readl(host->base + host->sdiimsk);
228
229 /* preserve the SDIO IRQ mask state */
230 mask &= S3C2410_SDIIMSK_SDIOIRQ;
231 writel(mask, host->base + host->sdiimsk);
232}
233
234/**
235 * s3cmci_check_sdio_irq - test whether the SDIO IRQ is being signalled
236 * @host: The host to check.
237 *
238 * Test to see if the SDIO interrupt is being signalled in case the
239 * controller has failed to re-detect a card interrupt. Read GPE8 and
240 * see if it is low and if so, signal a SDIO interrupt.
241 *
242 * This is currently called if a request is finished (we assume that the
243 * bus is now idle) and when the SDIO IRQ is enabled in case the IRQ is
244 * already being indicated.
245*/
246static void s3cmci_check_sdio_irq(struct s3cmci_host *host)
247{
248 if (host->sdio_irqen) {
249 if (gpio_get_value(S3C2410_GPE(8)) == 0) {
250 printk(KERN_DEBUG "%s: signalling irq\n", __func__);
251 mmc_signal_sdio_irq(host->mmc);
252 }
253 }
194} 254}
195 255
196static inline int get_data_buffer(struct s3cmci_host *host, 256static inline int get_data_buffer(struct s3cmci_host *host,
@@ -238,6 +298,64 @@ static inline u32 fifo_free(struct s3cmci_host *host)
238 return 63 - fifostat; 298 return 63 - fifostat;
239} 299}
240 300
301/**
302 * s3cmci_enable_irq - enable IRQ, after having disabled it.
303 * @host: The device state.
304 * @more: True if more IRQs are expected from transfer.
305 *
306 * Enable the main IRQ if needed after it has been disabled.
307 *
308 * The IRQ can be one of the following states:
309 * - disabled during IDLE
310 * - disabled whilst processing data
311 * - enabled during transfer
312 * - enabled whilst awaiting SDIO interrupt detection
313 */
314static void s3cmci_enable_irq(struct s3cmci_host *host, bool more)
315{
316 unsigned long flags;
317 bool enable = false;
318
319 local_irq_save(flags);
320
321 host->irq_enabled = more;
322 host->irq_disabled = false;
323
324 enable = more | host->sdio_irqen;
325
326 if (host->irq_state != enable) {
327 host->irq_state = enable;
328
329 if (enable)
330 enable_irq(host->irq);
331 else
332 disable_irq(host->irq);
333 }
334
335 local_irq_restore(flags);
336}
337
338/**
339 *
340 */
341static void s3cmci_disable_irq(struct s3cmci_host *host, bool transfer)
342{
343 unsigned long flags;
344
345 local_irq_save(flags);
346
347 //printk(KERN_DEBUG "%s: transfer %d\n", __func__, transfer);
348
349 host->irq_disabled = transfer;
350
351 if (transfer && host->irq_state) {
352 host->irq_state = false;
353 disable_irq(host->irq);
354 }
355
356 local_irq_restore(flags);
357}
358
241static void do_pio_read(struct s3cmci_host *host) 359static void do_pio_read(struct s3cmci_host *host)
242{ 360{
243 int res; 361 int res;
@@ -374,8 +492,7 @@ static void pio_tasklet(unsigned long data)
374{ 492{
375 struct s3cmci_host *host = (struct s3cmci_host *) data; 493 struct s3cmci_host *host = (struct s3cmci_host *) data;
376 494
377 495 s3cmci_disable_irq(host, true);
378 disable_irq(host->irq);
379 496
380 if (host->pio_active == XFER_WRITE) 497 if (host->pio_active == XFER_WRITE)
381 do_pio_write(host); 498 do_pio_write(host);
@@ -395,9 +512,10 @@ static void pio_tasklet(unsigned long data)
395 host->mrq->data->error = -EINVAL; 512 host->mrq->data->error = -EINVAL;
396 } 513 }
397 514
515 s3cmci_enable_irq(host, false);
398 finalize_request(host); 516 finalize_request(host);
399 } else 517 } else
400 enable_irq(host->irq); 518 s3cmci_enable_irq(host, true);
401} 519}
402 520
403/* 521/*
@@ -432,17 +550,27 @@ static irqreturn_t s3cmci_irq(int irq, void *dev_id)
432 struct s3cmci_host *host = dev_id; 550 struct s3cmci_host *host = dev_id;
433 struct mmc_command *cmd; 551 struct mmc_command *cmd;
434 u32 mci_csta, mci_dsta, mci_fsta, mci_dcnt, mci_imsk; 552 u32 mci_csta, mci_dsta, mci_fsta, mci_dcnt, mci_imsk;
435 u32 mci_cclear, mci_dclear; 553 u32 mci_cclear = 0, mci_dclear;
436 unsigned long iflags; 554 unsigned long iflags;
437 555
556 mci_dsta = readl(host->base + S3C2410_SDIDSTA);
557 mci_imsk = readl(host->base + host->sdiimsk);
558
559 if (mci_dsta & S3C2410_SDIDSTA_SDIOIRQDETECT) {
560 if (mci_imsk & S3C2410_SDIIMSK_SDIOIRQ) {
561 mci_dclear = S3C2410_SDIDSTA_SDIOIRQDETECT;
562 writel(mci_dclear, host->base + S3C2410_SDIDSTA);
563
564 mmc_signal_sdio_irq(host->mmc);
565 return IRQ_HANDLED;
566 }
567 }
568
438 spin_lock_irqsave(&host->complete_lock, iflags); 569 spin_lock_irqsave(&host->complete_lock, iflags);
439 570
440 mci_csta = readl(host->base + S3C2410_SDICMDSTAT); 571 mci_csta = readl(host->base + S3C2410_SDICMDSTAT);
441 mci_dsta = readl(host->base + S3C2410_SDIDSTA);
442 mci_dcnt = readl(host->base + S3C2410_SDIDCNT); 572 mci_dcnt = readl(host->base + S3C2410_SDIDCNT);
443 mci_fsta = readl(host->base + S3C2410_SDIFSTA); 573 mci_fsta = readl(host->base + S3C2410_SDIFSTA);
444 mci_imsk = readl(host->base + host->sdiimsk);
445 mci_cclear = 0;
446 mci_dclear = 0; 574 mci_dclear = 0;
447 575
448 if ((host->complete_what == COMPLETION_NONE) || 576 if ((host->complete_what == COMPLETION_NONE) ||
@@ -466,7 +594,7 @@ static irqreturn_t s3cmci_irq(int irq, void *dev_id)
466 goto irq_out; 594 goto irq_out;
467 } 595 }
468 596
469 if (!host->dodma) { 597 if (!s3cmci_host_usedma(host)) {
470 if ((host->pio_active == XFER_WRITE) && 598 if ((host->pio_active == XFER_WRITE) &&
471 (mci_fsta & S3C2410_SDIFSTA_TFDET)) { 599 (mci_fsta & S3C2410_SDIFSTA_TFDET)) {
472 600
@@ -673,6 +801,7 @@ static void s3cmci_dma_done_callback(struct s3c2410_dma_chan *dma_ch,
673 dbg(host, dbg_dma, "DMA FINISHED Size:%i DSTA:%08x DCNT:%08x\n", 801 dbg(host, dbg_dma, "DMA FINISHED Size:%i DSTA:%08x DCNT:%08x\n",
674 size, mci_dsta, mci_dcnt); 802 size, mci_dsta, mci_dcnt);
675 803
804 host->dma_complete = 1;
676 host->complete_what = COMPLETION_FINALIZE; 805 host->complete_what = COMPLETION_FINALIZE;
677 806
678out: 807out:
@@ -683,9 +812,9 @@ out:
683fail_request: 812fail_request:
684 host->mrq->data->error = -EINVAL; 813 host->mrq->data->error = -EINVAL;
685 host->complete_what = COMPLETION_FINALIZE; 814 host->complete_what = COMPLETION_FINALIZE;
686 writel(0, host->base + host->sdiimsk); 815 clear_imask(host);
687 goto out;
688 816
817 goto out;
689} 818}
690 819
691static void finalize_request(struct s3cmci_host *host) 820static void finalize_request(struct s3cmci_host *host)
@@ -702,8 +831,9 @@ static void finalize_request(struct s3cmci_host *host)
702 831
703 if (cmd->data && (cmd->error == 0) && 832 if (cmd->data && (cmd->error == 0) &&
704 (cmd->data->error == 0)) { 833 (cmd->data->error == 0)) {
705 if (host->dodma && (!host->dma_complete)) { 834 if (s3cmci_host_usedma(host) && (!host->dma_complete)) {
706 dbg(host, dbg_dma, "DMA Missing!\n"); 835 dbg(host, dbg_dma, "DMA Missing (%d)!\n",
836 host->dma_complete);
707 return; 837 return;
708 } 838 }
709 } 839 }
@@ -728,7 +858,7 @@ static void finalize_request(struct s3cmci_host *host)
728 writel(0, host->base + S3C2410_SDICMDARG); 858 writel(0, host->base + S3C2410_SDICMDARG);
729 writel(S3C2410_SDIDCON_STOP, host->base + S3C2410_SDIDCON); 859 writel(S3C2410_SDIDCON_STOP, host->base + S3C2410_SDIDCON);
730 writel(0, host->base + S3C2410_SDICMDCON); 860 writel(0, host->base + S3C2410_SDICMDCON);
731 writel(0, host->base + host->sdiimsk); 861 clear_imask(host);
732 862
733 if (cmd->data && cmd->error) 863 if (cmd->data && cmd->error)
734 cmd->data->error = cmd->error; 864 cmd->data->error = cmd->error;
@@ -754,7 +884,7 @@ static void finalize_request(struct s3cmci_host *host)
754 /* If we had an error while transfering data we flush the 884 /* If we had an error while transfering data we flush the
755 * DMA channel and the fifo to clear out any garbage. */ 885 * DMA channel and the fifo to clear out any garbage. */
756 if (mrq->data->error != 0) { 886 if (mrq->data->error != 0) {
757 if (host->dodma) 887 if (s3cmci_host_usedma(host))
758 s3c2410_dma_ctrl(host->dma, S3C2410_DMAOP_FLUSH); 888 s3c2410_dma_ctrl(host->dma, S3C2410_DMAOP_FLUSH);
759 889
760 if (host->is2440) { 890 if (host->is2440) {
@@ -776,6 +906,8 @@ static void finalize_request(struct s3cmci_host *host)
776request_done: 906request_done:
777 host->complete_what = COMPLETION_NONE; 907 host->complete_what = COMPLETION_NONE;
778 host->mrq = NULL; 908 host->mrq = NULL;
909
910 s3cmci_check_sdio_irq(host);
779 mmc_request_done(host->mmc, mrq); 911 mmc_request_done(host->mmc, mrq);
780} 912}
781 913
@@ -872,7 +1004,7 @@ static int s3cmci_setup_data(struct s3cmci_host *host, struct mmc_data *data)
872 1004
873 dcon = data->blocks & S3C2410_SDIDCON_BLKNUM_MASK; 1005 dcon = data->blocks & S3C2410_SDIDCON_BLKNUM_MASK;
874 1006
875 if (host->dodma) 1007 if (s3cmci_host_usedma(host))
876 dcon |= S3C2410_SDIDCON_DMAEN; 1008 dcon |= S3C2410_SDIDCON_DMAEN;
877 1009
878 if (host->bus_width == MMC_BUS_WIDTH_4) 1010 if (host->bus_width == MMC_BUS_WIDTH_4)
@@ -950,7 +1082,7 @@ static int s3cmci_prepare_pio(struct s3cmci_host *host, struct mmc_data *data)
950static int s3cmci_prepare_dma(struct s3cmci_host *host, struct mmc_data *data) 1082static int s3cmci_prepare_dma(struct s3cmci_host *host, struct mmc_data *data)
951{ 1083{
952 int dma_len, i; 1084 int dma_len, i;
953 int rw = (data->flags & MMC_DATA_WRITE) ? 1 : 0; 1085 int rw = data->flags & MMC_DATA_WRITE;
954 1086
955 BUG_ON((data->flags & BOTH_DIR) == BOTH_DIR); 1087 BUG_ON((data->flags & BOTH_DIR) == BOTH_DIR);
956 1088
@@ -958,7 +1090,7 @@ static int s3cmci_prepare_dma(struct s3cmci_host *host, struct mmc_data *data)
958 s3c2410_dma_ctrl(host->dma, S3C2410_DMAOP_FLUSH); 1090 s3c2410_dma_ctrl(host->dma, S3C2410_DMAOP_FLUSH);
959 1091
960 dma_len = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len, 1092 dma_len = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
961 (rw) ? DMA_TO_DEVICE : DMA_FROM_DEVICE); 1093 rw ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
962 1094
963 if (dma_len == 0) 1095 if (dma_len == 0)
964 return -ENOMEM; 1096 return -ENOMEM;
@@ -969,11 +1101,11 @@ static int s3cmci_prepare_dma(struct s3cmci_host *host, struct mmc_data *data)
969 for (i = 0; i < dma_len; i++) { 1101 for (i = 0; i < dma_len; i++) {
970 int res; 1102 int res;
971 1103
972 dbg(host, dbg_dma, "enqueue %i:%u@%u\n", i, 1104 dbg(host, dbg_dma, "enqueue %i: %08x@%u\n", i,
973 sg_dma_address(&data->sg[i]), 1105 sg_dma_address(&data->sg[i]),
974 sg_dma_len(&data->sg[i])); 1106 sg_dma_len(&data->sg[i]));
975 1107
976 res = s3c2410_dma_enqueue(host->dma, (void *) host, 1108 res = s3c2410_dma_enqueue(host->dma, host,
977 sg_dma_address(&data->sg[i]), 1109 sg_dma_address(&data->sg[i]),
978 sg_dma_len(&data->sg[i])); 1110 sg_dma_len(&data->sg[i]));
979 1111
@@ -1018,7 +1150,7 @@ static void s3cmci_send_request(struct mmc_host *mmc)
1018 return; 1150 return;
1019 } 1151 }
1020 1152
1021 if (host->dodma) 1153 if (s3cmci_host_usedma(host))
1022 res = s3cmci_prepare_dma(host, cmd->data); 1154 res = s3cmci_prepare_dma(host, cmd->data);
1023 else 1155 else
1024 res = s3cmci_prepare_pio(host, cmd->data); 1156 res = s3cmci_prepare_pio(host, cmd->data);
@@ -1037,7 +1169,7 @@ static void s3cmci_send_request(struct mmc_host *mmc)
1037 s3cmci_send_command(host, cmd); 1169 s3cmci_send_command(host, cmd);
1038 1170
1039 /* Enable Interrupt */ 1171 /* Enable Interrupt */
1040 enable_irq(host->irq); 1172 s3cmci_enable_irq(host, true);
1041} 1173}
1042 1174
1043static int s3cmci_card_present(struct mmc_host *mmc) 1175static int s3cmci_card_present(struct mmc_host *mmc)
@@ -1049,7 +1181,7 @@ static int s3cmci_card_present(struct mmc_host *mmc)
1049 if (pdata->gpio_detect == 0) 1181 if (pdata->gpio_detect == 0)
1050 return -ENOSYS; 1182 return -ENOSYS;
1051 1183
1052 ret = s3c2410_gpio_getpin(pdata->gpio_detect) ? 0 : 1; 1184 ret = gpio_get_value(pdata->gpio_detect) ? 0 : 1;
1053 return ret ^ pdata->detect_invert; 1185 return ret ^ pdata->detect_invert;
1054} 1186}
1055 1187
@@ -1104,12 +1236,12 @@ static void s3cmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1104 switch (ios->power_mode) { 1236 switch (ios->power_mode) {
1105 case MMC_POWER_ON: 1237 case MMC_POWER_ON:
1106 case MMC_POWER_UP: 1238 case MMC_POWER_UP:
1107 s3c2410_gpio_cfgpin(S3C2410_GPE5, S3C2410_GPE5_SDCLK); 1239 s3c2410_gpio_cfgpin(S3C2410_GPE(5), S3C2410_GPE5_SDCLK);
1108 s3c2410_gpio_cfgpin(S3C2410_GPE6, S3C2410_GPE6_SDCMD); 1240 s3c2410_gpio_cfgpin(S3C2410_GPE(6), S3C2410_GPE6_SDCMD);
1109 s3c2410_gpio_cfgpin(S3C2410_GPE7, S3C2410_GPE7_SDDAT0); 1241 s3c2410_gpio_cfgpin(S3C2410_GPE(7), S3C2410_GPE7_SDDAT0);
1110 s3c2410_gpio_cfgpin(S3C2410_GPE8, S3C2410_GPE8_SDDAT1); 1242 s3c2410_gpio_cfgpin(S3C2410_GPE(8), S3C2410_GPE8_SDDAT1);
1111 s3c2410_gpio_cfgpin(S3C2410_GPE9, S3C2410_GPE9_SDDAT2); 1243 s3c2410_gpio_cfgpin(S3C2410_GPE(9), S3C2410_GPE9_SDDAT2);
1112 s3c2410_gpio_cfgpin(S3C2410_GPE10, S3C2410_GPE10_SDDAT3); 1244 s3c2410_gpio_cfgpin(S3C2410_GPE(10), S3C2410_GPE10_SDDAT3);
1113 1245
1114 if (host->pdata->set_power) 1246 if (host->pdata->set_power)
1115 host->pdata->set_power(ios->power_mode, ios->vdd); 1247 host->pdata->set_power(ios->power_mode, ios->vdd);
@@ -1121,8 +1253,7 @@ static void s3cmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1121 1253
1122 case MMC_POWER_OFF: 1254 case MMC_POWER_OFF:
1123 default: 1255 default:
1124 s3c2410_gpio_setpin(S3C2410_GPE5, 0); 1256 gpio_direction_output(S3C2410_GPE(5), 0);
1125 s3c2410_gpio_cfgpin(S3C2410_GPE5, S3C2410_GPIO_OUTPUT);
1126 1257
1127 if (host->is2440) 1258 if (host->is2440)
1128 mci_con |= S3C2440_SDICON_SDRESET; 1259 mci_con |= S3C2440_SDICON_SDRESET;
@@ -1168,7 +1299,7 @@ static int s3cmci_get_ro(struct mmc_host *mmc)
1168 struct s3c24xx_mci_pdata *pdata = host->pdata; 1299 struct s3c24xx_mci_pdata *pdata = host->pdata;
1169 int ret; 1300 int ret;
1170 1301
1171 if (pdata->gpio_wprotect == 0) 1302 if (pdata->no_wprotect)
1172 return 0; 1303 return 0;
1173 1304
1174 ret = s3c2410_gpio_getpin(pdata->gpio_wprotect); 1305 ret = s3c2410_gpio_getpin(pdata->gpio_wprotect);
@@ -1179,11 +1310,52 @@ static int s3cmci_get_ro(struct mmc_host *mmc)
1179 return ret; 1310 return ret;
1180} 1311}
1181 1312
1313static void s3cmci_enable_sdio_irq(struct mmc_host *mmc, int enable)
1314{
1315 struct s3cmci_host *host = mmc_priv(mmc);
1316 unsigned long flags;
1317 u32 con;
1318
1319 local_irq_save(flags);
1320
1321 con = readl(host->base + S3C2410_SDICON);
1322 host->sdio_irqen = enable;
1323
1324 if (enable == host->sdio_irqen)
1325 goto same_state;
1326
1327 if (enable) {
1328 con |= S3C2410_SDICON_SDIOIRQ;
1329 enable_imask(host, S3C2410_SDIIMSK_SDIOIRQ);
1330
1331 if (!host->irq_state && !host->irq_disabled) {
1332 host->irq_state = true;
1333 enable_irq(host->irq);
1334 }
1335 } else {
1336 disable_imask(host, S3C2410_SDIIMSK_SDIOIRQ);
1337 con &= ~S3C2410_SDICON_SDIOIRQ;
1338
1339 if (!host->irq_enabled && host->irq_state) {
1340 disable_irq_nosync(host->irq);
1341 host->irq_state = false;
1342 }
1343 }
1344
1345 writel(con, host->base + S3C2410_SDICON);
1346
1347 same_state:
1348 local_irq_restore(flags);
1349
1350 s3cmci_check_sdio_irq(host);
1351}
1352
1182static struct mmc_host_ops s3cmci_ops = { 1353static struct mmc_host_ops s3cmci_ops = {
1183 .request = s3cmci_request, 1354 .request = s3cmci_request,
1184 .set_ios = s3cmci_set_ios, 1355 .set_ios = s3cmci_set_ios,
1185 .get_ro = s3cmci_get_ro, 1356 .get_ro = s3cmci_get_ro,
1186 .get_cd = s3cmci_card_present, 1357 .get_cd = s3cmci_card_present,
1358 .enable_sdio_irq = s3cmci_enable_sdio_irq,
1187}; 1359};
1188 1360
1189static struct s3c24xx_mci_pdata s3cmci_def_pdata = { 1361static struct s3c24xx_mci_pdata s3cmci_def_pdata = {
@@ -1246,11 +1418,140 @@ static inline void s3cmci_cpufreq_deregister(struct s3cmci_host *host)
1246} 1418}
1247#endif 1419#endif
1248 1420
1249static int __devinit s3cmci_probe(struct platform_device *pdev, int is2440) 1421
1422#ifdef CONFIG_DEBUG_FS
1423
1424static int s3cmci_state_show(struct seq_file *seq, void *v)
1425{
1426 struct s3cmci_host *host = seq->private;
1427
1428 seq_printf(seq, "Register base = 0x%08x\n", (u32)host->base);
1429 seq_printf(seq, "Clock rate = %ld\n", host->clk_rate);
1430 seq_printf(seq, "Prescale = %d\n", host->prescaler);
1431 seq_printf(seq, "is2440 = %d\n", host->is2440);
1432 seq_printf(seq, "IRQ = %d\n", host->irq);
1433 seq_printf(seq, "IRQ enabled = %d\n", host->irq_enabled);
1434 seq_printf(seq, "IRQ disabled = %d\n", host->irq_disabled);
1435 seq_printf(seq, "IRQ state = %d\n", host->irq_state);
1436 seq_printf(seq, "CD IRQ = %d\n", host->irq_cd);
1437 seq_printf(seq, "Do DMA = %d\n", s3cmci_host_usedma(host));
1438 seq_printf(seq, "SDIIMSK at %d\n", host->sdiimsk);
1439 seq_printf(seq, "SDIDATA at %d\n", host->sdidata);
1440
1441 return 0;
1442}
1443
1444static int s3cmci_state_open(struct inode *inode, struct file *file)
1445{
1446 return single_open(file, s3cmci_state_show, inode->i_private);
1447}
1448
1449static const struct file_operations s3cmci_fops_state = {
1450 .owner = THIS_MODULE,
1451 .open = s3cmci_state_open,
1452 .read = seq_read,
1453 .llseek = seq_lseek,
1454 .release = single_release,
1455};
1456
1457#define DBG_REG(_r) { .addr = S3C2410_SDI##_r, .name = #_r }
1458
1459struct s3cmci_reg {
1460 unsigned short addr;
1461 unsigned char *name;
1462} debug_regs[] = {
1463 DBG_REG(CON),
1464 DBG_REG(PRE),
1465 DBG_REG(CMDARG),
1466 DBG_REG(CMDCON),
1467 DBG_REG(CMDSTAT),
1468 DBG_REG(RSP0),
1469 DBG_REG(RSP1),
1470 DBG_REG(RSP2),
1471 DBG_REG(RSP3),
1472 DBG_REG(TIMER),
1473 DBG_REG(BSIZE),
1474 DBG_REG(DCON),
1475 DBG_REG(DCNT),
1476 DBG_REG(DSTA),
1477 DBG_REG(FSTA),
1478 {}
1479};
1480
1481static int s3cmci_regs_show(struct seq_file *seq, void *v)
1482{
1483 struct s3cmci_host *host = seq->private;
1484 struct s3cmci_reg *rptr = debug_regs;
1485
1486 for (; rptr->name; rptr++)
1487 seq_printf(seq, "SDI%s\t=0x%08x\n", rptr->name,
1488 readl(host->base + rptr->addr));
1489
1490 seq_printf(seq, "SDIIMSK\t=0x%08x\n", readl(host->base + host->sdiimsk));
1491
1492 return 0;
1493}
1494
1495static int s3cmci_regs_open(struct inode *inode, struct file *file)
1496{
1497 return single_open(file, s3cmci_regs_show, inode->i_private);
1498}
1499
1500static const struct file_operations s3cmci_fops_regs = {
1501 .owner = THIS_MODULE,
1502 .open = s3cmci_regs_open,
1503 .read = seq_read,
1504 .llseek = seq_lseek,
1505 .release = single_release,
1506};
1507
1508static void s3cmci_debugfs_attach(struct s3cmci_host *host)
1509{
1510 struct device *dev = &host->pdev->dev;
1511
1512 host->debug_root = debugfs_create_dir(dev_name(dev), NULL);
1513 if (IS_ERR(host->debug_root)) {
1514 dev_err(dev, "failed to create debugfs root\n");
1515 return;
1516 }
1517
1518 host->debug_state = debugfs_create_file("state", 0444,
1519 host->debug_root, host,
1520 &s3cmci_fops_state);
1521
1522 if (IS_ERR(host->debug_state))
1523 dev_err(dev, "failed to create debug state file\n");
1524
1525 host->debug_regs = debugfs_create_file("regs", 0444,
1526 host->debug_root, host,
1527 &s3cmci_fops_regs);
1528
1529 if (IS_ERR(host->debug_regs))
1530 dev_err(dev, "failed to create debug regs file\n");
1531}
1532
1533static void s3cmci_debugfs_remove(struct s3cmci_host *host)
1534{
1535 debugfs_remove(host->debug_regs);
1536 debugfs_remove(host->debug_state);
1537 debugfs_remove(host->debug_root);
1538}
1539
1540#else
1541static inline void s3cmci_debugfs_attach(struct s3cmci_host *host) { }
1542static inline void s3cmci_debugfs_remove(struct s3cmci_host *host) { }
1543
1544#endif /* CONFIG_DEBUG_FS */
1545
1546static int __devinit s3cmci_probe(struct platform_device *pdev)
1250{ 1547{
1251 struct s3cmci_host *host; 1548 struct s3cmci_host *host;
1252 struct mmc_host *mmc; 1549 struct mmc_host *mmc;
1253 int ret; 1550 int ret;
1551 int is2440;
1552 int i;
1553
1554 is2440 = platform_get_device_id(pdev)->driver_data;
1254 1555
1255 mmc = mmc_alloc_host(sizeof(struct s3cmci_host), &pdev->dev); 1556 mmc = mmc_alloc_host(sizeof(struct s3cmci_host), &pdev->dev);
1256 if (!mmc) { 1557 if (!mmc) {
@@ -1258,6 +1559,18 @@ static int __devinit s3cmci_probe(struct platform_device *pdev, int is2440)
1258 goto probe_out; 1559 goto probe_out;
1259 } 1560 }
1260 1561
1562 for (i = S3C2410_GPE(5); i <= S3C2410_GPE(10); i++) {
1563 ret = gpio_request(i, dev_name(&pdev->dev));
1564 if (ret) {
1565 dev_err(&pdev->dev, "failed to get gpio %d\n", i);
1566
1567 for (i--; i >= S3C2410_GPE(5); i--)
1568 gpio_free(i);
1569
1570 goto probe_free_host;
1571 }
1572 }
1573
1261 host = mmc_priv(mmc); 1574 host = mmc_priv(mmc);
1262 host->mmc = mmc; 1575 host->mmc = mmc;
1263 host->pdev = pdev; 1576 host->pdev = pdev;
@@ -1282,11 +1595,12 @@ static int __devinit s3cmci_probe(struct platform_device *pdev, int is2440)
1282 host->clk_div = 2; 1595 host->clk_div = 2;
1283 } 1596 }
1284 1597
1285 host->dodma = 0;
1286 host->complete_what = COMPLETION_NONE; 1598 host->complete_what = COMPLETION_NONE;
1287 host->pio_active = XFER_NONE; 1599 host->pio_active = XFER_NONE;
1288 1600
1289 host->dma = S3CMCI_DMA; 1601#ifdef CONFIG_MMC_S3C_PIODMA
1602 host->dodma = host->pdata->dma;
1603#endif
1290 1604
1291 host->mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1605 host->mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1292 if (!host->mem) { 1606 if (!host->mem) {
@@ -1294,19 +1608,19 @@ static int __devinit s3cmci_probe(struct platform_device *pdev, int is2440)
1294 "failed to get io memory region resouce.\n"); 1608 "failed to get io memory region resouce.\n");
1295 1609
1296 ret = -ENOENT; 1610 ret = -ENOENT;
1297 goto probe_free_host; 1611 goto probe_free_gpio;
1298 } 1612 }
1299 1613
1300 host->mem = request_mem_region(host->mem->start, 1614 host->mem = request_mem_region(host->mem->start,
1301 RESSIZE(host->mem), pdev->name); 1615 resource_size(host->mem), pdev->name);
1302 1616
1303 if (!host->mem) { 1617 if (!host->mem) {
1304 dev_err(&pdev->dev, "failed to request io memory region.\n"); 1618 dev_err(&pdev->dev, "failed to request io memory region.\n");
1305 ret = -ENOENT; 1619 ret = -ENOENT;
1306 goto probe_free_host; 1620 goto probe_free_gpio;
1307 } 1621 }
1308 1622
1309 host->base = ioremap(host->mem->start, RESSIZE(host->mem)); 1623 host->base = ioremap(host->mem->start, resource_size(host->mem));
1310 if (!host->base) { 1624 if (!host->base) {
1311 dev_err(&pdev->dev, "failed to ioremap() io memory region.\n"); 1625 dev_err(&pdev->dev, "failed to ioremap() io memory region.\n");
1312 ret = -EINVAL; 1626 ret = -EINVAL;
@@ -1331,31 +1645,60 @@ static int __devinit s3cmci_probe(struct platform_device *pdev, int is2440)
1331 * ensure we don't lock the system with un-serviceable requests. */ 1645 * ensure we don't lock the system with un-serviceable requests. */
1332 1646
1333 disable_irq(host->irq); 1647 disable_irq(host->irq);
1648 host->irq_state = false;
1334 1649
1335 host->irq_cd = s3c2410_gpio_getirq(host->pdata->gpio_detect); 1650 if (!host->pdata->no_detect) {
1336 1651 ret = gpio_request(host->pdata->gpio_detect, "s3cmci detect");
1337 if (host->irq_cd >= 0) { 1652 if (ret) {
1338 if (request_irq(host->irq_cd, s3cmci_irq_cd, 1653 dev_err(&pdev->dev, "failed to get detect gpio\n");
1339 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
1340 DRIVER_NAME, host)) {
1341 dev_err(&pdev->dev, "can't get card detect irq.\n");
1342 ret = -ENOENT;
1343 goto probe_free_irq; 1654 goto probe_free_irq;
1344 } 1655 }
1345 } else { 1656
1346 dev_warn(&pdev->dev, "host detect has no irq available\n"); 1657 host->irq_cd = s3c2410_gpio_getirq(host->pdata->gpio_detect);
1347 s3c2410_gpio_cfgpin(host->pdata->gpio_detect, 1658
1348 S3C2410_GPIO_INPUT); 1659 if (host->irq_cd >= 0) {
1660 if (request_irq(host->irq_cd, s3cmci_irq_cd,
1661 IRQF_TRIGGER_RISING |
1662 IRQF_TRIGGER_FALLING,
1663 DRIVER_NAME, host)) {
1664 dev_err(&pdev->dev,
1665 "can't get card detect irq.\n");
1666 ret = -ENOENT;
1667 goto probe_free_gpio_cd;
1668 }
1669 } else {
1670 dev_warn(&pdev->dev,
1671 "host detect has no irq available\n");
1672 gpio_direction_input(host->pdata->gpio_detect);
1673 }
1674 } else
1675 host->irq_cd = -1;
1676
1677 if (!host->pdata->no_wprotect) {
1678 ret = gpio_request(host->pdata->gpio_wprotect, "s3cmci wp");
1679 if (ret) {
1680 dev_err(&pdev->dev, "failed to get writeprotect\n");
1681 goto probe_free_irq_cd;
1682 }
1683
1684 gpio_direction_input(host->pdata->gpio_wprotect);
1349 } 1685 }
1350 1686
1351 if (host->pdata->gpio_wprotect) 1687 /* depending on the dma state, get a dma channel to use. */
1352 s3c2410_gpio_cfgpin(host->pdata->gpio_wprotect,
1353 S3C2410_GPIO_INPUT);
1354 1688
1355 if (s3c2410_dma_request(S3CMCI_DMA, &s3cmci_dma_client, NULL) < 0) { 1689 if (s3cmci_host_usedma(host)) {
1356 dev_err(&pdev->dev, "unable to get DMA channel.\n"); 1690 host->dma = s3c2410_dma_request(DMACH_SDI, &s3cmci_dma_client,
1357 ret = -EBUSY; 1691 host);
1358 goto probe_free_irq_cd; 1692 if (host->dma < 0) {
1693 dev_err(&pdev->dev, "cannot get DMA channel.\n");
1694 if (!s3cmci_host_canpio()) {
1695 ret = -EBUSY;
1696 goto probe_free_gpio_wp;
1697 } else {
1698 dev_warn(&pdev->dev, "falling back to PIO.\n");
1699 host->dodma = 0;
1700 }
1701 }
1359 } 1702 }
1360 1703
1361 host->clk = clk_get(&pdev->dev, "sdi"); 1704 host->clk = clk_get(&pdev->dev, "sdi");
@@ -1363,7 +1706,7 @@ static int __devinit s3cmci_probe(struct platform_device *pdev, int is2440)
1363 dev_err(&pdev->dev, "failed to find clock source.\n"); 1706 dev_err(&pdev->dev, "failed to find clock source.\n");
1364 ret = PTR_ERR(host->clk); 1707 ret = PTR_ERR(host->clk);
1365 host->clk = NULL; 1708 host->clk = NULL;
1366 goto probe_free_host; 1709 goto probe_free_dma;
1367 } 1710 }
1368 1711
1369 ret = clk_enable(host->clk); 1712 ret = clk_enable(host->clk);
@@ -1376,7 +1719,11 @@ static int __devinit s3cmci_probe(struct platform_device *pdev, int is2440)
1376 1719
1377 mmc->ops = &s3cmci_ops; 1720 mmc->ops = &s3cmci_ops;
1378 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; 1721 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
1722#ifdef CONFIG_MMC_S3C_HW_SDIO_IRQ
1723 mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ;
1724#else
1379 mmc->caps = MMC_CAP_4_BIT_DATA; 1725 mmc->caps = MMC_CAP_4_BIT_DATA;
1726#endif
1380 mmc->f_min = host->clk_rate / (host->clk_div * 256); 1727 mmc->f_min = host->clk_rate / (host->clk_div * 256);
1381 mmc->f_max = host->clk_rate / host->clk_div; 1728 mmc->f_max = host->clk_rate / host->clk_div;
1382 1729
@@ -1408,8 +1755,12 @@ static int __devinit s3cmci_probe(struct platform_device *pdev, int is2440)
1408 goto free_cpufreq; 1755 goto free_cpufreq;
1409 } 1756 }
1410 1757
1758 s3cmci_debugfs_attach(host);
1759
1411 platform_set_drvdata(pdev, mmc); 1760 platform_set_drvdata(pdev, mmc);
1412 dev_info(&pdev->dev, "initialisation done.\n"); 1761 dev_info(&pdev->dev, "%s - using %s, %s SDIO IRQ\n", mmc_hostname(mmc),
1762 s3cmci_host_usedma(host) ? "dma" : "pio",
1763 mmc->caps & MMC_CAP_SDIO_IRQ ? "hw" : "sw");
1413 1764
1414 return 0; 1765 return 0;
1415 1766
@@ -1422,6 +1773,18 @@ static int __devinit s3cmci_probe(struct platform_device *pdev, int is2440)
1422 clk_free: 1773 clk_free:
1423 clk_put(host->clk); 1774 clk_put(host->clk);
1424 1775
1776 probe_free_dma:
1777 if (s3cmci_host_usedma(host))
1778 s3c2410_dma_free(host->dma, &s3cmci_dma_client);
1779
1780 probe_free_gpio_wp:
1781 if (!host->pdata->no_wprotect)
1782 gpio_free(host->pdata->gpio_wprotect);
1783
1784 probe_free_gpio_cd:
1785 if (!host->pdata->no_detect)
1786 gpio_free(host->pdata->gpio_detect);
1787
1425 probe_free_irq_cd: 1788 probe_free_irq_cd:
1426 if (host->irq_cd >= 0) 1789 if (host->irq_cd >= 0)
1427 free_irq(host->irq_cd, host); 1790 free_irq(host->irq_cd, host);
@@ -1433,10 +1796,15 @@ static int __devinit s3cmci_probe(struct platform_device *pdev, int is2440)
1433 iounmap(host->base); 1796 iounmap(host->base);
1434 1797
1435 probe_free_mem_region: 1798 probe_free_mem_region:
1436 release_mem_region(host->mem->start, RESSIZE(host->mem)); 1799 release_mem_region(host->mem->start, resource_size(host->mem));
1800
1801 probe_free_gpio:
1802 for (i = S3C2410_GPE(5); i <= S3C2410_GPE(10); i++)
1803 gpio_free(i);
1437 1804
1438 probe_free_host: 1805 probe_free_host:
1439 mmc_free_host(mmc); 1806 mmc_free_host(mmc);
1807
1440 probe_out: 1808 probe_out:
1441 return ret; 1809 return ret;
1442} 1810}
@@ -1449,6 +1817,7 @@ static void s3cmci_shutdown(struct platform_device *pdev)
1449 if (host->irq_cd >= 0) 1817 if (host->irq_cd >= 0)
1450 free_irq(host->irq_cd, host); 1818 free_irq(host->irq_cd, host);
1451 1819
1820 s3cmci_debugfs_remove(host);
1452 s3cmci_cpufreq_deregister(host); 1821 s3cmci_cpufreq_deregister(host);
1453 mmc_remove_host(mmc); 1822 mmc_remove_host(mmc);
1454 clk_disable(host->clk); 1823 clk_disable(host->clk);
@@ -1458,104 +1827,102 @@ static int __devexit s3cmci_remove(struct platform_device *pdev)
1458{ 1827{
1459 struct mmc_host *mmc = platform_get_drvdata(pdev); 1828 struct mmc_host *mmc = platform_get_drvdata(pdev);
1460 struct s3cmci_host *host = mmc_priv(mmc); 1829 struct s3cmci_host *host = mmc_priv(mmc);
1830 struct s3c24xx_mci_pdata *pd = host->pdata;
1831 int i;
1461 1832
1462 s3cmci_shutdown(pdev); 1833 s3cmci_shutdown(pdev);
1463 1834
1464 clk_put(host->clk); 1835 clk_put(host->clk);
1465 1836
1466 tasklet_disable(&host->pio_tasklet); 1837 tasklet_disable(&host->pio_tasklet);
1467 s3c2410_dma_free(S3CMCI_DMA, &s3cmci_dma_client); 1838
1839 if (s3cmci_host_usedma(host))
1840 s3c2410_dma_free(host->dma, &s3cmci_dma_client);
1468 1841
1469 free_irq(host->irq, host); 1842 free_irq(host->irq, host);
1470 1843
1844 if (!pd->no_wprotect)
1845 gpio_free(pd->gpio_wprotect);
1846
1847 if (!pd->no_detect)
1848 gpio_free(pd->gpio_detect);
1849
1850 for (i = S3C2410_GPE(5); i <= S3C2410_GPE(10); i++)
1851 gpio_free(i);
1852
1853
1471 iounmap(host->base); 1854 iounmap(host->base);
1472 release_mem_region(host->mem->start, RESSIZE(host->mem)); 1855 release_mem_region(host->mem->start, resource_size(host->mem));
1473 1856
1474 mmc_free_host(mmc); 1857 mmc_free_host(mmc);
1475 return 0; 1858 return 0;
1476} 1859}
1477 1860
1478static int __devinit s3cmci_2410_probe(struct platform_device *dev) 1861static struct platform_device_id s3cmci_driver_ids[] = {
1479{ 1862 {
1480 return s3cmci_probe(dev, 0); 1863 .name = "s3c2410-sdi",
1481} 1864 .driver_data = 0,
1865 }, {
1866 .name = "s3c2412-sdi",
1867 .driver_data = 1,
1868 }, {
1869 .name = "s3c2440-sdi",
1870 .driver_data = 1,
1871 },
1872 { }
1873};
1482 1874
1483static int __devinit s3cmci_2412_probe(struct platform_device *dev) 1875MODULE_DEVICE_TABLE(platform, s3cmci_driver_ids);
1484{
1485 return s3cmci_probe(dev, 1);
1486}
1487 1876
1488static int __devinit s3cmci_2440_probe(struct platform_device *dev)
1489{
1490 return s3cmci_probe(dev, 1);
1491}
1492 1877
1493#ifdef CONFIG_PM 1878#ifdef CONFIG_PM
1494 1879
1495static int s3cmci_suspend(struct platform_device *dev, pm_message_t state) 1880static int s3cmci_suspend(struct device *dev)
1496{ 1881{
1497 struct mmc_host *mmc = platform_get_drvdata(dev); 1882 struct mmc_host *mmc = platform_get_drvdata(to_platform_device(dev));
1883 struct pm_message event = { PM_EVENT_SUSPEND };
1498 1884
1499 return mmc_suspend_host(mmc, state); 1885 return mmc_suspend_host(mmc, event);
1500} 1886}
1501 1887
1502static int s3cmci_resume(struct platform_device *dev) 1888static int s3cmci_resume(struct device *dev)
1503{ 1889{
1504 struct mmc_host *mmc = platform_get_drvdata(dev); 1890 struct mmc_host *mmc = platform_get_drvdata(to_platform_device(dev));
1505 1891
1506 return mmc_resume_host(mmc); 1892 return mmc_resume_host(mmc);
1507} 1893}
1508 1894
1509#else /* CONFIG_PM */ 1895static struct dev_pm_ops s3cmci_pm = {
1510#define s3cmci_suspend NULL
1511#define s3cmci_resume NULL
1512#endif /* CONFIG_PM */
1513
1514
1515static struct platform_driver s3cmci_2410_driver = {
1516 .driver.name = "s3c2410-sdi",
1517 .driver.owner = THIS_MODULE,
1518 .probe = s3cmci_2410_probe,
1519 .remove = __devexit_p(s3cmci_remove),
1520 .shutdown = s3cmci_shutdown,
1521 .suspend = s3cmci_suspend, 1896 .suspend = s3cmci_suspend,
1522 .resume = s3cmci_resume, 1897 .resume = s3cmci_resume,
1523}; 1898};
1524 1899
1525static struct platform_driver s3cmci_2412_driver = { 1900#define s3cmci_pm_ops &s3cmci_pm
1526 .driver.name = "s3c2412-sdi", 1901#else /* CONFIG_PM */
1527 .driver.owner = THIS_MODULE, 1902#define s3cmci_pm_ops NULL
1528 .probe = s3cmci_2412_probe, 1903#endif /* CONFIG_PM */
1529 .remove = __devexit_p(s3cmci_remove),
1530 .shutdown = s3cmci_shutdown,
1531 .suspend = s3cmci_suspend,
1532 .resume = s3cmci_resume,
1533};
1534 1904
1535static struct platform_driver s3cmci_2440_driver = { 1905
1536 .driver.name = "s3c2440-sdi", 1906static struct platform_driver s3cmci_driver = {
1537 .driver.owner = THIS_MODULE, 1907 .driver = {
1538 .probe = s3cmci_2440_probe, 1908 .name = "s3c-sdi",
1909 .owner = THIS_MODULE,
1910 .pm = s3cmci_pm_ops,
1911 },
1912 .id_table = s3cmci_driver_ids,
1913 .probe = s3cmci_probe,
1539 .remove = __devexit_p(s3cmci_remove), 1914 .remove = __devexit_p(s3cmci_remove),
1540 .shutdown = s3cmci_shutdown, 1915 .shutdown = s3cmci_shutdown,
1541 .suspend = s3cmci_suspend,
1542 .resume = s3cmci_resume,
1543}; 1916};
1544 1917
1545
1546static int __init s3cmci_init(void) 1918static int __init s3cmci_init(void)
1547{ 1919{
1548 platform_driver_register(&s3cmci_2410_driver); 1920 return platform_driver_register(&s3cmci_driver);
1549 platform_driver_register(&s3cmci_2412_driver);
1550 platform_driver_register(&s3cmci_2440_driver);
1551 return 0;
1552} 1921}
1553 1922
1554static void __exit s3cmci_exit(void) 1923static void __exit s3cmci_exit(void)
1555{ 1924{
1556 platform_driver_unregister(&s3cmci_2410_driver); 1925 platform_driver_unregister(&s3cmci_driver);
1557 platform_driver_unregister(&s3cmci_2412_driver);
1558 platform_driver_unregister(&s3cmci_2440_driver);
1559} 1926}
1560 1927
1561module_init(s3cmci_init); 1928module_init(s3cmci_init);
@@ -1564,6 +1931,3 @@ module_exit(s3cmci_exit);
1564MODULE_DESCRIPTION("Samsung S3C MMC/SD Card Interface driver"); 1931MODULE_DESCRIPTION("Samsung S3C MMC/SD Card Interface driver");
1565MODULE_LICENSE("GPL v2"); 1932MODULE_LICENSE("GPL v2");
1566MODULE_AUTHOR("Thomas Kleffel <tk@maintech.de>, Ben Dooks <ben-linux@fluff.org>"); 1933MODULE_AUTHOR("Thomas Kleffel <tk@maintech.de>, Ben Dooks <ben-linux@fluff.org>");
1567MODULE_ALIAS("platform:s3c2410-sdi");
1568MODULE_ALIAS("platform:s3c2412-sdi");
1569MODULE_ALIAS("platform:s3c2440-sdi");
diff --git a/drivers/mmc/host/s3cmci.h b/drivers/mmc/host/s3cmci.h
index ca1ba3d58cfd..c76b53dbeb61 100644
--- a/drivers/mmc/host/s3cmci.h
+++ b/drivers/mmc/host/s3cmci.h
@@ -8,9 +8,6 @@
8 * published by the Free Software Foundation. 8 * published by the Free Software Foundation.
9 */ 9 */
10 10
11/* FIXME: DMA Resource management ?! */
12#define S3CMCI_DMA 0
13
14enum s3cmci_waitfor { 11enum s3cmci_waitfor {
15 COMPLETION_NONE, 12 COMPLETION_NONE,
16 COMPLETION_FINALIZE, 13 COMPLETION_FINALIZE,
@@ -42,6 +39,11 @@ struct s3cmci_host {
42 int dodma; 39 int dodma;
43 int dmatogo; 40 int dmatogo;
44 41
42 bool irq_disabled;
43 bool irq_enabled;
44 bool irq_state;
45 int sdio_irqen;
46
45 struct mmc_request *mrq; 47 struct mmc_request *mrq;
46 int cmd_is_stop; 48 int cmd_is_stop;
47 49
@@ -68,6 +70,12 @@ struct s3cmci_host {
68 unsigned int ccnt, dcnt; 70 unsigned int ccnt, dcnt;
69 struct tasklet_struct pio_tasklet; 71 struct tasklet_struct pio_tasklet;
70 72
73#ifdef CONFIG_DEBUG_FS
74 struct dentry *debug_root;
75 struct dentry *debug_state;
76 struct dentry *debug_regs;
77#endif
78
71#ifdef CONFIG_CPU_FREQ 79#ifdef CONFIG_CPU_FREQ
72 struct notifier_block freq_transition; 80 struct notifier_block freq_transition;
73#endif 81#endif
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c
index b9eeadf01b74..975e25b19ebe 100644
--- a/drivers/net/3c59x.c
+++ b/drivers/net/3c59x.c
@@ -805,52 +805,54 @@ static void poll_vortex(struct net_device *dev)
805 805
806#ifdef CONFIG_PM 806#ifdef CONFIG_PM
807 807
808static int vortex_suspend(struct pci_dev *pdev, pm_message_t state) 808static int vortex_suspend(struct device *dev)
809{ 809{
810 struct net_device *dev = pci_get_drvdata(pdev); 810 struct pci_dev *pdev = to_pci_dev(dev);
811 struct net_device *ndev = pci_get_drvdata(pdev);
812
813 if (!ndev || !netif_running(ndev))
814 return 0;
815
816 netif_device_detach(ndev);
817 vortex_down(ndev, 1);
811 818
812 if (dev && netdev_priv(dev)) {
813 if (netif_running(dev)) {
814 netif_device_detach(dev);
815 vortex_down(dev, 1);
816 disable_irq(dev->irq);
817 }
818 pci_save_state(pdev);
819 pci_enable_wake(pdev, pci_choose_state(pdev, state), 0);
820 pci_disable_device(pdev);
821 pci_set_power_state(pdev, pci_choose_state(pdev, state));
822 }
823 return 0; 819 return 0;
824} 820}
825 821
826static int vortex_resume(struct pci_dev *pdev) 822static int vortex_resume(struct device *dev)
827{ 823{
828 struct net_device *dev = pci_get_drvdata(pdev); 824 struct pci_dev *pdev = to_pci_dev(dev);
829 struct vortex_private *vp = netdev_priv(dev); 825 struct net_device *ndev = pci_get_drvdata(pdev);
830 int err; 826 int err;
831 827
832 if (dev && vp) { 828 if (!ndev || !netif_running(ndev))
833 pci_set_power_state(pdev, PCI_D0); 829 return 0;
834 pci_restore_state(pdev); 830
835 err = pci_enable_device(pdev); 831 err = vortex_up(ndev);
836 if (err) { 832 if (err)
837 pr_warning("%s: Could not enable device\n", 833 return err;
838 dev->name); 834
839 return err; 835 netif_device_attach(ndev);
840 } 836
841 pci_set_master(pdev);
842 if (netif_running(dev)) {
843 err = vortex_up(dev);
844 if (err)
845 return err;
846 enable_irq(dev->irq);
847 netif_device_attach(dev);
848 }
849 }
850 return 0; 837 return 0;
851} 838}
852 839
853#endif /* CONFIG_PM */ 840static struct dev_pm_ops vortex_pm_ops = {
841 .suspend = vortex_suspend,
842 .resume = vortex_resume,
843 .freeze = vortex_suspend,
844 .thaw = vortex_resume,
845 .poweroff = vortex_suspend,
846 .restore = vortex_resume,
847};
848
849#define VORTEX_PM_OPS (&vortex_pm_ops)
850
851#else /* !CONFIG_PM */
852
853#define VORTEX_PM_OPS NULL
854
855#endif /* !CONFIG_PM */
854 856
855#ifdef CONFIG_EISA 857#ifdef CONFIG_EISA
856static struct eisa_device_id vortex_eisa_ids[] = { 858static struct eisa_device_id vortex_eisa_ids[] = {
@@ -3199,10 +3201,7 @@ static struct pci_driver vortex_driver = {
3199 .probe = vortex_init_one, 3201 .probe = vortex_init_one,
3200 .remove = __devexit_p(vortex_remove_one), 3202 .remove = __devexit_p(vortex_remove_one),
3201 .id_table = vortex_pci_tbl, 3203 .id_table = vortex_pci_tbl,
3202#ifdef CONFIG_PM 3204 .driver.pm = VORTEX_PM_OPS,
3203 .suspend = vortex_suspend,
3204 .resume = vortex_resume,
3205#endif
3206}; 3205};
3207 3206
3208 3207
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 2bea67c134f0..712776089b46 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -1738,6 +1738,13 @@ config KS8851
1738 help 1738 help
1739 SPI driver for Micrel KS8851 SPI attached network chip. 1739 SPI driver for Micrel KS8851 SPI attached network chip.
1740 1740
1741config KS8851_MLL
1742 tristate "Micrel KS8851 MLL"
1743 depends on HAS_IOMEM
1744 help
1745 This platform driver is for Micrel KS8851 Address/data bus
1746 multiplexed network chip.
1747
1741config VIA_RHINE 1748config VIA_RHINE
1742 tristate "VIA Rhine support" 1749 tristate "VIA Rhine support"
1743 depends on NET_PCI && PCI 1750 depends on NET_PCI && PCI
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index ae8cd30f13d6..d866b8cf65d1 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -89,6 +89,7 @@ obj-$(CONFIG_SKY2) += sky2.o
89obj-$(CONFIG_SKFP) += skfp/ 89obj-$(CONFIG_SKFP) += skfp/
90obj-$(CONFIG_KS8842) += ks8842.o 90obj-$(CONFIG_KS8842) += ks8842.o
91obj-$(CONFIG_KS8851) += ks8851.o 91obj-$(CONFIG_KS8851) += ks8851.o
92obj-$(CONFIG_KS8851_MLL) += ks8851_mll.o
92obj-$(CONFIG_VIA_RHINE) += via-rhine.o 93obj-$(CONFIG_VIA_RHINE) += via-rhine.o
93obj-$(CONFIG_VIA_VELOCITY) += via-velocity.o 94obj-$(CONFIG_VIA_VELOCITY) += via-velocity.o
94obj-$(CONFIG_ADAPTEC_STARFIRE) += starfire.o 95obj-$(CONFIG_ADAPTEC_STARFIRE) += starfire.o
diff --git a/drivers/net/bcm63xx_enet.c b/drivers/net/bcm63xx_enet.c
index 09d270913c50..ba29dc319b34 100644
--- a/drivers/net/bcm63xx_enet.c
+++ b/drivers/net/bcm63xx_enet.c
@@ -90,7 +90,7 @@ static int do_mdio_op(struct bcm_enet_priv *priv, unsigned int data)
90 if (enet_readl(priv, ENET_IR_REG) & ENET_IR_MII) 90 if (enet_readl(priv, ENET_IR_REG) & ENET_IR_MII)
91 break; 91 break;
92 udelay(1); 92 udelay(1);
93 } while (limit-- >= 0); 93 } while (limit-- > 0);
94 94
95 return (limit < 0) ? 1 : 0; 95 return (limit < 0) ? 1 : 0;
96} 96}
diff --git a/drivers/net/benet/be.h b/drivers/net/benet/be.h
index 684c6fe24c8d..a80da0e14a52 100644
--- a/drivers/net/benet/be.h
+++ b/drivers/net/benet/be.h
@@ -258,6 +258,7 @@ struct be_adapter {
258 bool link_up; 258 bool link_up;
259 u32 port_num; 259 u32 port_num;
260 bool promiscuous; 260 bool promiscuous;
261 u32 cap;
261}; 262};
262 263
263extern const struct ethtool_ops be_ethtool_ops; 264extern const struct ethtool_ops be_ethtool_ops;
diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c
index 3dd76c4170bf..79d35d122c08 100644
--- a/drivers/net/benet/be_cmds.c
+++ b/drivers/net/benet/be_cmds.c
@@ -1068,7 +1068,7 @@ int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
1068} 1068}
1069 1069
1070/* Uses mbox */ 1070/* Uses mbox */
1071int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num) 1071int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num, u32 *cap)
1072{ 1072{
1073 struct be_mcc_wrb *wrb; 1073 struct be_mcc_wrb *wrb;
1074 struct be_cmd_req_query_fw_cfg *req; 1074 struct be_cmd_req_query_fw_cfg *req;
@@ -1088,6 +1088,7 @@ int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num)
1088 if (!status) { 1088 if (!status) {
1089 struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb); 1089 struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb);
1090 *port_num = le32_to_cpu(resp->phys_port); 1090 *port_num = le32_to_cpu(resp->phys_port);
1091 *cap = le32_to_cpu(resp->function_cap);
1091 } 1092 }
1092 1093
1093 spin_unlock(&adapter->mbox_lock); 1094 spin_unlock(&adapter->mbox_lock);
diff --git a/drivers/net/benet/be_cmds.h b/drivers/net/benet/be_cmds.h
index 93e432f3d926..8b4c2cb9ad62 100644
--- a/drivers/net/benet/be_cmds.h
+++ b/drivers/net/benet/be_cmds.h
@@ -760,7 +760,8 @@ extern int be_cmd_set_flow_control(struct be_adapter *adapter,
760 u32 tx_fc, u32 rx_fc); 760 u32 tx_fc, u32 rx_fc);
761extern int be_cmd_get_flow_control(struct be_adapter *adapter, 761extern int be_cmd_get_flow_control(struct be_adapter *adapter,
762 u32 *tx_fc, u32 *rx_fc); 762 u32 *tx_fc, u32 *rx_fc);
763extern int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num); 763extern int be_cmd_query_fw_cfg(struct be_adapter *adapter,
764 u32 *port_num, u32 *cap);
764extern int be_cmd_reset_function(struct be_adapter *adapter); 765extern int be_cmd_reset_function(struct be_adapter *adapter);
765extern int be_process_mcc(struct be_adapter *adapter); 766extern int be_process_mcc(struct be_adapter *adapter);
766extern int be_cmd_write_flashrom(struct be_adapter *adapter, 767extern int be_cmd_write_flashrom(struct be_adapter *adapter,
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
index 409cf0595903..2f9b50156e0c 100644
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -747,9 +747,16 @@ static void be_rx_compl_process(struct be_adapter *adapter,
747 struct be_eth_rx_compl *rxcp) 747 struct be_eth_rx_compl *rxcp)
748{ 748{
749 struct sk_buff *skb; 749 struct sk_buff *skb;
750 u32 vtp, vid; 750 u32 vlanf, vid;
751 u8 vtm;
751 752
752 vtp = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp); 753 vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
754 vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);
755
756 /* vlanf could be wrongly set in some cards.
757 * ignore if vtm is not set */
758 if ((adapter->cap == 0x400) && !vtm)
759 vlanf = 0;
753 760
754 skb = netdev_alloc_skb(adapter->netdev, BE_HDR_LEN + NET_IP_ALIGN); 761 skb = netdev_alloc_skb(adapter->netdev, BE_HDR_LEN + NET_IP_ALIGN);
755 if (!skb) { 762 if (!skb) {
@@ -772,7 +779,7 @@ static void be_rx_compl_process(struct be_adapter *adapter,
772 skb->protocol = eth_type_trans(skb, adapter->netdev); 779 skb->protocol = eth_type_trans(skb, adapter->netdev);
773 skb->dev = adapter->netdev; 780 skb->dev = adapter->netdev;
774 781
775 if (vtp) { 782 if (vlanf) {
776 if (!adapter->vlan_grp || adapter->num_vlans == 0) { 783 if (!adapter->vlan_grp || adapter->num_vlans == 0) {
777 kfree_skb(skb); 784 kfree_skb(skb);
778 return; 785 return;
@@ -797,11 +804,18 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter,
797 struct be_eq_obj *eq_obj = &adapter->rx_eq; 804 struct be_eq_obj *eq_obj = &adapter->rx_eq;
798 u32 num_rcvd, pkt_size, remaining, vlanf, curr_frag_len; 805 u32 num_rcvd, pkt_size, remaining, vlanf, curr_frag_len;
799 u16 i, rxq_idx = 0, vid, j; 806 u16 i, rxq_idx = 0, vid, j;
807 u8 vtm;
800 808
801 num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp); 809 num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
802 pkt_size = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp); 810 pkt_size = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
803 vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp); 811 vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
804 rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp); 812 rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
813 vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);
814
815 /* vlanf could be wrongly set in some cards.
816 * ignore if vtm is not set */
817 if ((adapter->cap == 0x400) && !vtm)
818 vlanf = 0;
805 819
806 skb = napi_get_frags(&eq_obj->napi); 820 skb = napi_get_frags(&eq_obj->napi);
807 if (!skb) { 821 if (!skb) {
@@ -2045,7 +2059,8 @@ static int be_hw_up(struct be_adapter *adapter)
2045 if (status) 2059 if (status)
2046 return status; 2060 return status;
2047 2061
2048 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num); 2062 status = be_cmd_query_fw_cfg(adapter,
2063 &adapter->port_num, &adapter->cap);
2049 return status; 2064 return status;
2050} 2065}
2051 2066
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index 6044e12ff9fc..ff449de6f3c0 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -1182,6 +1182,7 @@ static ssize_t bonding_store_primary(struct device *d,
1182 ": %s: Setting %s as primary slave.\n", 1182 ": %s: Setting %s as primary slave.\n",
1183 bond->dev->name, slave->dev->name); 1183 bond->dev->name, slave->dev->name);
1184 bond->primary_slave = slave; 1184 bond->primary_slave = slave;
1185 strcpy(bond->params.primary, slave->dev->name);
1185 bond_select_active_slave(bond); 1186 bond_select_active_slave(bond);
1186 goto out; 1187 goto out;
1187 } 1188 }
diff --git a/drivers/net/cnic.c b/drivers/net/cnic.c
index 211c8e9182fc..46c87ec7960c 100644
--- a/drivers/net/cnic.c
+++ b/drivers/net/cnic.c
@@ -2733,7 +2733,8 @@ static int cnic_netdev_event(struct notifier_block *this, unsigned long event,
2733 cnic_ulp_init(dev); 2733 cnic_ulp_init(dev);
2734 else if (event == NETDEV_UNREGISTER) 2734 else if (event == NETDEV_UNREGISTER)
2735 cnic_ulp_exit(dev); 2735 cnic_ulp_exit(dev);
2736 else if (event == NETDEV_UP) { 2736
2737 if (event == NETDEV_UP) {
2737 if (cnic_register_netdev(dev) != 0) { 2738 if (cnic_register_netdev(dev) != 0) {
2738 cnic_put(dev); 2739 cnic_put(dev);
2739 goto done; 2740 goto done;
diff --git a/drivers/net/cnic_if.h b/drivers/net/cnic_if.h
index a49235739eef..d8b09efdcb52 100644
--- a/drivers/net/cnic_if.h
+++ b/drivers/net/cnic_if.h
@@ -12,8 +12,8 @@
12#ifndef CNIC_IF_H 12#ifndef CNIC_IF_H
13#define CNIC_IF_H 13#define CNIC_IF_H
14 14
15#define CNIC_MODULE_VERSION "2.0.0" 15#define CNIC_MODULE_VERSION "2.0.1"
16#define CNIC_MODULE_RELDATE "May 21, 2009" 16#define CNIC_MODULE_RELDATE "Oct 01, 2009"
17 17
18#define CNIC_ULP_RDMA 0 18#define CNIC_ULP_RDMA 0
19#define CNIC_ULP_ISCSI 1 19#define CNIC_ULP_ISCSI 1
diff --git a/drivers/net/e1000/e1000.h b/drivers/net/e1000/e1000.h
index 1a4f89c66a26..42e2b7e21c29 100644
--- a/drivers/net/e1000/e1000.h
+++ b/drivers/net/e1000/e1000.h
@@ -149,7 +149,6 @@ do { \
149 149
150#define AUTO_ALL_MODES 0 150#define AUTO_ALL_MODES 0
151#define E1000_EEPROM_82544_APM 0x0004 151#define E1000_EEPROM_82544_APM 0x0004
152#define E1000_EEPROM_ICH8_APME 0x0004
153#define E1000_EEPROM_APME 0x0400 152#define E1000_EEPROM_APME 0x0400
154 153
155#ifndef E1000_MASTER_SLAVE 154#ifndef E1000_MASTER_SLAVE
@@ -293,7 +292,6 @@ struct e1000_adapter {
293 292
294 u64 hw_csum_err; 293 u64 hw_csum_err;
295 u64 hw_csum_good; 294 u64 hw_csum_good;
296 u64 rx_hdr_split;
297 u32 alloc_rx_buff_failed; 295 u32 alloc_rx_buff_failed;
298 u32 rx_int_delay; 296 u32 rx_int_delay;
299 u32 rx_abs_int_delay; 297 u32 rx_abs_int_delay;
@@ -317,7 +315,6 @@ struct e1000_adapter {
317 struct e1000_rx_ring test_rx_ring; 315 struct e1000_rx_ring test_rx_ring;
318 316
319 int msg_enable; 317 int msg_enable;
320 bool have_msi;
321 318
322 /* to not mess up cache alignment, always add to the bottom */ 319 /* to not mess up cache alignment, always add to the bottom */
323 bool tso_force; 320 bool tso_force;
diff --git a/drivers/net/e1000/e1000_ethtool.c b/drivers/net/e1000/e1000_ethtool.c
index 27f996a2010f..490b2b7cd3ab 100644
--- a/drivers/net/e1000/e1000_ethtool.c
+++ b/drivers/net/e1000/e1000_ethtool.c
@@ -82,7 +82,6 @@ static const struct e1000_stats e1000_gstrings_stats[] = {
82 { "rx_long_byte_count", E1000_STAT(stats.gorcl) }, 82 { "rx_long_byte_count", E1000_STAT(stats.gorcl) },
83 { "rx_csum_offload_good", E1000_STAT(hw_csum_good) }, 83 { "rx_csum_offload_good", E1000_STAT(hw_csum_good) },
84 { "rx_csum_offload_errors", E1000_STAT(hw_csum_err) }, 84 { "rx_csum_offload_errors", E1000_STAT(hw_csum_err) },
85 { "rx_header_split", E1000_STAT(rx_hdr_split) },
86 { "alloc_rx_buff_failed", E1000_STAT(alloc_rx_buff_failed) }, 85 { "alloc_rx_buff_failed", E1000_STAT(alloc_rx_buff_failed) },
87 { "tx_smbus", E1000_STAT(stats.mgptc) }, 86 { "tx_smbus", E1000_STAT(stats.mgptc) },
88 { "rx_smbus", E1000_STAT(stats.mgprc) }, 87 { "rx_smbus", E1000_STAT(stats.mgprc) },
@@ -114,8 +113,6 @@ static int e1000_get_settings(struct net_device *netdev,
114 SUPPORTED_1000baseT_Full| 113 SUPPORTED_1000baseT_Full|
115 SUPPORTED_Autoneg | 114 SUPPORTED_Autoneg |
116 SUPPORTED_TP); 115 SUPPORTED_TP);
117 if (hw->phy_type == e1000_phy_ife)
118 ecmd->supported &= ~SUPPORTED_1000baseT_Full;
119 ecmd->advertising = ADVERTISED_TP; 116 ecmd->advertising = ADVERTISED_TP;
120 117
121 if (hw->autoneg == 1) { 118 if (hw->autoneg == 1) {
@@ -178,14 +175,6 @@ static int e1000_set_settings(struct net_device *netdev,
178 struct e1000_adapter *adapter = netdev_priv(netdev); 175 struct e1000_adapter *adapter = netdev_priv(netdev);
179 struct e1000_hw *hw = &adapter->hw; 176 struct e1000_hw *hw = &adapter->hw;
180 177
181 /* When SoL/IDER sessions are active, autoneg/speed/duplex
182 * cannot be changed */
183 if (e1000_check_phy_reset_block(hw)) {
184 DPRINTK(DRV, ERR, "Cannot change link characteristics "
185 "when SoL/IDER is active.\n");
186 return -EINVAL;
187 }
188
189 while (test_and_set_bit(__E1000_RESETTING, &adapter->flags)) 178 while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
190 msleep(1); 179 msleep(1);
191 180
@@ -330,10 +319,7 @@ static int e1000_set_tso(struct net_device *netdev, u32 data)
330 else 319 else
331 netdev->features &= ~NETIF_F_TSO; 320 netdev->features &= ~NETIF_F_TSO;
332 321
333 if (data && (adapter->hw.mac_type > e1000_82547_rev_2)) 322 netdev->features &= ~NETIF_F_TSO6;
334 netdev->features |= NETIF_F_TSO6;
335 else
336 netdev->features &= ~NETIF_F_TSO6;
337 323
338 DPRINTK(PROBE, INFO, "TSO is %s\n", data ? "Enabled" : "Disabled"); 324 DPRINTK(PROBE, INFO, "TSO is %s\n", data ? "Enabled" : "Disabled");
339 adapter->tso_force = true; 325 adapter->tso_force = true;
@@ -441,7 +427,6 @@ static void e1000_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
441 regs_buff[24] = (u32)phy_data; /* phy local receiver status */ 427 regs_buff[24] = (u32)phy_data; /* phy local receiver status */
442 regs_buff[25] = regs_buff[24]; /* phy remote receiver status */ 428 regs_buff[25] = regs_buff[24]; /* phy remote receiver status */
443 if (hw->mac_type >= e1000_82540 && 429 if (hw->mac_type >= e1000_82540 &&
444 hw->mac_type < e1000_82571 &&
445 hw->media_type == e1000_media_type_copper) { 430 hw->media_type == e1000_media_type_copper) {
446 regs_buff[26] = er32(MANC); 431 regs_buff[26] = er32(MANC);
447 } 432 }
@@ -554,10 +539,8 @@ static int e1000_set_eeprom(struct net_device *netdev,
554 ret_val = e1000_write_eeprom(hw, first_word, 539 ret_val = e1000_write_eeprom(hw, first_word,
555 last_word - first_word + 1, eeprom_buff); 540 last_word - first_word + 1, eeprom_buff);
556 541
557 /* Update the checksum over the first part of the EEPROM if needed 542 /* Update the checksum over the first part of the EEPROM if needed */
558 * and flush shadow RAM for 82573 conrollers */ 543 if ((ret_val == 0) && (first_word <= EEPROM_CHECKSUM_REG))
559 if ((ret_val == 0) && ((first_word <= EEPROM_CHECKSUM_REG) ||
560 (hw->mac_type == e1000_82573)))
561 e1000_update_eeprom_checksum(hw); 544 e1000_update_eeprom_checksum(hw);
562 545
563 kfree(eeprom_buff); 546 kfree(eeprom_buff);
@@ -568,31 +551,12 @@ static void e1000_get_drvinfo(struct net_device *netdev,
568 struct ethtool_drvinfo *drvinfo) 551 struct ethtool_drvinfo *drvinfo)
569{ 552{
570 struct e1000_adapter *adapter = netdev_priv(netdev); 553 struct e1000_adapter *adapter = netdev_priv(netdev);
571 struct e1000_hw *hw = &adapter->hw;
572 char firmware_version[32]; 554 char firmware_version[32];
573 u16 eeprom_data;
574 555
575 strncpy(drvinfo->driver, e1000_driver_name, 32); 556 strncpy(drvinfo->driver, e1000_driver_name, 32);
576 strncpy(drvinfo->version, e1000_driver_version, 32); 557 strncpy(drvinfo->version, e1000_driver_version, 32);
577 558
578 /* EEPROM image version # is reported as firmware version # for 559 sprintf(firmware_version, "N/A");
579 * 8257{1|2|3} controllers */
580 e1000_read_eeprom(hw, 5, 1, &eeprom_data);
581 switch (hw->mac_type) {
582 case e1000_82571:
583 case e1000_82572:
584 case e1000_82573:
585 case e1000_80003es2lan:
586 case e1000_ich8lan:
587 sprintf(firmware_version, "%d.%d-%d",
588 (eeprom_data & 0xF000) >> 12,
589 (eeprom_data & 0x0FF0) >> 4,
590 eeprom_data & 0x000F);
591 break;
592 default:
593 sprintf(firmware_version, "N/A");
594 }
595
596 strncpy(drvinfo->fw_version, firmware_version, 32); 560 strncpy(drvinfo->fw_version, firmware_version, 32);
597 strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32); 561 strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
598 drvinfo->regdump_len = e1000_get_regs_len(netdev); 562 drvinfo->regdump_len = e1000_get_regs_len(netdev);
@@ -781,21 +745,9 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
781 /* The status register is Read Only, so a write should fail. 745 /* The status register is Read Only, so a write should fail.
782 * Some bits that get toggled are ignored. 746 * Some bits that get toggled are ignored.
783 */ 747 */
784 switch (hw->mac_type) { 748
785 /* there are several bits on newer hardware that are r/w */ 749 /* there are several bits on newer hardware that are r/w */
786 case e1000_82571: 750 toggle = 0xFFFFF833;
787 case e1000_82572:
788 case e1000_80003es2lan:
789 toggle = 0x7FFFF3FF;
790 break;
791 case e1000_82573:
792 case e1000_ich8lan:
793 toggle = 0x7FFFF033;
794 break;
795 default:
796 toggle = 0xFFFFF833;
797 break;
798 }
799 751
800 before = er32(STATUS); 752 before = er32(STATUS);
801 value = (er32(STATUS) & toggle); 753 value = (er32(STATUS) & toggle);
@@ -810,12 +762,10 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
810 /* restore previous status */ 762 /* restore previous status */
811 ew32(STATUS, before); 763 ew32(STATUS, before);
812 764
813 if (hw->mac_type != e1000_ich8lan) { 765 REG_PATTERN_TEST(FCAL, 0xFFFFFFFF, 0xFFFFFFFF);
814 REG_PATTERN_TEST(FCAL, 0xFFFFFFFF, 0xFFFFFFFF); 766 REG_PATTERN_TEST(FCAH, 0x0000FFFF, 0xFFFFFFFF);
815 REG_PATTERN_TEST(FCAH, 0x0000FFFF, 0xFFFFFFFF); 767 REG_PATTERN_TEST(FCT, 0x0000FFFF, 0xFFFFFFFF);
816 REG_PATTERN_TEST(FCT, 0x0000FFFF, 0xFFFFFFFF); 768 REG_PATTERN_TEST(VET, 0x0000FFFF, 0xFFFFFFFF);
817 REG_PATTERN_TEST(VET, 0x0000FFFF, 0xFFFFFFFF);
818 }
819 769
820 REG_PATTERN_TEST(RDTR, 0x0000FFFF, 0xFFFFFFFF); 770 REG_PATTERN_TEST(RDTR, 0x0000FFFF, 0xFFFFFFFF);
821 REG_PATTERN_TEST(RDBAH, 0xFFFFFFFF, 0xFFFFFFFF); 771 REG_PATTERN_TEST(RDBAH, 0xFFFFFFFF, 0xFFFFFFFF);
@@ -830,8 +780,7 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
830 780
831 REG_SET_AND_CHECK(RCTL, 0xFFFFFFFF, 0x00000000); 781 REG_SET_AND_CHECK(RCTL, 0xFFFFFFFF, 0x00000000);
832 782
833 before = (hw->mac_type == e1000_ich8lan ? 783 before = 0x06DFB3FE;
834 0x06C3B33E : 0x06DFB3FE);
835 REG_SET_AND_CHECK(RCTL, before, 0x003FFFFB); 784 REG_SET_AND_CHECK(RCTL, before, 0x003FFFFB);
836 REG_SET_AND_CHECK(TCTL, 0xFFFFFFFF, 0x00000000); 785 REG_SET_AND_CHECK(TCTL, 0xFFFFFFFF, 0x00000000);
837 786
@@ -839,12 +788,10 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
839 788
840 REG_SET_AND_CHECK(RCTL, before, 0xFFFFFFFF); 789 REG_SET_AND_CHECK(RCTL, before, 0xFFFFFFFF);
841 REG_PATTERN_TEST(RDBAL, 0xFFFFFFF0, 0xFFFFFFFF); 790 REG_PATTERN_TEST(RDBAL, 0xFFFFFFF0, 0xFFFFFFFF);
842 if (hw->mac_type != e1000_ich8lan) 791 REG_PATTERN_TEST(TXCW, 0xC000FFFF, 0x0000FFFF);
843 REG_PATTERN_TEST(TXCW, 0xC000FFFF, 0x0000FFFF);
844 REG_PATTERN_TEST(TDBAL, 0xFFFFFFF0, 0xFFFFFFFF); 792 REG_PATTERN_TEST(TDBAL, 0xFFFFFFF0, 0xFFFFFFFF);
845 REG_PATTERN_TEST(TIDV, 0x0000FFFF, 0x0000FFFF); 793 REG_PATTERN_TEST(TIDV, 0x0000FFFF, 0x0000FFFF);
846 value = (hw->mac_type == e1000_ich8lan ? 794 value = E1000_RAR_ENTRIES;
847 E1000_RAR_ENTRIES_ICH8LAN : E1000_RAR_ENTRIES);
848 for (i = 0; i < value; i++) { 795 for (i = 0; i < value; i++) {
849 REG_PATTERN_TEST(RA + (((i << 1) + 1) << 2), 0x8003FFFF, 796 REG_PATTERN_TEST(RA + (((i << 1) + 1) << 2), 0x8003FFFF,
850 0xFFFFFFFF); 797 0xFFFFFFFF);
@@ -859,8 +806,7 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
859 806
860 } 807 }
861 808
862 value = (hw->mac_type == e1000_ich8lan ? 809 value = E1000_MC_TBL_SIZE;
863 E1000_MC_TBL_SIZE_ICH8LAN : E1000_MC_TBL_SIZE);
864 for (i = 0; i < value; i++) 810 for (i = 0; i < value; i++)
865 REG_PATTERN_TEST(MTA + (i << 2), 0xFFFFFFFF, 0xFFFFFFFF); 811 REG_PATTERN_TEST(MTA + (i << 2), 0xFFFFFFFF, 0xFFFFFFFF);
866 812
@@ -933,9 +879,6 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
933 /* Test each interrupt */ 879 /* Test each interrupt */
934 for (; i < 10; i++) { 880 for (; i < 10; i++) {
935 881
936 if (hw->mac_type == e1000_ich8lan && i == 8)
937 continue;
938
939 /* Interrupt to test */ 882 /* Interrupt to test */
940 mask = 1 << i; 883 mask = 1 << i;
941 884
@@ -1289,35 +1232,20 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
1289 e1000_write_phy_reg(hw, PHY_CTRL, 0x9140); 1232 e1000_write_phy_reg(hw, PHY_CTRL, 0x9140);
1290 /* autoneg off */ 1233 /* autoneg off */
1291 e1000_write_phy_reg(hw, PHY_CTRL, 0x8140); 1234 e1000_write_phy_reg(hw, PHY_CTRL, 0x8140);
1292 } else if (hw->phy_type == e1000_phy_gg82563) 1235 }
1293 e1000_write_phy_reg(hw,
1294 GG82563_PHY_KMRN_MODE_CTRL,
1295 0x1CC);
1296 1236
1297 ctrl_reg = er32(CTRL); 1237 ctrl_reg = er32(CTRL);
1298 1238
1299 if (hw->phy_type == e1000_phy_ife) { 1239 /* force 1000, set loopback */
1300 /* force 100, set loopback */ 1240 e1000_write_phy_reg(hw, PHY_CTRL, 0x4140);
1301 e1000_write_phy_reg(hw, PHY_CTRL, 0x6100);
1302 1241
1303 /* Now set up the MAC to the same speed/duplex as the PHY. */ 1242 /* Now set up the MAC to the same speed/duplex as the PHY. */
1304 ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */ 1243 ctrl_reg = er32(CTRL);
1305 ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */ 1244 ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */
1306 E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */ 1245 ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */
1307 E1000_CTRL_SPD_100 |/* Force Speed to 100 */ 1246 E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */
1308 E1000_CTRL_FD); /* Force Duplex to FULL */ 1247 E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */
1309 } else { 1248 E1000_CTRL_FD); /* Force Duplex to FULL */
1310 /* force 1000, set loopback */
1311 e1000_write_phy_reg(hw, PHY_CTRL, 0x4140);
1312
1313 /* Now set up the MAC to the same speed/duplex as the PHY. */
1314 ctrl_reg = er32(CTRL);
1315 ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */
1316 ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */
1317 E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */
1318 E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */
1319 E1000_CTRL_FD); /* Force Duplex to FULL */
1320 }
1321 1249
1322 if (hw->media_type == e1000_media_type_copper && 1250 if (hw->media_type == e1000_media_type_copper &&
1323 hw->phy_type == e1000_phy_m88) 1251 hw->phy_type == e1000_phy_m88)
@@ -1373,14 +1301,8 @@ static int e1000_set_phy_loopback(struct e1000_adapter *adapter)
1373 case e1000_82541_rev_2: 1301 case e1000_82541_rev_2:
1374 case e1000_82547: 1302 case e1000_82547:
1375 case e1000_82547_rev_2: 1303 case e1000_82547_rev_2:
1376 case e1000_82571:
1377 case e1000_82572:
1378 case e1000_82573:
1379 case e1000_80003es2lan:
1380 case e1000_ich8lan:
1381 return e1000_integrated_phy_loopback(adapter); 1304 return e1000_integrated_phy_loopback(adapter);
1382 break; 1305 break;
1383
1384 default: 1306 default:
1385 /* Default PHY loopback work is to read the MII 1307 /* Default PHY loopback work is to read the MII
1386 * control register and assert bit 14 (loopback mode). 1308 * control register and assert bit 14 (loopback mode).
@@ -1409,14 +1331,6 @@ static int e1000_setup_loopback_test(struct e1000_adapter *adapter)
1409 case e1000_82546_rev_3: 1331 case e1000_82546_rev_3:
1410 return e1000_set_phy_loopback(adapter); 1332 return e1000_set_phy_loopback(adapter);
1411 break; 1333 break;
1412 case e1000_82571:
1413 case e1000_82572:
1414#define E1000_SERDES_LB_ON 0x410
1415 e1000_set_phy_loopback(adapter);
1416 ew32(SCTL, E1000_SERDES_LB_ON);
1417 msleep(10);
1418 return 0;
1419 break;
1420 default: 1334 default:
1421 rctl = er32(RCTL); 1335 rctl = er32(RCTL);
1422 rctl |= E1000_RCTL_LBM_TCVR; 1336 rctl |= E1000_RCTL_LBM_TCVR;
@@ -1440,26 +1354,12 @@ static void e1000_loopback_cleanup(struct e1000_adapter *adapter)
1440 ew32(RCTL, rctl); 1354 ew32(RCTL, rctl);
1441 1355
1442 switch (hw->mac_type) { 1356 switch (hw->mac_type) {
1443 case e1000_82571:
1444 case e1000_82572:
1445 if (hw->media_type == e1000_media_type_fiber ||
1446 hw->media_type == e1000_media_type_internal_serdes) {
1447#define E1000_SERDES_LB_OFF 0x400
1448 ew32(SCTL, E1000_SERDES_LB_OFF);
1449 msleep(10);
1450 break;
1451 }
1452 /* Fall Through */
1453 case e1000_82545: 1357 case e1000_82545:
1454 case e1000_82546: 1358 case e1000_82546:
1455 case e1000_82545_rev_3: 1359 case e1000_82545_rev_3:
1456 case e1000_82546_rev_3: 1360 case e1000_82546_rev_3:
1457 default: 1361 default:
1458 hw->autoneg = true; 1362 hw->autoneg = true;
1459 if (hw->phy_type == e1000_phy_gg82563)
1460 e1000_write_phy_reg(hw,
1461 GG82563_PHY_KMRN_MODE_CTRL,
1462 0x180);
1463 e1000_read_phy_reg(hw, PHY_CTRL, &phy_reg); 1363 e1000_read_phy_reg(hw, PHY_CTRL, &phy_reg);
1464 if (phy_reg & MII_CR_LOOPBACK) { 1364 if (phy_reg & MII_CR_LOOPBACK) {
1465 phy_reg &= ~MII_CR_LOOPBACK; 1365 phy_reg &= ~MII_CR_LOOPBACK;
@@ -1560,17 +1460,6 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
1560 1460
1561static int e1000_loopback_test(struct e1000_adapter *adapter, u64 *data) 1461static int e1000_loopback_test(struct e1000_adapter *adapter, u64 *data)
1562{ 1462{
1563 struct e1000_hw *hw = &adapter->hw;
1564
1565 /* PHY loopback cannot be performed if SoL/IDER
1566 * sessions are active */
1567 if (e1000_check_phy_reset_block(hw)) {
1568 DPRINTK(DRV, ERR, "Cannot do PHY loopback test "
1569 "when SoL/IDER is active.\n");
1570 *data = 0;
1571 goto out;
1572 }
1573
1574 *data = e1000_setup_desc_rings(adapter); 1463 *data = e1000_setup_desc_rings(adapter);
1575 if (*data) 1464 if (*data)
1576 goto out; 1465 goto out;
@@ -1592,13 +1481,13 @@ static int e1000_link_test(struct e1000_adapter *adapter, u64 *data)
1592 *data = 0; 1481 *data = 0;
1593 if (hw->media_type == e1000_media_type_internal_serdes) { 1482 if (hw->media_type == e1000_media_type_internal_serdes) {
1594 int i = 0; 1483 int i = 0;
1595 hw->serdes_link_down = true; 1484 hw->serdes_has_link = false;
1596 1485
1597 /* On some blade server designs, link establishment 1486 /* On some blade server designs, link establishment
1598 * could take as long as 2-3 minutes */ 1487 * could take as long as 2-3 minutes */
1599 do { 1488 do {
1600 e1000_check_for_link(hw); 1489 e1000_check_for_link(hw);
1601 if (!hw->serdes_link_down) 1490 if (hw->serdes_has_link)
1602 return *data; 1491 return *data;
1603 msleep(20); 1492 msleep(20);
1604 } while (i++ < 3750); 1493 } while (i++ < 3750);
@@ -1716,15 +1605,11 @@ static int e1000_wol_exclusion(struct e1000_adapter *adapter,
1716 case E1000_DEV_ID_82545EM_COPPER: 1605 case E1000_DEV_ID_82545EM_COPPER:
1717 case E1000_DEV_ID_82546GB_QUAD_COPPER: 1606 case E1000_DEV_ID_82546GB_QUAD_COPPER:
1718 case E1000_DEV_ID_82546GB_PCIE: 1607 case E1000_DEV_ID_82546GB_PCIE:
1719 case E1000_DEV_ID_82571EB_SERDES_QUAD:
1720 /* these don't support WoL at all */ 1608 /* these don't support WoL at all */
1721 wol->supported = 0; 1609 wol->supported = 0;
1722 break; 1610 break;
1723 case E1000_DEV_ID_82546EB_FIBER: 1611 case E1000_DEV_ID_82546EB_FIBER:
1724 case E1000_DEV_ID_82546GB_FIBER: 1612 case E1000_DEV_ID_82546GB_FIBER:
1725 case E1000_DEV_ID_82571EB_FIBER:
1726 case E1000_DEV_ID_82571EB_SERDES:
1727 case E1000_DEV_ID_82571EB_COPPER:
1728 /* Wake events not supported on port B */ 1613 /* Wake events not supported on port B */
1729 if (er32(STATUS) & E1000_STATUS_FUNC_1) { 1614 if (er32(STATUS) & E1000_STATUS_FUNC_1) {
1730 wol->supported = 0; 1615 wol->supported = 0;
@@ -1733,10 +1618,6 @@ static int e1000_wol_exclusion(struct e1000_adapter *adapter,
1733 /* return success for non excluded adapter ports */ 1618 /* return success for non excluded adapter ports */
1734 retval = 0; 1619 retval = 0;
1735 break; 1620 break;
1736 case E1000_DEV_ID_82571EB_QUAD_COPPER:
1737 case E1000_DEV_ID_82571EB_QUAD_FIBER:
1738 case E1000_DEV_ID_82571EB_QUAD_COPPER_LOWPROFILE:
1739 case E1000_DEV_ID_82571PT_QUAD_COPPER:
1740 case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3: 1621 case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
1741 /* quad port adapters only support WoL on port A */ 1622 /* quad port adapters only support WoL on port A */
1742 if (!adapter->quad_port_a) { 1623 if (!adapter->quad_port_a) {
@@ -1872,30 +1753,15 @@ static int e1000_phys_id(struct net_device *netdev, u32 data)
1872 if (!data) 1753 if (!data)
1873 data = INT_MAX; 1754 data = INT_MAX;
1874 1755
1875 if (hw->mac_type < e1000_82571) { 1756 if (!adapter->blink_timer.function) {
1876 if (!adapter->blink_timer.function) { 1757 init_timer(&adapter->blink_timer);
1877 init_timer(&adapter->blink_timer); 1758 adapter->blink_timer.function = e1000_led_blink_callback;
1878 adapter->blink_timer.function = e1000_led_blink_callback; 1759 adapter->blink_timer.data = (unsigned long)adapter;
1879 adapter->blink_timer.data = (unsigned long)adapter;
1880 }
1881 e1000_setup_led(hw);
1882 mod_timer(&adapter->blink_timer, jiffies);
1883 msleep_interruptible(data * 1000);
1884 del_timer_sync(&adapter->blink_timer);
1885 } else if (hw->phy_type == e1000_phy_ife) {
1886 if (!adapter->blink_timer.function) {
1887 init_timer(&adapter->blink_timer);
1888 adapter->blink_timer.function = e1000_led_blink_callback;
1889 adapter->blink_timer.data = (unsigned long)adapter;
1890 }
1891 mod_timer(&adapter->blink_timer, jiffies);
1892 msleep_interruptible(data * 1000);
1893 del_timer_sync(&adapter->blink_timer);
1894 e1000_write_phy_reg(&(adapter->hw), IFE_PHY_SPECIAL_CONTROL_LED, 0);
1895 } else {
1896 e1000_blink_led_start(hw);
1897 msleep_interruptible(data * 1000);
1898 } 1760 }
1761 e1000_setup_led(hw);
1762 mod_timer(&adapter->blink_timer, jiffies);
1763 msleep_interruptible(data * 1000);
1764 del_timer_sync(&adapter->blink_timer);
1899 1765
1900 e1000_led_off(hw); 1766 e1000_led_off(hw);
1901 clear_bit(E1000_LED_ON, &adapter->led_status); 1767 clear_bit(E1000_LED_ON, &adapter->led_status);
diff --git a/drivers/net/e1000/e1000_hw.c b/drivers/net/e1000/e1000_hw.c
index 45ac225a7aaa..8d7d87f12827 100644
--- a/drivers/net/e1000/e1000_hw.c
+++ b/drivers/net/e1000/e1000_hw.c
@@ -24,88 +24,34 @@
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> 24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26 26
27*******************************************************************************/ 27 */
28 28
29/* e1000_hw.c 29/* e1000_hw.c
30 * Shared functions for accessing and configuring the MAC 30 * Shared functions for accessing and configuring the MAC
31 */ 31 */
32 32
33
34#include "e1000_hw.h" 33#include "e1000_hw.h"
35 34
36static s32 e1000_swfw_sync_acquire(struct e1000_hw *hw, u16 mask);
37static void e1000_swfw_sync_release(struct e1000_hw *hw, u16 mask);
38static s32 e1000_read_kmrn_reg(struct e1000_hw *hw, u32 reg_addr, u16 *data);
39static s32 e1000_write_kmrn_reg(struct e1000_hw *hw, u32 reg_addr, u16 data);
40static s32 e1000_get_software_semaphore(struct e1000_hw *hw);
41static void e1000_release_software_semaphore(struct e1000_hw *hw);
42
43static u8 e1000_arc_subsystem_valid(struct e1000_hw *hw);
44static s32 e1000_check_downshift(struct e1000_hw *hw); 35static s32 e1000_check_downshift(struct e1000_hw *hw);
45static s32 e1000_check_polarity(struct e1000_hw *hw, 36static s32 e1000_check_polarity(struct e1000_hw *hw,
46 e1000_rev_polarity *polarity); 37 e1000_rev_polarity *polarity);
47static void e1000_clear_hw_cntrs(struct e1000_hw *hw); 38static void e1000_clear_hw_cntrs(struct e1000_hw *hw);
48static void e1000_clear_vfta(struct e1000_hw *hw); 39static void e1000_clear_vfta(struct e1000_hw *hw);
49static s32 e1000_commit_shadow_ram(struct e1000_hw *hw);
50static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, 40static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw,
51 bool link_up); 41 bool link_up);
52static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw); 42static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw);
53static s32 e1000_detect_gig_phy(struct e1000_hw *hw); 43static s32 e1000_detect_gig_phy(struct e1000_hw *hw);
54static s32 e1000_erase_ich8_4k_segment(struct e1000_hw *hw, u32 bank);
55static s32 e1000_get_auto_rd_done(struct e1000_hw *hw); 44static s32 e1000_get_auto_rd_done(struct e1000_hw *hw);
56static s32 e1000_get_cable_length(struct e1000_hw *hw, u16 *min_length, 45static s32 e1000_get_cable_length(struct e1000_hw *hw, u16 *min_length,
57 u16 *max_length); 46 u16 *max_length);
58static s32 e1000_get_hw_eeprom_semaphore(struct e1000_hw *hw);
59static s32 e1000_get_phy_cfg_done(struct e1000_hw *hw); 47static s32 e1000_get_phy_cfg_done(struct e1000_hw *hw);
60static s32 e1000_get_software_flag(struct e1000_hw *hw);
61static s32 e1000_ich8_cycle_init(struct e1000_hw *hw);
62static s32 e1000_ich8_flash_cycle(struct e1000_hw *hw, u32 timeout);
63static s32 e1000_id_led_init(struct e1000_hw *hw); 48static s32 e1000_id_led_init(struct e1000_hw *hw);
64static s32 e1000_init_lcd_from_nvm_config_region(struct e1000_hw *hw,
65 u32 cnf_base_addr,
66 u32 cnf_size);
67static s32 e1000_init_lcd_from_nvm(struct e1000_hw *hw);
68static void e1000_init_rx_addrs(struct e1000_hw *hw); 49static void e1000_init_rx_addrs(struct e1000_hw *hw);
69static void e1000_initialize_hardware_bits(struct e1000_hw *hw);
70static bool e1000_is_onboard_nvm_eeprom(struct e1000_hw *hw);
71static s32 e1000_kumeran_lock_loss_workaround(struct e1000_hw *hw);
72static s32 e1000_mng_enable_host_if(struct e1000_hw *hw);
73static s32 e1000_mng_host_if_write(struct e1000_hw *hw, u8 *buffer, u16 length,
74 u16 offset, u8 *sum);
75static s32 e1000_mng_write_cmd_header(struct e1000_hw* hw,
76 struct e1000_host_mng_command_header
77 *hdr);
78static s32 e1000_mng_write_commit(struct e1000_hw *hw);
79static s32 e1000_phy_ife_get_info(struct e1000_hw *hw,
80 struct e1000_phy_info *phy_info);
81static s32 e1000_phy_igp_get_info(struct e1000_hw *hw, 50static s32 e1000_phy_igp_get_info(struct e1000_hw *hw,
82 struct e1000_phy_info *phy_info); 51 struct e1000_phy_info *phy_info);
83static s32 e1000_read_eeprom_eerd(struct e1000_hw *hw, u16 offset, u16 words,
84 u16 *data);
85static s32 e1000_write_eeprom_eewr(struct e1000_hw *hw, u16 offset, u16 words,
86 u16 *data);
87static s32 e1000_poll_eerd_eewr_done(struct e1000_hw *hw, int eerd);
88static s32 e1000_phy_m88_get_info(struct e1000_hw *hw, 52static s32 e1000_phy_m88_get_info(struct e1000_hw *hw,
89 struct e1000_phy_info *phy_info); 53 struct e1000_phy_info *phy_info);
90static void e1000_put_hw_eeprom_semaphore(struct e1000_hw *hw);
91static s32 e1000_read_ich8_byte(struct e1000_hw *hw, u32 index, u8 *data);
92static s32 e1000_verify_write_ich8_byte(struct e1000_hw *hw, u32 index,
93 u8 byte);
94static s32 e1000_write_ich8_byte(struct e1000_hw *hw, u32 index, u8 byte);
95static s32 e1000_read_ich8_word(struct e1000_hw *hw, u32 index, u16 *data);
96static s32 e1000_read_ich8_data(struct e1000_hw *hw, u32 index, u32 size,
97 u16 *data);
98static s32 e1000_write_ich8_data(struct e1000_hw *hw, u32 index, u32 size,
99 u16 data);
100static s32 e1000_read_eeprom_ich8(struct e1000_hw *hw, u16 offset, u16 words,
101 u16 *data);
102static s32 e1000_write_eeprom_ich8(struct e1000_hw *hw, u16 offset, u16 words,
103 u16 *data);
104static void e1000_release_software_flag(struct e1000_hw *hw);
105static s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active); 54static s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active);
106static s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active);
107static s32 e1000_set_pci_ex_no_snoop(struct e1000_hw *hw, u32 no_snoop);
108static void e1000_set_pci_express_master_disable(struct e1000_hw *hw);
109static s32 e1000_wait_autoneg(struct e1000_hw *hw); 55static s32 e1000_wait_autoneg(struct e1000_hw *hw);
110static void e1000_write_reg_io(struct e1000_hw *hw, u32 offset, u32 value); 56static void e1000_write_reg_io(struct e1000_hw *hw, u32 offset, u32 value);
111static s32 e1000_set_phy_type(struct e1000_hw *hw); 57static s32 e1000_set_phy_type(struct e1000_hw *hw);
@@ -117,12 +63,11 @@ static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw);
117static s32 e1000_config_mac_to_phy(struct e1000_hw *hw); 63static s32 e1000_config_mac_to_phy(struct e1000_hw *hw);
118static void e1000_raise_mdi_clk(struct e1000_hw *hw, u32 *ctrl); 64static void e1000_raise_mdi_clk(struct e1000_hw *hw, u32 *ctrl);
119static void e1000_lower_mdi_clk(struct e1000_hw *hw, u32 *ctrl); 65static void e1000_lower_mdi_clk(struct e1000_hw *hw, u32 *ctrl);
120static void e1000_shift_out_mdi_bits(struct e1000_hw *hw, u32 data, 66static void e1000_shift_out_mdi_bits(struct e1000_hw *hw, u32 data, u16 count);
121 u16 count);
122static u16 e1000_shift_in_mdi_bits(struct e1000_hw *hw); 67static u16 e1000_shift_in_mdi_bits(struct e1000_hw *hw);
123static s32 e1000_phy_reset_dsp(struct e1000_hw *hw); 68static s32 e1000_phy_reset_dsp(struct e1000_hw *hw);
124static s32 e1000_write_eeprom_spi(struct e1000_hw *hw, u16 offset, 69static s32 e1000_write_eeprom_spi(struct e1000_hw *hw, u16 offset,
125 u16 words, u16 *data); 70 u16 words, u16 *data);
126static s32 e1000_write_eeprom_microwire(struct e1000_hw *hw, u16 offset, 71static s32 e1000_write_eeprom_microwire(struct e1000_hw *hw, u16 offset,
127 u16 words, u16 *data); 72 u16 words, u16 *data);
128static s32 e1000_spi_eeprom_ready(struct e1000_hw *hw); 73static s32 e1000_spi_eeprom_ready(struct e1000_hw *hw);
@@ -131,7 +76,7 @@ static void e1000_lower_ee_clk(struct e1000_hw *hw, u32 *eecd);
131static void e1000_shift_out_ee_bits(struct e1000_hw *hw, u16 data, u16 count); 76static void e1000_shift_out_ee_bits(struct e1000_hw *hw, u16 data, u16 count);
132static s32 e1000_write_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr, 77static s32 e1000_write_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
133 u16 phy_data); 78 u16 phy_data);
134static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw,u32 reg_addr, 79static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
135 u16 *phy_data); 80 u16 *phy_data);
136static u16 e1000_shift_in_ee_bits(struct e1000_hw *hw, u16 count); 81static u16 e1000_shift_in_ee_bits(struct e1000_hw *hw, u16 count);
137static s32 e1000_acquire_eeprom(struct e1000_hw *hw); 82static s32 e1000_acquire_eeprom(struct e1000_hw *hw);
@@ -140,188 +85,164 @@ static void e1000_standby_eeprom(struct e1000_hw *hw);
140static s32 e1000_set_vco_speed(struct e1000_hw *hw); 85static s32 e1000_set_vco_speed(struct e1000_hw *hw);
141static s32 e1000_polarity_reversal_workaround(struct e1000_hw *hw); 86static s32 e1000_polarity_reversal_workaround(struct e1000_hw *hw);
142static s32 e1000_set_phy_mode(struct e1000_hw *hw); 87static s32 e1000_set_phy_mode(struct e1000_hw *hw);
143static s32 e1000_host_if_read_cookie(struct e1000_hw *hw, u8 *buffer); 88static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words,
144static u8 e1000_calculate_mng_checksum(char *buffer, u32 length); 89 u16 *data);
145static s32 e1000_configure_kmrn_for_10_100(struct e1000_hw *hw, u16 duplex); 90static s32 e1000_do_write_eeprom(struct e1000_hw *hw, u16 offset, u16 words,
146static s32 e1000_configure_kmrn_for_1000(struct e1000_hw *hw); 91 u16 *data);
147static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
148static s32 e1000_do_write_eeprom(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
149 92
150/* IGP cable length table */ 93/* IGP cable length table */
151static const 94static const
152u16 e1000_igp_cable_length_table[IGP01E1000_AGC_LENGTH_TABLE_SIZE] = 95u16 e1000_igp_cable_length_table[IGP01E1000_AGC_LENGTH_TABLE_SIZE] = {
153 { 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 96 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
154 5, 10, 10, 10, 10, 10, 10, 10, 20, 20, 20, 20, 20, 25, 25, 25, 97 5, 10, 10, 10, 10, 10, 10, 10, 20, 20, 20, 20, 20, 25, 25, 25,
155 25, 25, 25, 25, 30, 30, 30, 30, 40, 40, 40, 40, 40, 40, 40, 40, 98 25, 25, 25, 25, 30, 30, 30, 30, 40, 40, 40, 40, 40, 40, 40, 40,
156 40, 50, 50, 50, 50, 50, 50, 50, 60, 60, 60, 60, 60, 60, 60, 60, 99 40, 50, 50, 50, 50, 50, 50, 50, 60, 60, 60, 60, 60, 60, 60, 60,
157 60, 70, 70, 70, 70, 70, 70, 80, 80, 80, 80, 80, 80, 90, 90, 90, 100 60, 70, 70, 70, 70, 70, 70, 80, 80, 80, 80, 80, 80, 90, 90, 90,
158 90, 90, 90, 90, 90, 90, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 101 90, 90, 90, 90, 90, 90, 100, 100, 100, 100, 100, 100, 100, 100, 100,
159 100, 100, 100, 100, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 102 100,
160 110, 110, 110, 110, 110, 110, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120}; 103 100, 100, 100, 100, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110,
161 104 110, 110,
162static const 105 110, 110, 110, 110, 110, 110, 120, 120, 120, 120, 120, 120, 120, 120,
163u16 e1000_igp_2_cable_length_table[IGP02E1000_AGC_LENGTH_TABLE_SIZE] = 106 120, 120
164 { 0, 0, 0, 0, 0, 0, 0, 0, 3, 5, 8, 11, 13, 16, 18, 21, 107};
165 0, 0, 0, 3, 6, 10, 13, 16, 19, 23, 26, 29, 32, 35, 38, 41,
166 6, 10, 14, 18, 22, 26, 30, 33, 37, 41, 44, 48, 51, 54, 58, 61,
167 21, 26, 31, 35, 40, 44, 49, 53, 57, 61, 65, 68, 72, 75, 79, 82,
168 40, 45, 51, 56, 61, 66, 70, 75, 79, 83, 87, 91, 94, 98, 101, 104,
169 60, 66, 72, 77, 82, 87, 92, 96, 100, 104, 108, 111, 114, 117, 119, 121,
170 83, 89, 95, 100, 105, 109, 113, 116, 119, 122, 124,
171 104, 109, 114, 118, 121, 124};
172 108
173static DEFINE_SPINLOCK(e1000_eeprom_lock); 109static DEFINE_SPINLOCK(e1000_eeprom_lock);
174 110
175/****************************************************************************** 111/**
176 * Set the phy type member in the hw struct. 112 * e1000_set_phy_type - Set the phy type member in the hw struct.
177 * 113 * @hw: Struct containing variables accessed by shared code
178 * hw - Struct containing variables accessed by shared code 114 */
179 *****************************************************************************/
180static s32 e1000_set_phy_type(struct e1000_hw *hw) 115static s32 e1000_set_phy_type(struct e1000_hw *hw)
181{ 116{
182 DEBUGFUNC("e1000_set_phy_type"); 117 DEBUGFUNC("e1000_set_phy_type");
183
184 if (hw->mac_type == e1000_undefined)
185 return -E1000_ERR_PHY_TYPE;
186
187 switch (hw->phy_id) {
188 case M88E1000_E_PHY_ID:
189 case M88E1000_I_PHY_ID:
190 case M88E1011_I_PHY_ID:
191 case M88E1111_I_PHY_ID:
192 hw->phy_type = e1000_phy_m88;
193 break;
194 case IGP01E1000_I_PHY_ID:
195 if (hw->mac_type == e1000_82541 ||
196 hw->mac_type == e1000_82541_rev_2 ||
197 hw->mac_type == e1000_82547 ||
198 hw->mac_type == e1000_82547_rev_2) {
199 hw->phy_type = e1000_phy_igp;
200 break;
201 }
202 case IGP03E1000_E_PHY_ID:
203 hw->phy_type = e1000_phy_igp_3;
204 break;
205 case IFE_E_PHY_ID:
206 case IFE_PLUS_E_PHY_ID:
207 case IFE_C_E_PHY_ID:
208 hw->phy_type = e1000_phy_ife;
209 break;
210 case GG82563_E_PHY_ID:
211 if (hw->mac_type == e1000_80003es2lan) {
212 hw->phy_type = e1000_phy_gg82563;
213 break;
214 }
215 /* Fall Through */
216 default:
217 /* Should never have loaded on this device */
218 hw->phy_type = e1000_phy_undefined;
219 return -E1000_ERR_PHY_TYPE;
220 }
221
222 return E1000_SUCCESS;
223}
224
225/******************************************************************************
226 * IGP phy init script - initializes the GbE PHY
227 *
228 * hw - Struct containing variables accessed by shared code
229 *****************************************************************************/
230static void e1000_phy_init_script(struct e1000_hw *hw)
231{
232 u32 ret_val;
233 u16 phy_saved_data;
234
235 DEBUGFUNC("e1000_phy_init_script");
236
237 if (hw->phy_init_script) {
238 msleep(20);
239
240 /* Save off the current value of register 0x2F5B to be restored at
241 * the end of this routine. */
242 ret_val = e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data);
243
244 /* Disabled the PHY transmitter */
245 e1000_write_phy_reg(hw, 0x2F5B, 0x0003);
246 118
247 msleep(20); 119 if (hw->mac_type == e1000_undefined)
120 return -E1000_ERR_PHY_TYPE;
248 121
249 e1000_write_phy_reg(hw,0x0000,0x0140); 122 switch (hw->phy_id) {
250 123 case M88E1000_E_PHY_ID:
251 msleep(5); 124 case M88E1000_I_PHY_ID:
252 125 case M88E1011_I_PHY_ID:
253 switch (hw->mac_type) { 126 case M88E1111_I_PHY_ID:
254 case e1000_82541: 127 hw->phy_type = e1000_phy_m88;
255 case e1000_82547: 128 break;
256 e1000_write_phy_reg(hw, 0x1F95, 0x0001); 129 case IGP01E1000_I_PHY_ID:
257 130 if (hw->mac_type == e1000_82541 ||
258 e1000_write_phy_reg(hw, 0x1F71, 0xBD21); 131 hw->mac_type == e1000_82541_rev_2 ||
259 132 hw->mac_type == e1000_82547 ||
260 e1000_write_phy_reg(hw, 0x1F79, 0x0018); 133 hw->mac_type == e1000_82547_rev_2) {
261 134 hw->phy_type = e1000_phy_igp;
262 e1000_write_phy_reg(hw, 0x1F30, 0x1600); 135 break;
263 136 }
264 e1000_write_phy_reg(hw, 0x1F31, 0x0014); 137 default:
265 138 /* Should never have loaded on this device */
266 e1000_write_phy_reg(hw, 0x1F32, 0x161C); 139 hw->phy_type = e1000_phy_undefined;
267 140 return -E1000_ERR_PHY_TYPE;
268 e1000_write_phy_reg(hw, 0x1F94, 0x0003); 141 }
269
270 e1000_write_phy_reg(hw, 0x1F96, 0x003F);
271
272 e1000_write_phy_reg(hw, 0x2010, 0x0008);
273 break;
274
275 case e1000_82541_rev_2:
276 case e1000_82547_rev_2:
277 e1000_write_phy_reg(hw, 0x1F73, 0x0099);
278 break;
279 default:
280 break;
281 }
282
283 e1000_write_phy_reg(hw, 0x0000, 0x3300);
284
285 msleep(20);
286
287 /* Now enable the transmitter */
288 e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data);
289
290 if (hw->mac_type == e1000_82547) {
291 u16 fused, fine, coarse;
292
293 /* Move to analog registers page */
294 e1000_read_phy_reg(hw, IGP01E1000_ANALOG_SPARE_FUSE_STATUS, &fused);
295
296 if (!(fused & IGP01E1000_ANALOG_SPARE_FUSE_ENABLED)) {
297 e1000_read_phy_reg(hw, IGP01E1000_ANALOG_FUSE_STATUS, &fused);
298 142
299 fine = fused & IGP01E1000_ANALOG_FUSE_FINE_MASK; 143 return E1000_SUCCESS;
300 coarse = fused & IGP01E1000_ANALOG_FUSE_COARSE_MASK; 144}
301 145
302 if (coarse > IGP01E1000_ANALOG_FUSE_COARSE_THRESH) { 146/**
303 coarse -= IGP01E1000_ANALOG_FUSE_COARSE_10; 147 * e1000_phy_init_script - IGP phy init script - initializes the GbE PHY
304 fine -= IGP01E1000_ANALOG_FUSE_FINE_1; 148 * @hw: Struct containing variables accessed by shared code
305 } else if (coarse == IGP01E1000_ANALOG_FUSE_COARSE_THRESH) 149 */
306 fine -= IGP01E1000_ANALOG_FUSE_FINE_10; 150static void e1000_phy_init_script(struct e1000_hw *hw)
151{
152 u32 ret_val;
153 u16 phy_saved_data;
154
155 DEBUGFUNC("e1000_phy_init_script");
156
157 if (hw->phy_init_script) {
158 msleep(20);
159
160 /* Save off the current value of register 0x2F5B to be restored at
161 * the end of this routine. */
162 ret_val = e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data);
163
164 /* Disabled the PHY transmitter */
165 e1000_write_phy_reg(hw, 0x2F5B, 0x0003);
166 msleep(20);
167
168 e1000_write_phy_reg(hw, 0x0000, 0x0140);
169 msleep(5);
170
171 switch (hw->mac_type) {
172 case e1000_82541:
173 case e1000_82547:
174 e1000_write_phy_reg(hw, 0x1F95, 0x0001);
175 e1000_write_phy_reg(hw, 0x1F71, 0xBD21);
176 e1000_write_phy_reg(hw, 0x1F79, 0x0018);
177 e1000_write_phy_reg(hw, 0x1F30, 0x1600);
178 e1000_write_phy_reg(hw, 0x1F31, 0x0014);
179 e1000_write_phy_reg(hw, 0x1F32, 0x161C);
180 e1000_write_phy_reg(hw, 0x1F94, 0x0003);
181 e1000_write_phy_reg(hw, 0x1F96, 0x003F);
182 e1000_write_phy_reg(hw, 0x2010, 0x0008);
183 break;
307 184
308 fused = (fused & IGP01E1000_ANALOG_FUSE_POLY_MASK) | 185 case e1000_82541_rev_2:
309 (fine & IGP01E1000_ANALOG_FUSE_FINE_MASK) | 186 case e1000_82547_rev_2:
310 (coarse & IGP01E1000_ANALOG_FUSE_COARSE_MASK); 187 e1000_write_phy_reg(hw, 0x1F73, 0x0099);
188 break;
189 default:
190 break;
191 }
311 192
312 e1000_write_phy_reg(hw, IGP01E1000_ANALOG_FUSE_CONTROL, fused); 193 e1000_write_phy_reg(hw, 0x0000, 0x3300);
313 e1000_write_phy_reg(hw, IGP01E1000_ANALOG_FUSE_BYPASS, 194 msleep(20);
314 IGP01E1000_ANALOG_FUSE_ENABLE_SW_CONTROL); 195
315 } 196 /* Now enable the transmitter */
316 } 197 e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data);
317 } 198
199 if (hw->mac_type == e1000_82547) {
200 u16 fused, fine, coarse;
201
202 /* Move to analog registers page */
203 e1000_read_phy_reg(hw,
204 IGP01E1000_ANALOG_SPARE_FUSE_STATUS,
205 &fused);
206
207 if (!(fused & IGP01E1000_ANALOG_SPARE_FUSE_ENABLED)) {
208 e1000_read_phy_reg(hw,
209 IGP01E1000_ANALOG_FUSE_STATUS,
210 &fused);
211
212 fine = fused & IGP01E1000_ANALOG_FUSE_FINE_MASK;
213 coarse =
214 fused & IGP01E1000_ANALOG_FUSE_COARSE_MASK;
215
216 if (coarse >
217 IGP01E1000_ANALOG_FUSE_COARSE_THRESH) {
218 coarse -=
219 IGP01E1000_ANALOG_FUSE_COARSE_10;
220 fine -= IGP01E1000_ANALOG_FUSE_FINE_1;
221 } else if (coarse ==
222 IGP01E1000_ANALOG_FUSE_COARSE_THRESH)
223 fine -= IGP01E1000_ANALOG_FUSE_FINE_10;
224
225 fused =
226 (fused & IGP01E1000_ANALOG_FUSE_POLY_MASK) |
227 (fine & IGP01E1000_ANALOG_FUSE_FINE_MASK) |
228 (coarse &
229 IGP01E1000_ANALOG_FUSE_COARSE_MASK);
230
231 e1000_write_phy_reg(hw,
232 IGP01E1000_ANALOG_FUSE_CONTROL,
233 fused);
234 e1000_write_phy_reg(hw,
235 IGP01E1000_ANALOG_FUSE_BYPASS,
236 IGP01E1000_ANALOG_FUSE_ENABLE_SW_CONTROL);
237 }
238 }
239 }
318} 240}
319 241
320/****************************************************************************** 242/**
321 * Set the mac type member in the hw struct. 243 * e1000_set_mac_type - Set the mac type member in the hw struct.
322 * 244 * @hw: Struct containing variables accessed by shared code
323 * hw - Struct containing variables accessed by shared code 245 */
324 *****************************************************************************/
325s32 e1000_set_mac_type(struct e1000_hw *hw) 246s32 e1000_set_mac_type(struct e1000_hw *hw)
326{ 247{
327 DEBUGFUNC("e1000_set_mac_type"); 248 DEBUGFUNC("e1000_set_mac_type");
@@ -397,61 +318,12 @@ s32 e1000_set_mac_type(struct e1000_hw *hw)
397 case E1000_DEV_ID_82547GI: 318 case E1000_DEV_ID_82547GI:
398 hw->mac_type = e1000_82547_rev_2; 319 hw->mac_type = e1000_82547_rev_2;
399 break; 320 break;
400 case E1000_DEV_ID_82571EB_COPPER:
401 case E1000_DEV_ID_82571EB_FIBER:
402 case E1000_DEV_ID_82571EB_SERDES:
403 case E1000_DEV_ID_82571EB_SERDES_DUAL:
404 case E1000_DEV_ID_82571EB_SERDES_QUAD:
405 case E1000_DEV_ID_82571EB_QUAD_COPPER:
406 case E1000_DEV_ID_82571PT_QUAD_COPPER:
407 case E1000_DEV_ID_82571EB_QUAD_FIBER:
408 case E1000_DEV_ID_82571EB_QUAD_COPPER_LOWPROFILE:
409 hw->mac_type = e1000_82571;
410 break;
411 case E1000_DEV_ID_82572EI_COPPER:
412 case E1000_DEV_ID_82572EI_FIBER:
413 case E1000_DEV_ID_82572EI_SERDES:
414 case E1000_DEV_ID_82572EI:
415 hw->mac_type = e1000_82572;
416 break;
417 case E1000_DEV_ID_82573E:
418 case E1000_DEV_ID_82573E_IAMT:
419 case E1000_DEV_ID_82573L:
420 hw->mac_type = e1000_82573;
421 break;
422 case E1000_DEV_ID_80003ES2LAN_COPPER_SPT:
423 case E1000_DEV_ID_80003ES2LAN_SERDES_SPT:
424 case E1000_DEV_ID_80003ES2LAN_COPPER_DPT:
425 case E1000_DEV_ID_80003ES2LAN_SERDES_DPT:
426 hw->mac_type = e1000_80003es2lan;
427 break;
428 case E1000_DEV_ID_ICH8_IGP_M_AMT:
429 case E1000_DEV_ID_ICH8_IGP_AMT:
430 case E1000_DEV_ID_ICH8_IGP_C:
431 case E1000_DEV_ID_ICH8_IFE:
432 case E1000_DEV_ID_ICH8_IFE_GT:
433 case E1000_DEV_ID_ICH8_IFE_G:
434 case E1000_DEV_ID_ICH8_IGP_M:
435 hw->mac_type = e1000_ich8lan;
436 break;
437 default: 321 default:
438 /* Should never have loaded on this device */ 322 /* Should never have loaded on this device */
439 return -E1000_ERR_MAC_TYPE; 323 return -E1000_ERR_MAC_TYPE;
440 } 324 }
441 325
442 switch (hw->mac_type) { 326 switch (hw->mac_type) {
443 case e1000_ich8lan:
444 hw->swfwhw_semaphore_present = true;
445 hw->asf_firmware_present = true;
446 break;
447 case e1000_80003es2lan:
448 hw->swfw_sync_present = true;
449 /* fall through */
450 case e1000_82571:
451 case e1000_82572:
452 case e1000_82573:
453 hw->eeprom_semaphore_present = true;
454 /* fall through */
455 case e1000_82541: 327 case e1000_82541:
456 case e1000_82547: 328 case e1000_82547:
457 case e1000_82541_rev_2: 329 case e1000_82541_rev_2:
@@ -468,6058 +340,4500 @@ s32 e1000_set_mac_type(struct e1000_hw *hw)
468 if (hw->mac_type == e1000_82543) 340 if (hw->mac_type == e1000_82543)
469 hw->bad_tx_carr_stats_fd = true; 341 hw->bad_tx_carr_stats_fd = true;
470 342
471 /* capable of receiving management packets to the host */
472 if (hw->mac_type >= e1000_82571)
473 hw->has_manc2h = true;
474
475 /* In rare occasions, ESB2 systems would end up started without
476 * the RX unit being turned on.
477 */
478 if (hw->mac_type == e1000_80003es2lan)
479 hw->rx_needs_kicking = true;
480
481 if (hw->mac_type > e1000_82544) 343 if (hw->mac_type > e1000_82544)
482 hw->has_smbus = true; 344 hw->has_smbus = true;
483 345
484 return E1000_SUCCESS; 346 return E1000_SUCCESS;
485} 347}
486 348
487/***************************************************************************** 349/**
488 * Set media type and TBI compatibility. 350 * e1000_set_media_type - Set media type and TBI compatibility.
489 * 351 * @hw: Struct containing variables accessed by shared code
490 * hw - Struct containing variables accessed by shared code 352 */
491 * **************************************************************************/
492void e1000_set_media_type(struct e1000_hw *hw) 353void e1000_set_media_type(struct e1000_hw *hw)
493{ 354{
494 u32 status; 355 u32 status;
495 356
496 DEBUGFUNC("e1000_set_media_type"); 357 DEBUGFUNC("e1000_set_media_type");
497 358
498 if (hw->mac_type != e1000_82543) { 359 if (hw->mac_type != e1000_82543) {
499 /* tbi_compatibility is only valid on 82543 */ 360 /* tbi_compatibility is only valid on 82543 */
500 hw->tbi_compatibility_en = false; 361 hw->tbi_compatibility_en = false;
501 } 362 }
502 363
503 switch (hw->device_id) { 364 switch (hw->device_id) {
504 case E1000_DEV_ID_82545GM_SERDES: 365 case E1000_DEV_ID_82545GM_SERDES:
505 case E1000_DEV_ID_82546GB_SERDES: 366 case E1000_DEV_ID_82546GB_SERDES:
506 case E1000_DEV_ID_82571EB_SERDES: 367 hw->media_type = e1000_media_type_internal_serdes;
507 case E1000_DEV_ID_82571EB_SERDES_DUAL: 368 break;
508 case E1000_DEV_ID_82571EB_SERDES_QUAD: 369 default:
509 case E1000_DEV_ID_82572EI_SERDES: 370 switch (hw->mac_type) {
510 case E1000_DEV_ID_80003ES2LAN_SERDES_DPT: 371 case e1000_82542_rev2_0:
511 hw->media_type = e1000_media_type_internal_serdes; 372 case e1000_82542_rev2_1:
512 break; 373 hw->media_type = e1000_media_type_fiber;
513 default: 374 break;
514 switch (hw->mac_type) { 375 default:
515 case e1000_82542_rev2_0: 376 status = er32(STATUS);
516 case e1000_82542_rev2_1: 377 if (status & E1000_STATUS_TBIMODE) {
517 hw->media_type = e1000_media_type_fiber; 378 hw->media_type = e1000_media_type_fiber;
518 break; 379 /* tbi_compatibility not valid on fiber */
519 case e1000_ich8lan: 380 hw->tbi_compatibility_en = false;
520 case e1000_82573: 381 } else {
521 /* The STATUS_TBIMODE bit is reserved or reused for the this 382 hw->media_type = e1000_media_type_copper;
522 * device. 383 }
523 */ 384 break;
524 hw->media_type = e1000_media_type_copper; 385 }
525 break; 386 }
526 default:
527 status = er32(STATUS);
528 if (status & E1000_STATUS_TBIMODE) {
529 hw->media_type = e1000_media_type_fiber;
530 /* tbi_compatibility not valid on fiber */
531 hw->tbi_compatibility_en = false;
532 } else {
533 hw->media_type = e1000_media_type_copper;
534 }
535 break;
536 }
537 }
538} 387}
539 388
540/****************************************************************************** 389/**
541 * Reset the transmit and receive units; mask and clear all interrupts. 390 * e1000_reset_hw: reset the hardware completely
391 * @hw: Struct containing variables accessed by shared code
542 * 392 *
543 * hw - Struct containing variables accessed by shared code 393 * Reset the transmit and receive units; mask and clear all interrupts.
544 *****************************************************************************/ 394 */
545s32 e1000_reset_hw(struct e1000_hw *hw) 395s32 e1000_reset_hw(struct e1000_hw *hw)
546{ 396{
547 u32 ctrl; 397 u32 ctrl;
548 u32 ctrl_ext; 398 u32 ctrl_ext;
549 u32 icr; 399 u32 icr;
550 u32 manc; 400 u32 manc;
551 u32 led_ctrl; 401 u32 led_ctrl;
552 u32 timeout; 402 s32 ret_val;
553 u32 extcnf_ctrl;
554 s32 ret_val;
555
556 DEBUGFUNC("e1000_reset_hw");
557
558 /* For 82542 (rev 2.0), disable MWI before issuing a device reset */
559 if (hw->mac_type == e1000_82542_rev2_0) {
560 DEBUGOUT("Disabling MWI on 82542 rev 2.0\n");
561 e1000_pci_clear_mwi(hw);
562 }
563
564 if (hw->bus_type == e1000_bus_type_pci_express) {
565 /* Prevent the PCI-E bus from sticking if there is no TLP connection
566 * on the last TLP read/write transaction when MAC is reset.
567 */
568 if (e1000_disable_pciex_master(hw) != E1000_SUCCESS) {
569 DEBUGOUT("PCI-E Master disable polling has failed.\n");
570 }
571 }
572
573 /* Clear interrupt mask to stop board from generating interrupts */
574 DEBUGOUT("Masking off all interrupts\n");
575 ew32(IMC, 0xffffffff);
576
577 /* Disable the Transmit and Receive units. Then delay to allow
578 * any pending transactions to complete before we hit the MAC with
579 * the global reset.
580 */
581 ew32(RCTL, 0);
582 ew32(TCTL, E1000_TCTL_PSP);
583 E1000_WRITE_FLUSH();
584
585 /* The tbi_compatibility_on Flag must be cleared when Rctl is cleared. */
586 hw->tbi_compatibility_on = false;
587
588 /* Delay to allow any outstanding PCI transactions to complete before
589 * resetting the device
590 */
591 msleep(10);
592
593 ctrl = er32(CTRL);
594
595 /* Must reset the PHY before resetting the MAC */
596 if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) {
597 ew32(CTRL, (ctrl | E1000_CTRL_PHY_RST));
598 msleep(5);
599 }
600
601 /* Must acquire the MDIO ownership before MAC reset.
602 * Ownership defaults to firmware after a reset. */
603 if (hw->mac_type == e1000_82573) {
604 timeout = 10;
605
606 extcnf_ctrl = er32(EXTCNF_CTRL);
607 extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
608
609 do {
610 ew32(EXTCNF_CTRL, extcnf_ctrl);
611 extcnf_ctrl = er32(EXTCNF_CTRL);
612
613 if (extcnf_ctrl & E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP)
614 break;
615 else
616 extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
617
618 msleep(2);
619 timeout--;
620 } while (timeout);
621 }
622
623 /* Workaround for ICH8 bit corruption issue in FIFO memory */
624 if (hw->mac_type == e1000_ich8lan) {
625 /* Set Tx and Rx buffer allocation to 8k apiece. */
626 ew32(PBA, E1000_PBA_8K);
627 /* Set Packet Buffer Size to 16k. */
628 ew32(PBS, E1000_PBS_16K);
629 }
630
631 /* Issue a global reset to the MAC. This will reset the chip's
632 * transmit, receive, DMA, and link units. It will not effect
633 * the current PCI configuration. The global reset bit is self-
634 * clearing, and should clear within a microsecond.
635 */
636 DEBUGOUT("Issuing a global reset to MAC\n");
637
638 switch (hw->mac_type) {
639 case e1000_82544:
640 case e1000_82540:
641 case e1000_82545:
642 case e1000_82546:
643 case e1000_82541:
644 case e1000_82541_rev_2:
645 /* These controllers can't ack the 64-bit write when issuing the
646 * reset, so use IO-mapping as a workaround to issue the reset */
647 E1000_WRITE_REG_IO(hw, CTRL, (ctrl | E1000_CTRL_RST));
648 break;
649 case e1000_82545_rev_3:
650 case e1000_82546_rev_3:
651 /* Reset is performed on a shadow of the control register */
652 ew32(CTRL_DUP, (ctrl | E1000_CTRL_RST));
653 break;
654 case e1000_ich8lan:
655 if (!hw->phy_reset_disable &&
656 e1000_check_phy_reset_block(hw) == E1000_SUCCESS) {
657 /* e1000_ich8lan PHY HW reset requires MAC CORE reset
658 * at the same time to make sure the interface between
659 * MAC and the external PHY is reset.
660 */
661 ctrl |= E1000_CTRL_PHY_RST;
662 }
663
664 e1000_get_software_flag(hw);
665 ew32(CTRL, (ctrl | E1000_CTRL_RST));
666 msleep(5);
667 break;
668 default:
669 ew32(CTRL, (ctrl | E1000_CTRL_RST));
670 break;
671 }
672
673 /* After MAC reset, force reload of EEPROM to restore power-on settings to
674 * device. Later controllers reload the EEPROM automatically, so just wait
675 * for reload to complete.
676 */
677 switch (hw->mac_type) {
678 case e1000_82542_rev2_0:
679 case e1000_82542_rev2_1:
680 case e1000_82543:
681 case e1000_82544:
682 /* Wait for reset to complete */
683 udelay(10);
684 ctrl_ext = er32(CTRL_EXT);
685 ctrl_ext |= E1000_CTRL_EXT_EE_RST;
686 ew32(CTRL_EXT, ctrl_ext);
687 E1000_WRITE_FLUSH();
688 /* Wait for EEPROM reload */
689 msleep(2);
690 break;
691 case e1000_82541:
692 case e1000_82541_rev_2:
693 case e1000_82547:
694 case e1000_82547_rev_2:
695 /* Wait for EEPROM reload */
696 msleep(20);
697 break;
698 case e1000_82573:
699 if (!e1000_is_onboard_nvm_eeprom(hw)) {
700 udelay(10);
701 ctrl_ext = er32(CTRL_EXT);
702 ctrl_ext |= E1000_CTRL_EXT_EE_RST;
703 ew32(CTRL_EXT, ctrl_ext);
704 E1000_WRITE_FLUSH();
705 }
706 /* fall through */
707 default:
708 /* Auto read done will delay 5ms or poll based on mac type */
709 ret_val = e1000_get_auto_rd_done(hw);
710 if (ret_val)
711 return ret_val;
712 break;
713 }
714
715 /* Disable HW ARPs on ASF enabled adapters */
716 if (hw->mac_type >= e1000_82540 && hw->mac_type <= e1000_82547_rev_2) {
717 manc = er32(MANC);
718 manc &= ~(E1000_MANC_ARP_EN);
719 ew32(MANC, manc);
720 }
721
722 if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) {
723 e1000_phy_init_script(hw);
724
725 /* Configure activity LED after PHY reset */
726 led_ctrl = er32(LEDCTL);
727 led_ctrl &= IGP_ACTIVITY_LED_MASK;
728 led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE);
729 ew32(LEDCTL, led_ctrl);
730 }
731
732 /* Clear interrupt mask to stop board from generating interrupts */
733 DEBUGOUT("Masking off all interrupts\n");
734 ew32(IMC, 0xffffffff);
735
736 /* Clear any pending interrupt events. */
737 icr = er32(ICR);
738
739 /* If MWI was previously enabled, reenable it. */
740 if (hw->mac_type == e1000_82542_rev2_0) {
741 if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE)
742 e1000_pci_set_mwi(hw);
743 }
744
745 if (hw->mac_type == e1000_ich8lan) {
746 u32 kab = er32(KABGTXD);
747 kab |= E1000_KABGTXD_BGSQLBIAS;
748 ew32(KABGTXD, kab);
749 }
750
751 return E1000_SUCCESS;
752}
753 403
754/****************************************************************************** 404 DEBUGFUNC("e1000_reset_hw");
755 * 405
756 * Initialize a number of hardware-dependent bits 406 /* For 82542 (rev 2.0), disable MWI before issuing a device reset */
757 * 407 if (hw->mac_type == e1000_82542_rev2_0) {
758 * hw: Struct containing variables accessed by shared code 408 DEBUGOUT("Disabling MWI on 82542 rev 2.0\n");
759 * 409 e1000_pci_clear_mwi(hw);
760 * This function contains hardware limitation workarounds for PCI-E adapters 410 }
761 * 411
762 *****************************************************************************/ 412 /* Clear interrupt mask to stop board from generating interrupts */
763static void e1000_initialize_hardware_bits(struct e1000_hw *hw) 413 DEBUGOUT("Masking off all interrupts\n");
764{ 414 ew32(IMC, 0xffffffff);
765 if ((hw->mac_type >= e1000_82571) && (!hw->initialize_hw_bits_disable)) { 415
766 /* Settings common to all PCI-express silicon */ 416 /* Disable the Transmit and Receive units. Then delay to allow
767 u32 reg_ctrl, reg_ctrl_ext; 417 * any pending transactions to complete before we hit the MAC with
768 u32 reg_tarc0, reg_tarc1; 418 * the global reset.
769 u32 reg_tctl; 419 */
770 u32 reg_txdctl, reg_txdctl1; 420 ew32(RCTL, 0);
771 421 ew32(TCTL, E1000_TCTL_PSP);
772 /* link autonegotiation/sync workarounds */ 422 E1000_WRITE_FLUSH();
773 reg_tarc0 = er32(TARC0); 423
774 reg_tarc0 &= ~((1 << 30)|(1 << 29)|(1 << 28)|(1 << 27)); 424 /* The tbi_compatibility_on Flag must be cleared when Rctl is cleared. */
775 425 hw->tbi_compatibility_on = false;
776 /* Enable not-done TX descriptor counting */ 426
777 reg_txdctl = er32(TXDCTL); 427 /* Delay to allow any outstanding PCI transactions to complete before
778 reg_txdctl |= E1000_TXDCTL_COUNT_DESC; 428 * resetting the device
779 ew32(TXDCTL, reg_txdctl); 429 */
780 reg_txdctl1 = er32(TXDCTL1); 430 msleep(10);
781 reg_txdctl1 |= E1000_TXDCTL_COUNT_DESC; 431
782 ew32(TXDCTL1, reg_txdctl1); 432 ctrl = er32(CTRL);
783 433
784 switch (hw->mac_type) { 434 /* Must reset the PHY before resetting the MAC */
785 case e1000_82571: 435 if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) {
786 case e1000_82572: 436 ew32(CTRL, (ctrl | E1000_CTRL_PHY_RST));
787 /* Clear PHY TX compatible mode bits */ 437 msleep(5);
788 reg_tarc1 = er32(TARC1); 438 }
789 reg_tarc1 &= ~((1 << 30)|(1 << 29)); 439
790 440 /* Issue a global reset to the MAC. This will reset the chip's
791 /* link autonegotiation/sync workarounds */ 441 * transmit, receive, DMA, and link units. It will not effect
792 reg_tarc0 |= ((1 << 26)|(1 << 25)|(1 << 24)|(1 << 23)); 442 * the current PCI configuration. The global reset bit is self-
793 443 * clearing, and should clear within a microsecond.
794 /* TX ring control fixes */ 444 */
795 reg_tarc1 |= ((1 << 26)|(1 << 25)|(1 << 24)); 445 DEBUGOUT("Issuing a global reset to MAC\n");
796 446
797 /* Multiple read bit is reversed polarity */ 447 switch (hw->mac_type) {
798 reg_tctl = er32(TCTL); 448 case e1000_82544:
799 if (reg_tctl & E1000_TCTL_MULR) 449 case e1000_82540:
800 reg_tarc1 &= ~(1 << 28); 450 case e1000_82545:
801 else 451 case e1000_82546:
802 reg_tarc1 |= (1 << 28); 452 case e1000_82541:
803 453 case e1000_82541_rev_2:
804 ew32(TARC1, reg_tarc1); 454 /* These controllers can't ack the 64-bit write when issuing the
805 break; 455 * reset, so use IO-mapping as a workaround to issue the reset */
806 case e1000_82573: 456 E1000_WRITE_REG_IO(hw, CTRL, (ctrl | E1000_CTRL_RST));
807 reg_ctrl_ext = er32(CTRL_EXT); 457 break;
808 reg_ctrl_ext &= ~(1 << 23); 458 case e1000_82545_rev_3:
809 reg_ctrl_ext |= (1 << 22); 459 case e1000_82546_rev_3:
810 460 /* Reset is performed on a shadow of the control register */
811 /* TX byte count fix */ 461 ew32(CTRL_DUP, (ctrl | E1000_CTRL_RST));
812 reg_ctrl = er32(CTRL); 462 break;
813 reg_ctrl &= ~(1 << 29); 463 default:
814 464 ew32(CTRL, (ctrl | E1000_CTRL_RST));
815 ew32(CTRL_EXT, reg_ctrl_ext); 465 break;
816 ew32(CTRL, reg_ctrl); 466 }
817 break; 467
818 case e1000_80003es2lan: 468 /* After MAC reset, force reload of EEPROM to restore power-on settings to
819 /* improve small packet performace for fiber/serdes */ 469 * device. Later controllers reload the EEPROM automatically, so just wait
820 if ((hw->media_type == e1000_media_type_fiber) || 470 * for reload to complete.
821 (hw->media_type == e1000_media_type_internal_serdes)) { 471 */
822 reg_tarc0 &= ~(1 << 20); 472 switch (hw->mac_type) {
823 } 473 case e1000_82542_rev2_0:
824 474 case e1000_82542_rev2_1:
825 /* Multiple read bit is reversed polarity */ 475 case e1000_82543:
826 reg_tctl = er32(TCTL); 476 case e1000_82544:
827 reg_tarc1 = er32(TARC1); 477 /* Wait for reset to complete */
828 if (reg_tctl & E1000_TCTL_MULR) 478 udelay(10);
829 reg_tarc1 &= ~(1 << 28); 479 ctrl_ext = er32(CTRL_EXT);
830 else 480 ctrl_ext |= E1000_CTRL_EXT_EE_RST;
831 reg_tarc1 |= (1 << 28); 481 ew32(CTRL_EXT, ctrl_ext);
832 482 E1000_WRITE_FLUSH();
833 ew32(TARC1, reg_tarc1); 483 /* Wait for EEPROM reload */
834 break; 484 msleep(2);
835 case e1000_ich8lan: 485 break;
836 /* Reduce concurrent DMA requests to 3 from 4 */ 486 case e1000_82541:
837 if ((hw->revision_id < 3) || 487 case e1000_82541_rev_2:
838 ((hw->device_id != E1000_DEV_ID_ICH8_IGP_M_AMT) && 488 case e1000_82547:
839 (hw->device_id != E1000_DEV_ID_ICH8_IGP_M))) 489 case e1000_82547_rev_2:
840 reg_tarc0 |= ((1 << 29)|(1 << 28)); 490 /* Wait for EEPROM reload */
841 491 msleep(20);
842 reg_ctrl_ext = er32(CTRL_EXT); 492 break;
843 reg_ctrl_ext |= (1 << 22); 493 default:
844 ew32(CTRL_EXT, reg_ctrl_ext); 494 /* Auto read done will delay 5ms or poll based on mac type */
845 495 ret_val = e1000_get_auto_rd_done(hw);
846 /* workaround TX hang with TSO=on */ 496 if (ret_val)
847 reg_tarc0 |= ((1 << 27)|(1 << 26)|(1 << 24)|(1 << 23)); 497 return ret_val;
848 498 break;
849 /* Multiple read bit is reversed polarity */ 499 }
850 reg_tctl = er32(TCTL); 500
851 reg_tarc1 = er32(TARC1); 501 /* Disable HW ARPs on ASF enabled adapters */
852 if (reg_tctl & E1000_TCTL_MULR) 502 if (hw->mac_type >= e1000_82540) {
853 reg_tarc1 &= ~(1 << 28); 503 manc = er32(MANC);
854 else 504 manc &= ~(E1000_MANC_ARP_EN);
855 reg_tarc1 |= (1 << 28); 505 ew32(MANC, manc);
856 506 }
857 /* workaround TX hang with TSO=on */ 507
858 reg_tarc1 |= ((1 << 30)|(1 << 26)|(1 << 24)); 508 if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) {
859 509 e1000_phy_init_script(hw);
860 ew32(TARC1, reg_tarc1); 510
861 break; 511 /* Configure activity LED after PHY reset */
862 default: 512 led_ctrl = er32(LEDCTL);
863 break; 513 led_ctrl &= IGP_ACTIVITY_LED_MASK;
864 } 514 led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE);
865 515 ew32(LEDCTL, led_ctrl);
866 ew32(TARC0, reg_tarc0); 516 }
867 } 517
518 /* Clear interrupt mask to stop board from generating interrupts */
519 DEBUGOUT("Masking off all interrupts\n");
520 ew32(IMC, 0xffffffff);
521
522 /* Clear any pending interrupt events. */
523 icr = er32(ICR);
524
525 /* If MWI was previously enabled, reenable it. */
526 if (hw->mac_type == e1000_82542_rev2_0) {
527 if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE)
528 e1000_pci_set_mwi(hw);
529 }
530
531 return E1000_SUCCESS;
868} 532}
869 533
870/****************************************************************************** 534/**
871 * Performs basic configuration of the adapter. 535 * e1000_init_hw: Performs basic configuration of the adapter.
872 * 536 * @hw: Struct containing variables accessed by shared code
873 * hw - Struct containing variables accessed by shared code
874 * 537 *
875 * Assumes that the controller has previously been reset and is in a 538 * Assumes that the controller has previously been reset and is in a
876 * post-reset uninitialized state. Initializes the receive address registers, 539 * post-reset uninitialized state. Initializes the receive address registers,
877 * multicast table, and VLAN filter table. Calls routines to setup link 540 * multicast table, and VLAN filter table. Calls routines to setup link
878 * configuration and flow control settings. Clears all on-chip counters. Leaves 541 * configuration and flow control settings. Clears all on-chip counters. Leaves
879 * the transmit and receive units disabled and uninitialized. 542 * the transmit and receive units disabled and uninitialized.
880 *****************************************************************************/ 543 */
881s32 e1000_init_hw(struct e1000_hw *hw) 544s32 e1000_init_hw(struct e1000_hw *hw)
882{ 545{
883 u32 ctrl; 546 u32 ctrl;
884 u32 i; 547 u32 i;
885 s32 ret_val; 548 s32 ret_val;
886 u32 mta_size; 549 u32 mta_size;
887 u32 reg_data; 550 u32 ctrl_ext;
888 u32 ctrl_ext; 551
889 552 DEBUGFUNC("e1000_init_hw");
890 DEBUGFUNC("e1000_init_hw"); 553
891 554 /* Initialize Identification LED */
892 /* force full DMA clock frequency for 10/100 on ICH8 A0-B0 */ 555 ret_val = e1000_id_led_init(hw);
893 if ((hw->mac_type == e1000_ich8lan) && 556 if (ret_val) {
894 ((hw->revision_id < 3) || 557 DEBUGOUT("Error Initializing Identification LED\n");
895 ((hw->device_id != E1000_DEV_ID_ICH8_IGP_M_AMT) && 558 return ret_val;
896 (hw->device_id != E1000_DEV_ID_ICH8_IGP_M)))) { 559 }
897 reg_data = er32(STATUS); 560
898 reg_data &= ~0x80000000; 561 /* Set the media type and TBI compatibility */
899 ew32(STATUS, reg_data); 562 e1000_set_media_type(hw);
900 } 563
901 564 /* Disabling VLAN filtering. */
902 /* Initialize Identification LED */ 565 DEBUGOUT("Initializing the IEEE VLAN\n");
903 ret_val = e1000_id_led_init(hw); 566 if (hw->mac_type < e1000_82545_rev_3)
904 if (ret_val) { 567 ew32(VET, 0);
905 DEBUGOUT("Error Initializing Identification LED\n"); 568 e1000_clear_vfta(hw);
906 return ret_val; 569
907 } 570 /* For 82542 (rev 2.0), disable MWI and put the receiver into reset */
908 571 if (hw->mac_type == e1000_82542_rev2_0) {
909 /* Set the media type and TBI compatibility */ 572 DEBUGOUT("Disabling MWI on 82542 rev 2.0\n");
910 e1000_set_media_type(hw); 573 e1000_pci_clear_mwi(hw);
911 574 ew32(RCTL, E1000_RCTL_RST);
912 /* Must be called after e1000_set_media_type because media_type is used */ 575 E1000_WRITE_FLUSH();
913 e1000_initialize_hardware_bits(hw); 576 msleep(5);
914 577 }
915 /* Disabling VLAN filtering. */ 578
916 DEBUGOUT("Initializing the IEEE VLAN\n"); 579 /* Setup the receive address. This involves initializing all of the Receive
917 /* VET hardcoded to standard value and VFTA removed in ICH8 LAN */ 580 * Address Registers (RARs 0 - 15).
918 if (hw->mac_type != e1000_ich8lan) { 581 */
919 if (hw->mac_type < e1000_82545_rev_3) 582 e1000_init_rx_addrs(hw);
920 ew32(VET, 0); 583
921 e1000_clear_vfta(hw); 584 /* For 82542 (rev 2.0), take the receiver out of reset and enable MWI */
922 } 585 if (hw->mac_type == e1000_82542_rev2_0) {
923 586 ew32(RCTL, 0);
924 /* For 82542 (rev 2.0), disable MWI and put the receiver into reset */ 587 E1000_WRITE_FLUSH();
925 if (hw->mac_type == e1000_82542_rev2_0) { 588 msleep(1);
926 DEBUGOUT("Disabling MWI on 82542 rev 2.0\n"); 589 if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE)
927 e1000_pci_clear_mwi(hw); 590 e1000_pci_set_mwi(hw);
928 ew32(RCTL, E1000_RCTL_RST); 591 }
929 E1000_WRITE_FLUSH(); 592
930 msleep(5); 593 /* Zero out the Multicast HASH table */
931 } 594 DEBUGOUT("Zeroing the MTA\n");
932 595 mta_size = E1000_MC_TBL_SIZE;
933 /* Setup the receive address. This involves initializing all of the Receive 596 for (i = 0; i < mta_size; i++) {
934 * Address Registers (RARs 0 - 15). 597 E1000_WRITE_REG_ARRAY(hw, MTA, i, 0);
935 */ 598 /* use write flush to prevent Memory Write Block (MWB) from
936 e1000_init_rx_addrs(hw); 599 * occurring when accessing our register space */
937 600 E1000_WRITE_FLUSH();
938 /* For 82542 (rev 2.0), take the receiver out of reset and enable MWI */ 601 }
939 if (hw->mac_type == e1000_82542_rev2_0) { 602
940 ew32(RCTL, 0); 603 /* Set the PCI priority bit correctly in the CTRL register. This
941 E1000_WRITE_FLUSH(); 604 * determines if the adapter gives priority to receives, or if it
942 msleep(1); 605 * gives equal priority to transmits and receives. Valid only on
943 if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE) 606 * 82542 and 82543 silicon.
944 e1000_pci_set_mwi(hw); 607 */
945 } 608 if (hw->dma_fairness && hw->mac_type <= e1000_82543) {
946 609 ctrl = er32(CTRL);
947 /* Zero out the Multicast HASH table */ 610 ew32(CTRL, ctrl | E1000_CTRL_PRIOR);
948 DEBUGOUT("Zeroing the MTA\n"); 611 }
949 mta_size = E1000_MC_TBL_SIZE; 612
950 if (hw->mac_type == e1000_ich8lan) 613 switch (hw->mac_type) {
951 mta_size = E1000_MC_TBL_SIZE_ICH8LAN; 614 case e1000_82545_rev_3:
952 for (i = 0; i < mta_size; i++) { 615 case e1000_82546_rev_3:
953 E1000_WRITE_REG_ARRAY(hw, MTA, i, 0); 616 break;
954 /* use write flush to prevent Memory Write Block (MWB) from 617 default:
955 * occuring when accessing our register space */ 618 /* Workaround for PCI-X problem when BIOS sets MMRBC incorrectly. */
956 E1000_WRITE_FLUSH(); 619 if (hw->bus_type == e1000_bus_type_pcix
957 } 620 && e1000_pcix_get_mmrbc(hw) > 2048)
958 621 e1000_pcix_set_mmrbc(hw, 2048);
959 /* Set the PCI priority bit correctly in the CTRL register. This 622 break;
960 * determines if the adapter gives priority to receives, or if it 623 }
961 * gives equal priority to transmits and receives. Valid only on 624
962 * 82542 and 82543 silicon. 625 /* Call a subroutine to configure the link and setup flow control. */
963 */ 626 ret_val = e1000_setup_link(hw);
964 if (hw->dma_fairness && hw->mac_type <= e1000_82543) { 627
965 ctrl = er32(CTRL); 628 /* Set the transmit descriptor write-back policy */
966 ew32(CTRL, ctrl | E1000_CTRL_PRIOR); 629 if (hw->mac_type > e1000_82544) {
967 } 630 ctrl = er32(TXDCTL);
968 631 ctrl =
969 switch (hw->mac_type) { 632 (ctrl & ~E1000_TXDCTL_WTHRESH) |
970 case e1000_82545_rev_3: 633 E1000_TXDCTL_FULL_TX_DESC_WB;
971 case e1000_82546_rev_3: 634 ew32(TXDCTL, ctrl);
972 break; 635 }
973 default: 636
974 /* Workaround for PCI-X problem when BIOS sets MMRBC incorrectly. */ 637 /* Clear all of the statistics registers (clear on read). It is
975 if (hw->bus_type == e1000_bus_type_pcix && e1000_pcix_get_mmrbc(hw) > 2048) 638 * important that we do this after we have tried to establish link
976 e1000_pcix_set_mmrbc(hw, 2048); 639 * because the symbol error count will increment wildly if there
977 break; 640 * is no link.
978 } 641 */
979 642 e1000_clear_hw_cntrs(hw);
980 /* More time needed for PHY to initialize */ 643
981 if (hw->mac_type == e1000_ich8lan) 644 if (hw->device_id == E1000_DEV_ID_82546GB_QUAD_COPPER ||
982 msleep(15); 645 hw->device_id == E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3) {
983 646 ctrl_ext = er32(CTRL_EXT);
984 /* Call a subroutine to configure the link and setup flow control. */ 647 /* Relaxed ordering must be disabled to avoid a parity
985 ret_val = e1000_setup_link(hw); 648 * error crash in a PCI slot. */
986 649 ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
987 /* Set the transmit descriptor write-back policy */ 650 ew32(CTRL_EXT, ctrl_ext);
988 if (hw->mac_type > e1000_82544) { 651 }
989 ctrl = er32(TXDCTL); 652
990 ctrl = (ctrl & ~E1000_TXDCTL_WTHRESH) | E1000_TXDCTL_FULL_TX_DESC_WB; 653 return ret_val;
991 ew32(TXDCTL, ctrl);
992 }
993
994 if (hw->mac_type == e1000_82573) {
995 e1000_enable_tx_pkt_filtering(hw);
996 }
997
998 switch (hw->mac_type) {
999 default:
1000 break;
1001 case e1000_80003es2lan:
1002 /* Enable retransmit on late collisions */
1003 reg_data = er32(TCTL);
1004 reg_data |= E1000_TCTL_RTLC;
1005 ew32(TCTL, reg_data);
1006
1007 /* Configure Gigabit Carry Extend Padding */
1008 reg_data = er32(TCTL_EXT);
1009 reg_data &= ~E1000_TCTL_EXT_GCEX_MASK;
1010 reg_data |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
1011 ew32(TCTL_EXT, reg_data);
1012
1013 /* Configure Transmit Inter-Packet Gap */
1014 reg_data = er32(TIPG);
1015 reg_data &= ~E1000_TIPG_IPGT_MASK;
1016 reg_data |= DEFAULT_80003ES2LAN_TIPG_IPGT_1000;
1017 ew32(TIPG, reg_data);
1018
1019 reg_data = E1000_READ_REG_ARRAY(hw, FFLT, 0x0001);
1020 reg_data &= ~0x00100000;
1021 E1000_WRITE_REG_ARRAY(hw, FFLT, 0x0001, reg_data);
1022 /* Fall through */
1023 case e1000_82571:
1024 case e1000_82572:
1025 case e1000_ich8lan:
1026 ctrl = er32(TXDCTL1);
1027 ctrl = (ctrl & ~E1000_TXDCTL_WTHRESH) | E1000_TXDCTL_FULL_TX_DESC_WB;
1028 ew32(TXDCTL1, ctrl);
1029 break;
1030 }
1031
1032
1033 if (hw->mac_type == e1000_82573) {
1034 u32 gcr = er32(GCR);
1035 gcr |= E1000_GCR_L1_ACT_WITHOUT_L0S_RX;
1036 ew32(GCR, gcr);
1037 }
1038
1039 /* Clear all of the statistics registers (clear on read). It is
1040 * important that we do this after we have tried to establish link
1041 * because the symbol error count will increment wildly if there
1042 * is no link.
1043 */
1044 e1000_clear_hw_cntrs(hw);
1045
1046 /* ICH8 No-snoop bits are opposite polarity.
1047 * Set to snoop by default after reset. */
1048 if (hw->mac_type == e1000_ich8lan)
1049 e1000_set_pci_ex_no_snoop(hw, PCI_EX_82566_SNOOP_ALL);
1050
1051 if (hw->device_id == E1000_DEV_ID_82546GB_QUAD_COPPER ||
1052 hw->device_id == E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3) {
1053 ctrl_ext = er32(CTRL_EXT);
1054 /* Relaxed ordering must be disabled to avoid a parity
1055 * error crash in a PCI slot. */
1056 ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
1057 ew32(CTRL_EXT, ctrl_ext);
1058 }
1059
1060 return ret_val;
1061} 654}
1062 655
1063/****************************************************************************** 656/**
1064 * Adjust SERDES output amplitude based on EEPROM setting. 657 * e1000_adjust_serdes_amplitude - Adjust SERDES output amplitude based on EEPROM setting.
1065 * 658 * @hw: Struct containing variables accessed by shared code.
1066 * hw - Struct containing variables accessed by shared code. 659 */
1067 *****************************************************************************/
1068static s32 e1000_adjust_serdes_amplitude(struct e1000_hw *hw) 660static s32 e1000_adjust_serdes_amplitude(struct e1000_hw *hw)
1069{ 661{
1070 u16 eeprom_data; 662 u16 eeprom_data;
1071 s32 ret_val; 663 s32 ret_val;
1072 664
1073 DEBUGFUNC("e1000_adjust_serdes_amplitude"); 665 DEBUGFUNC("e1000_adjust_serdes_amplitude");
1074 666
1075 if (hw->media_type != e1000_media_type_internal_serdes) 667 if (hw->media_type != e1000_media_type_internal_serdes)
1076 return E1000_SUCCESS; 668 return E1000_SUCCESS;
1077 669
1078 switch (hw->mac_type) { 670 switch (hw->mac_type) {
1079 case e1000_82545_rev_3: 671 case e1000_82545_rev_3:
1080 case e1000_82546_rev_3: 672 case e1000_82546_rev_3:
1081 break; 673 break;
1082 default: 674 default:
1083 return E1000_SUCCESS; 675 return E1000_SUCCESS;
1084 } 676 }
1085 677
1086 ret_val = e1000_read_eeprom(hw, EEPROM_SERDES_AMPLITUDE, 1, &eeprom_data); 678 ret_val = e1000_read_eeprom(hw, EEPROM_SERDES_AMPLITUDE, 1,
1087 if (ret_val) { 679 &eeprom_data);
1088 return ret_val; 680 if (ret_val) {
1089 } 681 return ret_val;
1090 682 }
1091 if (eeprom_data != EEPROM_RESERVED_WORD) { 683
1092 /* Adjust SERDES output amplitude only. */ 684 if (eeprom_data != EEPROM_RESERVED_WORD) {
1093 eeprom_data &= EEPROM_SERDES_AMPLITUDE_MASK; 685 /* Adjust SERDES output amplitude only. */
1094 ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_EXT_CTRL, eeprom_data); 686 eeprom_data &= EEPROM_SERDES_AMPLITUDE_MASK;
1095 if (ret_val) 687 ret_val =
1096 return ret_val; 688 e1000_write_phy_reg(hw, M88E1000_PHY_EXT_CTRL, eeprom_data);
1097 } 689 if (ret_val)
1098 690 return ret_val;
1099 return E1000_SUCCESS; 691 }
692
693 return E1000_SUCCESS;
1100} 694}
1101 695
1102/****************************************************************************** 696/**
1103 * Configures flow control and link settings. 697 * e1000_setup_link - Configures flow control and link settings.
1104 * 698 * @hw: Struct containing variables accessed by shared code
1105 * hw - Struct containing variables accessed by shared code
1106 * 699 *
1107 * Determines which flow control settings to use. Calls the apropriate media- 700 * Determines which flow control settings to use. Calls the appropriate media-
1108 * specific link configuration function. Configures the flow control settings. 701 * specific link configuration function. Configures the flow control settings.
1109 * Assuming the adapter has a valid link partner, a valid link should be 702 * Assuming the adapter has a valid link partner, a valid link should be
1110 * established. Assumes the hardware has previously been reset and the 703 * established. Assumes the hardware has previously been reset and the
1111 * transmitter and receiver are not enabled. 704 * transmitter and receiver are not enabled.
1112 *****************************************************************************/ 705 */
1113s32 e1000_setup_link(struct e1000_hw *hw) 706s32 e1000_setup_link(struct e1000_hw *hw)
1114{ 707{
1115 u32 ctrl_ext; 708 u32 ctrl_ext;
1116 s32 ret_val; 709 s32 ret_val;
1117 u16 eeprom_data; 710 u16 eeprom_data;
1118 711
1119 DEBUGFUNC("e1000_setup_link"); 712 DEBUGFUNC("e1000_setup_link");
1120 713
1121 /* In the case of the phy reset being blocked, we already have a link. 714 /* Read and store word 0x0F of the EEPROM. This word contains bits
1122 * We do not have to set it up again. */ 715 * that determine the hardware's default PAUSE (flow control) mode,
1123 if (e1000_check_phy_reset_block(hw)) 716 * a bit that determines whether the HW defaults to enabling or
1124 return E1000_SUCCESS; 717 * disabling auto-negotiation, and the direction of the
1125 718 * SW defined pins. If there is no SW over-ride of the flow
1126 /* Read and store word 0x0F of the EEPROM. This word contains bits 719 * control setting, then the variable hw->fc will
1127 * that determine the hardware's default PAUSE (flow control) mode, 720 * be initialized based on a value in the EEPROM.
1128 * a bit that determines whether the HW defaults to enabling or 721 */
1129 * disabling auto-negotiation, and the direction of the 722 if (hw->fc == E1000_FC_DEFAULT) {
1130 * SW defined pins. If there is no SW over-ride of the flow 723 ret_val = e1000_read_eeprom(hw, EEPROM_INIT_CONTROL2_REG,
1131 * control setting, then the variable hw->fc will 724 1, &eeprom_data);
1132 * be initialized based on a value in the EEPROM. 725 if (ret_val) {
1133 */ 726 DEBUGOUT("EEPROM Read Error\n");
1134 if (hw->fc == E1000_FC_DEFAULT) { 727 return -E1000_ERR_EEPROM;
1135 switch (hw->mac_type) { 728 }
1136 case e1000_ich8lan: 729 if ((eeprom_data & EEPROM_WORD0F_PAUSE_MASK) == 0)
1137 case e1000_82573: 730 hw->fc = E1000_FC_NONE;
1138 hw->fc = E1000_FC_FULL; 731 else if ((eeprom_data & EEPROM_WORD0F_PAUSE_MASK) ==
1139 break; 732 EEPROM_WORD0F_ASM_DIR)
1140 default: 733 hw->fc = E1000_FC_TX_PAUSE;
1141 ret_val = e1000_read_eeprom(hw, EEPROM_INIT_CONTROL2_REG, 734 else
1142 1, &eeprom_data); 735 hw->fc = E1000_FC_FULL;
1143 if (ret_val) { 736 }
1144 DEBUGOUT("EEPROM Read Error\n"); 737
1145 return -E1000_ERR_EEPROM; 738 /* We want to save off the original Flow Control configuration just
1146 } 739 * in case we get disconnected and then reconnected into a different
1147 if ((eeprom_data & EEPROM_WORD0F_PAUSE_MASK) == 0) 740 * hub or switch with different Flow Control capabilities.
1148 hw->fc = E1000_FC_NONE; 741 */
1149 else if ((eeprom_data & EEPROM_WORD0F_PAUSE_MASK) == 742 if (hw->mac_type == e1000_82542_rev2_0)
1150 EEPROM_WORD0F_ASM_DIR) 743 hw->fc &= (~E1000_FC_TX_PAUSE);
1151 hw->fc = E1000_FC_TX_PAUSE; 744
1152 else 745 if ((hw->mac_type < e1000_82543) && (hw->report_tx_early == 1))
1153 hw->fc = E1000_FC_FULL; 746 hw->fc &= (~E1000_FC_RX_PAUSE);
1154 break; 747
1155 } 748 hw->original_fc = hw->fc;
1156 } 749
1157 750 DEBUGOUT1("After fix-ups FlowControl is now = %x\n", hw->fc);
1158 /* We want to save off the original Flow Control configuration just 751
1159 * in case we get disconnected and then reconnected into a different 752 /* Take the 4 bits from EEPROM word 0x0F that determine the initial
1160 * hub or switch with different Flow Control capabilities. 753 * polarity value for the SW controlled pins, and setup the
1161 */ 754 * Extended Device Control reg with that info.
1162 if (hw->mac_type == e1000_82542_rev2_0) 755 * This is needed because one of the SW controlled pins is used for
1163 hw->fc &= (~E1000_FC_TX_PAUSE); 756 * signal detection. So this should be done before e1000_setup_pcs_link()
1164 757 * or e1000_phy_setup() is called.
1165 if ((hw->mac_type < e1000_82543) && (hw->report_tx_early == 1)) 758 */
1166 hw->fc &= (~E1000_FC_RX_PAUSE); 759 if (hw->mac_type == e1000_82543) {
1167 760 ret_val = e1000_read_eeprom(hw, EEPROM_INIT_CONTROL2_REG,
1168 hw->original_fc = hw->fc; 761 1, &eeprom_data);
1169 762 if (ret_val) {
1170 DEBUGOUT1("After fix-ups FlowControl is now = %x\n", hw->fc); 763 DEBUGOUT("EEPROM Read Error\n");
1171 764 return -E1000_ERR_EEPROM;
1172 /* Take the 4 bits from EEPROM word 0x0F that determine the initial 765 }
1173 * polarity value for the SW controlled pins, and setup the 766 ctrl_ext = ((eeprom_data & EEPROM_WORD0F_SWPDIO_EXT) <<
1174 * Extended Device Control reg with that info. 767 SWDPIO__EXT_SHIFT);
1175 * This is needed because one of the SW controlled pins is used for 768 ew32(CTRL_EXT, ctrl_ext);
1176 * signal detection. So this should be done before e1000_setup_pcs_link() 769 }
1177 * or e1000_phy_setup() is called. 770
1178 */ 771 /* Call the necessary subroutine to configure the link. */
1179 if (hw->mac_type == e1000_82543) { 772 ret_val = (hw->media_type == e1000_media_type_copper) ?
1180 ret_val = e1000_read_eeprom(hw, EEPROM_INIT_CONTROL2_REG, 773 e1000_setup_copper_link(hw) : e1000_setup_fiber_serdes_link(hw);
1181 1, &eeprom_data); 774
1182 if (ret_val) { 775 /* Initialize the flow control address, type, and PAUSE timer
1183 DEBUGOUT("EEPROM Read Error\n"); 776 * registers to their default values. This is done even if flow
1184 return -E1000_ERR_EEPROM; 777 * control is disabled, because it does not hurt anything to
1185 } 778 * initialize these registers.
1186 ctrl_ext = ((eeprom_data & EEPROM_WORD0F_SWPDIO_EXT) << 779 */
1187 SWDPIO__EXT_SHIFT); 780 DEBUGOUT
1188 ew32(CTRL_EXT, ctrl_ext); 781 ("Initializing the Flow Control address, type and timer regs\n");
1189 } 782
1190 783 ew32(FCT, FLOW_CONTROL_TYPE);
1191 /* Call the necessary subroutine to configure the link. */ 784 ew32(FCAH, FLOW_CONTROL_ADDRESS_HIGH);
1192 ret_val = (hw->media_type == e1000_media_type_copper) ? 785 ew32(FCAL, FLOW_CONTROL_ADDRESS_LOW);
1193 e1000_setup_copper_link(hw) : 786
1194 e1000_setup_fiber_serdes_link(hw); 787 ew32(FCTTV, hw->fc_pause_time);
1195 788
1196 /* Initialize the flow control address, type, and PAUSE timer 789 /* Set the flow control receive threshold registers. Normally,
1197 * registers to their default values. This is done even if flow 790 * these registers will be set to a default threshold that may be
1198 * control is disabled, because it does not hurt anything to 791 * adjusted later by the driver's runtime code. However, if the
1199 * initialize these registers. 792 * ability to transmit pause frames in not enabled, then these
1200 */ 793 * registers will be set to 0.
1201 DEBUGOUT("Initializing the Flow Control address, type and timer regs\n"); 794 */
1202 795 if (!(hw->fc & E1000_FC_TX_PAUSE)) {
1203 /* FCAL/H and FCT are hardcoded to standard values in e1000_ich8lan. */ 796 ew32(FCRTL, 0);
1204 if (hw->mac_type != e1000_ich8lan) { 797 ew32(FCRTH, 0);
1205 ew32(FCT, FLOW_CONTROL_TYPE); 798 } else {
1206 ew32(FCAH, FLOW_CONTROL_ADDRESS_HIGH); 799 /* We need to set up the Receive Threshold high and low water marks
1207 ew32(FCAL, FLOW_CONTROL_ADDRESS_LOW); 800 * as well as (optionally) enabling the transmission of XON frames.
1208 } 801 */
1209 802 if (hw->fc_send_xon) {
1210 ew32(FCTTV, hw->fc_pause_time); 803 ew32(FCRTL, (hw->fc_low_water | E1000_FCRTL_XONE));
1211 804 ew32(FCRTH, hw->fc_high_water);
1212 /* Set the flow control receive threshold registers. Normally, 805 } else {
1213 * these registers will be set to a default threshold that may be 806 ew32(FCRTL, hw->fc_low_water);
1214 * adjusted later by the driver's runtime code. However, if the 807 ew32(FCRTH, hw->fc_high_water);
1215 * ability to transmit pause frames in not enabled, then these 808 }
1216 * registers will be set to 0. 809 }
1217 */ 810 return ret_val;
1218 if (!(hw->fc & E1000_FC_TX_PAUSE)) {
1219 ew32(FCRTL, 0);
1220 ew32(FCRTH, 0);
1221 } else {
1222 /* We need to set up the Receive Threshold high and low water marks
1223 * as well as (optionally) enabling the transmission of XON frames.
1224 */
1225 if (hw->fc_send_xon) {
1226 ew32(FCRTL, (hw->fc_low_water | E1000_FCRTL_XONE));
1227 ew32(FCRTH, hw->fc_high_water);
1228 } else {
1229 ew32(FCRTL, hw->fc_low_water);
1230 ew32(FCRTH, hw->fc_high_water);
1231 }
1232 }
1233 return ret_val;
1234} 811}
1235 812
1236/****************************************************************************** 813/**
1237 * Sets up link for a fiber based or serdes based adapter 814 * e1000_setup_fiber_serdes_link - prepare fiber or serdes link
1238 * 815 * @hw: Struct containing variables accessed by shared code
1239 * hw - Struct containing variables accessed by shared code
1240 * 816 *
1241 * Manipulates Physical Coding Sublayer functions in order to configure 817 * Manipulates Physical Coding Sublayer functions in order to configure
1242 * link. Assumes the hardware has been previously reset and the transmitter 818 * link. Assumes the hardware has been previously reset and the transmitter
1243 * and receiver are not enabled. 819 * and receiver are not enabled.
1244 *****************************************************************************/ 820 */
1245static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw) 821static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw)
1246{ 822{
1247 u32 ctrl; 823 u32 ctrl;
1248 u32 status; 824 u32 status;
1249 u32 txcw = 0; 825 u32 txcw = 0;
1250 u32 i; 826 u32 i;
1251 u32 signal = 0; 827 u32 signal = 0;
1252 s32 ret_val; 828 s32 ret_val;
1253 829
1254 DEBUGFUNC("e1000_setup_fiber_serdes_link"); 830 DEBUGFUNC("e1000_setup_fiber_serdes_link");
1255 831
1256 /* On 82571 and 82572 Fiber connections, SerDes loopback mode persists 832 /* On adapters with a MAC newer than 82544, SWDP 1 will be
1257 * until explicitly turned off or a power cycle is performed. A read to 833 * set when the optics detect a signal. On older adapters, it will be
1258 * the register does not indicate its status. Therefore, we ensure 834 * cleared when there is a signal. This applies to fiber media only.
1259 * loopback mode is disabled during initialization. 835 * If we're on serdes media, adjust the output amplitude to value
1260 */ 836 * set in the EEPROM.
1261 if (hw->mac_type == e1000_82571 || hw->mac_type == e1000_82572) 837 */
1262 ew32(SCTL, E1000_DISABLE_SERDES_LOOPBACK); 838 ctrl = er32(CTRL);
1263 839 if (hw->media_type == e1000_media_type_fiber)
1264 /* On adapters with a MAC newer than 82544, SWDP 1 will be 840 signal = (hw->mac_type > e1000_82544) ? E1000_CTRL_SWDPIN1 : 0;
1265 * set when the optics detect a signal. On older adapters, it will be 841
1266 * cleared when there is a signal. This applies to fiber media only. 842 ret_val = e1000_adjust_serdes_amplitude(hw);
1267 * If we're on serdes media, adjust the output amplitude to value 843 if (ret_val)
1268 * set in the EEPROM. 844 return ret_val;
1269 */ 845
1270 ctrl = er32(CTRL); 846 /* Take the link out of reset */
1271 if (hw->media_type == e1000_media_type_fiber) 847 ctrl &= ~(E1000_CTRL_LRST);
1272 signal = (hw->mac_type > e1000_82544) ? E1000_CTRL_SWDPIN1 : 0; 848
1273 849 /* Adjust VCO speed to improve BER performance */
1274 ret_val = e1000_adjust_serdes_amplitude(hw); 850 ret_val = e1000_set_vco_speed(hw);
1275 if (ret_val) 851 if (ret_val)
1276 return ret_val; 852 return ret_val;
1277 853
1278 /* Take the link out of reset */ 854 e1000_config_collision_dist(hw);
1279 ctrl &= ~(E1000_CTRL_LRST); 855
1280 856 /* Check for a software override of the flow control settings, and setup
1281 /* Adjust VCO speed to improve BER performance */ 857 * the device accordingly. If auto-negotiation is enabled, then software
1282 ret_val = e1000_set_vco_speed(hw); 858 * will have to set the "PAUSE" bits to the correct value in the Tranmsit
1283 if (ret_val) 859 * Config Word Register (TXCW) and re-start auto-negotiation. However, if
1284 return ret_val; 860 * auto-negotiation is disabled, then software will have to manually
1285 861 * configure the two flow control enable bits in the CTRL register.
1286 e1000_config_collision_dist(hw); 862 *
1287 863 * The possible values of the "fc" parameter are:
1288 /* Check for a software override of the flow control settings, and setup 864 * 0: Flow control is completely disabled
1289 * the device accordingly. If auto-negotiation is enabled, then software 865 * 1: Rx flow control is enabled (we can receive pause frames, but
1290 * will have to set the "PAUSE" bits to the correct value in the Tranmsit 866 * not send pause frames).
1291 * Config Word Register (TXCW) and re-start auto-negotiation. However, if 867 * 2: Tx flow control is enabled (we can send pause frames but we do
1292 * auto-negotiation is disabled, then software will have to manually 868 * not support receiving pause frames).
1293 * configure the two flow control enable bits in the CTRL register. 869 * 3: Both Rx and TX flow control (symmetric) are enabled.
1294 * 870 */
1295 * The possible values of the "fc" parameter are: 871 switch (hw->fc) {
1296 * 0: Flow control is completely disabled 872 case E1000_FC_NONE:
1297 * 1: Rx flow control is enabled (we can receive pause frames, but 873 /* Flow control is completely disabled by a software over-ride. */
1298 * not send pause frames). 874 txcw = (E1000_TXCW_ANE | E1000_TXCW_FD);
1299 * 2: Tx flow control is enabled (we can send pause frames but we do 875 break;
1300 * not support receiving pause frames). 876 case E1000_FC_RX_PAUSE:
1301 * 3: Both Rx and TX flow control (symmetric) are enabled. 877 /* RX Flow control is enabled and TX Flow control is disabled by a
1302 */ 878 * software over-ride. Since there really isn't a way to advertise
1303 switch (hw->fc) { 879 * that we are capable of RX Pause ONLY, we will advertise that we
1304 case E1000_FC_NONE: 880 * support both symmetric and asymmetric RX PAUSE. Later, we will
1305 /* Flow control is completely disabled by a software over-ride. */ 881 * disable the adapter's ability to send PAUSE frames.
1306 txcw = (E1000_TXCW_ANE | E1000_TXCW_FD); 882 */
1307 break; 883 txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
1308 case E1000_FC_RX_PAUSE: 884 break;
1309 /* RX Flow control is enabled and TX Flow control is disabled by a 885 case E1000_FC_TX_PAUSE:
1310 * software over-ride. Since there really isn't a way to advertise 886 /* TX Flow control is enabled, and RX Flow control is disabled, by a
1311 * that we are capable of RX Pause ONLY, we will advertise that we 887 * software over-ride.
1312 * support both symmetric and asymmetric RX PAUSE. Later, we will 888 */
1313 * disable the adapter's ability to send PAUSE frames. 889 txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_ASM_DIR);
1314 */ 890 break;
1315 txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK); 891 case E1000_FC_FULL:
1316 break; 892 /* Flow control (both RX and TX) is enabled by a software over-ride. */
1317 case E1000_FC_TX_PAUSE: 893 txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
1318 /* TX Flow control is enabled, and RX Flow control is disabled, by a 894 break;
1319 * software over-ride. 895 default:
1320 */ 896 DEBUGOUT("Flow control param set incorrectly\n");
1321 txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_ASM_DIR); 897 return -E1000_ERR_CONFIG;
1322 break; 898 break;
1323 case E1000_FC_FULL: 899 }
1324 /* Flow control (both RX and TX) is enabled by a software over-ride. */ 900
1325 txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK); 901 /* Since auto-negotiation is enabled, take the link out of reset (the link
1326 break; 902 * will be in reset, because we previously reset the chip). This will
1327 default: 903 * restart auto-negotiation. If auto-negotiation is successful then the
1328 DEBUGOUT("Flow control param set incorrectly\n"); 904 * link-up status bit will be set and the flow control enable bits (RFCE
1329 return -E1000_ERR_CONFIG; 905 * and TFCE) will be set according to their negotiated value.
1330 break; 906 */
1331 } 907 DEBUGOUT("Auto-negotiation enabled\n");
1332 908
1333 /* Since auto-negotiation is enabled, take the link out of reset (the link 909 ew32(TXCW, txcw);
1334 * will be in reset, because we previously reset the chip). This will 910 ew32(CTRL, ctrl);
1335 * restart auto-negotiation. If auto-neogtiation is successful then the 911 E1000_WRITE_FLUSH();
1336 * link-up status bit will be set and the flow control enable bits (RFCE 912
1337 * and TFCE) will be set according to their negotiated value. 913 hw->txcw = txcw;
1338 */ 914 msleep(1);
1339 DEBUGOUT("Auto-negotiation enabled\n"); 915
1340 916 /* If we have a signal (the cable is plugged in) then poll for a "Link-Up"
1341 ew32(TXCW, txcw); 917 * indication in the Device Status Register. Time-out if a link isn't
1342 ew32(CTRL, ctrl); 918 * seen in 500 milliseconds seconds (Auto-negotiation should complete in
1343 E1000_WRITE_FLUSH(); 919 * less than 500 milliseconds even if the other end is doing it in SW).
1344 920 * For internal serdes, we just assume a signal is present, then poll.
1345 hw->txcw = txcw; 921 */
1346 msleep(1); 922 if (hw->media_type == e1000_media_type_internal_serdes ||
1347 923 (er32(CTRL) & E1000_CTRL_SWDPIN1) == signal) {
1348 /* If we have a signal (the cable is plugged in) then poll for a "Link-Up" 924 DEBUGOUT("Looking for Link\n");
1349 * indication in the Device Status Register. Time-out if a link isn't 925 for (i = 0; i < (LINK_UP_TIMEOUT / 10); i++) {
1350 * seen in 500 milliseconds seconds (Auto-negotiation should complete in 926 msleep(10);
1351 * less than 500 milliseconds even if the other end is doing it in SW). 927 status = er32(STATUS);
1352 * For internal serdes, we just assume a signal is present, then poll. 928 if (status & E1000_STATUS_LU)
1353 */ 929 break;
1354 if (hw->media_type == e1000_media_type_internal_serdes || 930 }
1355 (er32(CTRL) & E1000_CTRL_SWDPIN1) == signal) { 931 if (i == (LINK_UP_TIMEOUT / 10)) {
1356 DEBUGOUT("Looking for Link\n"); 932 DEBUGOUT("Never got a valid link from auto-neg!!!\n");
1357 for (i = 0; i < (LINK_UP_TIMEOUT / 10); i++) { 933 hw->autoneg_failed = 1;
1358 msleep(10); 934 /* AutoNeg failed to achieve a link, so we'll call
1359 status = er32(STATUS); 935 * e1000_check_for_link. This routine will force the link up if
1360 if (status & E1000_STATUS_LU) break; 936 * we detect a signal. This will allow us to communicate with
1361 } 937 * non-autonegotiating link partners.
1362 if (i == (LINK_UP_TIMEOUT / 10)) { 938 */
1363 DEBUGOUT("Never got a valid link from auto-neg!!!\n"); 939 ret_val = e1000_check_for_link(hw);
1364 hw->autoneg_failed = 1; 940 if (ret_val) {
1365 /* AutoNeg failed to achieve a link, so we'll call 941 DEBUGOUT("Error while checking for link\n");
1366 * e1000_check_for_link. This routine will force the link up if 942 return ret_val;
1367 * we detect a signal. This will allow us to communicate with 943 }
1368 * non-autonegotiating link partners. 944 hw->autoneg_failed = 0;
1369 */ 945 } else {
1370 ret_val = e1000_check_for_link(hw); 946 hw->autoneg_failed = 0;
1371 if (ret_val) { 947 DEBUGOUT("Valid Link Found\n");
1372 DEBUGOUT("Error while checking for link\n"); 948 }
1373 return ret_val; 949 } else {
1374 } 950 DEBUGOUT("No Signal Detected\n");
1375 hw->autoneg_failed = 0; 951 }
1376 } else { 952 return E1000_SUCCESS;
1377 hw->autoneg_failed = 0;
1378 DEBUGOUT("Valid Link Found\n");
1379 }
1380 } else {
1381 DEBUGOUT("No Signal Detected\n");
1382 }
1383 return E1000_SUCCESS;
1384} 953}
1385 954
1386/****************************************************************************** 955/**
1387* Make sure we have a valid PHY and change PHY mode before link setup. 956 * e1000_copper_link_preconfig - early configuration for copper
1388* 957 * @hw: Struct containing variables accessed by shared code
1389* hw - Struct containing variables accessed by shared code 958 *
1390******************************************************************************/ 959 * Make sure we have a valid PHY and change PHY mode before link setup.
960 */
1391static s32 e1000_copper_link_preconfig(struct e1000_hw *hw) 961static s32 e1000_copper_link_preconfig(struct e1000_hw *hw)
1392{ 962{
1393 u32 ctrl; 963 u32 ctrl;
1394 s32 ret_val; 964 s32 ret_val;
1395 u16 phy_data; 965 u16 phy_data;
1396
1397 DEBUGFUNC("e1000_copper_link_preconfig");
1398
1399 ctrl = er32(CTRL);
1400 /* With 82543, we need to force speed and duplex on the MAC equal to what
1401 * the PHY speed and duplex configuration is. In addition, we need to
1402 * perform a hardware reset on the PHY to take it out of reset.
1403 */
1404 if (hw->mac_type > e1000_82543) {
1405 ctrl |= E1000_CTRL_SLU;
1406 ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
1407 ew32(CTRL, ctrl);
1408 } else {
1409 ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX | E1000_CTRL_SLU);
1410 ew32(CTRL, ctrl);
1411 ret_val = e1000_phy_hw_reset(hw);
1412 if (ret_val)
1413 return ret_val;
1414 }
1415
1416 /* Make sure we have a valid PHY */
1417 ret_val = e1000_detect_gig_phy(hw);
1418 if (ret_val) {
1419 DEBUGOUT("Error, did not detect valid phy.\n");
1420 return ret_val;
1421 }
1422 DEBUGOUT1("Phy ID = %x \n", hw->phy_id);
1423
1424 /* Set PHY to class A mode (if necessary) */
1425 ret_val = e1000_set_phy_mode(hw);
1426 if (ret_val)
1427 return ret_val;
1428
1429 if ((hw->mac_type == e1000_82545_rev_3) ||
1430 (hw->mac_type == e1000_82546_rev_3)) {
1431 ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
1432 phy_data |= 0x00000008;
1433 ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
1434 }
1435
1436 if (hw->mac_type <= e1000_82543 ||
1437 hw->mac_type == e1000_82541 || hw->mac_type == e1000_82547 ||
1438 hw->mac_type == e1000_82541_rev_2 || hw->mac_type == e1000_82547_rev_2)
1439 hw->phy_reset_disable = false;
1440
1441 return E1000_SUCCESS;
1442}
1443 966
967 DEBUGFUNC("e1000_copper_link_preconfig");
1444 968
1445/******************************************************************** 969 ctrl = er32(CTRL);
1446* Copper link setup for e1000_phy_igp series. 970 /* With 82543, we need to force speed and duplex on the MAC equal to what
1447* 971 * the PHY speed and duplex configuration is. In addition, we need to
1448* hw - Struct containing variables accessed by shared code 972 * perform a hardware reset on the PHY to take it out of reset.
1449*********************************************************************/ 973 */
1450static s32 e1000_copper_link_igp_setup(struct e1000_hw *hw) 974 if (hw->mac_type > e1000_82543) {
1451{ 975 ctrl |= E1000_CTRL_SLU;
1452 u32 led_ctrl; 976 ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
1453 s32 ret_val; 977 ew32(CTRL, ctrl);
1454 u16 phy_data; 978 } else {
1455 979 ctrl |=
1456 DEBUGFUNC("e1000_copper_link_igp_setup"); 980 (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX | E1000_CTRL_SLU);
1457 981 ew32(CTRL, ctrl);
1458 if (hw->phy_reset_disable) 982 ret_val = e1000_phy_hw_reset(hw);
1459 return E1000_SUCCESS; 983 if (ret_val)
1460 984 return ret_val;
1461 ret_val = e1000_phy_reset(hw); 985 }
1462 if (ret_val) { 986
1463 DEBUGOUT("Error Resetting the PHY\n"); 987 /* Make sure we have a valid PHY */
1464 return ret_val; 988 ret_val = e1000_detect_gig_phy(hw);
1465 } 989 if (ret_val) {
1466 990 DEBUGOUT("Error, did not detect valid phy.\n");
1467 /* Wait 15ms for MAC to configure PHY from eeprom settings */ 991 return ret_val;
1468 msleep(15); 992 }
1469 if (hw->mac_type != e1000_ich8lan) { 993 DEBUGOUT1("Phy ID = %x \n", hw->phy_id);
1470 /* Configure activity LED after PHY reset */ 994
1471 led_ctrl = er32(LEDCTL); 995 /* Set PHY to class A mode (if necessary) */
1472 led_ctrl &= IGP_ACTIVITY_LED_MASK; 996 ret_val = e1000_set_phy_mode(hw);
1473 led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE); 997 if (ret_val)
1474 ew32(LEDCTL, led_ctrl); 998 return ret_val;
1475 } 999
1476 1000 if ((hw->mac_type == e1000_82545_rev_3) ||
1477 /* The NVM settings will configure LPLU in D3 for IGP2 and IGP3 PHYs */ 1001 (hw->mac_type == e1000_82546_rev_3)) {
1478 if (hw->phy_type == e1000_phy_igp) { 1002 ret_val =
1479 /* disable lplu d3 during driver init */ 1003 e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
1480 ret_val = e1000_set_d3_lplu_state(hw, false); 1004 phy_data |= 0x00000008;
1481 if (ret_val) { 1005 ret_val =
1482 DEBUGOUT("Error Disabling LPLU D3\n"); 1006 e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
1483 return ret_val; 1007 }
1484 } 1008
1485 } 1009 if (hw->mac_type <= e1000_82543 ||
1486 1010 hw->mac_type == e1000_82541 || hw->mac_type == e1000_82547 ||
1487 /* disable lplu d0 during driver init */ 1011 hw->mac_type == e1000_82541_rev_2
1488 ret_val = e1000_set_d0_lplu_state(hw, false); 1012 || hw->mac_type == e1000_82547_rev_2)
1489 if (ret_val) { 1013 hw->phy_reset_disable = false;
1490 DEBUGOUT("Error Disabling LPLU D0\n"); 1014
1491 return ret_val; 1015 return E1000_SUCCESS;
1492 }
1493 /* Configure mdi-mdix settings */
1494 ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data);
1495 if (ret_val)
1496 return ret_val;
1497
1498 if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) {
1499 hw->dsp_config_state = e1000_dsp_config_disabled;
1500 /* Force MDI for earlier revs of the IGP PHY */
1501 phy_data &= ~(IGP01E1000_PSCR_AUTO_MDIX | IGP01E1000_PSCR_FORCE_MDI_MDIX);
1502 hw->mdix = 1;
1503
1504 } else {
1505 hw->dsp_config_state = e1000_dsp_config_enabled;
1506 phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX;
1507
1508 switch (hw->mdix) {
1509 case 1:
1510 phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX;
1511 break;
1512 case 2:
1513 phy_data |= IGP01E1000_PSCR_FORCE_MDI_MDIX;
1514 break;
1515 case 0:
1516 default:
1517 phy_data |= IGP01E1000_PSCR_AUTO_MDIX;
1518 break;
1519 }
1520 }
1521 ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, phy_data);
1522 if (ret_val)
1523 return ret_val;
1524
1525 /* set auto-master slave resolution settings */
1526 if (hw->autoneg) {
1527 e1000_ms_type phy_ms_setting = hw->master_slave;
1528
1529 if (hw->ffe_config_state == e1000_ffe_config_active)
1530 hw->ffe_config_state = e1000_ffe_config_enabled;
1531
1532 if (hw->dsp_config_state == e1000_dsp_config_activated)
1533 hw->dsp_config_state = e1000_dsp_config_enabled;
1534
1535 /* when autonegotiation advertisment is only 1000Mbps then we
1536 * should disable SmartSpeed and enable Auto MasterSlave
1537 * resolution as hardware default. */
1538 if (hw->autoneg_advertised == ADVERTISE_1000_FULL) {
1539 /* Disable SmartSpeed */
1540 ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
1541 &phy_data);
1542 if (ret_val)
1543 return ret_val;
1544 phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED;
1545 ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
1546 phy_data);
1547 if (ret_val)
1548 return ret_val;
1549 /* Set auto Master/Slave resolution process */
1550 ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_data);
1551 if (ret_val)
1552 return ret_val;
1553 phy_data &= ~CR_1000T_MS_ENABLE;
1554 ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_data);
1555 if (ret_val)
1556 return ret_val;
1557 }
1558
1559 ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_data);
1560 if (ret_val)
1561 return ret_val;
1562
1563 /* load defaults for future use */
1564 hw->original_master_slave = (phy_data & CR_1000T_MS_ENABLE) ?
1565 ((phy_data & CR_1000T_MS_VALUE) ?
1566 e1000_ms_force_master :
1567 e1000_ms_force_slave) :
1568 e1000_ms_auto;
1569
1570 switch (phy_ms_setting) {
1571 case e1000_ms_force_master:
1572 phy_data |= (CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE);
1573 break;
1574 case e1000_ms_force_slave:
1575 phy_data |= CR_1000T_MS_ENABLE;
1576 phy_data &= ~(CR_1000T_MS_VALUE);
1577 break;
1578 case e1000_ms_auto:
1579 phy_data &= ~CR_1000T_MS_ENABLE;
1580 default:
1581 break;
1582 }
1583 ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_data);
1584 if (ret_val)
1585 return ret_val;
1586 }
1587
1588 return E1000_SUCCESS;
1589} 1016}
1590 1017
1591/******************************************************************** 1018/**
1592* Copper link setup for e1000_phy_gg82563 series. 1019 * e1000_copper_link_igp_setup - Copper link setup for e1000_phy_igp series.
1593* 1020 * @hw: Struct containing variables accessed by shared code
1594* hw - Struct containing variables accessed by shared code 1021 */
1595*********************************************************************/ 1022static s32 e1000_copper_link_igp_setup(struct e1000_hw *hw)
1596static s32 e1000_copper_link_ggp_setup(struct e1000_hw *hw)
1597{ 1023{
1598 s32 ret_val; 1024 u32 led_ctrl;
1599 u16 phy_data; 1025 s32 ret_val;
1600 u32 reg_data; 1026 u16 phy_data;
1601 1027
1602 DEBUGFUNC("e1000_copper_link_ggp_setup"); 1028 DEBUGFUNC("e1000_copper_link_igp_setup");
1603 1029
1604 if (!hw->phy_reset_disable) { 1030 if (hw->phy_reset_disable)
1605 1031 return E1000_SUCCESS;
1606 /* Enable CRS on TX for half-duplex operation. */ 1032
1607 ret_val = e1000_read_phy_reg(hw, GG82563_PHY_MAC_SPEC_CTRL, 1033 ret_val = e1000_phy_reset(hw);
1608 &phy_data); 1034 if (ret_val) {
1609 if (ret_val) 1035 DEBUGOUT("Error Resetting the PHY\n");
1610 return ret_val; 1036 return ret_val;
1611 1037 }
1612 phy_data |= GG82563_MSCR_ASSERT_CRS_ON_TX; 1038
1613 /* Use 25MHz for both link down and 1000BASE-T for Tx clock */ 1039 /* Wait 15ms for MAC to configure PHY from eeprom settings */
1614 phy_data |= GG82563_MSCR_TX_CLK_1000MBPS_25MHZ; 1040 msleep(15);
1615 1041 /* Configure activity LED after PHY reset */
1616 ret_val = e1000_write_phy_reg(hw, GG82563_PHY_MAC_SPEC_CTRL, 1042 led_ctrl = er32(LEDCTL);
1617 phy_data); 1043 led_ctrl &= IGP_ACTIVITY_LED_MASK;
1618 if (ret_val) 1044 led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE);
1619 return ret_val; 1045 ew32(LEDCTL, led_ctrl);
1620 1046
1621 /* Options: 1047 /* The NVM settings will configure LPLU in D3 for IGP2 and IGP3 PHYs */
1622 * MDI/MDI-X = 0 (default) 1048 if (hw->phy_type == e1000_phy_igp) {
1623 * 0 - Auto for all speeds 1049 /* disable lplu d3 during driver init */
1624 * 1 - MDI mode 1050 ret_val = e1000_set_d3_lplu_state(hw, false);
1625 * 2 - MDI-X mode 1051 if (ret_val) {
1626 * 3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes) 1052 DEBUGOUT("Error Disabling LPLU D3\n");
1627 */ 1053 return ret_val;
1628 ret_val = e1000_read_phy_reg(hw, GG82563_PHY_SPEC_CTRL, &phy_data); 1054 }
1629 if (ret_val) 1055 }
1630 return ret_val; 1056
1631 1057 /* Configure mdi-mdix settings */
1632 phy_data &= ~GG82563_PSCR_CROSSOVER_MODE_MASK; 1058 ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data);
1633 1059 if (ret_val)
1634 switch (hw->mdix) { 1060 return ret_val;
1635 case 1: 1061
1636 phy_data |= GG82563_PSCR_CROSSOVER_MODE_MDI; 1062 if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) {
1637 break; 1063 hw->dsp_config_state = e1000_dsp_config_disabled;
1638 case 2: 1064 /* Force MDI for earlier revs of the IGP PHY */
1639 phy_data |= GG82563_PSCR_CROSSOVER_MODE_MDIX; 1065 phy_data &=
1640 break; 1066 ~(IGP01E1000_PSCR_AUTO_MDIX |
1641 case 0: 1067 IGP01E1000_PSCR_FORCE_MDI_MDIX);
1642 default: 1068 hw->mdix = 1;
1643 phy_data |= GG82563_PSCR_CROSSOVER_MODE_AUTO; 1069
1644 break; 1070 } else {
1645 } 1071 hw->dsp_config_state = e1000_dsp_config_enabled;
1646 1072 phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX;
1647 /* Options: 1073
1648 * disable_polarity_correction = 0 (default) 1074 switch (hw->mdix) {
1649 * Automatic Correction for Reversed Cable Polarity 1075 case 1:
1650 * 0 - Disabled 1076 phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX;
1651 * 1 - Enabled 1077 break;
1652 */ 1078 case 2:
1653 phy_data &= ~GG82563_PSCR_POLARITY_REVERSAL_DISABLE; 1079 phy_data |= IGP01E1000_PSCR_FORCE_MDI_MDIX;
1654 if (hw->disable_polarity_correction == 1) 1080 break;
1655 phy_data |= GG82563_PSCR_POLARITY_REVERSAL_DISABLE; 1081 case 0:
1656 ret_val = e1000_write_phy_reg(hw, GG82563_PHY_SPEC_CTRL, phy_data); 1082 default:
1657 1083 phy_data |= IGP01E1000_PSCR_AUTO_MDIX;
1658 if (ret_val) 1084 break;
1659 return ret_val; 1085 }
1660 1086 }
1661 /* SW Reset the PHY so all changes take effect */ 1087 ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, phy_data);
1662 ret_val = e1000_phy_reset(hw); 1088 if (ret_val)
1663 if (ret_val) { 1089 return ret_val;
1664 DEBUGOUT("Error Resetting the PHY\n"); 1090
1665 return ret_val; 1091 /* set auto-master slave resolution settings */
1666 } 1092 if (hw->autoneg) {
1667 } /* phy_reset_disable */ 1093 e1000_ms_type phy_ms_setting = hw->master_slave;
1668 1094
1669 if (hw->mac_type == e1000_80003es2lan) { 1095 if (hw->ffe_config_state == e1000_ffe_config_active)
1670 /* Bypass RX and TX FIFO's */ 1096 hw->ffe_config_state = e1000_ffe_config_enabled;
1671 ret_val = e1000_write_kmrn_reg(hw, E1000_KUMCTRLSTA_OFFSET_FIFO_CTRL, 1097
1672 E1000_KUMCTRLSTA_FIFO_CTRL_RX_BYPASS | 1098 if (hw->dsp_config_state == e1000_dsp_config_activated)
1673 E1000_KUMCTRLSTA_FIFO_CTRL_TX_BYPASS); 1099 hw->dsp_config_state = e1000_dsp_config_enabled;
1674 if (ret_val) 1100
1675 return ret_val; 1101 /* when autonegotiation advertisement is only 1000Mbps then we
1676 1102 * should disable SmartSpeed and enable Auto MasterSlave
1677 ret_val = e1000_read_phy_reg(hw, GG82563_PHY_SPEC_CTRL_2, &phy_data); 1103 * resolution as hardware default. */
1678 if (ret_val) 1104 if (hw->autoneg_advertised == ADVERTISE_1000_FULL) {
1679 return ret_val; 1105 /* Disable SmartSpeed */
1680 1106 ret_val =
1681 phy_data &= ~GG82563_PSCR2_REVERSE_AUTO_NEG; 1107 e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
1682 ret_val = e1000_write_phy_reg(hw, GG82563_PHY_SPEC_CTRL_2, phy_data); 1108 &phy_data);
1683 1109 if (ret_val)
1684 if (ret_val) 1110 return ret_val;
1685 return ret_val; 1111 phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED;
1686 1112 ret_val =
1687 reg_data = er32(CTRL_EXT); 1113 e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
1688 reg_data &= ~(E1000_CTRL_EXT_LINK_MODE_MASK); 1114 phy_data);
1689 ew32(CTRL_EXT, reg_data); 1115 if (ret_val)
1690 1116 return ret_val;
1691 ret_val = e1000_read_phy_reg(hw, GG82563_PHY_PWR_MGMT_CTRL, 1117 /* Set auto Master/Slave resolution process */
1692 &phy_data); 1118 ret_val =
1693 if (ret_val) 1119 e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_data);
1694 return ret_val; 1120 if (ret_val)
1695 1121 return ret_val;
1696 /* Do not init these registers when the HW is in IAMT mode, since the 1122 phy_data &= ~CR_1000T_MS_ENABLE;
1697 * firmware will have already initialized them. We only initialize 1123 ret_val =
1698 * them if the HW is not in IAMT mode. 1124 e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_data);
1699 */ 1125 if (ret_val)
1700 if (!e1000_check_mng_mode(hw)) { 1126 return ret_val;
1701 /* Enable Electrical Idle on the PHY */ 1127 }
1702 phy_data |= GG82563_PMCR_ENABLE_ELECTRICAL_IDLE; 1128
1703 ret_val = e1000_write_phy_reg(hw, GG82563_PHY_PWR_MGMT_CTRL, 1129 ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_data);
1704 phy_data); 1130 if (ret_val)
1705 if (ret_val) 1131 return ret_val;
1706 return ret_val; 1132
1707 1133 /* load defaults for future use */
1708 ret_val = e1000_read_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL, 1134 hw->original_master_slave = (phy_data & CR_1000T_MS_ENABLE) ?
1709 &phy_data); 1135 ((phy_data & CR_1000T_MS_VALUE) ?
1710 if (ret_val) 1136 e1000_ms_force_master :
1711 return ret_val; 1137 e1000_ms_force_slave) : e1000_ms_auto;
1712 1138
1713 phy_data &= ~GG82563_KMCR_PASS_FALSE_CARRIER; 1139 switch (phy_ms_setting) {
1714 ret_val = e1000_write_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL, 1140 case e1000_ms_force_master:
1715 phy_data); 1141 phy_data |= (CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE);
1716 1142 break;
1717 if (ret_val) 1143 case e1000_ms_force_slave:
1718 return ret_val; 1144 phy_data |= CR_1000T_MS_ENABLE;
1719 } 1145 phy_data &= ~(CR_1000T_MS_VALUE);
1720 1146 break;
1721 /* Workaround: Disable padding in Kumeran interface in the MAC 1147 case e1000_ms_auto:
1722 * and in the PHY to avoid CRC errors. 1148 phy_data &= ~CR_1000T_MS_ENABLE;
1723 */ 1149 default:
1724 ret_val = e1000_read_phy_reg(hw, GG82563_PHY_INBAND_CTRL, 1150 break;
1725 &phy_data); 1151 }
1726 if (ret_val) 1152 ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_data);
1727 return ret_val; 1153 if (ret_val)
1728 phy_data |= GG82563_ICR_DIS_PADDING; 1154 return ret_val;
1729 ret_val = e1000_write_phy_reg(hw, GG82563_PHY_INBAND_CTRL, 1155 }
1730 phy_data); 1156
1731 if (ret_val) 1157 return E1000_SUCCESS;
1732 return ret_val;
1733 }
1734
1735 return E1000_SUCCESS;
1736} 1158}
1737 1159
1738/******************************************************************** 1160/**
1739* Copper link setup for e1000_phy_m88 series. 1161 * e1000_copper_link_mgp_setup - Copper link setup for e1000_phy_m88 series.
1740* 1162 * @hw: Struct containing variables accessed by shared code
1741* hw - Struct containing variables accessed by shared code 1163 */
1742*********************************************************************/
1743static s32 e1000_copper_link_mgp_setup(struct e1000_hw *hw) 1164static s32 e1000_copper_link_mgp_setup(struct e1000_hw *hw)
1744{ 1165{
1745 s32 ret_val; 1166 s32 ret_val;
1746 u16 phy_data; 1167 u16 phy_data;
1747 1168
1748 DEBUGFUNC("e1000_copper_link_mgp_setup"); 1169 DEBUGFUNC("e1000_copper_link_mgp_setup");
1749 1170
1750 if (hw->phy_reset_disable) 1171 if (hw->phy_reset_disable)
1751 return E1000_SUCCESS; 1172 return E1000_SUCCESS;
1752 1173
1753 /* Enable CRS on TX. This must be set for half-duplex operation. */ 1174 /* Enable CRS on TX. This must be set for half-duplex operation. */
1754 ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); 1175 ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
1755 if (ret_val) 1176 if (ret_val)
1756 return ret_val; 1177 return ret_val;
1757 1178
1758 phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX; 1179 phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX;
1759 1180
1760 /* Options: 1181 /* Options:
1761 * MDI/MDI-X = 0 (default) 1182 * MDI/MDI-X = 0 (default)
1762 * 0 - Auto for all speeds 1183 * 0 - Auto for all speeds
1763 * 1 - MDI mode 1184 * 1 - MDI mode
1764 * 2 - MDI-X mode 1185 * 2 - MDI-X mode
1765 * 3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes) 1186 * 3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes)
1766 */ 1187 */
1767 phy_data &= ~M88E1000_PSCR_AUTO_X_MODE; 1188 phy_data &= ~M88E1000_PSCR_AUTO_X_MODE;
1768 1189
1769 switch (hw->mdix) { 1190 switch (hw->mdix) {
1770 case 1: 1191 case 1:
1771 phy_data |= M88E1000_PSCR_MDI_MANUAL_MODE; 1192 phy_data |= M88E1000_PSCR_MDI_MANUAL_MODE;
1772 break; 1193 break;
1773 case 2: 1194 case 2:
1774 phy_data |= M88E1000_PSCR_MDIX_MANUAL_MODE; 1195 phy_data |= M88E1000_PSCR_MDIX_MANUAL_MODE;
1775 break; 1196 break;
1776 case 3: 1197 case 3:
1777 phy_data |= M88E1000_PSCR_AUTO_X_1000T; 1198 phy_data |= M88E1000_PSCR_AUTO_X_1000T;
1778 break; 1199 break;
1779 case 0: 1200 case 0:
1780 default: 1201 default:
1781 phy_data |= M88E1000_PSCR_AUTO_X_MODE; 1202 phy_data |= M88E1000_PSCR_AUTO_X_MODE;
1782 break; 1203 break;
1783 } 1204 }
1784 1205
1785 /* Options: 1206 /* Options:
1786 * disable_polarity_correction = 0 (default) 1207 * disable_polarity_correction = 0 (default)
1787 * Automatic Correction for Reversed Cable Polarity 1208 * Automatic Correction for Reversed Cable Polarity
1788 * 0 - Disabled 1209 * 0 - Disabled
1789 * 1 - Enabled 1210 * 1 - Enabled
1790 */ 1211 */
1791 phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL; 1212 phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL;
1792 if (hw->disable_polarity_correction == 1) 1213 if (hw->disable_polarity_correction == 1)
1793 phy_data |= M88E1000_PSCR_POLARITY_REVERSAL; 1214 phy_data |= M88E1000_PSCR_POLARITY_REVERSAL;
1794 ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); 1215 ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
1795 if (ret_val) 1216 if (ret_val)
1796 return ret_val; 1217 return ret_val;
1797 1218
1798 if (hw->phy_revision < M88E1011_I_REV_4) { 1219 if (hw->phy_revision < M88E1011_I_REV_4) {
1799 /* Force TX_CLK in the Extended PHY Specific Control Register 1220 /* Force TX_CLK in the Extended PHY Specific Control Register
1800 * to 25MHz clock. 1221 * to 25MHz clock.
1801 */ 1222 */
1802 ret_val = e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data); 1223 ret_val =
1803 if (ret_val) 1224 e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL,
1804 return ret_val; 1225 &phy_data);
1805 1226 if (ret_val)
1806 phy_data |= M88E1000_EPSCR_TX_CLK_25; 1227 return ret_val;
1807 1228
1808 if ((hw->phy_revision == E1000_REVISION_2) && 1229 phy_data |= M88E1000_EPSCR_TX_CLK_25;
1809 (hw->phy_id == M88E1111_I_PHY_ID)) { 1230
1810 /* Vidalia Phy, set the downshift counter to 5x */ 1231 if ((hw->phy_revision == E1000_REVISION_2) &&
1811 phy_data &= ~(M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK); 1232 (hw->phy_id == M88E1111_I_PHY_ID)) {
1812 phy_data |= M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X; 1233 /* Vidalia Phy, set the downshift counter to 5x */
1813 ret_val = e1000_write_phy_reg(hw, 1234 phy_data &= ~(M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK);
1814 M88E1000_EXT_PHY_SPEC_CTRL, phy_data); 1235 phy_data |= M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X;
1815 if (ret_val) 1236 ret_val = e1000_write_phy_reg(hw,
1816 return ret_val; 1237 M88E1000_EXT_PHY_SPEC_CTRL,
1817 } else { 1238 phy_data);
1818 /* Configure Master and Slave downshift values */ 1239 if (ret_val)
1819 phy_data &= ~(M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK | 1240 return ret_val;
1820 M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK); 1241 } else {
1821 phy_data |= (M88E1000_EPSCR_MASTER_DOWNSHIFT_1X | 1242 /* Configure Master and Slave downshift values */
1822 M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X); 1243 phy_data &= ~(M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK |
1823 ret_val = e1000_write_phy_reg(hw, 1244 M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK);
1824 M88E1000_EXT_PHY_SPEC_CTRL, phy_data); 1245 phy_data |= (M88E1000_EPSCR_MASTER_DOWNSHIFT_1X |
1825 if (ret_val) 1246 M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X);
1826 return ret_val; 1247 ret_val = e1000_write_phy_reg(hw,
1827 } 1248 M88E1000_EXT_PHY_SPEC_CTRL,
1828 } 1249 phy_data);
1829 1250 if (ret_val)
1830 /* SW Reset the PHY so all changes take effect */ 1251 return ret_val;
1831 ret_val = e1000_phy_reset(hw); 1252 }
1832 if (ret_val) { 1253 }
1833 DEBUGOUT("Error Resetting the PHY\n"); 1254
1834 return ret_val; 1255 /* SW Reset the PHY so all changes take effect */
1835 } 1256 ret_val = e1000_phy_reset(hw);
1836 1257 if (ret_val) {
1837 return E1000_SUCCESS; 1258 DEBUGOUT("Error Resetting the PHY\n");
1259 return ret_val;
1260 }
1261
1262 return E1000_SUCCESS;
1838} 1263}
1839 1264
1840/******************************************************************** 1265/**
1841* Setup auto-negotiation and flow control advertisements, 1266 * e1000_copper_link_autoneg - setup auto-neg
1842* and then perform auto-negotiation. 1267 * @hw: Struct containing variables accessed by shared code
1843* 1268 *
1844* hw - Struct containing variables accessed by shared code 1269 * Setup auto-negotiation and flow control advertisements,
1845*********************************************************************/ 1270 * and then perform auto-negotiation.
1271 */
1846static s32 e1000_copper_link_autoneg(struct e1000_hw *hw) 1272static s32 e1000_copper_link_autoneg(struct e1000_hw *hw)
1847{ 1273{
1848 s32 ret_val; 1274 s32 ret_val;
1849 u16 phy_data; 1275 u16 phy_data;
1850 1276
1851 DEBUGFUNC("e1000_copper_link_autoneg"); 1277 DEBUGFUNC("e1000_copper_link_autoneg");
1852 1278
1853 /* Perform some bounds checking on the hw->autoneg_advertised 1279 /* Perform some bounds checking on the hw->autoneg_advertised
1854 * parameter. If this variable is zero, then set it to the default. 1280 * parameter. If this variable is zero, then set it to the default.
1855 */ 1281 */
1856 hw->autoneg_advertised &= AUTONEG_ADVERTISE_SPEED_DEFAULT; 1282 hw->autoneg_advertised &= AUTONEG_ADVERTISE_SPEED_DEFAULT;
1857 1283
1858 /* If autoneg_advertised is zero, we assume it was not defaulted 1284 /* If autoneg_advertised is zero, we assume it was not defaulted
1859 * by the calling code so we set to advertise full capability. 1285 * by the calling code so we set to advertise full capability.
1860 */ 1286 */
1861 if (hw->autoneg_advertised == 0) 1287 if (hw->autoneg_advertised == 0)
1862 hw->autoneg_advertised = AUTONEG_ADVERTISE_SPEED_DEFAULT; 1288 hw->autoneg_advertised = AUTONEG_ADVERTISE_SPEED_DEFAULT;
1863 1289
1864 /* IFE phy only supports 10/100 */ 1290 DEBUGOUT("Reconfiguring auto-neg advertisement params\n");
1865 if (hw->phy_type == e1000_phy_ife) 1291 ret_val = e1000_phy_setup_autoneg(hw);
1866 hw->autoneg_advertised &= AUTONEG_ADVERTISE_10_100_ALL; 1292 if (ret_val) {
1867 1293 DEBUGOUT("Error Setting up Auto-Negotiation\n");
1868 DEBUGOUT("Reconfiguring auto-neg advertisement params\n"); 1294 return ret_val;
1869 ret_val = e1000_phy_setup_autoneg(hw); 1295 }
1870 if (ret_val) { 1296 DEBUGOUT("Restarting Auto-Neg\n");
1871 DEBUGOUT("Error Setting up Auto-Negotiation\n"); 1297
1872 return ret_val; 1298 /* Restart auto-negotiation by setting the Auto Neg Enable bit and
1873 } 1299 * the Auto Neg Restart bit in the PHY control register.
1874 DEBUGOUT("Restarting Auto-Neg\n"); 1300 */
1875 1301 ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &phy_data);
1876 /* Restart auto-negotiation by setting the Auto Neg Enable bit and 1302 if (ret_val)
1877 * the Auto Neg Restart bit in the PHY control register. 1303 return ret_val;
1878 */ 1304
1879 ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &phy_data); 1305 phy_data |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG);
1880 if (ret_val) 1306 ret_val = e1000_write_phy_reg(hw, PHY_CTRL, phy_data);
1881 return ret_val; 1307 if (ret_val)
1882 1308 return ret_val;
1883 phy_data |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG); 1309
1884 ret_val = e1000_write_phy_reg(hw, PHY_CTRL, phy_data); 1310 /* Does the user want to wait for Auto-Neg to complete here, or
1885 if (ret_val) 1311 * check at a later time (for example, callback routine).
1886 return ret_val; 1312 */
1887 1313 if (hw->wait_autoneg_complete) {
1888 /* Does the user want to wait for Auto-Neg to complete here, or 1314 ret_val = e1000_wait_autoneg(hw);
1889 * check at a later time (for example, callback routine). 1315 if (ret_val) {
1890 */ 1316 DEBUGOUT
1891 if (hw->wait_autoneg_complete) { 1317 ("Error while waiting for autoneg to complete\n");
1892 ret_val = e1000_wait_autoneg(hw); 1318 return ret_val;
1893 if (ret_val) { 1319 }
1894 DEBUGOUT("Error while waiting for autoneg to complete\n"); 1320 }
1895 return ret_val; 1321
1896 } 1322 hw->get_link_status = true;
1897 } 1323
1898 1324 return E1000_SUCCESS;
1899 hw->get_link_status = true;
1900
1901 return E1000_SUCCESS;
1902} 1325}
1903 1326
1904/****************************************************************************** 1327/**
1905* Config the MAC and the PHY after link is up. 1328 * e1000_copper_link_postconfig - post link setup
1906* 1) Set up the MAC to the current PHY speed/duplex 1329 * @hw: Struct containing variables accessed by shared code
1907* if we are on 82543. If we 1330 *
1908* are on newer silicon, we only need to configure 1331 * Config the MAC and the PHY after link is up.
1909* collision distance in the Transmit Control Register. 1332 * 1) Set up the MAC to the current PHY speed/duplex
1910* 2) Set up flow control on the MAC to that established with 1333 * if we are on 82543. If we
1911* the link partner. 1334 * are on newer silicon, we only need to configure
1912* 3) Config DSP to improve Gigabit link quality for some PHY revisions. 1335 * collision distance in the Transmit Control Register.
1913* 1336 * 2) Set up flow control on the MAC to that established with
1914* hw - Struct containing variables accessed by shared code 1337 * the link partner.
1915******************************************************************************/ 1338 * 3) Config DSP to improve Gigabit link quality for some PHY revisions.
1339 */
1916static s32 e1000_copper_link_postconfig(struct e1000_hw *hw) 1340static s32 e1000_copper_link_postconfig(struct e1000_hw *hw)
1917{ 1341{
1918 s32 ret_val; 1342 s32 ret_val;
1919 DEBUGFUNC("e1000_copper_link_postconfig"); 1343 DEBUGFUNC("e1000_copper_link_postconfig");
1920 1344
1921 if (hw->mac_type >= e1000_82544) { 1345 if (hw->mac_type >= e1000_82544) {
1922 e1000_config_collision_dist(hw); 1346 e1000_config_collision_dist(hw);
1923 } else { 1347 } else {
1924 ret_val = e1000_config_mac_to_phy(hw); 1348 ret_val = e1000_config_mac_to_phy(hw);
1925 if (ret_val) { 1349 if (ret_val) {
1926 DEBUGOUT("Error configuring MAC to PHY settings\n"); 1350 DEBUGOUT("Error configuring MAC to PHY settings\n");
1927 return ret_val; 1351 return ret_val;
1928 } 1352 }
1929 } 1353 }
1930 ret_val = e1000_config_fc_after_link_up(hw); 1354 ret_val = e1000_config_fc_after_link_up(hw);
1931 if (ret_val) { 1355 if (ret_val) {
1932 DEBUGOUT("Error Configuring Flow Control\n"); 1356 DEBUGOUT("Error Configuring Flow Control\n");
1933 return ret_val; 1357 return ret_val;
1934 } 1358 }
1935 1359
1936 /* Config DSP to improve Giga link quality */ 1360 /* Config DSP to improve Giga link quality */
1937 if (hw->phy_type == e1000_phy_igp) { 1361 if (hw->phy_type == e1000_phy_igp) {
1938 ret_val = e1000_config_dsp_after_link_change(hw, true); 1362 ret_val = e1000_config_dsp_after_link_change(hw, true);
1939 if (ret_val) { 1363 if (ret_val) {
1940 DEBUGOUT("Error Configuring DSP after link up\n"); 1364 DEBUGOUT("Error Configuring DSP after link up\n");
1941 return ret_val; 1365 return ret_val;
1942 } 1366 }
1943 } 1367 }
1944 1368
1945 return E1000_SUCCESS; 1369 return E1000_SUCCESS;
1946} 1370}
1947 1371
1948/****************************************************************************** 1372/**
1949* Detects which PHY is present and setup the speed and duplex 1373 * e1000_setup_copper_link - phy/speed/duplex setting
1950* 1374 * @hw: Struct containing variables accessed by shared code
1951* hw - Struct containing variables accessed by shared code 1375 *
1952******************************************************************************/ 1376 * Detects which PHY is present and sets up the speed and duplex
1377 */
1953static s32 e1000_setup_copper_link(struct e1000_hw *hw) 1378static s32 e1000_setup_copper_link(struct e1000_hw *hw)
1954{ 1379{
1955 s32 ret_val; 1380 s32 ret_val;
1956 u16 i; 1381 u16 i;
1957 u16 phy_data; 1382 u16 phy_data;
1958 u16 reg_data = 0; 1383
1959 1384 DEBUGFUNC("e1000_setup_copper_link");
1960 DEBUGFUNC("e1000_setup_copper_link"); 1385
1961 1386 /* Check if it is a valid PHY and set PHY mode if necessary. */
1962 switch (hw->mac_type) { 1387 ret_val = e1000_copper_link_preconfig(hw);
1963 case e1000_80003es2lan: 1388 if (ret_val)
1964 case e1000_ich8lan: 1389 return ret_val;
1965 /* Set the mac to wait the maximum time between each 1390
1966 * iteration and increase the max iterations when 1391 if (hw->phy_type == e1000_phy_igp) {
1967 * polling the phy; this fixes erroneous timeouts at 10Mbps. */ 1392 ret_val = e1000_copper_link_igp_setup(hw);
1968 ret_val = e1000_write_kmrn_reg(hw, GG82563_REG(0x34, 4), 0xFFFF); 1393 if (ret_val)
1969 if (ret_val) 1394 return ret_val;
1970 return ret_val; 1395 } else if (hw->phy_type == e1000_phy_m88) {
1971 ret_val = e1000_read_kmrn_reg(hw, GG82563_REG(0x34, 9), &reg_data); 1396 ret_val = e1000_copper_link_mgp_setup(hw);
1972 if (ret_val) 1397 if (ret_val)
1973 return ret_val; 1398 return ret_val;
1974 reg_data |= 0x3F; 1399 }
1975 ret_val = e1000_write_kmrn_reg(hw, GG82563_REG(0x34, 9), reg_data); 1400
1976 if (ret_val) 1401 if (hw->autoneg) {
1977 return ret_val; 1402 /* Setup autoneg and flow control advertisement
1978 default: 1403 * and perform autonegotiation */
1979 break; 1404 ret_val = e1000_copper_link_autoneg(hw);
1980 } 1405 if (ret_val)
1981 1406 return ret_val;
1982 /* Check if it is a valid PHY and set PHY mode if necessary. */ 1407 } else {
1983 ret_val = e1000_copper_link_preconfig(hw); 1408 /* PHY will be set to 10H, 10F, 100H,or 100F
1984 if (ret_val) 1409 * depending on value from forced_speed_duplex. */
1985 return ret_val; 1410 DEBUGOUT("Forcing speed and duplex\n");
1986 1411 ret_val = e1000_phy_force_speed_duplex(hw);
1987 switch (hw->mac_type) { 1412 if (ret_val) {
1988 case e1000_80003es2lan: 1413 DEBUGOUT("Error Forcing Speed and Duplex\n");
1989 /* Kumeran registers are written-only */ 1414 return ret_val;
1990 reg_data = E1000_KUMCTRLSTA_INB_CTRL_LINK_STATUS_TX_TIMEOUT_DEFAULT; 1415 }
1991 reg_data |= E1000_KUMCTRLSTA_INB_CTRL_DIS_PADDING; 1416 }
1992 ret_val = e1000_write_kmrn_reg(hw, E1000_KUMCTRLSTA_OFFSET_INB_CTRL, 1417
1993 reg_data); 1418 /* Check link status. Wait up to 100 microseconds for link to become
1994 if (ret_val) 1419 * valid.
1995 return ret_val; 1420 */
1996 break; 1421 for (i = 0; i < 10; i++) {
1997 default: 1422 ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
1998 break; 1423 if (ret_val)
1999 } 1424 return ret_val;
2000 1425 ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
2001 if (hw->phy_type == e1000_phy_igp || 1426 if (ret_val)
2002 hw->phy_type == e1000_phy_igp_3 || 1427 return ret_val;
2003 hw->phy_type == e1000_phy_igp_2) { 1428
2004 ret_val = e1000_copper_link_igp_setup(hw); 1429 if (phy_data & MII_SR_LINK_STATUS) {
2005 if (ret_val) 1430 /* Config the MAC and PHY after link is up */
2006 return ret_val; 1431 ret_val = e1000_copper_link_postconfig(hw);
2007 } else if (hw->phy_type == e1000_phy_m88) { 1432 if (ret_val)
2008 ret_val = e1000_copper_link_mgp_setup(hw); 1433 return ret_val;
2009 if (ret_val) 1434
2010 return ret_val; 1435 DEBUGOUT("Valid link established!!!\n");
2011 } else if (hw->phy_type == e1000_phy_gg82563) { 1436 return E1000_SUCCESS;
2012 ret_val = e1000_copper_link_ggp_setup(hw); 1437 }
2013 if (ret_val) 1438 udelay(10);
2014 return ret_val; 1439 }
2015 } 1440
2016 1441 DEBUGOUT("Unable to establish link!!!\n");
2017 if (hw->autoneg) { 1442 return E1000_SUCCESS;
2018 /* Setup autoneg and flow control advertisement
2019 * and perform autonegotiation */
2020 ret_val = e1000_copper_link_autoneg(hw);
2021 if (ret_val)
2022 return ret_val;
2023 } else {
2024 /* PHY will be set to 10H, 10F, 100H,or 100F
2025 * depending on value from forced_speed_duplex. */
2026 DEBUGOUT("Forcing speed and duplex\n");
2027 ret_val = e1000_phy_force_speed_duplex(hw);
2028 if (ret_val) {
2029 DEBUGOUT("Error Forcing Speed and Duplex\n");
2030 return ret_val;
2031 }
2032 }
2033
2034 /* Check link status. Wait up to 100 microseconds for link to become
2035 * valid.
2036 */
2037 for (i = 0; i < 10; i++) {
2038 ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
2039 if (ret_val)
2040 return ret_val;
2041 ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
2042 if (ret_val)
2043 return ret_val;
2044
2045 if (phy_data & MII_SR_LINK_STATUS) {
2046 /* Config the MAC and PHY after link is up */
2047 ret_val = e1000_copper_link_postconfig(hw);
2048 if (ret_val)
2049 return ret_val;
2050
2051 DEBUGOUT("Valid link established!!!\n");
2052 return E1000_SUCCESS;
2053 }
2054 udelay(10);
2055 }
2056
2057 DEBUGOUT("Unable to establish link!!!\n");
2058 return E1000_SUCCESS;
2059} 1443}
2060 1444
2061/****************************************************************************** 1445/**
2062* Configure the MAC-to-PHY interface for 10/100Mbps 1446 * e1000_phy_setup_autoneg - phy settings
2063* 1447 * @hw: Struct containing variables accessed by shared code
2064* hw - Struct containing variables accessed by shared code 1448 *
2065******************************************************************************/ 1449 * Configures PHY autoneg and flow control advertisement settings
2066static s32 e1000_configure_kmrn_for_10_100(struct e1000_hw *hw, u16 duplex) 1450 */
1451s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
2067{ 1452{
2068 s32 ret_val = E1000_SUCCESS; 1453 s32 ret_val;
2069 u32 tipg; 1454 u16 mii_autoneg_adv_reg;
2070 u16 reg_data; 1455 u16 mii_1000t_ctrl_reg;
2071 1456
2072 DEBUGFUNC("e1000_configure_kmrn_for_10_100"); 1457 DEBUGFUNC("e1000_phy_setup_autoneg");
2073 1458
2074 reg_data = E1000_KUMCTRLSTA_HD_CTRL_10_100_DEFAULT; 1459 /* Read the MII Auto-Neg Advertisement Register (Address 4). */
2075 ret_val = e1000_write_kmrn_reg(hw, E1000_KUMCTRLSTA_OFFSET_HD_CTRL, 1460 ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_ADV, &mii_autoneg_adv_reg);
2076 reg_data); 1461 if (ret_val)
2077 if (ret_val) 1462 return ret_val;
2078 return ret_val;
2079 1463
2080 /* Configure Transmit Inter-Packet Gap */ 1464 /* Read the MII 1000Base-T Control Register (Address 9). */
2081 tipg = er32(TIPG); 1465 ret_val =
2082 tipg &= ~E1000_TIPG_IPGT_MASK; 1466 e1000_read_phy_reg(hw, PHY_1000T_CTRL, &mii_1000t_ctrl_reg);
2083 tipg |= DEFAULT_80003ES2LAN_TIPG_IPGT_10_100; 1467 if (ret_val)
2084 ew32(TIPG, tipg); 1468 return ret_val;
2085 1469
2086 ret_val = e1000_read_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL, &reg_data); 1470 /* Need to parse both autoneg_advertised and fc and set up
1471 * the appropriate PHY registers. First we will parse for
1472 * autoneg_advertised software override. Since we can advertise
1473 * a plethora of combinations, we need to check each bit
1474 * individually.
1475 */
2087 1476
2088 if (ret_val) 1477 /* First we clear all the 10/100 mb speed bits in the Auto-Neg
2089 return ret_val; 1478 * Advertisement Register (Address 4) and the 1000 mb speed bits in
1479 * the 1000Base-T Control Register (Address 9).
1480 */
1481 mii_autoneg_adv_reg &= ~REG4_SPEED_MASK;
1482 mii_1000t_ctrl_reg &= ~REG9_SPEED_MASK;
2090 1483
2091 if (duplex == HALF_DUPLEX) 1484 DEBUGOUT1("autoneg_advertised %x\n", hw->autoneg_advertised);
2092 reg_data |= GG82563_KMCR_PASS_FALSE_CARRIER;
2093 else
2094 reg_data &= ~GG82563_KMCR_PASS_FALSE_CARRIER;
2095 1485
2096 ret_val = e1000_write_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL, reg_data); 1486 /* Do we want to advertise 10 Mb Half Duplex? */
1487 if (hw->autoneg_advertised & ADVERTISE_10_HALF) {
1488 DEBUGOUT("Advertise 10mb Half duplex\n");
1489 mii_autoneg_adv_reg |= NWAY_AR_10T_HD_CAPS;
1490 }
2097 1491
2098 return ret_val; 1492 /* Do we want to advertise 10 Mb Full Duplex? */
2099} 1493 if (hw->autoneg_advertised & ADVERTISE_10_FULL) {
1494 DEBUGOUT("Advertise 10mb Full duplex\n");
1495 mii_autoneg_adv_reg |= NWAY_AR_10T_FD_CAPS;
1496 }
2100 1497
2101static s32 e1000_configure_kmrn_for_1000(struct e1000_hw *hw) 1498 /* Do we want to advertise 100 Mb Half Duplex? */
2102{ 1499 if (hw->autoneg_advertised & ADVERTISE_100_HALF) {
2103 s32 ret_val = E1000_SUCCESS; 1500 DEBUGOUT("Advertise 100mb Half duplex\n");
2104 u16 reg_data; 1501 mii_autoneg_adv_reg |= NWAY_AR_100TX_HD_CAPS;
2105 u32 tipg; 1502 }
2106 1503
2107 DEBUGFUNC("e1000_configure_kmrn_for_1000"); 1504 /* Do we want to advertise 100 Mb Full Duplex? */
1505 if (hw->autoneg_advertised & ADVERTISE_100_FULL) {
1506 DEBUGOUT("Advertise 100mb Full duplex\n");
1507 mii_autoneg_adv_reg |= NWAY_AR_100TX_FD_CAPS;
1508 }
2108 1509
2109 reg_data = E1000_KUMCTRLSTA_HD_CTRL_1000_DEFAULT; 1510 /* We do not allow the Phy to advertise 1000 Mb Half Duplex */
2110 ret_val = e1000_write_kmrn_reg(hw, E1000_KUMCTRLSTA_OFFSET_HD_CTRL, 1511 if (hw->autoneg_advertised & ADVERTISE_1000_HALF) {
2111 reg_data); 1512 DEBUGOUT
2112 if (ret_val) 1513 ("Advertise 1000mb Half duplex requested, request denied!\n");
2113 return ret_val; 1514 }
2114 1515
2115 /* Configure Transmit Inter-Packet Gap */ 1516 /* Do we want to advertise 1000 Mb Full Duplex? */
2116 tipg = er32(TIPG); 1517 if (hw->autoneg_advertised & ADVERTISE_1000_FULL) {
2117 tipg &= ~E1000_TIPG_IPGT_MASK; 1518 DEBUGOUT("Advertise 1000mb Full duplex\n");
2118 tipg |= DEFAULT_80003ES2LAN_TIPG_IPGT_1000; 1519 mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS;
2119 ew32(TIPG, tipg); 1520 }
2120 1521
2121 ret_val = e1000_read_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL, &reg_data); 1522 /* Check for a software override of the flow control settings, and
1523 * setup the PHY advertisement registers accordingly. If
1524 * auto-negotiation is enabled, then software will have to set the
1525 * "PAUSE" bits to the correct value in the Auto-Negotiation
1526 * Advertisement Register (PHY_AUTONEG_ADV) and re-start auto-negotiation.
1527 *
1528 * The possible values of the "fc" parameter are:
1529 * 0: Flow control is completely disabled
1530 * 1: Rx flow control is enabled (we can receive pause frames
1531 * but not send pause frames).
1532 * 2: Tx flow control is enabled (we can send pause frames
1533 * but we do not support receiving pause frames).
1534 * 3: Both Rx and TX flow control (symmetric) are enabled.
1535 * other: No software override. The flow control configuration
1536 * in the EEPROM is used.
1537 */
1538 switch (hw->fc) {
1539 case E1000_FC_NONE: /* 0 */
1540 /* Flow control (RX & TX) is completely disabled by a
1541 * software over-ride.
1542 */
1543 mii_autoneg_adv_reg &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
1544 break;
1545 case E1000_FC_RX_PAUSE: /* 1 */
1546 /* RX Flow control is enabled, and TX Flow control is
1547 * disabled, by a software over-ride.
1548 */
1549 /* Since there really isn't a way to advertise that we are
1550 * capable of RX Pause ONLY, we will advertise that we
1551 * support both symmetric and asymmetric RX PAUSE. Later
1552 * (in e1000_config_fc_after_link_up) we will disable the
1553 *hw's ability to send PAUSE frames.
1554 */
1555 mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
1556 break;
1557 case E1000_FC_TX_PAUSE: /* 2 */
1558 /* TX Flow control is enabled, and RX Flow control is
1559 * disabled, by a software over-ride.
1560 */
1561 mii_autoneg_adv_reg |= NWAY_AR_ASM_DIR;
1562 mii_autoneg_adv_reg &= ~NWAY_AR_PAUSE;
1563 break;
1564 case E1000_FC_FULL: /* 3 */
1565 /* Flow control (both RX and TX) is enabled by a software
1566 * over-ride.
1567 */
1568 mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
1569 break;
1570 default:
1571 DEBUGOUT("Flow control param set incorrectly\n");
1572 return -E1000_ERR_CONFIG;
1573 }
2122 1574
2123 if (ret_val) 1575 ret_val = e1000_write_phy_reg(hw, PHY_AUTONEG_ADV, mii_autoneg_adv_reg);
2124 return ret_val; 1576 if (ret_val)
1577 return ret_val;
2125 1578
2126 reg_data &= ~GG82563_KMCR_PASS_FALSE_CARRIER; 1579 DEBUGOUT1("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg);
2127 ret_val = e1000_write_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL, reg_data);
2128 1580
2129 return ret_val; 1581 ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, mii_1000t_ctrl_reg);
2130} 1582 if (ret_val)
1583 return ret_val;
2131 1584
2132/****************************************************************************** 1585 return E1000_SUCCESS;
2133* Configures PHY autoneg and flow control advertisement settings
2134*
2135* hw - Struct containing variables accessed by shared code
2136******************************************************************************/
2137s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
2138{
2139 s32 ret_val;
2140 u16 mii_autoneg_adv_reg;
2141 u16 mii_1000t_ctrl_reg;
2142
2143 DEBUGFUNC("e1000_phy_setup_autoneg");
2144
2145 /* Read the MII Auto-Neg Advertisement Register (Address 4). */
2146 ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_ADV, &mii_autoneg_adv_reg);
2147 if (ret_val)
2148 return ret_val;
2149
2150 if (hw->phy_type != e1000_phy_ife) {
2151 /* Read the MII 1000Base-T Control Register (Address 9). */
2152 ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &mii_1000t_ctrl_reg);
2153 if (ret_val)
2154 return ret_val;
2155 } else
2156 mii_1000t_ctrl_reg=0;
2157
2158 /* Need to parse both autoneg_advertised and fc and set up
2159 * the appropriate PHY registers. First we will parse for
2160 * autoneg_advertised software override. Since we can advertise
2161 * a plethora of combinations, we need to check each bit
2162 * individually.
2163 */
2164
2165 /* First we clear all the 10/100 mb speed bits in the Auto-Neg
2166 * Advertisement Register (Address 4) and the 1000 mb speed bits in
2167 * the 1000Base-T Control Register (Address 9).
2168 */
2169 mii_autoneg_adv_reg &= ~REG4_SPEED_MASK;
2170 mii_1000t_ctrl_reg &= ~REG9_SPEED_MASK;
2171
2172 DEBUGOUT1("autoneg_advertised %x\n", hw->autoneg_advertised);
2173
2174 /* Do we want to advertise 10 Mb Half Duplex? */
2175 if (hw->autoneg_advertised & ADVERTISE_10_HALF) {
2176 DEBUGOUT("Advertise 10mb Half duplex\n");
2177 mii_autoneg_adv_reg |= NWAY_AR_10T_HD_CAPS;
2178 }
2179
2180 /* Do we want to advertise 10 Mb Full Duplex? */
2181 if (hw->autoneg_advertised & ADVERTISE_10_FULL) {
2182 DEBUGOUT("Advertise 10mb Full duplex\n");
2183 mii_autoneg_adv_reg |= NWAY_AR_10T_FD_CAPS;
2184 }
2185
2186 /* Do we want to advertise 100 Mb Half Duplex? */
2187 if (hw->autoneg_advertised & ADVERTISE_100_HALF) {
2188 DEBUGOUT("Advertise 100mb Half duplex\n");
2189 mii_autoneg_adv_reg |= NWAY_AR_100TX_HD_CAPS;
2190 }
2191
2192 /* Do we want to advertise 100 Mb Full Duplex? */
2193 if (hw->autoneg_advertised & ADVERTISE_100_FULL) {
2194 DEBUGOUT("Advertise 100mb Full duplex\n");
2195 mii_autoneg_adv_reg |= NWAY_AR_100TX_FD_CAPS;
2196 }
2197
2198 /* We do not allow the Phy to advertise 1000 Mb Half Duplex */
2199 if (hw->autoneg_advertised & ADVERTISE_1000_HALF) {
2200 DEBUGOUT("Advertise 1000mb Half duplex requested, request denied!\n");
2201 }
2202
2203 /* Do we want to advertise 1000 Mb Full Duplex? */
2204 if (hw->autoneg_advertised & ADVERTISE_1000_FULL) {
2205 DEBUGOUT("Advertise 1000mb Full duplex\n");
2206 mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS;
2207 if (hw->phy_type == e1000_phy_ife) {
2208 DEBUGOUT("e1000_phy_ife is a 10/100 PHY. Gigabit speed is not supported.\n");
2209 }
2210 }
2211
2212 /* Check for a software override of the flow control settings, and
2213 * setup the PHY advertisement registers accordingly. If
2214 * auto-negotiation is enabled, then software will have to set the
2215 * "PAUSE" bits to the correct value in the Auto-Negotiation
2216 * Advertisement Register (PHY_AUTONEG_ADV) and re-start auto-negotiation.
2217 *
2218 * The possible values of the "fc" parameter are:
2219 * 0: Flow control is completely disabled
2220 * 1: Rx flow control is enabled (we can receive pause frames
2221 * but not send pause frames).
2222 * 2: Tx flow control is enabled (we can send pause frames
2223 * but we do not support receiving pause frames).
2224 * 3: Both Rx and TX flow control (symmetric) are enabled.
2225 * other: No software override. The flow control configuration
2226 * in the EEPROM is used.
2227 */
2228 switch (hw->fc) {
2229 case E1000_FC_NONE: /* 0 */
2230 /* Flow control (RX & TX) is completely disabled by a
2231 * software over-ride.
2232 */
2233 mii_autoneg_adv_reg &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
2234 break;
2235 case E1000_FC_RX_PAUSE: /* 1 */
2236 /* RX Flow control is enabled, and TX Flow control is
2237 * disabled, by a software over-ride.
2238 */
2239 /* Since there really isn't a way to advertise that we are
2240 * capable of RX Pause ONLY, we will advertise that we
2241 * support both symmetric and asymmetric RX PAUSE. Later
2242 * (in e1000_config_fc_after_link_up) we will disable the
2243 *hw's ability to send PAUSE frames.
2244 */
2245 mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
2246 break;
2247 case E1000_FC_TX_PAUSE: /* 2 */
2248 /* TX Flow control is enabled, and RX Flow control is
2249 * disabled, by a software over-ride.
2250 */
2251 mii_autoneg_adv_reg |= NWAY_AR_ASM_DIR;
2252 mii_autoneg_adv_reg &= ~NWAY_AR_PAUSE;
2253 break;
2254 case E1000_FC_FULL: /* 3 */
2255 /* Flow control (both RX and TX) is enabled by a software
2256 * over-ride.
2257 */
2258 mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
2259 break;
2260 default:
2261 DEBUGOUT("Flow control param set incorrectly\n");
2262 return -E1000_ERR_CONFIG;
2263 }
2264
2265 ret_val = e1000_write_phy_reg(hw, PHY_AUTONEG_ADV, mii_autoneg_adv_reg);
2266 if (ret_val)
2267 return ret_val;
2268
2269 DEBUGOUT1("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg);
2270
2271 if (hw->phy_type != e1000_phy_ife) {
2272 ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, mii_1000t_ctrl_reg);
2273 if (ret_val)
2274 return ret_val;
2275 }
2276
2277 return E1000_SUCCESS;
2278} 1586}
2279 1587
2280/****************************************************************************** 1588/**
2281* Force PHY speed and duplex settings to hw->forced_speed_duplex 1589 * e1000_phy_force_speed_duplex - force link settings
2282* 1590 * @hw: Struct containing variables accessed by shared code
2283* hw - Struct containing variables accessed by shared code 1591 *
2284******************************************************************************/ 1592 * Force PHY speed and duplex settings to hw->forced_speed_duplex
1593 */
2285static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw) 1594static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw)
2286{ 1595{
2287 u32 ctrl; 1596 u32 ctrl;
2288 s32 ret_val; 1597 s32 ret_val;
2289 u16 mii_ctrl_reg; 1598 u16 mii_ctrl_reg;
2290 u16 mii_status_reg; 1599 u16 mii_status_reg;
2291 u16 phy_data; 1600 u16 phy_data;
2292 u16 i; 1601 u16 i;
2293 1602
2294 DEBUGFUNC("e1000_phy_force_speed_duplex"); 1603 DEBUGFUNC("e1000_phy_force_speed_duplex");
2295 1604
2296 /* Turn off Flow control if we are forcing speed and duplex. */ 1605 /* Turn off Flow control if we are forcing speed and duplex. */
2297 hw->fc = E1000_FC_NONE; 1606 hw->fc = E1000_FC_NONE;
2298 1607
2299 DEBUGOUT1("hw->fc = %d\n", hw->fc); 1608 DEBUGOUT1("hw->fc = %d\n", hw->fc);
2300 1609
2301 /* Read the Device Control Register. */ 1610 /* Read the Device Control Register. */
2302 ctrl = er32(CTRL); 1611 ctrl = er32(CTRL);
2303 1612
2304 /* Set the bits to Force Speed and Duplex in the Device Ctrl Reg. */ 1613 /* Set the bits to Force Speed and Duplex in the Device Ctrl Reg. */
2305 ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); 1614 ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
2306 ctrl &= ~(DEVICE_SPEED_MASK); 1615 ctrl &= ~(DEVICE_SPEED_MASK);
2307 1616
2308 /* Clear the Auto Speed Detect Enable bit. */ 1617 /* Clear the Auto Speed Detect Enable bit. */
2309 ctrl &= ~E1000_CTRL_ASDE; 1618 ctrl &= ~E1000_CTRL_ASDE;
2310 1619
2311 /* Read the MII Control Register. */ 1620 /* Read the MII Control Register. */
2312 ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &mii_ctrl_reg); 1621 ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &mii_ctrl_reg);
2313 if (ret_val) 1622 if (ret_val)
2314 return ret_val; 1623 return ret_val;
2315 1624
2316 /* We need to disable autoneg in order to force link and duplex. */ 1625 /* We need to disable autoneg in order to force link and duplex. */
2317 1626
2318 mii_ctrl_reg &= ~MII_CR_AUTO_NEG_EN; 1627 mii_ctrl_reg &= ~MII_CR_AUTO_NEG_EN;
2319 1628
2320 /* Are we forcing Full or Half Duplex? */ 1629 /* Are we forcing Full or Half Duplex? */
2321 if (hw->forced_speed_duplex == e1000_100_full || 1630 if (hw->forced_speed_duplex == e1000_100_full ||
2322 hw->forced_speed_duplex == e1000_10_full) { 1631 hw->forced_speed_duplex == e1000_10_full) {
2323 /* We want to force full duplex so we SET the full duplex bits in the 1632 /* We want to force full duplex so we SET the full duplex bits in the
2324 * Device and MII Control Registers. 1633 * Device and MII Control Registers.
2325 */ 1634 */
2326 ctrl |= E1000_CTRL_FD; 1635 ctrl |= E1000_CTRL_FD;
2327 mii_ctrl_reg |= MII_CR_FULL_DUPLEX; 1636 mii_ctrl_reg |= MII_CR_FULL_DUPLEX;
2328 DEBUGOUT("Full Duplex\n"); 1637 DEBUGOUT("Full Duplex\n");
2329 } else { 1638 } else {
2330 /* We want to force half duplex so we CLEAR the full duplex bits in 1639 /* We want to force half duplex so we CLEAR the full duplex bits in
2331 * the Device and MII Control Registers. 1640 * the Device and MII Control Registers.
2332 */ 1641 */
2333 ctrl &= ~E1000_CTRL_FD; 1642 ctrl &= ~E1000_CTRL_FD;
2334 mii_ctrl_reg &= ~MII_CR_FULL_DUPLEX; 1643 mii_ctrl_reg &= ~MII_CR_FULL_DUPLEX;
2335 DEBUGOUT("Half Duplex\n"); 1644 DEBUGOUT("Half Duplex\n");
2336 } 1645 }
2337 1646
2338 /* Are we forcing 100Mbps??? */ 1647 /* Are we forcing 100Mbps??? */
2339 if (hw->forced_speed_duplex == e1000_100_full || 1648 if (hw->forced_speed_duplex == e1000_100_full ||
2340 hw->forced_speed_duplex == e1000_100_half) { 1649 hw->forced_speed_duplex == e1000_100_half) {
2341 /* Set the 100Mb bit and turn off the 1000Mb and 10Mb bits. */ 1650 /* Set the 100Mb bit and turn off the 1000Mb and 10Mb bits. */
2342 ctrl |= E1000_CTRL_SPD_100; 1651 ctrl |= E1000_CTRL_SPD_100;
2343 mii_ctrl_reg |= MII_CR_SPEED_100; 1652 mii_ctrl_reg |= MII_CR_SPEED_100;
2344 mii_ctrl_reg &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_10); 1653 mii_ctrl_reg &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_10);
2345 DEBUGOUT("Forcing 100mb "); 1654 DEBUGOUT("Forcing 100mb ");
2346 } else { 1655 } else {
2347 /* Set the 10Mb bit and turn off the 1000Mb and 100Mb bits. */ 1656 /* Set the 10Mb bit and turn off the 1000Mb and 100Mb bits. */
2348 ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100); 1657 ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
2349 mii_ctrl_reg |= MII_CR_SPEED_10; 1658 mii_ctrl_reg |= MII_CR_SPEED_10;
2350 mii_ctrl_reg &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_100); 1659 mii_ctrl_reg &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_100);
2351 DEBUGOUT("Forcing 10mb "); 1660 DEBUGOUT("Forcing 10mb ");
2352 } 1661 }
2353 1662
2354 e1000_config_collision_dist(hw); 1663 e1000_config_collision_dist(hw);
2355 1664
2356 /* Write the configured values back to the Device Control Reg. */ 1665 /* Write the configured values back to the Device Control Reg. */
2357 ew32(CTRL, ctrl); 1666 ew32(CTRL, ctrl);
2358 1667
2359 if ((hw->phy_type == e1000_phy_m88) || 1668 if (hw->phy_type == e1000_phy_m88) {
2360 (hw->phy_type == e1000_phy_gg82563)) { 1669 ret_val =
2361 ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); 1670 e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
2362 if (ret_val) 1671 if (ret_val)
2363 return ret_val; 1672 return ret_val;
2364 1673
2365 /* Clear Auto-Crossover to force MDI manually. M88E1000 requires MDI 1674 /* Clear Auto-Crossover to force MDI manually. M88E1000 requires MDI
2366 * forced whenever speed are duplex are forced. 1675 * forced whenever speed are duplex are forced.
2367 */ 1676 */
2368 phy_data &= ~M88E1000_PSCR_AUTO_X_MODE; 1677 phy_data &= ~M88E1000_PSCR_AUTO_X_MODE;
2369 ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); 1678 ret_val =
2370 if (ret_val) 1679 e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
2371 return ret_val; 1680 if (ret_val)
2372 1681 return ret_val;
2373 DEBUGOUT1("M88E1000 PSCR: %x \n", phy_data); 1682
2374 1683 DEBUGOUT1("M88E1000 PSCR: %x \n", phy_data);
2375 /* Need to reset the PHY or these changes will be ignored */ 1684
2376 mii_ctrl_reg |= MII_CR_RESET; 1685 /* Need to reset the PHY or these changes will be ignored */
2377 1686 mii_ctrl_reg |= MII_CR_RESET;
2378 /* Disable MDI-X support for 10/100 */ 1687
2379 } else if (hw->phy_type == e1000_phy_ife) { 1688 /* Disable MDI-X support for 10/100 */
2380 ret_val = e1000_read_phy_reg(hw, IFE_PHY_MDIX_CONTROL, &phy_data); 1689 } else {
2381 if (ret_val) 1690 /* Clear Auto-Crossover to force MDI manually. IGP requires MDI
2382 return ret_val; 1691 * forced whenever speed or duplex are forced.
2383 1692 */
2384 phy_data &= ~IFE_PMC_AUTO_MDIX; 1693 ret_val =
2385 phy_data &= ~IFE_PMC_FORCE_MDIX; 1694 e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data);
2386 1695 if (ret_val)
2387 ret_val = e1000_write_phy_reg(hw, IFE_PHY_MDIX_CONTROL, phy_data); 1696 return ret_val;
2388 if (ret_val) 1697
2389 return ret_val; 1698 phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX;
2390 1699 phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX;
2391 } else { 1700
2392 /* Clear Auto-Crossover to force MDI manually. IGP requires MDI 1701 ret_val =
2393 * forced whenever speed or duplex are forced. 1702 e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, phy_data);
2394 */ 1703 if (ret_val)
2395 ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data); 1704 return ret_val;
2396 if (ret_val) 1705 }
2397 return ret_val; 1706
2398 1707 /* Write back the modified PHY MII control register. */
2399 phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX; 1708 ret_val = e1000_write_phy_reg(hw, PHY_CTRL, mii_ctrl_reg);
2400 phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX; 1709 if (ret_val)
2401 1710 return ret_val;
2402 ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, phy_data); 1711
2403 if (ret_val) 1712 udelay(1);
2404 return ret_val; 1713
2405 } 1714 /* The wait_autoneg_complete flag may be a little misleading here.
2406 1715 * Since we are forcing speed and duplex, Auto-Neg is not enabled.
2407 /* Write back the modified PHY MII control register. */ 1716 * But we do want to delay for a period while forcing only so we
2408 ret_val = e1000_write_phy_reg(hw, PHY_CTRL, mii_ctrl_reg); 1717 * don't generate false No Link messages. So we will wait here
2409 if (ret_val) 1718 * only if the user has set wait_autoneg_complete to 1, which is
2410 return ret_val; 1719 * the default.
2411 1720 */
2412 udelay(1); 1721 if (hw->wait_autoneg_complete) {
2413 1722 /* We will wait for autoneg to complete. */
2414 /* The wait_autoneg_complete flag may be a little misleading here. 1723 DEBUGOUT("Waiting for forced speed/duplex link.\n");
2415 * Since we are forcing speed and duplex, Auto-Neg is not enabled. 1724 mii_status_reg = 0;
2416 * But we do want to delay for a period while forcing only so we 1725
2417 * don't generate false No Link messages. So we will wait here 1726 /* We will wait for autoneg to complete or 4.5 seconds to expire. */
2418 * only if the user has set wait_autoneg_complete to 1, which is 1727 for (i = PHY_FORCE_TIME; i > 0; i--) {
2419 * the default. 1728 /* Read the MII Status Register and wait for Auto-Neg Complete bit
2420 */ 1729 * to be set.
2421 if (hw->wait_autoneg_complete) { 1730 */
2422 /* We will wait for autoneg to complete. */ 1731 ret_val =
2423 DEBUGOUT("Waiting for forced speed/duplex link.\n"); 1732 e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
2424 mii_status_reg = 0; 1733 if (ret_val)
2425 1734 return ret_val;
2426 /* We will wait for autoneg to complete or 4.5 seconds to expire. */ 1735
2427 for (i = PHY_FORCE_TIME; i > 0; i--) { 1736 ret_val =
2428 /* Read the MII Status Register and wait for Auto-Neg Complete bit 1737 e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
2429 * to be set. 1738 if (ret_val)
2430 */ 1739 return ret_val;
2431 ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); 1740
2432 if (ret_val) 1741 if (mii_status_reg & MII_SR_LINK_STATUS)
2433 return ret_val; 1742 break;
2434 1743 msleep(100);
2435 ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); 1744 }
2436 if (ret_val) 1745 if ((i == 0) && (hw->phy_type == e1000_phy_m88)) {
2437 return ret_val; 1746 /* We didn't get link. Reset the DSP and wait again for link. */
2438 1747 ret_val = e1000_phy_reset_dsp(hw);
2439 if (mii_status_reg & MII_SR_LINK_STATUS) break; 1748 if (ret_val) {
2440 msleep(100); 1749 DEBUGOUT("Error Resetting PHY DSP\n");
2441 } 1750 return ret_val;
2442 if ((i == 0) && 1751 }
2443 ((hw->phy_type == e1000_phy_m88) || 1752 }
2444 (hw->phy_type == e1000_phy_gg82563))) { 1753 /* This loop will early-out if the link condition has been met. */
2445 /* We didn't get link. Reset the DSP and wait again for link. */ 1754 for (i = PHY_FORCE_TIME; i > 0; i--) {
2446 ret_val = e1000_phy_reset_dsp(hw); 1755 if (mii_status_reg & MII_SR_LINK_STATUS)
2447 if (ret_val) { 1756 break;
2448 DEBUGOUT("Error Resetting PHY DSP\n"); 1757 msleep(100);
2449 return ret_val; 1758 /* Read the MII Status Register and wait for Auto-Neg Complete bit
2450 } 1759 * to be set.
2451 } 1760 */
2452 /* This loop will early-out if the link condition has been met. */ 1761 ret_val =
2453 for (i = PHY_FORCE_TIME; i > 0; i--) { 1762 e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
2454 if (mii_status_reg & MII_SR_LINK_STATUS) break; 1763 if (ret_val)
2455 msleep(100); 1764 return ret_val;
2456 /* Read the MII Status Register and wait for Auto-Neg Complete bit 1765
2457 * to be set. 1766 ret_val =
2458 */ 1767 e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
2459 ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); 1768 if (ret_val)
2460 if (ret_val) 1769 return ret_val;
2461 return ret_val; 1770 }
2462 1771 }
2463 ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); 1772
2464 if (ret_val) 1773 if (hw->phy_type == e1000_phy_m88) {
2465 return ret_val; 1774 /* Because we reset the PHY above, we need to re-force TX_CLK in the
2466 } 1775 * Extended PHY Specific Control Register to 25MHz clock. This value
2467 } 1776 * defaults back to a 2.5MHz clock when the PHY is reset.
2468 1777 */
2469 if (hw->phy_type == e1000_phy_m88) { 1778 ret_val =
2470 /* Because we reset the PHY above, we need to re-force TX_CLK in the 1779 e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL,
2471 * Extended PHY Specific Control Register to 25MHz clock. This value 1780 &phy_data);
2472 * defaults back to a 2.5MHz clock when the PHY is reset. 1781 if (ret_val)
2473 */ 1782 return ret_val;
2474 ret_val = e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data); 1783
2475 if (ret_val) 1784 phy_data |= M88E1000_EPSCR_TX_CLK_25;
2476 return ret_val; 1785 ret_val =
2477 1786 e1000_write_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL,
2478 phy_data |= M88E1000_EPSCR_TX_CLK_25; 1787 phy_data);
2479 ret_val = e1000_write_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data); 1788 if (ret_val)
2480 if (ret_val) 1789 return ret_val;
2481 return ret_val; 1790
2482 1791 /* In addition, because of the s/w reset above, we need to enable CRS on
2483 /* In addition, because of the s/w reset above, we need to enable CRS on 1792 * TX. This must be set for both full and half duplex operation.
2484 * TX. This must be set for both full and half duplex operation. 1793 */
2485 */ 1794 ret_val =
2486 ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); 1795 e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
2487 if (ret_val) 1796 if (ret_val)
2488 return ret_val; 1797 return ret_val;
2489 1798
2490 phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX; 1799 phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX;
2491 ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); 1800 ret_val =
2492 if (ret_val) 1801 e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
2493 return ret_val; 1802 if (ret_val)
2494 1803 return ret_val;
2495 if ((hw->mac_type == e1000_82544 || hw->mac_type == e1000_82543) && 1804
2496 (!hw->autoneg) && (hw->forced_speed_duplex == e1000_10_full || 1805 if ((hw->mac_type == e1000_82544 || hw->mac_type == e1000_82543)
2497 hw->forced_speed_duplex == e1000_10_half)) { 1806 && (!hw->autoneg)
2498 ret_val = e1000_polarity_reversal_workaround(hw); 1807 && (hw->forced_speed_duplex == e1000_10_full
2499 if (ret_val) 1808 || hw->forced_speed_duplex == e1000_10_half)) {
2500 return ret_val; 1809 ret_val = e1000_polarity_reversal_workaround(hw);
2501 } 1810 if (ret_val)
2502 } else if (hw->phy_type == e1000_phy_gg82563) { 1811 return ret_val;
2503 /* The TX_CLK of the Extended PHY Specific Control Register defaults 1812 }
2504 * to 2.5MHz on a reset. We need to re-force it back to 25MHz, if 1813 }
2505 * we're not in a forced 10/duplex configuration. */ 1814 return E1000_SUCCESS;
2506 ret_val = e1000_read_phy_reg(hw, GG82563_PHY_MAC_SPEC_CTRL, &phy_data);
2507 if (ret_val)
2508 return ret_val;
2509
2510 phy_data &= ~GG82563_MSCR_TX_CLK_MASK;
2511 if ((hw->forced_speed_duplex == e1000_10_full) ||
2512 (hw->forced_speed_duplex == e1000_10_half))
2513 phy_data |= GG82563_MSCR_TX_CLK_10MBPS_2_5MHZ;
2514 else
2515 phy_data |= GG82563_MSCR_TX_CLK_100MBPS_25MHZ;
2516
2517 /* Also due to the reset, we need to enable CRS on Tx. */
2518 phy_data |= GG82563_MSCR_ASSERT_CRS_ON_TX;
2519
2520 ret_val = e1000_write_phy_reg(hw, GG82563_PHY_MAC_SPEC_CTRL, phy_data);
2521 if (ret_val)
2522 return ret_val;
2523 }
2524 return E1000_SUCCESS;
2525} 1815}
2526 1816
2527/****************************************************************************** 1817/**
2528* Sets the collision distance in the Transmit Control register 1818 * e1000_config_collision_dist - set collision distance register
2529* 1819 * @hw: Struct containing variables accessed by shared code
2530* hw - Struct containing variables accessed by shared code 1820 *
2531* 1821 * Sets the collision distance in the Transmit Control register.
2532* Link should have been established previously. Reads the speed and duplex 1822 * Link should have been established previously. Reads the speed and duplex
2533* information from the Device Status register. 1823 * information from the Device Status register.
2534******************************************************************************/ 1824 */
2535void e1000_config_collision_dist(struct e1000_hw *hw) 1825void e1000_config_collision_dist(struct e1000_hw *hw)
2536{ 1826{
2537 u32 tctl, coll_dist; 1827 u32 tctl, coll_dist;
2538 1828
2539 DEBUGFUNC("e1000_config_collision_dist"); 1829 DEBUGFUNC("e1000_config_collision_dist");
2540 1830
2541 if (hw->mac_type < e1000_82543) 1831 if (hw->mac_type < e1000_82543)
2542 coll_dist = E1000_COLLISION_DISTANCE_82542; 1832 coll_dist = E1000_COLLISION_DISTANCE_82542;
2543 else 1833 else
2544 coll_dist = E1000_COLLISION_DISTANCE; 1834 coll_dist = E1000_COLLISION_DISTANCE;
2545 1835
2546 tctl = er32(TCTL); 1836 tctl = er32(TCTL);
2547 1837
2548 tctl &= ~E1000_TCTL_COLD; 1838 tctl &= ~E1000_TCTL_COLD;
2549 tctl |= coll_dist << E1000_COLD_SHIFT; 1839 tctl |= coll_dist << E1000_COLD_SHIFT;
2550 1840
2551 ew32(TCTL, tctl); 1841 ew32(TCTL, tctl);
2552 E1000_WRITE_FLUSH(); 1842 E1000_WRITE_FLUSH();
2553} 1843}
2554 1844
2555/****************************************************************************** 1845/**
2556* Sets MAC speed and duplex settings to reflect the those in the PHY 1846 * e1000_config_mac_to_phy - sync phy and mac settings
2557* 1847 * @hw: Struct containing variables accessed by shared code
2558* hw - Struct containing variables accessed by shared code 1848 * @mii_reg: data to write to the MII control register
2559* mii_reg - data to write to the MII control register 1849 *
2560* 1850 * Sets MAC speed and duplex settings to reflect the those in the PHY
2561* The contents of the PHY register containing the needed information need to 1851 * The contents of the PHY register containing the needed information need to
2562* be passed in. 1852 * be passed in.
2563******************************************************************************/ 1853 */
2564static s32 e1000_config_mac_to_phy(struct e1000_hw *hw) 1854static s32 e1000_config_mac_to_phy(struct e1000_hw *hw)
2565{ 1855{
2566 u32 ctrl; 1856 u32 ctrl;
2567 s32 ret_val; 1857 s32 ret_val;
2568 u16 phy_data; 1858 u16 phy_data;
2569 1859
2570 DEBUGFUNC("e1000_config_mac_to_phy"); 1860 DEBUGFUNC("e1000_config_mac_to_phy");
2571 1861
2572 /* 82544 or newer MAC, Auto Speed Detection takes care of 1862 /* 82544 or newer MAC, Auto Speed Detection takes care of
2573 * MAC speed/duplex configuration.*/ 1863 * MAC speed/duplex configuration.*/
2574 if (hw->mac_type >= e1000_82544) 1864 if (hw->mac_type >= e1000_82544)
2575 return E1000_SUCCESS; 1865 return E1000_SUCCESS;
2576 1866
2577 /* Read the Device Control Register and set the bits to Force Speed 1867 /* Read the Device Control Register and set the bits to Force Speed
2578 * and Duplex. 1868 * and Duplex.
2579 */ 1869 */
2580 ctrl = er32(CTRL); 1870 ctrl = er32(CTRL);
2581 ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); 1871 ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
2582 ctrl &= ~(E1000_CTRL_SPD_SEL | E1000_CTRL_ILOS); 1872 ctrl &= ~(E1000_CTRL_SPD_SEL | E1000_CTRL_ILOS);
2583 1873
2584 /* Set up duplex in the Device Control and Transmit Control 1874 /* Set up duplex in the Device Control and Transmit Control
2585 * registers depending on negotiated values. 1875 * registers depending on negotiated values.
2586 */ 1876 */
2587 ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); 1877 ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
2588 if (ret_val) 1878 if (ret_val)
2589 return ret_val; 1879 return ret_val;
2590 1880
2591 if (phy_data & M88E1000_PSSR_DPLX) 1881 if (phy_data & M88E1000_PSSR_DPLX)
2592 ctrl |= E1000_CTRL_FD; 1882 ctrl |= E1000_CTRL_FD;
2593 else 1883 else
2594 ctrl &= ~E1000_CTRL_FD; 1884 ctrl &= ~E1000_CTRL_FD;
2595 1885
2596 e1000_config_collision_dist(hw); 1886 e1000_config_collision_dist(hw);
2597 1887
2598 /* Set up speed in the Device Control register depending on 1888 /* Set up speed in the Device Control register depending on
2599 * negotiated values. 1889 * negotiated values.
2600 */ 1890 */
2601 if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) 1891 if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS)
2602 ctrl |= E1000_CTRL_SPD_1000; 1892 ctrl |= E1000_CTRL_SPD_1000;
2603 else if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_100MBS) 1893 else if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_100MBS)
2604 ctrl |= E1000_CTRL_SPD_100; 1894 ctrl |= E1000_CTRL_SPD_100;
2605 1895
2606 /* Write the configured values back to the Device Control Reg. */ 1896 /* Write the configured values back to the Device Control Reg. */
2607 ew32(CTRL, ctrl); 1897 ew32(CTRL, ctrl);
2608 return E1000_SUCCESS; 1898 return E1000_SUCCESS;
2609} 1899}
2610 1900
2611/****************************************************************************** 1901/**
2612 * Forces the MAC's flow control settings. 1902 * e1000_force_mac_fc - force flow control settings
2613 * 1903 * @hw: Struct containing variables accessed by shared code
2614 * hw - Struct containing variables accessed by shared code
2615 * 1904 *
1905 * Forces the MAC's flow control settings.
2616 * Sets the TFCE and RFCE bits in the device control register to reflect 1906 * Sets the TFCE and RFCE bits in the device control register to reflect
2617 * the adapter settings. TFCE and RFCE need to be explicitly set by 1907 * the adapter settings. TFCE and RFCE need to be explicitly set by
2618 * software when a Copper PHY is used because autonegotiation is managed 1908 * software when a Copper PHY is used because autonegotiation is managed
2619 * by the PHY rather than the MAC. Software must also configure these 1909 * by the PHY rather than the MAC. Software must also configure these
2620 * bits when link is forced on a fiber connection. 1910 * bits when link is forced on a fiber connection.
2621 *****************************************************************************/ 1911 */
2622s32 e1000_force_mac_fc(struct e1000_hw *hw) 1912s32 e1000_force_mac_fc(struct e1000_hw *hw)
2623{ 1913{
2624 u32 ctrl; 1914 u32 ctrl;
2625 1915
2626 DEBUGFUNC("e1000_force_mac_fc"); 1916 DEBUGFUNC("e1000_force_mac_fc");
2627 1917
2628 /* Get the current configuration of the Device Control Register */ 1918 /* Get the current configuration of the Device Control Register */
2629 ctrl = er32(CTRL); 1919 ctrl = er32(CTRL);
2630 1920
2631 /* Because we didn't get link via the internal auto-negotiation 1921 /* Because we didn't get link via the internal auto-negotiation
2632 * mechanism (we either forced link or we got link via PHY 1922 * mechanism (we either forced link or we got link via PHY
2633 * auto-neg), we have to manually enable/disable transmit an 1923 * auto-neg), we have to manually enable/disable transmit an
2634 * receive flow control. 1924 * receive flow control.
2635 * 1925 *
2636 * The "Case" statement below enables/disable flow control 1926 * The "Case" statement below enables/disable flow control
2637 * according to the "hw->fc" parameter. 1927 * according to the "hw->fc" parameter.
2638 * 1928 *
2639 * The possible values of the "fc" parameter are: 1929 * The possible values of the "fc" parameter are:
2640 * 0: Flow control is completely disabled 1930 * 0: Flow control is completely disabled
2641 * 1: Rx flow control is enabled (we can receive pause 1931 * 1: Rx flow control is enabled (we can receive pause
2642 * frames but not send pause frames). 1932 * frames but not send pause frames).
2643 * 2: Tx flow control is enabled (we can send pause frames 1933 * 2: Tx flow control is enabled (we can send pause frames
2644 * frames but we do not receive pause frames). 1934 * frames but we do not receive pause frames).
2645 * 3: Both Rx and TX flow control (symmetric) is enabled. 1935 * 3: Both Rx and TX flow control (symmetric) is enabled.
2646 * other: No other values should be possible at this point. 1936 * other: No other values should be possible at this point.
2647 */ 1937 */
2648 1938
2649 switch (hw->fc) { 1939 switch (hw->fc) {
2650 case E1000_FC_NONE: 1940 case E1000_FC_NONE:
2651 ctrl &= (~(E1000_CTRL_TFCE | E1000_CTRL_RFCE)); 1941 ctrl &= (~(E1000_CTRL_TFCE | E1000_CTRL_RFCE));
2652 break; 1942 break;
2653 case E1000_FC_RX_PAUSE: 1943 case E1000_FC_RX_PAUSE:
2654 ctrl &= (~E1000_CTRL_TFCE); 1944 ctrl &= (~E1000_CTRL_TFCE);
2655 ctrl |= E1000_CTRL_RFCE; 1945 ctrl |= E1000_CTRL_RFCE;
2656 break; 1946 break;
2657 case E1000_FC_TX_PAUSE: 1947 case E1000_FC_TX_PAUSE:
2658 ctrl &= (~E1000_CTRL_RFCE); 1948 ctrl &= (~E1000_CTRL_RFCE);
2659 ctrl |= E1000_CTRL_TFCE; 1949 ctrl |= E1000_CTRL_TFCE;
2660 break; 1950 break;
2661 case E1000_FC_FULL: 1951 case E1000_FC_FULL:
2662 ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE); 1952 ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE);
2663 break; 1953 break;
2664 default: 1954 default:
2665 DEBUGOUT("Flow control param set incorrectly\n"); 1955 DEBUGOUT("Flow control param set incorrectly\n");
2666 return -E1000_ERR_CONFIG; 1956 return -E1000_ERR_CONFIG;
2667 } 1957 }
2668 1958
2669 /* Disable TX Flow Control for 82542 (rev 2.0) */ 1959 /* Disable TX Flow Control for 82542 (rev 2.0) */
2670 if (hw->mac_type == e1000_82542_rev2_0) 1960 if (hw->mac_type == e1000_82542_rev2_0)
2671 ctrl &= (~E1000_CTRL_TFCE); 1961 ctrl &= (~E1000_CTRL_TFCE);
2672 1962
2673 ew32(CTRL, ctrl); 1963 ew32(CTRL, ctrl);
2674 return E1000_SUCCESS; 1964 return E1000_SUCCESS;
2675} 1965}
2676 1966
2677/****************************************************************************** 1967/**
2678 * Configures flow control settings after link is established 1968 * e1000_config_fc_after_link_up - configure flow control after autoneg
2679 * 1969 * @hw: Struct containing variables accessed by shared code
2680 * hw - Struct containing variables accessed by shared code
2681 * 1970 *
1971 * Configures flow control settings after link is established
2682 * Should be called immediately after a valid link has been established. 1972 * Should be called immediately after a valid link has been established.
2683 * Forces MAC flow control settings if link was forced. When in MII/GMII mode 1973 * Forces MAC flow control settings if link was forced. When in MII/GMII mode
2684 * and autonegotiation is enabled, the MAC flow control settings will be set 1974 * and autonegotiation is enabled, the MAC flow control settings will be set
2685 * based on the flow control negotiated by the PHY. In TBI mode, the TFCE 1975 * based on the flow control negotiated by the PHY. In TBI mode, the TFCE
2686 * and RFCE bits will be automaticaly set to the negotiated flow control mode. 1976 * and RFCE bits will be automatically set to the negotiated flow control mode.
2687 *****************************************************************************/ 1977 */
2688static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw) 1978static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw)
2689{ 1979{
2690 s32 ret_val; 1980 s32 ret_val;
2691 u16 mii_status_reg; 1981 u16 mii_status_reg;
2692 u16 mii_nway_adv_reg; 1982 u16 mii_nway_adv_reg;
2693 u16 mii_nway_lp_ability_reg; 1983 u16 mii_nway_lp_ability_reg;
2694 u16 speed; 1984 u16 speed;
2695 u16 duplex; 1985 u16 duplex;
2696 1986
2697 DEBUGFUNC("e1000_config_fc_after_link_up"); 1987 DEBUGFUNC("e1000_config_fc_after_link_up");
2698 1988
2699 /* Check for the case where we have fiber media and auto-neg failed 1989 /* Check for the case where we have fiber media and auto-neg failed
2700 * so we had to force link. In this case, we need to force the 1990 * so we had to force link. In this case, we need to force the
2701 * configuration of the MAC to match the "fc" parameter. 1991 * configuration of the MAC to match the "fc" parameter.
2702 */ 1992 */
2703 if (((hw->media_type == e1000_media_type_fiber) && (hw->autoneg_failed)) || 1993 if (((hw->media_type == e1000_media_type_fiber) && (hw->autoneg_failed))
2704 ((hw->media_type == e1000_media_type_internal_serdes) && 1994 || ((hw->media_type == e1000_media_type_internal_serdes)
2705 (hw->autoneg_failed)) || 1995 && (hw->autoneg_failed))
2706 ((hw->media_type == e1000_media_type_copper) && (!hw->autoneg))) { 1996 || ((hw->media_type == e1000_media_type_copper)
2707 ret_val = e1000_force_mac_fc(hw); 1997 && (!hw->autoneg))) {
2708 if (ret_val) { 1998 ret_val = e1000_force_mac_fc(hw);
2709 DEBUGOUT("Error forcing flow control settings\n"); 1999 if (ret_val) {
2710 return ret_val; 2000 DEBUGOUT("Error forcing flow control settings\n");
2711 } 2001 return ret_val;
2712 } 2002 }
2713 2003 }
2714 /* Check for the case where we have copper media and auto-neg is 2004
2715 * enabled. In this case, we need to check and see if Auto-Neg 2005 /* Check for the case where we have copper media and auto-neg is
2716 * has completed, and if so, how the PHY and link partner has 2006 * enabled. In this case, we need to check and see if Auto-Neg
2717 * flow control configured. 2007 * has completed, and if so, how the PHY and link partner has
2718 */ 2008 * flow control configured.
2719 if ((hw->media_type == e1000_media_type_copper) && hw->autoneg) { 2009 */
2720 /* Read the MII Status Register and check to see if AutoNeg 2010 if ((hw->media_type == e1000_media_type_copper) && hw->autoneg) {
2721 * has completed. We read this twice because this reg has 2011 /* Read the MII Status Register and check to see if AutoNeg
2722 * some "sticky" (latched) bits. 2012 * has completed. We read this twice because this reg has
2723 */ 2013 * some "sticky" (latched) bits.
2724 ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); 2014 */
2725 if (ret_val) 2015 ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
2726 return ret_val; 2016 if (ret_val)
2727 ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); 2017 return ret_val;
2728 if (ret_val) 2018 ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
2729 return ret_val; 2019 if (ret_val)
2730 2020 return ret_val;
2731 if (mii_status_reg & MII_SR_AUTONEG_COMPLETE) { 2021
2732 /* The AutoNeg process has completed, so we now need to 2022 if (mii_status_reg & MII_SR_AUTONEG_COMPLETE) {
2733 * read both the Auto Negotiation Advertisement Register 2023 /* The AutoNeg process has completed, so we now need to
2734 * (Address 4) and the Auto_Negotiation Base Page Ability 2024 * read both the Auto Negotiation Advertisement Register
2735 * Register (Address 5) to determine how flow control was 2025 * (Address 4) and the Auto_Negotiation Base Page Ability
2736 * negotiated. 2026 * Register (Address 5) to determine how flow control was
2737 */ 2027 * negotiated.
2738 ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_ADV, 2028 */
2739 &mii_nway_adv_reg); 2029 ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_ADV,
2740 if (ret_val) 2030 &mii_nway_adv_reg);
2741 return ret_val; 2031 if (ret_val)
2742 ret_val = e1000_read_phy_reg(hw, PHY_LP_ABILITY, 2032 return ret_val;
2743 &mii_nway_lp_ability_reg); 2033 ret_val = e1000_read_phy_reg(hw, PHY_LP_ABILITY,
2744 if (ret_val) 2034 &mii_nway_lp_ability_reg);
2745 return ret_val; 2035 if (ret_val)
2746 2036 return ret_val;
2747 /* Two bits in the Auto Negotiation Advertisement Register 2037
2748 * (Address 4) and two bits in the Auto Negotiation Base 2038 /* Two bits in the Auto Negotiation Advertisement Register
2749 * Page Ability Register (Address 5) determine flow control 2039 * (Address 4) and two bits in the Auto Negotiation Base
2750 * for both the PHY and the link partner. The following 2040 * Page Ability Register (Address 5) determine flow control
2751 * table, taken out of the IEEE 802.3ab/D6.0 dated March 25, 2041 * for both the PHY and the link partner. The following
2752 * 1999, describes these PAUSE resolution bits and how flow 2042 * table, taken out of the IEEE 802.3ab/D6.0 dated March 25,
2753 * control is determined based upon these settings. 2043 * 1999, describes these PAUSE resolution bits and how flow
2754 * NOTE: DC = Don't Care 2044 * control is determined based upon these settings.
2755 * 2045 * NOTE: DC = Don't Care
2756 * LOCAL DEVICE | LINK PARTNER 2046 *
2757 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution 2047 * LOCAL DEVICE | LINK PARTNER
2758 *-------|---------|-------|---------|-------------------- 2048 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution
2759 * 0 | 0 | DC | DC | E1000_FC_NONE 2049 *-------|---------|-------|---------|--------------------
2760 * 0 | 1 | 0 | DC | E1000_FC_NONE 2050 * 0 | 0 | DC | DC | E1000_FC_NONE
2761 * 0 | 1 | 1 | 0 | E1000_FC_NONE 2051 * 0 | 1 | 0 | DC | E1000_FC_NONE
2762 * 0 | 1 | 1 | 1 | E1000_FC_TX_PAUSE 2052 * 0 | 1 | 1 | 0 | E1000_FC_NONE
2763 * 1 | 0 | 0 | DC | E1000_FC_NONE 2053 * 0 | 1 | 1 | 1 | E1000_FC_TX_PAUSE
2764 * 1 | DC | 1 | DC | E1000_FC_FULL 2054 * 1 | 0 | 0 | DC | E1000_FC_NONE
2765 * 1 | 1 | 0 | 0 | E1000_FC_NONE 2055 * 1 | DC | 1 | DC | E1000_FC_FULL
2766 * 1 | 1 | 0 | 1 | E1000_FC_RX_PAUSE 2056 * 1 | 1 | 0 | 0 | E1000_FC_NONE
2767 * 2057 * 1 | 1 | 0 | 1 | E1000_FC_RX_PAUSE
2768 */ 2058 *
2769 /* Are both PAUSE bits set to 1? If so, this implies 2059 */
2770 * Symmetric Flow Control is enabled at both ends. The 2060 /* Are both PAUSE bits set to 1? If so, this implies
2771 * ASM_DIR bits are irrelevant per the spec. 2061 * Symmetric Flow Control is enabled at both ends. The
2772 * 2062 * ASM_DIR bits are irrelevant per the spec.
2773 * For Symmetric Flow Control: 2063 *
2774 * 2064 * For Symmetric Flow Control:
2775 * LOCAL DEVICE | LINK PARTNER 2065 *
2776 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result 2066 * LOCAL DEVICE | LINK PARTNER
2777 *-------|---------|-------|---------|-------------------- 2067 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
2778 * 1 | DC | 1 | DC | E1000_FC_FULL 2068 *-------|---------|-------|---------|--------------------
2779 * 2069 * 1 | DC | 1 | DC | E1000_FC_FULL
2780 */ 2070 *
2781 if ((mii_nway_adv_reg & NWAY_AR_PAUSE) && 2071 */
2782 (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) { 2072 if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
2783 /* Now we need to check if the user selected RX ONLY 2073 (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) {
2784 * of pause frames. In this case, we had to advertise 2074 /* Now we need to check if the user selected RX ONLY
2785 * FULL flow control because we could not advertise RX 2075 * of pause frames. In this case, we had to advertise
2786 * ONLY. Hence, we must now check to see if we need to 2076 * FULL flow control because we could not advertise RX
2787 * turn OFF the TRANSMISSION of PAUSE frames. 2077 * ONLY. Hence, we must now check to see if we need to
2788 */ 2078 * turn OFF the TRANSMISSION of PAUSE frames.
2789 if (hw->original_fc == E1000_FC_FULL) { 2079 */
2790 hw->fc = E1000_FC_FULL; 2080 if (hw->original_fc == E1000_FC_FULL) {
2791 DEBUGOUT("Flow Control = FULL.\n"); 2081 hw->fc = E1000_FC_FULL;
2792 } else { 2082 DEBUGOUT("Flow Control = FULL.\n");
2793 hw->fc = E1000_FC_RX_PAUSE; 2083 } else {
2794 DEBUGOUT("Flow Control = RX PAUSE frames only.\n"); 2084 hw->fc = E1000_FC_RX_PAUSE;
2795 } 2085 DEBUGOUT
2796 } 2086 ("Flow Control = RX PAUSE frames only.\n");
2797 /* For receiving PAUSE frames ONLY. 2087 }
2798 * 2088 }
2799 * LOCAL DEVICE | LINK PARTNER 2089 /* For receiving PAUSE frames ONLY.
2800 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result 2090 *
2801 *-------|---------|-------|---------|-------------------- 2091 * LOCAL DEVICE | LINK PARTNER
2802 * 0 | 1 | 1 | 1 | E1000_FC_TX_PAUSE 2092 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
2803 * 2093 *-------|---------|-------|---------|--------------------
2804 */ 2094 * 0 | 1 | 1 | 1 | E1000_FC_TX_PAUSE
2805 else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) && 2095 *
2806 (mii_nway_adv_reg & NWAY_AR_ASM_DIR) && 2096 */
2807 (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && 2097 else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) &&
2808 (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { 2098 (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
2809 hw->fc = E1000_FC_TX_PAUSE; 2099 (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
2810 DEBUGOUT("Flow Control = TX PAUSE frames only.\n"); 2100 (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR))
2811 } 2101 {
2812 /* For transmitting PAUSE frames ONLY. 2102 hw->fc = E1000_FC_TX_PAUSE;
2813 * 2103 DEBUGOUT
2814 * LOCAL DEVICE | LINK PARTNER 2104 ("Flow Control = TX PAUSE frames only.\n");
2815 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result 2105 }
2816 *-------|---------|-------|---------|-------------------- 2106 /* For transmitting PAUSE frames ONLY.
2817 * 1 | 1 | 0 | 1 | E1000_FC_RX_PAUSE 2107 *
2818 * 2108 * LOCAL DEVICE | LINK PARTNER
2819 */ 2109 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
2820 else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) && 2110 *-------|---------|-------|---------|--------------------
2821 (mii_nway_adv_reg & NWAY_AR_ASM_DIR) && 2111 * 1 | 1 | 0 | 1 | E1000_FC_RX_PAUSE
2822 !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && 2112 *
2823 (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { 2113 */
2824 hw->fc = E1000_FC_RX_PAUSE; 2114 else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
2825 DEBUGOUT("Flow Control = RX PAUSE frames only.\n"); 2115 (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
2826 } 2116 !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
2827 /* Per the IEEE spec, at this point flow control should be 2117 (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR))
2828 * disabled. However, we want to consider that we could 2118 {
2829 * be connected to a legacy switch that doesn't advertise 2119 hw->fc = E1000_FC_RX_PAUSE;
2830 * desired flow control, but can be forced on the link 2120 DEBUGOUT
2831 * partner. So if we advertised no flow control, that is 2121 ("Flow Control = RX PAUSE frames only.\n");
2832 * what we will resolve to. If we advertised some kind of 2122 }
2833 * receive capability (Rx Pause Only or Full Flow Control) 2123 /* Per the IEEE spec, at this point flow control should be
2834 * and the link partner advertised none, we will configure 2124 * disabled. However, we want to consider that we could
2835 * ourselves to enable Rx Flow Control only. We can do 2125 * be connected to a legacy switch that doesn't advertise
2836 * this safely for two reasons: If the link partner really 2126 * desired flow control, but can be forced on the link
2837 * didn't want flow control enabled, and we enable Rx, no 2127 * partner. So if we advertised no flow control, that is
2838 * harm done since we won't be receiving any PAUSE frames 2128 * what we will resolve to. If we advertised some kind of
2839 * anyway. If the intent on the link partner was to have 2129 * receive capability (Rx Pause Only or Full Flow Control)
2840 * flow control enabled, then by us enabling RX only, we 2130 * and the link partner advertised none, we will configure
2841 * can at least receive pause frames and process them. 2131 * ourselves to enable Rx Flow Control only. We can do
2842 * This is a good idea because in most cases, since we are 2132 * this safely for two reasons: If the link partner really
2843 * predominantly a server NIC, more times than not we will 2133 * didn't want flow control enabled, and we enable Rx, no
2844 * be asked to delay transmission of packets than asking 2134 * harm done since we won't be receiving any PAUSE frames
2845 * our link partner to pause transmission of frames. 2135 * anyway. If the intent on the link partner was to have
2846 */ 2136 * flow control enabled, then by us enabling RX only, we
2847 else if ((hw->original_fc == E1000_FC_NONE || 2137 * can at least receive pause frames and process them.
2848 hw->original_fc == E1000_FC_TX_PAUSE) || 2138 * This is a good idea because in most cases, since we are
2849 hw->fc_strict_ieee) { 2139 * predominantly a server NIC, more times than not we will
2850 hw->fc = E1000_FC_NONE; 2140 * be asked to delay transmission of packets than asking
2851 DEBUGOUT("Flow Control = NONE.\n"); 2141 * our link partner to pause transmission of frames.
2852 } else { 2142 */
2853 hw->fc = E1000_FC_RX_PAUSE; 2143 else if ((hw->original_fc == E1000_FC_NONE ||
2854 DEBUGOUT("Flow Control = RX PAUSE frames only.\n"); 2144 hw->original_fc == E1000_FC_TX_PAUSE) ||
2855 } 2145 hw->fc_strict_ieee) {
2856 2146 hw->fc = E1000_FC_NONE;
2857 /* Now we need to do one last check... If we auto- 2147 DEBUGOUT("Flow Control = NONE.\n");
2858 * negotiated to HALF DUPLEX, flow control should not be 2148 } else {
2859 * enabled per IEEE 802.3 spec. 2149 hw->fc = E1000_FC_RX_PAUSE;
2860 */ 2150 DEBUGOUT
2861 ret_val = e1000_get_speed_and_duplex(hw, &speed, &duplex); 2151 ("Flow Control = RX PAUSE frames only.\n");
2862 if (ret_val) { 2152 }
2863 DEBUGOUT("Error getting link speed and duplex\n"); 2153
2864 return ret_val; 2154 /* Now we need to do one last check... If we auto-
2865 } 2155 * negotiated to HALF DUPLEX, flow control should not be
2866 2156 * enabled per IEEE 802.3 spec.
2867 if (duplex == HALF_DUPLEX) 2157 */
2868 hw->fc = E1000_FC_NONE; 2158 ret_val =
2869 2159 e1000_get_speed_and_duplex(hw, &speed, &duplex);
2870 /* Now we call a subroutine to actually force the MAC 2160 if (ret_val) {
2871 * controller to use the correct flow control settings. 2161 DEBUGOUT
2872 */ 2162 ("Error getting link speed and duplex\n");
2873 ret_val = e1000_force_mac_fc(hw); 2163 return ret_val;
2874 if (ret_val) { 2164 }
2875 DEBUGOUT("Error forcing flow control settings\n"); 2165
2876 return ret_val; 2166 if (duplex == HALF_DUPLEX)
2877 } 2167 hw->fc = E1000_FC_NONE;
2878 } else { 2168
2879 DEBUGOUT("Copper PHY and Auto Neg has not completed.\n"); 2169 /* Now we call a subroutine to actually force the MAC
2880 } 2170 * controller to use the correct flow control settings.
2881 } 2171 */
2882 return E1000_SUCCESS; 2172 ret_val = e1000_force_mac_fc(hw);
2173 if (ret_val) {
2174 DEBUGOUT
2175 ("Error forcing flow control settings\n");
2176 return ret_val;
2177 }
2178 } else {
2179 DEBUGOUT
2180 ("Copper PHY and Auto Neg has not completed.\n");
2181 }
2182 }
2183 return E1000_SUCCESS;
2883} 2184}
2884 2185
2885/****************************************************************************** 2186/**
2886 * Checks to see if the link status of the hardware has changed. 2187 * e1000_check_for_serdes_link_generic - Check for link (Serdes)
2188 * @hw: pointer to the HW structure
2887 * 2189 *
2888 * hw - Struct containing variables accessed by shared code 2190 * Checks for link up on the hardware. If link is not up and we have
2191 * a signal, then we need to force link up.
2192 */
2193static s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw)
2194{
2195 u32 rxcw;
2196 u32 ctrl;
2197 u32 status;
2198 s32 ret_val = E1000_SUCCESS;
2199
2200 DEBUGFUNC("e1000_check_for_serdes_link_generic");
2201
2202 ctrl = er32(CTRL);
2203 status = er32(STATUS);
2204 rxcw = er32(RXCW);
2205
2206 /*
2207 * If we don't have link (auto-negotiation failed or link partner
2208 * cannot auto-negotiate), and our link partner is not trying to
2209 * auto-negotiate with us (we are receiving idles or data),
2210 * we need to force link up. We also need to give auto-negotiation
2211 * time to complete.
2212 */
2213 /* (ctrl & E1000_CTRL_SWDPIN1) == 1 == have signal */
2214 if ((!(status & E1000_STATUS_LU)) && (!(rxcw & E1000_RXCW_C))) {
2215 if (hw->autoneg_failed == 0) {
2216 hw->autoneg_failed = 1;
2217 goto out;
2218 }
2219 DEBUGOUT("NOT RXing /C/, disable AutoNeg and force link.\n");
2220
2221 /* Disable auto-negotiation in the TXCW register */
2222 ew32(TXCW, (hw->txcw & ~E1000_TXCW_ANE));
2223
2224 /* Force link-up and also force full-duplex. */
2225 ctrl = er32(CTRL);
2226 ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD);
2227 ew32(CTRL, ctrl);
2228
2229 /* Configure Flow Control after forcing link up. */
2230 ret_val = e1000_config_fc_after_link_up(hw);
2231 if (ret_val) {
2232 DEBUGOUT("Error configuring flow control\n");
2233 goto out;
2234 }
2235 } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
2236 /*
2237 * If we are forcing link and we are receiving /C/ ordered
2238 * sets, re-enable auto-negotiation in the TXCW register
2239 * and disable forced link in the Device Control register
2240 * in an attempt to auto-negotiate with our link partner.
2241 */
2242 DEBUGOUT("RXing /C/, enable AutoNeg and stop forcing link.\n");
2243 ew32(TXCW, hw->txcw);
2244 ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));
2245
2246 hw->serdes_has_link = true;
2247 } else if (!(E1000_TXCW_ANE & er32(TXCW))) {
2248 /*
2249 * If we force link for non-auto-negotiation switch, check
2250 * link status based on MAC synchronization for internal
2251 * serdes media type.
2252 */
2253 /* SYNCH bit and IV bit are sticky. */
2254 udelay(10);
2255 rxcw = er32(RXCW);
2256 if (rxcw & E1000_RXCW_SYNCH) {
2257 if (!(rxcw & E1000_RXCW_IV)) {
2258 hw->serdes_has_link = true;
2259 DEBUGOUT("SERDES: Link up - forced.\n");
2260 }
2261 } else {
2262 hw->serdes_has_link = false;
2263 DEBUGOUT("SERDES: Link down - force failed.\n");
2264 }
2265 }
2266
2267 if (E1000_TXCW_ANE & er32(TXCW)) {
2268 status = er32(STATUS);
2269 if (status & E1000_STATUS_LU) {
2270 /* SYNCH bit and IV bit are sticky, so reread rxcw. */
2271 udelay(10);
2272 rxcw = er32(RXCW);
2273 if (rxcw & E1000_RXCW_SYNCH) {
2274 if (!(rxcw & E1000_RXCW_IV)) {
2275 hw->serdes_has_link = true;
2276 DEBUGOUT("SERDES: Link up - autoneg "
2277 "completed successfully.\n");
2278 } else {
2279 hw->serdes_has_link = false;
2280 DEBUGOUT("SERDES: Link down - invalid"
2281 "codewords detected in autoneg.\n");
2282 }
2283 } else {
2284 hw->serdes_has_link = false;
2285 DEBUGOUT("SERDES: Link down - no sync.\n");
2286 }
2287 } else {
2288 hw->serdes_has_link = false;
2289 DEBUGOUT("SERDES: Link down - autoneg failed\n");
2290 }
2291 }
2292
2293 out:
2294 return ret_val;
2295}
2296
2297/**
2298 * e1000_check_for_link
2299 * @hw: Struct containing variables accessed by shared code
2889 * 2300 *
2301 * Checks to see if the link status of the hardware has changed.
2890 * Called by any function that needs to check the link status of the adapter. 2302 * Called by any function that needs to check the link status of the adapter.
2891 *****************************************************************************/ 2303 */
2892s32 e1000_check_for_link(struct e1000_hw *hw) 2304s32 e1000_check_for_link(struct e1000_hw *hw)
2893{ 2305{
2894 u32 rxcw = 0; 2306 u32 rxcw = 0;
2895 u32 ctrl; 2307 u32 ctrl;
2896 u32 status; 2308 u32 status;
2897 u32 rctl; 2309 u32 rctl;
2898 u32 icr; 2310 u32 icr;
2899 u32 signal = 0; 2311 u32 signal = 0;
2900 s32 ret_val; 2312 s32 ret_val;
2901 u16 phy_data; 2313 u16 phy_data;
2902 2314
2903 DEBUGFUNC("e1000_check_for_link"); 2315 DEBUGFUNC("e1000_check_for_link");
2904 2316
2905 ctrl = er32(CTRL); 2317 ctrl = er32(CTRL);
2906 status = er32(STATUS); 2318 status = er32(STATUS);
2907 2319
2908 /* On adapters with a MAC newer than 82544, SW Defineable pin 1 will be 2320 /* On adapters with a MAC newer than 82544, SW Definable pin 1 will be
2909 * set when the optics detect a signal. On older adapters, it will be 2321 * set when the optics detect a signal. On older adapters, it will be
2910 * cleared when there is a signal. This applies to fiber media only. 2322 * cleared when there is a signal. This applies to fiber media only.
2911 */ 2323 */
2912 if ((hw->media_type == e1000_media_type_fiber) || 2324 if ((hw->media_type == e1000_media_type_fiber) ||
2913 (hw->media_type == e1000_media_type_internal_serdes)) { 2325 (hw->media_type == e1000_media_type_internal_serdes)) {
2914 rxcw = er32(RXCW); 2326 rxcw = er32(RXCW);
2915 2327
2916 if (hw->media_type == e1000_media_type_fiber) { 2328 if (hw->media_type == e1000_media_type_fiber) {
2917 signal = (hw->mac_type > e1000_82544) ? E1000_CTRL_SWDPIN1 : 0; 2329 signal =
2918 if (status & E1000_STATUS_LU) 2330 (hw->mac_type >
2919 hw->get_link_status = false; 2331 e1000_82544) ? E1000_CTRL_SWDPIN1 : 0;
2920 } 2332 if (status & E1000_STATUS_LU)
2921 } 2333 hw->get_link_status = false;
2922 2334 }
2923 /* If we have a copper PHY then we only want to go out to the PHY 2335 }
2924 * registers to see if Auto-Neg has completed and/or if our link 2336
2925 * status has changed. The get_link_status flag will be set if we 2337 /* If we have a copper PHY then we only want to go out to the PHY
2926 * receive a Link Status Change interrupt or we have Rx Sequence 2338 * registers to see if Auto-Neg has completed and/or if our link
2927 * Errors. 2339 * status has changed. The get_link_status flag will be set if we
2928 */ 2340 * receive a Link Status Change interrupt or we have Rx Sequence
2929 if ((hw->media_type == e1000_media_type_copper) && hw->get_link_status) { 2341 * Errors.
2930 /* First we want to see if the MII Status Register reports 2342 */
2931 * link. If so, then we want to get the current speed/duplex 2343 if ((hw->media_type == e1000_media_type_copper) && hw->get_link_status) {
2932 * of the PHY. 2344 /* First we want to see if the MII Status Register reports
2933 * Read the register twice since the link bit is sticky. 2345 * link. If so, then we want to get the current speed/duplex
2934 */ 2346 * of the PHY.
2935 ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); 2347 * Read the register twice since the link bit is sticky.
2936 if (ret_val) 2348 */
2937 return ret_val; 2349 ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
2938 ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); 2350 if (ret_val)
2939 if (ret_val) 2351 return ret_val;
2940 return ret_val; 2352 ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
2941 2353 if (ret_val)
2942 if (phy_data & MII_SR_LINK_STATUS) { 2354 return ret_val;
2943 hw->get_link_status = false; 2355
2944 /* Check if there was DownShift, must be checked immediately after 2356 if (phy_data & MII_SR_LINK_STATUS) {
2945 * link-up */ 2357 hw->get_link_status = false;
2946 e1000_check_downshift(hw); 2358 /* Check if there was DownShift, must be checked immediately after
2947 2359 * link-up */
2948 /* If we are on 82544 or 82543 silicon and speed/duplex 2360 e1000_check_downshift(hw);
2949 * are forced to 10H or 10F, then we will implement the polarity 2361
2950 * reversal workaround. We disable interrupts first, and upon 2362 /* If we are on 82544 or 82543 silicon and speed/duplex
2951 * returning, place the devices interrupt state to its previous 2363 * are forced to 10H or 10F, then we will implement the polarity
2952 * value except for the link status change interrupt which will 2364 * reversal workaround. We disable interrupts first, and upon
2953 * happen due to the execution of this workaround. 2365 * returning, place the devices interrupt state to its previous
2954 */ 2366 * value except for the link status change interrupt which will
2955 2367 * happen due to the execution of this workaround.
2956 if ((hw->mac_type == e1000_82544 || hw->mac_type == e1000_82543) && 2368 */
2957 (!hw->autoneg) && 2369
2958 (hw->forced_speed_duplex == e1000_10_full || 2370 if ((hw->mac_type == e1000_82544
2959 hw->forced_speed_duplex == e1000_10_half)) { 2371 || hw->mac_type == e1000_82543) && (!hw->autoneg)
2960 ew32(IMC, 0xffffffff); 2372 && (hw->forced_speed_duplex == e1000_10_full
2961 ret_val = e1000_polarity_reversal_workaround(hw); 2373 || hw->forced_speed_duplex == e1000_10_half)) {
2962 icr = er32(ICR); 2374 ew32(IMC, 0xffffffff);
2963 ew32(ICS, (icr & ~E1000_ICS_LSC)); 2375 ret_val =
2964 ew32(IMS, IMS_ENABLE_MASK); 2376 e1000_polarity_reversal_workaround(hw);
2965 } 2377 icr = er32(ICR);
2966 2378 ew32(ICS, (icr & ~E1000_ICS_LSC));
2967 } else { 2379 ew32(IMS, IMS_ENABLE_MASK);
2968 /* No link detected */ 2380 }
2969 e1000_config_dsp_after_link_change(hw, false); 2381
2970 return 0; 2382 } else {
2971 } 2383 /* No link detected */
2972 2384 e1000_config_dsp_after_link_change(hw, false);
2973 /* If we are forcing speed/duplex, then we simply return since 2385 return 0;
2974 * we have already determined whether we have link or not. 2386 }
2975 */ 2387
2976 if (!hw->autoneg) return -E1000_ERR_CONFIG; 2388 /* If we are forcing speed/duplex, then we simply return since
2977 2389 * we have already determined whether we have link or not.
2978 /* optimize the dsp settings for the igp phy */ 2390 */
2979 e1000_config_dsp_after_link_change(hw, true); 2391 if (!hw->autoneg)
2980 2392 return -E1000_ERR_CONFIG;
2981 /* We have a M88E1000 PHY and Auto-Neg is enabled. If we 2393
2982 * have Si on board that is 82544 or newer, Auto 2394 /* optimize the dsp settings for the igp phy */
2983 * Speed Detection takes care of MAC speed/duplex 2395 e1000_config_dsp_after_link_change(hw, true);
2984 * configuration. So we only need to configure Collision 2396
2985 * Distance in the MAC. Otherwise, we need to force 2397 /* We have a M88E1000 PHY and Auto-Neg is enabled. If we
2986 * speed/duplex on the MAC to the current PHY speed/duplex 2398 * have Si on board that is 82544 or newer, Auto
2987 * settings. 2399 * Speed Detection takes care of MAC speed/duplex
2988 */ 2400 * configuration. So we only need to configure Collision
2989 if (hw->mac_type >= e1000_82544) 2401 * Distance in the MAC. Otherwise, we need to force
2990 e1000_config_collision_dist(hw); 2402 * speed/duplex on the MAC to the current PHY speed/duplex
2991 else { 2403 * settings.
2992 ret_val = e1000_config_mac_to_phy(hw); 2404 */
2993 if (ret_val) { 2405 if (hw->mac_type >= e1000_82544)
2994 DEBUGOUT("Error configuring MAC to PHY settings\n"); 2406 e1000_config_collision_dist(hw);
2995 return ret_val; 2407 else {
2996 } 2408 ret_val = e1000_config_mac_to_phy(hw);
2997 } 2409 if (ret_val) {
2998 2410 DEBUGOUT
2999 /* Configure Flow Control now that Auto-Neg has completed. First, we 2411 ("Error configuring MAC to PHY settings\n");
3000 * need to restore the desired flow control settings because we may 2412 return ret_val;
3001 * have had to re-autoneg with a different link partner. 2413 }
3002 */ 2414 }
3003 ret_val = e1000_config_fc_after_link_up(hw); 2415
3004 if (ret_val) { 2416 /* Configure Flow Control now that Auto-Neg has completed. First, we
3005 DEBUGOUT("Error configuring flow control\n"); 2417 * need to restore the desired flow control settings because we may
3006 return ret_val; 2418 * have had to re-autoneg with a different link partner.
3007 } 2419 */
3008 2420 ret_val = e1000_config_fc_after_link_up(hw);
3009 /* At this point we know that we are on copper and we have 2421 if (ret_val) {
3010 * auto-negotiated link. These are conditions for checking the link 2422 DEBUGOUT("Error configuring flow control\n");
3011 * partner capability register. We use the link speed to determine if 2423 return ret_val;
3012 * TBI compatibility needs to be turned on or off. If the link is not 2424 }
3013 * at gigabit speed, then TBI compatibility is not needed. If we are 2425
3014 * at gigabit speed, we turn on TBI compatibility. 2426 /* At this point we know that we are on copper and we have
3015 */ 2427 * auto-negotiated link. These are conditions for checking the link
3016 if (hw->tbi_compatibility_en) { 2428 * partner capability register. We use the link speed to determine if
3017 u16 speed, duplex; 2429 * TBI compatibility needs to be turned on or off. If the link is not
3018 ret_val = e1000_get_speed_and_duplex(hw, &speed, &duplex); 2430 * at gigabit speed, then TBI compatibility is not needed. If we are
3019 if (ret_val) { 2431 * at gigabit speed, we turn on TBI compatibility.
3020 DEBUGOUT("Error getting link speed and duplex\n"); 2432 */
3021 return ret_val; 2433 if (hw->tbi_compatibility_en) {
3022 } 2434 u16 speed, duplex;
3023 if (speed != SPEED_1000) { 2435 ret_val =
3024 /* If link speed is not set to gigabit speed, we do not need 2436 e1000_get_speed_and_duplex(hw, &speed, &duplex);
3025 * to enable TBI compatibility. 2437 if (ret_val) {
3026 */ 2438 DEBUGOUT
3027 if (hw->tbi_compatibility_on) { 2439 ("Error getting link speed and duplex\n");
3028 /* If we previously were in the mode, turn it off. */ 2440 return ret_val;
3029 rctl = er32(RCTL); 2441 }
3030 rctl &= ~E1000_RCTL_SBP; 2442 if (speed != SPEED_1000) {
3031 ew32(RCTL, rctl); 2443 /* If link speed is not set to gigabit speed, we do not need
3032 hw->tbi_compatibility_on = false; 2444 * to enable TBI compatibility.
3033 } 2445 */
3034 } else { 2446 if (hw->tbi_compatibility_on) {
3035 /* If TBI compatibility is was previously off, turn it on. For 2447 /* If we previously were in the mode, turn it off. */
3036 * compatibility with a TBI link partner, we will store bad 2448 rctl = er32(RCTL);
3037 * packets. Some frames have an additional byte on the end and 2449 rctl &= ~E1000_RCTL_SBP;
3038 * will look like CRC errors to the hardware. 2450 ew32(RCTL, rctl);
3039 */ 2451 hw->tbi_compatibility_on = false;
3040 if (!hw->tbi_compatibility_on) { 2452 }
3041 hw->tbi_compatibility_on = true; 2453 } else {
3042 rctl = er32(RCTL); 2454 /* If TBI compatibility is was previously off, turn it on. For
3043 rctl |= E1000_RCTL_SBP; 2455 * compatibility with a TBI link partner, we will store bad
3044 ew32(RCTL, rctl); 2456 * packets. Some frames have an additional byte on the end and
3045 } 2457 * will look like CRC errors to to the hardware.
3046 } 2458 */
3047 } 2459 if (!hw->tbi_compatibility_on) {
3048 } 2460 hw->tbi_compatibility_on = true;
3049 /* If we don't have link (auto-negotiation failed or link partner cannot 2461 rctl = er32(RCTL);
3050 * auto-negotiate), the cable is plugged in (we have signal), and our 2462 rctl |= E1000_RCTL_SBP;
3051 * link partner is not trying to auto-negotiate with us (we are receiving 2463 ew32(RCTL, rctl);
3052 * idles or data), we need to force link up. We also need to give 2464 }
3053 * auto-negotiation time to complete, in case the cable was just plugged 2465 }
3054 * in. The autoneg_failed flag does this. 2466 }
3055 */ 2467 }
3056 else if ((((hw->media_type == e1000_media_type_fiber) && 2468
3057 ((ctrl & E1000_CTRL_SWDPIN1) == signal)) || 2469 if ((hw->media_type == e1000_media_type_fiber) ||
3058 (hw->media_type == e1000_media_type_internal_serdes)) && 2470 (hw->media_type == e1000_media_type_internal_serdes))
3059 (!(status & E1000_STATUS_LU)) && 2471 e1000_check_for_serdes_link_generic(hw);
3060 (!(rxcw & E1000_RXCW_C))) { 2472
3061 if (hw->autoneg_failed == 0) { 2473 return E1000_SUCCESS;
3062 hw->autoneg_failed = 1;
3063 return 0;
3064 }
3065 DEBUGOUT("NOT RXing /C/, disable AutoNeg and force link.\n");
3066
3067 /* Disable auto-negotiation in the TXCW register */
3068 ew32(TXCW, (hw->txcw & ~E1000_TXCW_ANE));
3069
3070 /* Force link-up and also force full-duplex. */
3071 ctrl = er32(CTRL);
3072 ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD);
3073 ew32(CTRL, ctrl);
3074
3075 /* Configure Flow Control after forcing link up. */
3076 ret_val = e1000_config_fc_after_link_up(hw);
3077 if (ret_val) {
3078 DEBUGOUT("Error configuring flow control\n");
3079 return ret_val;
3080 }
3081 }
3082 /* If we are forcing link and we are receiving /C/ ordered sets, re-enable
3083 * auto-negotiation in the TXCW register and disable forced link in the
3084 * Device Control register in an attempt to auto-negotiate with our link
3085 * partner.
3086 */
3087 else if (((hw->media_type == e1000_media_type_fiber) ||
3088 (hw->media_type == e1000_media_type_internal_serdes)) &&
3089 (ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
3090 DEBUGOUT("RXing /C/, enable AutoNeg and stop forcing link.\n");
3091 ew32(TXCW, hw->txcw);
3092 ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));
3093
3094 hw->serdes_link_down = false;
3095 }
3096 /* If we force link for non-auto-negotiation switch, check link status
3097 * based on MAC synchronization for internal serdes media type.
3098 */
3099 else if ((hw->media_type == e1000_media_type_internal_serdes) &&
3100 !(E1000_TXCW_ANE & er32(TXCW))) {
3101 /* SYNCH bit and IV bit are sticky. */
3102 udelay(10);
3103 if (E1000_RXCW_SYNCH & er32(RXCW)) {
3104 if (!(rxcw & E1000_RXCW_IV)) {
3105 hw->serdes_link_down = false;
3106 DEBUGOUT("SERDES: Link is up.\n");
3107 }
3108 } else {
3109 hw->serdes_link_down = true;
3110 DEBUGOUT("SERDES: Link is down.\n");
3111 }
3112 }
3113 if ((hw->media_type == e1000_media_type_internal_serdes) &&
3114 (E1000_TXCW_ANE & er32(TXCW))) {
3115 hw->serdes_link_down = !(E1000_STATUS_LU & er32(STATUS));
3116 }
3117 return E1000_SUCCESS;
3118} 2474}
3119 2475
3120/****************************************************************************** 2476/**
2477 * e1000_get_speed_and_duplex
2478 * @hw: Struct containing variables accessed by shared code
2479 * @speed: Speed of the connection
2480 * @duplex: Duplex setting of the connection
2481
3121 * Detects the current speed and duplex settings of the hardware. 2482 * Detects the current speed and duplex settings of the hardware.
3122 * 2483 */
3123 * hw - Struct containing variables accessed by shared code
3124 * speed - Speed of the connection
3125 * duplex - Duplex setting of the connection
3126 *****************************************************************************/
3127s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed, u16 *duplex) 2484s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed, u16 *duplex)
3128{ 2485{
3129 u32 status; 2486 u32 status;
3130 s32 ret_val; 2487 s32 ret_val;
3131 u16 phy_data; 2488 u16 phy_data;
3132 2489
3133 DEBUGFUNC("e1000_get_speed_and_duplex"); 2490 DEBUGFUNC("e1000_get_speed_and_duplex");
3134 2491
3135 if (hw->mac_type >= e1000_82543) { 2492 if (hw->mac_type >= e1000_82543) {
3136 status = er32(STATUS); 2493 status = er32(STATUS);
3137 if (status & E1000_STATUS_SPEED_1000) { 2494 if (status & E1000_STATUS_SPEED_1000) {
3138 *speed = SPEED_1000; 2495 *speed = SPEED_1000;
3139 DEBUGOUT("1000 Mbs, "); 2496 DEBUGOUT("1000 Mbs, ");
3140 } else if (status & E1000_STATUS_SPEED_100) { 2497 } else if (status & E1000_STATUS_SPEED_100) {
3141 *speed = SPEED_100; 2498 *speed = SPEED_100;
3142 DEBUGOUT("100 Mbs, "); 2499 DEBUGOUT("100 Mbs, ");
3143 } else { 2500 } else {
3144 *speed = SPEED_10; 2501 *speed = SPEED_10;
3145 DEBUGOUT("10 Mbs, "); 2502 DEBUGOUT("10 Mbs, ");
3146 } 2503 }
3147 2504
3148 if (status & E1000_STATUS_FD) { 2505 if (status & E1000_STATUS_FD) {
3149 *duplex = FULL_DUPLEX; 2506 *duplex = FULL_DUPLEX;
3150 DEBUGOUT("Full Duplex\n"); 2507 DEBUGOUT("Full Duplex\n");
3151 } else { 2508 } else {
3152 *duplex = HALF_DUPLEX; 2509 *duplex = HALF_DUPLEX;
3153 DEBUGOUT(" Half Duplex\n"); 2510 DEBUGOUT(" Half Duplex\n");
3154 } 2511 }
3155 } else { 2512 } else {
3156 DEBUGOUT("1000 Mbs, Full Duplex\n"); 2513 DEBUGOUT("1000 Mbs, Full Duplex\n");
3157 *speed = SPEED_1000; 2514 *speed = SPEED_1000;
3158 *duplex = FULL_DUPLEX; 2515 *duplex = FULL_DUPLEX;
3159 } 2516 }
3160 2517
3161 /* IGP01 PHY may advertise full duplex operation after speed downgrade even 2518 /* IGP01 PHY may advertise full duplex operation after speed downgrade even
3162 * if it is operating at half duplex. Here we set the duplex settings to 2519 * if it is operating at half duplex. Here we set the duplex settings to
3163 * match the duplex in the link partner's capabilities. 2520 * match the duplex in the link partner's capabilities.
3164 */ 2521 */
3165 if (hw->phy_type == e1000_phy_igp && hw->speed_downgraded) { 2522 if (hw->phy_type == e1000_phy_igp && hw->speed_downgraded) {
3166 ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_EXP, &phy_data); 2523 ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_EXP, &phy_data);
3167 if (ret_val) 2524 if (ret_val)
3168 return ret_val; 2525 return ret_val;
3169 2526
3170 if (!(phy_data & NWAY_ER_LP_NWAY_CAPS)) 2527 if (!(phy_data & NWAY_ER_LP_NWAY_CAPS))
3171 *duplex = HALF_DUPLEX; 2528 *duplex = HALF_DUPLEX;
3172 else { 2529 else {
3173 ret_val = e1000_read_phy_reg(hw, PHY_LP_ABILITY, &phy_data); 2530 ret_val =
3174 if (ret_val) 2531 e1000_read_phy_reg(hw, PHY_LP_ABILITY, &phy_data);
3175 return ret_val; 2532 if (ret_val)
3176 if ((*speed == SPEED_100 && !(phy_data & NWAY_LPAR_100TX_FD_CAPS)) || 2533 return ret_val;
3177 (*speed == SPEED_10 && !(phy_data & NWAY_LPAR_10T_FD_CAPS))) 2534 if ((*speed == SPEED_100
3178 *duplex = HALF_DUPLEX; 2535 && !(phy_data & NWAY_LPAR_100TX_FD_CAPS))
3179 } 2536 || (*speed == SPEED_10
3180 } 2537 && !(phy_data & NWAY_LPAR_10T_FD_CAPS)))
3181 2538 *duplex = HALF_DUPLEX;
3182 if ((hw->mac_type == e1000_80003es2lan) && 2539 }
3183 (hw->media_type == e1000_media_type_copper)) { 2540 }
3184 if (*speed == SPEED_1000) 2541
3185 ret_val = e1000_configure_kmrn_for_1000(hw); 2542 return E1000_SUCCESS;
3186 else
3187 ret_val = e1000_configure_kmrn_for_10_100(hw, *duplex);
3188 if (ret_val)
3189 return ret_val;
3190 }
3191
3192 if ((hw->phy_type == e1000_phy_igp_3) && (*speed == SPEED_1000)) {
3193 ret_val = e1000_kumeran_lock_loss_workaround(hw);
3194 if (ret_val)
3195 return ret_val;
3196 }
3197
3198 return E1000_SUCCESS;
3199} 2543}
3200 2544
3201/****************************************************************************** 2545/**
3202* Blocks until autoneg completes or times out (~4.5 seconds) 2546 * e1000_wait_autoneg
3203* 2547 * @hw: Struct containing variables accessed by shared code
3204* hw - Struct containing variables accessed by shared code 2548 *
3205******************************************************************************/ 2549 * Blocks until autoneg completes or times out (~4.5 seconds)
2550 */
3206static s32 e1000_wait_autoneg(struct e1000_hw *hw) 2551static s32 e1000_wait_autoneg(struct e1000_hw *hw)
3207{ 2552{
3208 s32 ret_val; 2553 s32 ret_val;
3209 u16 i; 2554 u16 i;
3210 u16 phy_data; 2555 u16 phy_data;
3211 2556
3212 DEBUGFUNC("e1000_wait_autoneg"); 2557 DEBUGFUNC("e1000_wait_autoneg");
3213 DEBUGOUT("Waiting for Auto-Neg to complete.\n"); 2558 DEBUGOUT("Waiting for Auto-Neg to complete.\n");
3214 2559
3215 /* We will wait for autoneg to complete or 4.5 seconds to expire. */ 2560 /* We will wait for autoneg to complete or 4.5 seconds to expire. */
3216 for (i = PHY_AUTO_NEG_TIME; i > 0; i--) { 2561 for (i = PHY_AUTO_NEG_TIME; i > 0; i--) {
3217 /* Read the MII Status Register and wait for Auto-Neg 2562 /* Read the MII Status Register and wait for Auto-Neg
3218 * Complete bit to be set. 2563 * Complete bit to be set.
3219 */ 2564 */
3220 ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); 2565 ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
3221 if (ret_val) 2566 if (ret_val)
3222 return ret_val; 2567 return ret_val;
3223 ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); 2568 ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
3224 if (ret_val) 2569 if (ret_val)
3225 return ret_val; 2570 return ret_val;
3226 if (phy_data & MII_SR_AUTONEG_COMPLETE) { 2571 if (phy_data & MII_SR_AUTONEG_COMPLETE) {
3227 return E1000_SUCCESS; 2572 return E1000_SUCCESS;
3228 } 2573 }
3229 msleep(100); 2574 msleep(100);
3230 } 2575 }
3231 return E1000_SUCCESS; 2576 return E1000_SUCCESS;
3232} 2577}
3233 2578
3234/****************************************************************************** 2579/**
3235* Raises the Management Data Clock 2580 * e1000_raise_mdi_clk - Raises the Management Data Clock
3236* 2581 * @hw: Struct containing variables accessed by shared code
3237* hw - Struct containing variables accessed by shared code 2582 * @ctrl: Device control register's current value
3238* ctrl - Device control register's current value 2583 */
3239******************************************************************************/
3240static void e1000_raise_mdi_clk(struct e1000_hw *hw, u32 *ctrl) 2584static void e1000_raise_mdi_clk(struct e1000_hw *hw, u32 *ctrl)
3241{ 2585{
3242 /* Raise the clock input to the Management Data Clock (by setting the MDC 2586 /* Raise the clock input to the Management Data Clock (by setting the MDC
3243 * bit), and then delay 10 microseconds. 2587 * bit), and then delay 10 microseconds.
3244 */ 2588 */
3245 ew32(CTRL, (*ctrl | E1000_CTRL_MDC)); 2589 ew32(CTRL, (*ctrl | E1000_CTRL_MDC));
3246 E1000_WRITE_FLUSH(); 2590 E1000_WRITE_FLUSH();
3247 udelay(10); 2591 udelay(10);
3248} 2592}
3249 2593
3250/****************************************************************************** 2594/**
3251* Lowers the Management Data Clock 2595 * e1000_lower_mdi_clk - Lowers the Management Data Clock
3252* 2596 * @hw: Struct containing variables accessed by shared code
3253* hw - Struct containing variables accessed by shared code 2597 * @ctrl: Device control register's current value
3254* ctrl - Device control register's current value 2598 */
3255******************************************************************************/
3256static void e1000_lower_mdi_clk(struct e1000_hw *hw, u32 *ctrl) 2599static void e1000_lower_mdi_clk(struct e1000_hw *hw, u32 *ctrl)
3257{ 2600{
3258 /* Lower the clock input to the Management Data Clock (by clearing the MDC 2601 /* Lower the clock input to the Management Data Clock (by clearing the MDC
3259 * bit), and then delay 10 microseconds. 2602 * bit), and then delay 10 microseconds.
3260 */ 2603 */
3261 ew32(CTRL, (*ctrl & ~E1000_CTRL_MDC)); 2604 ew32(CTRL, (*ctrl & ~E1000_CTRL_MDC));
3262 E1000_WRITE_FLUSH(); 2605 E1000_WRITE_FLUSH();
3263 udelay(10); 2606 udelay(10);
3264} 2607}
3265 2608
3266/****************************************************************************** 2609/**
3267* Shifts data bits out to the PHY 2610 * e1000_shift_out_mdi_bits - Shifts data bits out to the PHY
3268* 2611 * @hw: Struct containing variables accessed by shared code
3269* hw - Struct containing variables accessed by shared code 2612 * @data: Data to send out to the PHY
3270* data - Data to send out to the PHY 2613 * @count: Number of bits to shift out
3271* count - Number of bits to shift out 2614 *
3272* 2615 * Bits are shifted out in MSB to LSB order.
3273* Bits are shifted out in MSB to LSB order. 2616 */
3274******************************************************************************/
3275static void e1000_shift_out_mdi_bits(struct e1000_hw *hw, u32 data, u16 count) 2617static void e1000_shift_out_mdi_bits(struct e1000_hw *hw, u32 data, u16 count)
3276{ 2618{
3277 u32 ctrl; 2619 u32 ctrl;
3278 u32 mask; 2620 u32 mask;
3279
3280 /* We need to shift "count" number of bits out to the PHY. So, the value
3281 * in the "data" parameter will be shifted out to the PHY one bit at a
3282 * time. In order to do this, "data" must be broken down into bits.
3283 */
3284 mask = 0x01;
3285 mask <<= (count - 1);
3286
3287 ctrl = er32(CTRL);
3288
3289 /* Set MDIO_DIR and MDC_DIR direction bits to be used as output pins. */
3290 ctrl |= (E1000_CTRL_MDIO_DIR | E1000_CTRL_MDC_DIR);
3291
3292 while (mask) {
3293 /* A "1" is shifted out to the PHY by setting the MDIO bit to "1" and
3294 * then raising and lowering the Management Data Clock. A "0" is
3295 * shifted out to the PHY by setting the MDIO bit to "0" and then
3296 * raising and lowering the clock.
3297 */
3298 if (data & mask)
3299 ctrl |= E1000_CTRL_MDIO;
3300 else
3301 ctrl &= ~E1000_CTRL_MDIO;
3302
3303 ew32(CTRL, ctrl);
3304 E1000_WRITE_FLUSH();
3305
3306 udelay(10);
3307
3308 e1000_raise_mdi_clk(hw, &ctrl);
3309 e1000_lower_mdi_clk(hw, &ctrl);
3310
3311 mask = mask >> 1;
3312 }
3313}
3314
3315/******************************************************************************
3316* Shifts data bits in from the PHY
3317*
3318* hw - Struct containing variables accessed by shared code
3319*
3320* Bits are shifted in in MSB to LSB order.
3321******************************************************************************/
3322static u16 e1000_shift_in_mdi_bits(struct e1000_hw *hw)
3323{
3324 u32 ctrl;
3325 u16 data = 0;
3326 u8 i;
3327
3328 /* In order to read a register from the PHY, we need to shift in a total
3329 * of 18 bits from the PHY. The first two bit (turnaround) times are used
3330 * to avoid contention on the MDIO pin when a read operation is performed.
3331 * These two bits are ignored by us and thrown away. Bits are "shifted in"
3332 * by raising the input to the Management Data Clock (setting the MDC bit),
3333 * and then reading the value of the MDIO bit.
3334 */
3335 ctrl = er32(CTRL);
3336
3337 /* Clear MDIO_DIR (SWDPIO1) to indicate this bit is to be used as input. */
3338 ctrl &= ~E1000_CTRL_MDIO_DIR;
3339 ctrl &= ~E1000_CTRL_MDIO;
3340
3341 ew32(CTRL, ctrl);
3342 E1000_WRITE_FLUSH();
3343
3344 /* Raise and Lower the clock before reading in the data. This accounts for
3345 * the turnaround bits. The first clock occurred when we clocked out the
3346 * last bit of the Register Address.
3347 */
3348 e1000_raise_mdi_clk(hw, &ctrl);
3349 e1000_lower_mdi_clk(hw, &ctrl);
3350
3351 for (data = 0, i = 0; i < 16; i++) {
3352 data = data << 1;
3353 e1000_raise_mdi_clk(hw, &ctrl);
3354 ctrl = er32(CTRL);
3355 /* Check to see if we shifted in a "1". */
3356 if (ctrl & E1000_CTRL_MDIO)
3357 data |= 1;
3358 e1000_lower_mdi_clk(hw, &ctrl);
3359 }
3360
3361 e1000_raise_mdi_clk(hw, &ctrl);
3362 e1000_lower_mdi_clk(hw, &ctrl);
3363
3364 return data;
3365}
3366
3367static s32 e1000_swfw_sync_acquire(struct e1000_hw *hw, u16 mask)
3368{
3369 u32 swfw_sync = 0;
3370 u32 swmask = mask;
3371 u32 fwmask = mask << 16;
3372 s32 timeout = 200;
3373 2621
3374 DEBUGFUNC("e1000_swfw_sync_acquire"); 2622 /* We need to shift "count" number of bits out to the PHY. So, the value
3375 2623 * in the "data" parameter will be shifted out to the PHY one bit at a
3376 if (hw->swfwhw_semaphore_present) 2624 * time. In order to do this, "data" must be broken down into bits.
3377 return e1000_get_software_flag(hw); 2625 */
2626 mask = 0x01;
2627 mask <<= (count - 1);
3378 2628
3379 if (!hw->swfw_sync_present) 2629 ctrl = er32(CTRL);
3380 return e1000_get_hw_eeprom_semaphore(hw);
3381 2630
3382 while (timeout) { 2631 /* Set MDIO_DIR and MDC_DIR direction bits to be used as output pins. */
3383 if (e1000_get_hw_eeprom_semaphore(hw)) 2632 ctrl |= (E1000_CTRL_MDIO_DIR | E1000_CTRL_MDC_DIR);
3384 return -E1000_ERR_SWFW_SYNC;
3385 2633
3386 swfw_sync = er32(SW_FW_SYNC); 2634 while (mask) {
3387 if (!(swfw_sync & (fwmask | swmask))) { 2635 /* A "1" is shifted out to the PHY by setting the MDIO bit to "1" and
3388 break; 2636 * then raising and lowering the Management Data Clock. A "0" is
3389 } 2637 * shifted out to the PHY by setting the MDIO bit to "0" and then
2638 * raising and lowering the clock.
2639 */
2640 if (data & mask)
2641 ctrl |= E1000_CTRL_MDIO;
2642 else
2643 ctrl &= ~E1000_CTRL_MDIO;
3390 2644
3391 /* firmware currently using resource (fwmask) */ 2645 ew32(CTRL, ctrl);
3392 /* or other software thread currently using resource (swmask) */ 2646 E1000_WRITE_FLUSH();
3393 e1000_put_hw_eeprom_semaphore(hw);
3394 mdelay(5);
3395 timeout--;
3396 }
3397 2647
3398 if (!timeout) { 2648 udelay(10);
3399 DEBUGOUT("Driver can't access resource, SW_FW_SYNC timeout.\n");
3400 return -E1000_ERR_SWFW_SYNC;
3401 }
3402 2649
3403 swfw_sync |= swmask; 2650 e1000_raise_mdi_clk(hw, &ctrl);
3404 ew32(SW_FW_SYNC, swfw_sync); 2651 e1000_lower_mdi_clk(hw, &ctrl);
3405 2652
3406 e1000_put_hw_eeprom_semaphore(hw); 2653 mask = mask >> 1;
3407 return E1000_SUCCESS; 2654 }
3408} 2655}
3409 2656
3410static void e1000_swfw_sync_release(struct e1000_hw *hw, u16 mask) 2657/**
2658 * e1000_shift_in_mdi_bits - Shifts data bits in from the PHY
2659 * @hw: Struct containing variables accessed by shared code
2660 *
2661 * Bits are shifted in in MSB to LSB order.
2662 */
2663static u16 e1000_shift_in_mdi_bits(struct e1000_hw *hw)
3411{ 2664{
3412 u32 swfw_sync; 2665 u32 ctrl;
3413 u32 swmask = mask; 2666 u16 data = 0;
2667 u8 i;
3414 2668
3415 DEBUGFUNC("e1000_swfw_sync_release"); 2669 /* In order to read a register from the PHY, we need to shift in a total
2670 * of 18 bits from the PHY. The first two bit (turnaround) times are used
2671 * to avoid contention on the MDIO pin when a read operation is performed.
2672 * These two bits are ignored by us and thrown away. Bits are "shifted in"
2673 * by raising the input to the Management Data Clock (setting the MDC bit),
2674 * and then reading the value of the MDIO bit.
2675 */
2676 ctrl = er32(CTRL);
3416 2677
3417 if (hw->swfwhw_semaphore_present) { 2678 /* Clear MDIO_DIR (SWDPIO1) to indicate this bit is to be used as input. */
3418 e1000_release_software_flag(hw); 2679 ctrl &= ~E1000_CTRL_MDIO_DIR;
3419 return; 2680 ctrl &= ~E1000_CTRL_MDIO;
3420 }
3421 2681
3422 if (!hw->swfw_sync_present) { 2682 ew32(CTRL, ctrl);
3423 e1000_put_hw_eeprom_semaphore(hw); 2683 E1000_WRITE_FLUSH();
3424 return;
3425 }
3426 2684
3427 /* if (e1000_get_hw_eeprom_semaphore(hw)) 2685 /* Raise and Lower the clock before reading in the data. This accounts for
3428 * return -E1000_ERR_SWFW_SYNC; */ 2686 * the turnaround bits. The first clock occurred when we clocked out the
3429 while (e1000_get_hw_eeprom_semaphore(hw) != E1000_SUCCESS); 2687 * last bit of the Register Address.
3430 /* empty */ 2688 */
2689 e1000_raise_mdi_clk(hw, &ctrl);
2690 e1000_lower_mdi_clk(hw, &ctrl);
2691
2692 for (data = 0, i = 0; i < 16; i++) {
2693 data = data << 1;
2694 e1000_raise_mdi_clk(hw, &ctrl);
2695 ctrl = er32(CTRL);
2696 /* Check to see if we shifted in a "1". */
2697 if (ctrl & E1000_CTRL_MDIO)
2698 data |= 1;
2699 e1000_lower_mdi_clk(hw, &ctrl);
2700 }
3431 2701
3432 swfw_sync = er32(SW_FW_SYNC); 2702 e1000_raise_mdi_clk(hw, &ctrl);
3433 swfw_sync &= ~swmask; 2703 e1000_lower_mdi_clk(hw, &ctrl);
3434 ew32(SW_FW_SYNC, swfw_sync);
3435 2704
3436 e1000_put_hw_eeprom_semaphore(hw); 2705 return data;
3437} 2706}
3438 2707
3439/***************************************************************************** 2708
3440* Reads the value from a PHY register, if the value is on a specific non zero 2709/**
3441* page, sets the page first. 2710 * e1000_read_phy_reg - read a phy register
3442* hw - Struct containing variables accessed by shared code 2711 * @hw: Struct containing variables accessed by shared code
3443* reg_addr - address of the PHY register to read 2712 * @reg_addr: address of the PHY register to read
3444******************************************************************************/ 2713 *
2714 * Reads the value from a PHY register, if the value is on a specific non zero
2715 * page, sets the page first.
2716 */
3445s32 e1000_read_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 *phy_data) 2717s32 e1000_read_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 *phy_data)
3446{ 2718{
3447 u32 ret_val; 2719 u32 ret_val;
3448 u16 swfw; 2720
3449 2721 DEBUGFUNC("e1000_read_phy_reg");
3450 DEBUGFUNC("e1000_read_phy_reg"); 2722
3451 2723 if ((hw->phy_type == e1000_phy_igp) &&
3452 if ((hw->mac_type == e1000_80003es2lan) && 2724 (reg_addr > MAX_PHY_MULTI_PAGE_REG)) {
3453 (er32(STATUS) & E1000_STATUS_FUNC_1)) { 2725 ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT,
3454 swfw = E1000_SWFW_PHY1_SM; 2726 (u16) reg_addr);
3455 } else { 2727 if (ret_val)
3456 swfw = E1000_SWFW_PHY0_SM; 2728 return ret_val;
3457 } 2729 }
3458 if (e1000_swfw_sync_acquire(hw, swfw)) 2730
3459 return -E1000_ERR_SWFW_SYNC; 2731 ret_val = e1000_read_phy_reg_ex(hw, MAX_PHY_REG_ADDRESS & reg_addr,
3460 2732 phy_data);
3461 if ((hw->phy_type == e1000_phy_igp || 2733
3462 hw->phy_type == e1000_phy_igp_3 || 2734 return ret_val;
3463 hw->phy_type == e1000_phy_igp_2) &&
3464 (reg_addr > MAX_PHY_MULTI_PAGE_REG)) {
3465 ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT,
3466 (u16)reg_addr);
3467 if (ret_val) {
3468 e1000_swfw_sync_release(hw, swfw);
3469 return ret_val;
3470 }
3471 } else if (hw->phy_type == e1000_phy_gg82563) {
3472 if (((reg_addr & MAX_PHY_REG_ADDRESS) > MAX_PHY_MULTI_PAGE_REG) ||
3473 (hw->mac_type == e1000_80003es2lan)) {
3474 /* Select Configuration Page */
3475 if ((reg_addr & MAX_PHY_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
3476 ret_val = e1000_write_phy_reg_ex(hw, GG82563_PHY_PAGE_SELECT,
3477 (u16)((u16)reg_addr >> GG82563_PAGE_SHIFT));
3478 } else {
3479 /* Use Alternative Page Select register to access
3480 * registers 30 and 31
3481 */
3482 ret_val = e1000_write_phy_reg_ex(hw,
3483 GG82563_PHY_PAGE_SELECT_ALT,
3484 (u16)((u16)reg_addr >> GG82563_PAGE_SHIFT));
3485 }
3486
3487 if (ret_val) {
3488 e1000_swfw_sync_release(hw, swfw);
3489 return ret_val;
3490 }
3491 }
3492 }
3493
3494 ret_val = e1000_read_phy_reg_ex(hw, MAX_PHY_REG_ADDRESS & reg_addr,
3495 phy_data);
3496
3497 e1000_swfw_sync_release(hw, swfw);
3498 return ret_val;
3499} 2735}
3500 2736
3501static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr, 2737static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
3502 u16 *phy_data) 2738 u16 *phy_data)
3503{ 2739{
3504 u32 i; 2740 u32 i;
3505 u32 mdic = 0; 2741 u32 mdic = 0;
3506 const u32 phy_addr = 1; 2742 const u32 phy_addr = 1;
3507 2743
3508 DEBUGFUNC("e1000_read_phy_reg_ex"); 2744 DEBUGFUNC("e1000_read_phy_reg_ex");
3509 2745
3510 if (reg_addr > MAX_PHY_REG_ADDRESS) { 2746 if (reg_addr > MAX_PHY_REG_ADDRESS) {
3511 DEBUGOUT1("PHY Address %d is out of range\n", reg_addr); 2747 DEBUGOUT1("PHY Address %d is out of range\n", reg_addr);
3512 return -E1000_ERR_PARAM; 2748 return -E1000_ERR_PARAM;
3513 } 2749 }
3514 2750
3515 if (hw->mac_type > e1000_82543) { 2751 if (hw->mac_type > e1000_82543) {
3516 /* Set up Op-code, Phy Address, and register address in the MDI 2752 /* Set up Op-code, Phy Address, and register address in the MDI
3517 * Control register. The MAC will take care of interfacing with the 2753 * Control register. The MAC will take care of interfacing with the
3518 * PHY to retrieve the desired data. 2754 * PHY to retrieve the desired data.
3519 */ 2755 */
3520 mdic = ((reg_addr << E1000_MDIC_REG_SHIFT) | 2756 mdic = ((reg_addr << E1000_MDIC_REG_SHIFT) |
3521 (phy_addr << E1000_MDIC_PHY_SHIFT) | 2757 (phy_addr << E1000_MDIC_PHY_SHIFT) |
3522 (E1000_MDIC_OP_READ)); 2758 (E1000_MDIC_OP_READ));
3523 2759
3524 ew32(MDIC, mdic); 2760 ew32(MDIC, mdic);
3525 2761
3526 /* Poll the ready bit to see if the MDI read completed */ 2762 /* Poll the ready bit to see if the MDI read completed */
3527 for (i = 0; i < 64; i++) { 2763 for (i = 0; i < 64; i++) {
3528 udelay(50); 2764 udelay(50);
3529 mdic = er32(MDIC); 2765 mdic = er32(MDIC);
3530 if (mdic & E1000_MDIC_READY) break; 2766 if (mdic & E1000_MDIC_READY)
3531 } 2767 break;
3532 if (!(mdic & E1000_MDIC_READY)) { 2768 }
3533 DEBUGOUT("MDI Read did not complete\n"); 2769 if (!(mdic & E1000_MDIC_READY)) {
3534 return -E1000_ERR_PHY; 2770 DEBUGOUT("MDI Read did not complete\n");
3535 } 2771 return -E1000_ERR_PHY;
3536 if (mdic & E1000_MDIC_ERROR) { 2772 }
3537 DEBUGOUT("MDI Error\n"); 2773 if (mdic & E1000_MDIC_ERROR) {
3538 return -E1000_ERR_PHY; 2774 DEBUGOUT("MDI Error\n");
3539 } 2775 return -E1000_ERR_PHY;
3540 *phy_data = (u16)mdic; 2776 }
3541 } else { 2777 *phy_data = (u16) mdic;
3542 /* We must first send a preamble through the MDIO pin to signal the 2778 } else {
3543 * beginning of an MII instruction. This is done by sending 32 2779 /* We must first send a preamble through the MDIO pin to signal the
3544 * consecutive "1" bits. 2780 * beginning of an MII instruction. This is done by sending 32
3545 */ 2781 * consecutive "1" bits.
3546 e1000_shift_out_mdi_bits(hw, PHY_PREAMBLE, PHY_PREAMBLE_SIZE); 2782 */
3547 2783 e1000_shift_out_mdi_bits(hw, PHY_PREAMBLE, PHY_PREAMBLE_SIZE);
3548 /* Now combine the next few fields that are required for a read 2784
3549 * operation. We use this method instead of calling the 2785 /* Now combine the next few fields that are required for a read
3550 * e1000_shift_out_mdi_bits routine five different times. The format of 2786 * operation. We use this method instead of calling the
3551 * a MII read instruction consists of a shift out of 14 bits and is 2787 * e1000_shift_out_mdi_bits routine five different times. The format of
3552 * defined as follows: 2788 * a MII read instruction consists of a shift out of 14 bits and is
3553 * <Preamble><SOF><Op Code><Phy Addr><Reg Addr> 2789 * defined as follows:
3554 * followed by a shift in of 18 bits. This first two bits shifted in 2790 * <Preamble><SOF><Op Code><Phy Addr><Reg Addr>
3555 * are TurnAround bits used to avoid contention on the MDIO pin when a 2791 * followed by a shift in of 18 bits. This first two bits shifted in
3556 * READ operation is performed. These two bits are thrown away 2792 * are TurnAround bits used to avoid contention on the MDIO pin when a
3557 * followed by a shift in of 16 bits which contains the desired data. 2793 * READ operation is performed. These two bits are thrown away
3558 */ 2794 * followed by a shift in of 16 bits which contains the desired data.
3559 mdic = ((reg_addr) | (phy_addr << 5) | 2795 */
3560 (PHY_OP_READ << 10) | (PHY_SOF << 12)); 2796 mdic = ((reg_addr) | (phy_addr << 5) |
3561 2797 (PHY_OP_READ << 10) | (PHY_SOF << 12));
3562 e1000_shift_out_mdi_bits(hw, mdic, 14); 2798
3563 2799 e1000_shift_out_mdi_bits(hw, mdic, 14);
3564 /* Now that we've shifted out the read command to the MII, we need to 2800
3565 * "shift in" the 16-bit value (18 total bits) of the requested PHY 2801 /* Now that we've shifted out the read command to the MII, we need to
3566 * register address. 2802 * "shift in" the 16-bit value (18 total bits) of the requested PHY
3567 */ 2803 * register address.
3568 *phy_data = e1000_shift_in_mdi_bits(hw); 2804 */
3569 } 2805 *phy_data = e1000_shift_in_mdi_bits(hw);
3570 return E1000_SUCCESS; 2806 }
2807 return E1000_SUCCESS;
3571} 2808}
3572 2809
3573/****************************************************************************** 2810/**
3574* Writes a value to a PHY register 2811 * e1000_write_phy_reg - write a phy register
3575* 2812 *
3576* hw - Struct containing variables accessed by shared code 2813 * @hw: Struct containing variables accessed by shared code
3577* reg_addr - address of the PHY register to write 2814 * @reg_addr: address of the PHY register to write
3578* data - data to write to the PHY 2815 * @data: data to write to the PHY
3579******************************************************************************/ 2816
2817 * Writes a value to a PHY register
2818 */
3580s32 e1000_write_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 phy_data) 2819s32 e1000_write_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 phy_data)
3581{ 2820{
3582 u32 ret_val; 2821 u32 ret_val;
3583 u16 swfw; 2822
3584 2823 DEBUGFUNC("e1000_write_phy_reg");
3585 DEBUGFUNC("e1000_write_phy_reg"); 2824
3586 2825 if ((hw->phy_type == e1000_phy_igp) &&
3587 if ((hw->mac_type == e1000_80003es2lan) && 2826 (reg_addr > MAX_PHY_MULTI_PAGE_REG)) {
3588 (er32(STATUS) & E1000_STATUS_FUNC_1)) { 2827 ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT,
3589 swfw = E1000_SWFW_PHY1_SM; 2828 (u16) reg_addr);
3590 } else { 2829 if (ret_val)
3591 swfw = E1000_SWFW_PHY0_SM; 2830 return ret_val;
3592 } 2831 }
3593 if (e1000_swfw_sync_acquire(hw, swfw)) 2832
3594 return -E1000_ERR_SWFW_SYNC; 2833 ret_val = e1000_write_phy_reg_ex(hw, MAX_PHY_REG_ADDRESS & reg_addr,
3595 2834 phy_data);
3596 if ((hw->phy_type == e1000_phy_igp || 2835
3597 hw->phy_type == e1000_phy_igp_3 || 2836 return ret_val;
3598 hw->phy_type == e1000_phy_igp_2) &&
3599 (reg_addr > MAX_PHY_MULTI_PAGE_REG)) {
3600 ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT,
3601 (u16)reg_addr);
3602 if (ret_val) {
3603 e1000_swfw_sync_release(hw, swfw);
3604 return ret_val;
3605 }
3606 } else if (hw->phy_type == e1000_phy_gg82563) {
3607 if (((reg_addr & MAX_PHY_REG_ADDRESS) > MAX_PHY_MULTI_PAGE_REG) ||
3608 (hw->mac_type == e1000_80003es2lan)) {
3609 /* Select Configuration Page */
3610 if ((reg_addr & MAX_PHY_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
3611 ret_val = e1000_write_phy_reg_ex(hw, GG82563_PHY_PAGE_SELECT,
3612 (u16)((u16)reg_addr >> GG82563_PAGE_SHIFT));
3613 } else {
3614 /* Use Alternative Page Select register to access
3615 * registers 30 and 31
3616 */
3617 ret_val = e1000_write_phy_reg_ex(hw,
3618 GG82563_PHY_PAGE_SELECT_ALT,
3619 (u16)((u16)reg_addr >> GG82563_PAGE_SHIFT));
3620 }
3621
3622 if (ret_val) {
3623 e1000_swfw_sync_release(hw, swfw);
3624 return ret_val;
3625 }
3626 }
3627 }
3628
3629 ret_val = e1000_write_phy_reg_ex(hw, MAX_PHY_REG_ADDRESS & reg_addr,
3630 phy_data);
3631
3632 e1000_swfw_sync_release(hw, swfw);
3633 return ret_val;
3634} 2837}
3635 2838
3636static s32 e1000_write_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr, 2839static s32 e1000_write_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
3637 u16 phy_data) 2840 u16 phy_data)
3638{ 2841{
3639 u32 i; 2842 u32 i;
3640 u32 mdic = 0; 2843 u32 mdic = 0;
3641 const u32 phy_addr = 1; 2844 const u32 phy_addr = 1;
3642
3643 DEBUGFUNC("e1000_write_phy_reg_ex");
3644
3645 if (reg_addr > MAX_PHY_REG_ADDRESS) {
3646 DEBUGOUT1("PHY Address %d is out of range\n", reg_addr);
3647 return -E1000_ERR_PARAM;
3648 }
3649
3650 if (hw->mac_type > e1000_82543) {
3651 /* Set up Op-code, Phy Address, register address, and data intended
3652 * for the PHY register in the MDI Control register. The MAC will take
3653 * care of interfacing with the PHY to send the desired data.
3654 */
3655 mdic = (((u32)phy_data) |
3656 (reg_addr << E1000_MDIC_REG_SHIFT) |
3657 (phy_addr << E1000_MDIC_PHY_SHIFT) |
3658 (E1000_MDIC_OP_WRITE));
3659
3660 ew32(MDIC, mdic);
3661
3662 /* Poll the ready bit to see if the MDI read completed */
3663 for (i = 0; i < 641; i++) {
3664 udelay(5);
3665 mdic = er32(MDIC);
3666 if (mdic & E1000_MDIC_READY) break;
3667 }
3668 if (!(mdic & E1000_MDIC_READY)) {
3669 DEBUGOUT("MDI Write did not complete\n");
3670 return -E1000_ERR_PHY;
3671 }
3672 } else {
3673 /* We'll need to use the SW defined pins to shift the write command
3674 * out to the PHY. We first send a preamble to the PHY to signal the
3675 * beginning of the MII instruction. This is done by sending 32
3676 * consecutive "1" bits.
3677 */
3678 e1000_shift_out_mdi_bits(hw, PHY_PREAMBLE, PHY_PREAMBLE_SIZE);
3679
3680 /* Now combine the remaining required fields that will indicate a
3681 * write operation. We use this method instead of calling the
3682 * e1000_shift_out_mdi_bits routine for each field in the command. The
3683 * format of a MII write instruction is as follows:
3684 * <Preamble><SOF><Op Code><Phy Addr><Reg Addr><Turnaround><Data>.
3685 */
3686 mdic = ((PHY_TURNAROUND) | (reg_addr << 2) | (phy_addr << 7) |
3687 (PHY_OP_WRITE << 12) | (PHY_SOF << 14));
3688 mdic <<= 16;
3689 mdic |= (u32)phy_data;
3690
3691 e1000_shift_out_mdi_bits(hw, mdic, 32);
3692 }
3693
3694 return E1000_SUCCESS;
3695}
3696 2845
3697static s32 e1000_read_kmrn_reg(struct e1000_hw *hw, u32 reg_addr, u16 *data) 2846 DEBUGFUNC("e1000_write_phy_reg_ex");
3698{
3699 u32 reg_val;
3700 u16 swfw;
3701 DEBUGFUNC("e1000_read_kmrn_reg");
3702
3703 if ((hw->mac_type == e1000_80003es2lan) &&
3704 (er32(STATUS) & E1000_STATUS_FUNC_1)) {
3705 swfw = E1000_SWFW_PHY1_SM;
3706 } else {
3707 swfw = E1000_SWFW_PHY0_SM;
3708 }
3709 if (e1000_swfw_sync_acquire(hw, swfw))
3710 return -E1000_ERR_SWFW_SYNC;
3711
3712 /* Write register address */
3713 reg_val = ((reg_addr << E1000_KUMCTRLSTA_OFFSET_SHIFT) &
3714 E1000_KUMCTRLSTA_OFFSET) |
3715 E1000_KUMCTRLSTA_REN;
3716 ew32(KUMCTRLSTA, reg_val);
3717 udelay(2);
3718
3719 /* Read the data returned */
3720 reg_val = er32(KUMCTRLSTA);
3721 *data = (u16)reg_val;
3722
3723 e1000_swfw_sync_release(hw, swfw);
3724 return E1000_SUCCESS;
3725}
3726 2847
3727static s32 e1000_write_kmrn_reg(struct e1000_hw *hw, u32 reg_addr, u16 data) 2848 if (reg_addr > MAX_PHY_REG_ADDRESS) {
3728{ 2849 DEBUGOUT1("PHY Address %d is out of range\n", reg_addr);
3729 u32 reg_val; 2850 return -E1000_ERR_PARAM;
3730 u16 swfw; 2851 }
3731 DEBUGFUNC("e1000_write_kmrn_reg"); 2852
3732 2853 if (hw->mac_type > e1000_82543) {
3733 if ((hw->mac_type == e1000_80003es2lan) && 2854 /* Set up Op-code, Phy Address, register address, and data intended
3734 (er32(STATUS) & E1000_STATUS_FUNC_1)) { 2855 * for the PHY register in the MDI Control register. The MAC will take
3735 swfw = E1000_SWFW_PHY1_SM; 2856 * care of interfacing with the PHY to send the desired data.
3736 } else { 2857 */
3737 swfw = E1000_SWFW_PHY0_SM; 2858 mdic = (((u32) phy_data) |
3738 } 2859 (reg_addr << E1000_MDIC_REG_SHIFT) |
3739 if (e1000_swfw_sync_acquire(hw, swfw)) 2860 (phy_addr << E1000_MDIC_PHY_SHIFT) |
3740 return -E1000_ERR_SWFW_SYNC; 2861 (E1000_MDIC_OP_WRITE));
3741 2862
3742 reg_val = ((reg_addr << E1000_KUMCTRLSTA_OFFSET_SHIFT) & 2863 ew32(MDIC, mdic);
3743 E1000_KUMCTRLSTA_OFFSET) | data; 2864
3744 ew32(KUMCTRLSTA, reg_val); 2865 /* Poll the ready bit to see if the MDI read completed */
3745 udelay(2); 2866 for (i = 0; i < 641; i++) {
3746 2867 udelay(5);
3747 e1000_swfw_sync_release(hw, swfw); 2868 mdic = er32(MDIC);
3748 return E1000_SUCCESS; 2869 if (mdic & E1000_MDIC_READY)
2870 break;
2871 }
2872 if (!(mdic & E1000_MDIC_READY)) {
2873 DEBUGOUT("MDI Write did not complete\n");
2874 return -E1000_ERR_PHY;
2875 }
2876 } else {
2877 /* We'll need to use the SW defined pins to shift the write command
2878 * out to the PHY. We first send a preamble to the PHY to signal the
2879 * beginning of the MII instruction. This is done by sending 32
2880 * consecutive "1" bits.
2881 */
2882 e1000_shift_out_mdi_bits(hw, PHY_PREAMBLE, PHY_PREAMBLE_SIZE);
2883
2884 /* Now combine the remaining required fields that will indicate a
2885 * write operation. We use this method instead of calling the
2886 * e1000_shift_out_mdi_bits routine for each field in the command. The
2887 * format of a MII write instruction is as follows:
2888 * <Preamble><SOF><Op Code><Phy Addr><Reg Addr><Turnaround><Data>.
2889 */
2890 mdic = ((PHY_TURNAROUND) | (reg_addr << 2) | (phy_addr << 7) |
2891 (PHY_OP_WRITE << 12) | (PHY_SOF << 14));
2892 mdic <<= 16;
2893 mdic |= (u32) phy_data;
2894
2895 e1000_shift_out_mdi_bits(hw, mdic, 32);
2896 }
2897
2898 return E1000_SUCCESS;
3749} 2899}
3750 2900
3751/****************************************************************************** 2901/**
3752* Returns the PHY to the power-on reset state 2902 * e1000_phy_hw_reset - reset the phy, hardware style
3753* 2903 * @hw: Struct containing variables accessed by shared code
3754* hw - Struct containing variables accessed by shared code 2904 *
3755******************************************************************************/ 2905 * Returns the PHY to the power-on reset state
2906 */
3756s32 e1000_phy_hw_reset(struct e1000_hw *hw) 2907s32 e1000_phy_hw_reset(struct e1000_hw *hw)
3757{ 2908{
3758 u32 ctrl, ctrl_ext; 2909 u32 ctrl, ctrl_ext;
3759 u32 led_ctrl; 2910 u32 led_ctrl;
3760 s32 ret_val; 2911 s32 ret_val;
3761 u16 swfw; 2912
3762 2913 DEBUGFUNC("e1000_phy_hw_reset");
3763 DEBUGFUNC("e1000_phy_hw_reset"); 2914
3764 2915 DEBUGOUT("Resetting Phy...\n");
3765 /* In the case of the phy reset being blocked, it's not an error, we 2916
3766 * simply return success without performing the reset. */ 2917 if (hw->mac_type > e1000_82543) {
3767 ret_val = e1000_check_phy_reset_block(hw); 2918 /* Read the device control register and assert the E1000_CTRL_PHY_RST
3768 if (ret_val) 2919 * bit. Then, take it out of reset.
3769 return E1000_SUCCESS; 2920 * For e1000 hardware, we delay for 10ms between the assert
3770 2921 * and deassert.
3771 DEBUGOUT("Resetting Phy...\n"); 2922 */
3772 2923 ctrl = er32(CTRL);
3773 if (hw->mac_type > e1000_82543) { 2924 ew32(CTRL, ctrl | E1000_CTRL_PHY_RST);
3774 if ((hw->mac_type == e1000_80003es2lan) && 2925 E1000_WRITE_FLUSH();
3775 (er32(STATUS) & E1000_STATUS_FUNC_1)) { 2926
3776 swfw = E1000_SWFW_PHY1_SM; 2927 msleep(10);
3777 } else { 2928
3778 swfw = E1000_SWFW_PHY0_SM; 2929 ew32(CTRL, ctrl);
3779 } 2930 E1000_WRITE_FLUSH();
3780 if (e1000_swfw_sync_acquire(hw, swfw)) { 2931
3781 DEBUGOUT("Unable to acquire swfw sync\n"); 2932 } else {
3782 return -E1000_ERR_SWFW_SYNC; 2933 /* Read the Extended Device Control Register, assert the PHY_RESET_DIR
3783 } 2934 * bit to put the PHY into reset. Then, take it out of reset.
3784 /* Read the device control register and assert the E1000_CTRL_PHY_RST 2935 */
3785 * bit. Then, take it out of reset. 2936 ctrl_ext = er32(CTRL_EXT);
3786 * For pre-e1000_82571 hardware, we delay for 10ms between the assert 2937 ctrl_ext |= E1000_CTRL_EXT_SDP4_DIR;
3787 * and deassert. For e1000_82571 hardware and later, we instead delay 2938 ctrl_ext &= ~E1000_CTRL_EXT_SDP4_DATA;
3788 * for 50us between and 10ms after the deassertion. 2939 ew32(CTRL_EXT, ctrl_ext);
3789 */ 2940 E1000_WRITE_FLUSH();
3790 ctrl = er32(CTRL); 2941 msleep(10);
3791 ew32(CTRL, ctrl | E1000_CTRL_PHY_RST); 2942 ctrl_ext |= E1000_CTRL_EXT_SDP4_DATA;
3792 E1000_WRITE_FLUSH(); 2943 ew32(CTRL_EXT, ctrl_ext);
3793 2944 E1000_WRITE_FLUSH();
3794 if (hw->mac_type < e1000_82571) 2945 }
3795 msleep(10); 2946 udelay(150);
3796 else 2947
3797 udelay(100); 2948 if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) {
3798 2949 /* Configure activity LED after PHY reset */
3799 ew32(CTRL, ctrl); 2950 led_ctrl = er32(LEDCTL);
3800 E1000_WRITE_FLUSH(); 2951 led_ctrl &= IGP_ACTIVITY_LED_MASK;
3801 2952 led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE);
3802 if (hw->mac_type >= e1000_82571) 2953 ew32(LEDCTL, led_ctrl);
3803 mdelay(10); 2954 }
3804 2955
3805 e1000_swfw_sync_release(hw, swfw); 2956 /* Wait for FW to finish PHY configuration. */
3806 } else { 2957 ret_val = e1000_get_phy_cfg_done(hw);
3807 /* Read the Extended Device Control Register, assert the PHY_RESET_DIR 2958 if (ret_val != E1000_SUCCESS)
3808 * bit to put the PHY into reset. Then, take it out of reset. 2959 return ret_val;
3809 */ 2960
3810 ctrl_ext = er32(CTRL_EXT); 2961 return ret_val;
3811 ctrl_ext |= E1000_CTRL_EXT_SDP4_DIR;
3812 ctrl_ext &= ~E1000_CTRL_EXT_SDP4_DATA;
3813 ew32(CTRL_EXT, ctrl_ext);
3814 E1000_WRITE_FLUSH();
3815 msleep(10);
3816 ctrl_ext |= E1000_CTRL_EXT_SDP4_DATA;
3817 ew32(CTRL_EXT, ctrl_ext);
3818 E1000_WRITE_FLUSH();
3819 }
3820 udelay(150);
3821
3822 if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) {
3823 /* Configure activity LED after PHY reset */
3824 led_ctrl = er32(LEDCTL);
3825 led_ctrl &= IGP_ACTIVITY_LED_MASK;
3826 led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE);
3827 ew32(LEDCTL, led_ctrl);
3828 }
3829
3830 /* Wait for FW to finish PHY configuration. */
3831 ret_val = e1000_get_phy_cfg_done(hw);
3832 if (ret_val != E1000_SUCCESS)
3833 return ret_val;
3834 e1000_release_software_semaphore(hw);
3835
3836 if ((hw->mac_type == e1000_ich8lan) && (hw->phy_type == e1000_phy_igp_3))
3837 ret_val = e1000_init_lcd_from_nvm(hw);
3838
3839 return ret_val;
3840} 2962}
3841 2963
3842/****************************************************************************** 2964/**
3843* Resets the PHY 2965 * e1000_phy_reset - reset the phy to commit settings
3844* 2966 * @hw: Struct containing variables accessed by shared code
3845* hw - Struct containing variables accessed by shared code 2967 *
3846* 2968 * Resets the PHY
3847* Sets bit 15 of the MII Control register 2969 * Sets bit 15 of the MII Control register
3848******************************************************************************/ 2970 */
3849s32 e1000_phy_reset(struct e1000_hw *hw) 2971s32 e1000_phy_reset(struct e1000_hw *hw)
3850{ 2972{
3851 s32 ret_val; 2973 s32 ret_val;
3852 u16 phy_data; 2974 u16 phy_data;
3853
3854 DEBUGFUNC("e1000_phy_reset");
3855
3856 /* In the case of the phy reset being blocked, it's not an error, we
3857 * simply return success without performing the reset. */
3858 ret_val = e1000_check_phy_reset_block(hw);
3859 if (ret_val)
3860 return E1000_SUCCESS;
3861
3862 switch (hw->phy_type) {
3863 case e1000_phy_igp:
3864 case e1000_phy_igp_2:
3865 case e1000_phy_igp_3:
3866 case e1000_phy_ife:
3867 ret_val = e1000_phy_hw_reset(hw);
3868 if (ret_val)
3869 return ret_val;
3870 break;
3871 default:
3872 ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &phy_data);
3873 if (ret_val)
3874 return ret_val;
3875
3876 phy_data |= MII_CR_RESET;
3877 ret_val = e1000_write_phy_reg(hw, PHY_CTRL, phy_data);
3878 if (ret_val)
3879 return ret_val;
3880
3881 udelay(1);
3882 break;
3883 }
3884
3885 if (hw->phy_type == e1000_phy_igp || hw->phy_type == e1000_phy_igp_2)
3886 e1000_phy_init_script(hw);
3887
3888 return E1000_SUCCESS;
3889}
3890 2975
3891/****************************************************************************** 2976 DEBUGFUNC("e1000_phy_reset");
3892* Work-around for 82566 power-down: on D3 entry-
3893* 1) disable gigabit link
3894* 2) write VR power-down enable
3895* 3) read it back
3896* if successful continue, else issue LCD reset and repeat
3897*
3898* hw - struct containing variables accessed by shared code
3899******************************************************************************/
3900void e1000_phy_powerdown_workaround(struct e1000_hw *hw)
3901{
3902 s32 reg;
3903 u16 phy_data;
3904 s32 retry = 0;
3905 2977
3906 DEBUGFUNC("e1000_phy_powerdown_workaround"); 2978 switch (hw->phy_type) {
2979 case e1000_phy_igp:
2980 ret_val = e1000_phy_hw_reset(hw);
2981 if (ret_val)
2982 return ret_val;
2983 break;
2984 default:
2985 ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &phy_data);
2986 if (ret_val)
2987 return ret_val;
3907 2988
3908 if (hw->phy_type != e1000_phy_igp_3) 2989 phy_data |= MII_CR_RESET;
3909 return; 2990 ret_val = e1000_write_phy_reg(hw, PHY_CTRL, phy_data);
2991 if (ret_val)
2992 return ret_val;
3910 2993
3911 do { 2994 udelay(1);
3912 /* Disable link */ 2995 break;
3913 reg = er32(PHY_CTRL); 2996 }
3914 ew32(PHY_CTRL, reg | E1000_PHY_CTRL_GBE_DISABLE |
3915 E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
3916 2997
3917 /* Write VR power-down enable - bits 9:8 should be 10b */ 2998 if (hw->phy_type == e1000_phy_igp)
3918 e1000_read_phy_reg(hw, IGP3_VR_CTRL, &phy_data); 2999 e1000_phy_init_script(hw);
3919 phy_data |= (1 << 9);
3920 phy_data &= ~(1 << 8);
3921 e1000_write_phy_reg(hw, IGP3_VR_CTRL, phy_data);
3922 3000
3923 /* Read it back and test */ 3001 return E1000_SUCCESS;
3924 e1000_read_phy_reg(hw, IGP3_VR_CTRL, &phy_data); 3002}
3925 if (((phy_data & IGP3_VR_CTRL_MODE_MASK) == IGP3_VR_CTRL_MODE_SHUT) || retry)
3926 break;
3927 3003
3928 /* Issue PHY reset and repeat at most one more time */ 3004/**
3929 reg = er32(CTRL); 3005 * e1000_detect_gig_phy - check the phy type
3930 ew32(CTRL, reg | E1000_CTRL_PHY_RST); 3006 * @hw: Struct containing variables accessed by shared code
3931 retry++; 3007 *
3932 } while (retry); 3008 * Probes the expected PHY address for known PHY IDs
3009 */
3010static s32 e1000_detect_gig_phy(struct e1000_hw *hw)
3011{
3012 s32 phy_init_status, ret_val;
3013 u16 phy_id_high, phy_id_low;
3014 bool match = false;
3933 3015
3934 return; 3016 DEBUGFUNC("e1000_detect_gig_phy");
3935 3017
3936} 3018 if (hw->phy_id != 0)
3019 return E1000_SUCCESS;
3937 3020
3938/****************************************************************************** 3021 /* Read the PHY ID Registers to identify which PHY is onboard. */
3939* Work-around for 82566 Kumeran PCS lock loss: 3022 ret_val = e1000_read_phy_reg(hw, PHY_ID1, &phy_id_high);
3940* On link status change (i.e. PCI reset, speed change) and link is up and 3023 if (ret_val)
3941* speed is gigabit- 3024 return ret_val;
3942* 0) if workaround is optionally disabled do nothing
3943* 1) wait 1ms for Kumeran link to come up
3944* 2) check Kumeran Diagnostic register PCS lock loss bit
3945* 3) if not set the link is locked (all is good), otherwise...
3946* 4) reset the PHY
3947* 5) repeat up to 10 times
3948* Note: this is only called for IGP3 copper when speed is 1gb.
3949*
3950* hw - struct containing variables accessed by shared code
3951******************************************************************************/
3952static s32 e1000_kumeran_lock_loss_workaround(struct e1000_hw *hw)
3953{
3954 s32 ret_val;
3955 s32 reg;
3956 s32 cnt;
3957 u16 phy_data;
3958
3959 if (hw->kmrn_lock_loss_workaround_disabled)
3960 return E1000_SUCCESS;
3961
3962 /* Make sure link is up before proceeding. If not just return.
3963 * Attempting this while link is negotiating fouled up link
3964 * stability */
3965 ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
3966 ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
3967
3968 if (phy_data & MII_SR_LINK_STATUS) {
3969 for (cnt = 0; cnt < 10; cnt++) {
3970 /* read once to clear */
3971 ret_val = e1000_read_phy_reg(hw, IGP3_KMRN_DIAG, &phy_data);
3972 if (ret_val)
3973 return ret_val;
3974 /* and again to get new status */
3975 ret_val = e1000_read_phy_reg(hw, IGP3_KMRN_DIAG, &phy_data);
3976 if (ret_val)
3977 return ret_val;
3978
3979 /* check for PCS lock */
3980 if (!(phy_data & IGP3_KMRN_DIAG_PCS_LOCK_LOSS))
3981 return E1000_SUCCESS;
3982
3983 /* Issue PHY reset */
3984 e1000_phy_hw_reset(hw);
3985 mdelay(5);
3986 }
3987 /* Disable GigE link negotiation */
3988 reg = er32(PHY_CTRL);
3989 ew32(PHY_CTRL, reg | E1000_PHY_CTRL_GBE_DISABLE |
3990 E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
3991
3992 /* unable to acquire PCS lock */
3993 return E1000_ERR_PHY;
3994 }
3995
3996 return E1000_SUCCESS;
3997}
3998 3025
3999/****************************************************************************** 3026 hw->phy_id = (u32) (phy_id_high << 16);
4000* Probes the expected PHY address for known PHY IDs 3027 udelay(20);
4001* 3028 ret_val = e1000_read_phy_reg(hw, PHY_ID2, &phy_id_low);
4002* hw - Struct containing variables accessed by shared code 3029 if (ret_val)
4003******************************************************************************/ 3030 return ret_val;
4004static s32 e1000_detect_gig_phy(struct e1000_hw *hw) 3031
4005{ 3032 hw->phy_id |= (u32) (phy_id_low & PHY_REVISION_MASK);
4006 s32 phy_init_status, ret_val; 3033 hw->phy_revision = (u32) phy_id_low & ~PHY_REVISION_MASK;
4007 u16 phy_id_high, phy_id_low; 3034
4008 bool match = false; 3035 switch (hw->mac_type) {
4009 3036 case e1000_82543:
4010 DEBUGFUNC("e1000_detect_gig_phy"); 3037 if (hw->phy_id == M88E1000_E_PHY_ID)
4011 3038 match = true;
4012 if (hw->phy_id != 0) 3039 break;
4013 return E1000_SUCCESS; 3040 case e1000_82544:
4014 3041 if (hw->phy_id == M88E1000_I_PHY_ID)
4015 /* The 82571 firmware may still be configuring the PHY. In this 3042 match = true;
4016 * case, we cannot access the PHY until the configuration is done. So 3043 break;
4017 * we explicitly set the PHY values. */ 3044 case e1000_82540:
4018 if (hw->mac_type == e1000_82571 || 3045 case e1000_82545:
4019 hw->mac_type == e1000_82572) { 3046 case e1000_82545_rev_3:
4020 hw->phy_id = IGP01E1000_I_PHY_ID; 3047 case e1000_82546:
4021 hw->phy_type = e1000_phy_igp_2; 3048 case e1000_82546_rev_3:
4022 return E1000_SUCCESS; 3049 if (hw->phy_id == M88E1011_I_PHY_ID)
4023 } 3050 match = true;
4024 3051 break;
4025 /* ESB-2 PHY reads require e1000_phy_gg82563 to be set because of a work- 3052 case e1000_82541:
4026 * around that forces PHY page 0 to be set or the reads fail. The rest of 3053 case e1000_82541_rev_2:
4027 * the code in this routine uses e1000_read_phy_reg to read the PHY ID. 3054 case e1000_82547:
4028 * So for ESB-2 we need to have this set so our reads won't fail. If the 3055 case e1000_82547_rev_2:
4029 * attached PHY is not a e1000_phy_gg82563, the routines below will figure 3056 if (hw->phy_id == IGP01E1000_I_PHY_ID)
4030 * this out as well. */ 3057 match = true;
4031 if (hw->mac_type == e1000_80003es2lan) 3058 break;
4032 hw->phy_type = e1000_phy_gg82563; 3059 default:
4033 3060 DEBUGOUT1("Invalid MAC type %d\n", hw->mac_type);
4034 /* Read the PHY ID Registers to identify which PHY is onboard. */ 3061 return -E1000_ERR_CONFIG;
4035 ret_val = e1000_read_phy_reg(hw, PHY_ID1, &phy_id_high); 3062 }
4036 if (ret_val) 3063 phy_init_status = e1000_set_phy_type(hw);
4037 return ret_val; 3064
4038 3065 if ((match) && (phy_init_status == E1000_SUCCESS)) {
4039 hw->phy_id = (u32)(phy_id_high << 16); 3066 DEBUGOUT1("PHY ID 0x%X detected\n", hw->phy_id);
4040 udelay(20); 3067 return E1000_SUCCESS;
4041 ret_val = e1000_read_phy_reg(hw, PHY_ID2, &phy_id_low); 3068 }
4042 if (ret_val) 3069 DEBUGOUT1("Invalid PHY ID 0x%X\n", hw->phy_id);
4043 return ret_val; 3070 return -E1000_ERR_PHY;
4044
4045 hw->phy_id |= (u32)(phy_id_low & PHY_REVISION_MASK);
4046 hw->phy_revision = (u32)phy_id_low & ~PHY_REVISION_MASK;
4047
4048 switch (hw->mac_type) {
4049 case e1000_82543:
4050 if (hw->phy_id == M88E1000_E_PHY_ID) match = true;
4051 break;
4052 case e1000_82544:
4053 if (hw->phy_id == M88E1000_I_PHY_ID) match = true;
4054 break;
4055 case e1000_82540:
4056 case e1000_82545:
4057 case e1000_82545_rev_3:
4058 case e1000_82546:
4059 case e1000_82546_rev_3:
4060 if (hw->phy_id == M88E1011_I_PHY_ID) match = true;
4061 break;
4062 case e1000_82541:
4063 case e1000_82541_rev_2:
4064 case e1000_82547:
4065 case e1000_82547_rev_2:
4066 if (hw->phy_id == IGP01E1000_I_PHY_ID) match = true;
4067 break;
4068 case e1000_82573:
4069 if (hw->phy_id == M88E1111_I_PHY_ID) match = true;
4070 break;
4071 case e1000_80003es2lan:
4072 if (hw->phy_id == GG82563_E_PHY_ID) match = true;
4073 break;
4074 case e1000_ich8lan:
4075 if (hw->phy_id == IGP03E1000_E_PHY_ID) match = true;
4076 if (hw->phy_id == IFE_E_PHY_ID) match = true;
4077 if (hw->phy_id == IFE_PLUS_E_PHY_ID) match = true;
4078 if (hw->phy_id == IFE_C_E_PHY_ID) match = true;
4079 break;
4080 default:
4081 DEBUGOUT1("Invalid MAC type %d\n", hw->mac_type);
4082 return -E1000_ERR_CONFIG;
4083 }
4084 phy_init_status = e1000_set_phy_type(hw);
4085
4086 if ((match) && (phy_init_status == E1000_SUCCESS)) {
4087 DEBUGOUT1("PHY ID 0x%X detected\n", hw->phy_id);
4088 return E1000_SUCCESS;
4089 }
4090 DEBUGOUT1("Invalid PHY ID 0x%X\n", hw->phy_id);
4091 return -E1000_ERR_PHY;
4092} 3071}
4093 3072
4094/****************************************************************************** 3073/**
4095* Resets the PHY's DSP 3074 * e1000_phy_reset_dsp - reset DSP
4096* 3075 * @hw: Struct containing variables accessed by shared code
4097* hw - Struct containing variables accessed by shared code 3076 *
4098******************************************************************************/ 3077 * Resets the PHY's DSP
3078 */
4099static s32 e1000_phy_reset_dsp(struct e1000_hw *hw) 3079static s32 e1000_phy_reset_dsp(struct e1000_hw *hw)
4100{ 3080{
4101 s32 ret_val; 3081 s32 ret_val;
4102 DEBUGFUNC("e1000_phy_reset_dsp"); 3082 DEBUGFUNC("e1000_phy_reset_dsp");
4103 3083
4104 do { 3084 do {
4105 if (hw->phy_type != e1000_phy_gg82563) { 3085 ret_val = e1000_write_phy_reg(hw, 29, 0x001d);
4106 ret_val = e1000_write_phy_reg(hw, 29, 0x001d); 3086 if (ret_val)
4107 if (ret_val) break; 3087 break;
4108 } 3088 ret_val = e1000_write_phy_reg(hw, 30, 0x00c1);
4109 ret_val = e1000_write_phy_reg(hw, 30, 0x00c1); 3089 if (ret_val)
4110 if (ret_val) break; 3090 break;
4111 ret_val = e1000_write_phy_reg(hw, 30, 0x0000); 3091 ret_val = e1000_write_phy_reg(hw, 30, 0x0000);
4112 if (ret_val) break; 3092 if (ret_val)
4113 ret_val = E1000_SUCCESS; 3093 break;
4114 } while (0); 3094 ret_val = E1000_SUCCESS;
4115 3095 } while (0);
4116 return ret_val; 3096
3097 return ret_val;
4117} 3098}
4118 3099
4119/****************************************************************************** 3100/**
4120* Get PHY information from various PHY registers for igp PHY only. 3101 * e1000_phy_igp_get_info - get igp specific registers
4121* 3102 * @hw: Struct containing variables accessed by shared code
4122* hw - Struct containing variables accessed by shared code 3103 * @phy_info: PHY information structure
4123* phy_info - PHY information structure 3104 *
4124******************************************************************************/ 3105 * Get PHY information from various PHY registers for igp PHY only.
3106 */
4125static s32 e1000_phy_igp_get_info(struct e1000_hw *hw, 3107static s32 e1000_phy_igp_get_info(struct e1000_hw *hw,
4126 struct e1000_phy_info *phy_info) 3108 struct e1000_phy_info *phy_info)
4127{ 3109{
4128 s32 ret_val; 3110 s32 ret_val;
4129 u16 phy_data, min_length, max_length, average; 3111 u16 phy_data, min_length, max_length, average;
4130 e1000_rev_polarity polarity; 3112 e1000_rev_polarity polarity;
4131 3113
4132 DEBUGFUNC("e1000_phy_igp_get_info"); 3114 DEBUGFUNC("e1000_phy_igp_get_info");
4133 3115
4134 /* The downshift status is checked only once, after link is established, 3116 /* The downshift status is checked only once, after link is established,
4135 * and it stored in the hw->speed_downgraded parameter. */ 3117 * and it stored in the hw->speed_downgraded parameter. */
4136 phy_info->downshift = (e1000_downshift)hw->speed_downgraded; 3118 phy_info->downshift = (e1000_downshift) hw->speed_downgraded;
4137 3119
4138 /* IGP01E1000 does not need to support it. */ 3120 /* IGP01E1000 does not need to support it. */
4139 phy_info->extended_10bt_distance = e1000_10bt_ext_dist_enable_normal; 3121 phy_info->extended_10bt_distance = e1000_10bt_ext_dist_enable_normal;
4140 3122
4141 /* IGP01E1000 always correct polarity reversal */ 3123 /* IGP01E1000 always correct polarity reversal */
4142 phy_info->polarity_correction = e1000_polarity_reversal_enabled; 3124 phy_info->polarity_correction = e1000_polarity_reversal_enabled;
4143 3125
4144 /* Check polarity status */ 3126 /* Check polarity status */
4145 ret_val = e1000_check_polarity(hw, &polarity); 3127 ret_val = e1000_check_polarity(hw, &polarity);
4146 if (ret_val) 3128 if (ret_val)
4147 return ret_val; 3129 return ret_val;
4148 3130
4149 phy_info->cable_polarity = polarity; 3131 phy_info->cable_polarity = polarity;
4150 3132
4151 ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS, &phy_data); 3133 ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS, &phy_data);
4152 if (ret_val) 3134 if (ret_val)
4153 return ret_val; 3135 return ret_val;
4154 3136
4155 phy_info->mdix_mode = (e1000_auto_x_mode)((phy_data & IGP01E1000_PSSR_MDIX) >> 3137 phy_info->mdix_mode =
4156 IGP01E1000_PSSR_MDIX_SHIFT); 3138 (e1000_auto_x_mode) ((phy_data & IGP01E1000_PSSR_MDIX) >>
4157 3139 IGP01E1000_PSSR_MDIX_SHIFT);
4158 if ((phy_data & IGP01E1000_PSSR_SPEED_MASK) == 3140
4159 IGP01E1000_PSSR_SPEED_1000MBPS) { 3141 if ((phy_data & IGP01E1000_PSSR_SPEED_MASK) ==
4160 /* Local/Remote Receiver Information are only valid at 1000 Mbps */ 3142 IGP01E1000_PSSR_SPEED_1000MBPS) {
4161 ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data); 3143 /* Local/Remote Receiver Information are only valid at 1000 Mbps */
4162 if (ret_val) 3144 ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data);
4163 return ret_val; 3145 if (ret_val)
4164 3146 return ret_val;
4165 phy_info->local_rx = ((phy_data & SR_1000T_LOCAL_RX_STATUS) >> 3147
4166 SR_1000T_LOCAL_RX_STATUS_SHIFT) ? 3148 phy_info->local_rx = ((phy_data & SR_1000T_LOCAL_RX_STATUS) >>
4167 e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok; 3149 SR_1000T_LOCAL_RX_STATUS_SHIFT) ?
4168 phy_info->remote_rx = ((phy_data & SR_1000T_REMOTE_RX_STATUS) >> 3150 e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok;
4169 SR_1000T_REMOTE_RX_STATUS_SHIFT) ? 3151 phy_info->remote_rx = ((phy_data & SR_1000T_REMOTE_RX_STATUS) >>
4170 e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok; 3152 SR_1000T_REMOTE_RX_STATUS_SHIFT) ?
4171 3153 e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok;
4172 /* Get cable length */ 3154
4173 ret_val = e1000_get_cable_length(hw, &min_length, &max_length); 3155 /* Get cable length */
4174 if (ret_val) 3156 ret_val = e1000_get_cable_length(hw, &min_length, &max_length);
4175 return ret_val; 3157 if (ret_val)
4176 3158 return ret_val;
4177 /* Translate to old method */ 3159
4178 average = (max_length + min_length) / 2; 3160 /* Translate to old method */
4179 3161 average = (max_length + min_length) / 2;
4180 if (average <= e1000_igp_cable_length_50) 3162
4181 phy_info->cable_length = e1000_cable_length_50; 3163 if (average <= e1000_igp_cable_length_50)
4182 else if (average <= e1000_igp_cable_length_80) 3164 phy_info->cable_length = e1000_cable_length_50;
4183 phy_info->cable_length = e1000_cable_length_50_80; 3165 else if (average <= e1000_igp_cable_length_80)
4184 else if (average <= e1000_igp_cable_length_110) 3166 phy_info->cable_length = e1000_cable_length_50_80;
4185 phy_info->cable_length = e1000_cable_length_80_110; 3167 else if (average <= e1000_igp_cable_length_110)
4186 else if (average <= e1000_igp_cable_length_140) 3168 phy_info->cable_length = e1000_cable_length_80_110;
4187 phy_info->cable_length = e1000_cable_length_110_140; 3169 else if (average <= e1000_igp_cable_length_140)
4188 else 3170 phy_info->cable_length = e1000_cable_length_110_140;
4189 phy_info->cable_length = e1000_cable_length_140; 3171 else
4190 } 3172 phy_info->cable_length = e1000_cable_length_140;
4191 3173 }
4192 return E1000_SUCCESS;
4193}
4194 3174
4195/****************************************************************************** 3175 return E1000_SUCCESS;
4196* Get PHY information from various PHY registers for ife PHY only.
4197*
4198* hw - Struct containing variables accessed by shared code
4199* phy_info - PHY information structure
4200******************************************************************************/
4201static s32 e1000_phy_ife_get_info(struct e1000_hw *hw,
4202 struct e1000_phy_info *phy_info)
4203{
4204 s32 ret_val;
4205 u16 phy_data;
4206 e1000_rev_polarity polarity;
4207
4208 DEBUGFUNC("e1000_phy_ife_get_info");
4209
4210 phy_info->downshift = (e1000_downshift)hw->speed_downgraded;
4211 phy_info->extended_10bt_distance = e1000_10bt_ext_dist_enable_normal;
4212
4213 ret_val = e1000_read_phy_reg(hw, IFE_PHY_SPECIAL_CONTROL, &phy_data);
4214 if (ret_val)
4215 return ret_val;
4216 phy_info->polarity_correction =
4217 ((phy_data & IFE_PSC_AUTO_POLARITY_DISABLE) >>
4218 IFE_PSC_AUTO_POLARITY_DISABLE_SHIFT) ?
4219 e1000_polarity_reversal_disabled : e1000_polarity_reversal_enabled;
4220
4221 if (phy_info->polarity_correction == e1000_polarity_reversal_enabled) {
4222 ret_val = e1000_check_polarity(hw, &polarity);
4223 if (ret_val)
4224 return ret_val;
4225 } else {
4226 /* Polarity is forced. */
4227 polarity = ((phy_data & IFE_PSC_FORCE_POLARITY) >>
4228 IFE_PSC_FORCE_POLARITY_SHIFT) ?
4229 e1000_rev_polarity_reversed : e1000_rev_polarity_normal;
4230 }
4231 phy_info->cable_polarity = polarity;
4232
4233 ret_val = e1000_read_phy_reg(hw, IFE_PHY_MDIX_CONTROL, &phy_data);
4234 if (ret_val)
4235 return ret_val;
4236
4237 phy_info->mdix_mode = (e1000_auto_x_mode)
4238 ((phy_data & (IFE_PMC_AUTO_MDIX | IFE_PMC_FORCE_MDIX)) >>
4239 IFE_PMC_MDIX_MODE_SHIFT);
4240
4241 return E1000_SUCCESS;
4242} 3176}
4243 3177
4244/****************************************************************************** 3178/**
4245* Get PHY information from various PHY registers fot m88 PHY only. 3179 * e1000_phy_m88_get_info - get m88 specific registers
4246* 3180 * @hw: Struct containing variables accessed by shared code
4247* hw - Struct containing variables accessed by shared code 3181 * @phy_info: PHY information structure
4248* phy_info - PHY information structure 3182 *
4249******************************************************************************/ 3183 * Get PHY information from various PHY registers for m88 PHY only.
3184 */
4250static s32 e1000_phy_m88_get_info(struct e1000_hw *hw, 3185static s32 e1000_phy_m88_get_info(struct e1000_hw *hw,
4251 struct e1000_phy_info *phy_info) 3186 struct e1000_phy_info *phy_info)
4252{ 3187{
4253 s32 ret_val; 3188 s32 ret_val;
4254 u16 phy_data; 3189 u16 phy_data;
4255 e1000_rev_polarity polarity; 3190 e1000_rev_polarity polarity;
4256 3191
4257 DEBUGFUNC("e1000_phy_m88_get_info"); 3192 DEBUGFUNC("e1000_phy_m88_get_info");
4258 3193
4259 /* The downshift status is checked only once, after link is established, 3194 /* The downshift status is checked only once, after link is established,
4260 * and it stored in the hw->speed_downgraded parameter. */ 3195 * and it stored in the hw->speed_downgraded parameter. */
4261 phy_info->downshift = (e1000_downshift)hw->speed_downgraded; 3196 phy_info->downshift = (e1000_downshift) hw->speed_downgraded;
4262 3197
4263 ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); 3198 ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
4264 if (ret_val) 3199 if (ret_val)
4265 return ret_val; 3200 return ret_val;
4266 3201
4267 phy_info->extended_10bt_distance = 3202 phy_info->extended_10bt_distance =
4268 ((phy_data & M88E1000_PSCR_10BT_EXT_DIST_ENABLE) >> 3203 ((phy_data & M88E1000_PSCR_10BT_EXT_DIST_ENABLE) >>
4269 M88E1000_PSCR_10BT_EXT_DIST_ENABLE_SHIFT) ? 3204 M88E1000_PSCR_10BT_EXT_DIST_ENABLE_SHIFT) ?
4270 e1000_10bt_ext_dist_enable_lower : e1000_10bt_ext_dist_enable_normal; 3205 e1000_10bt_ext_dist_enable_lower :
4271 3206 e1000_10bt_ext_dist_enable_normal;
4272 phy_info->polarity_correction = 3207
4273 ((phy_data & M88E1000_PSCR_POLARITY_REVERSAL) >> 3208 phy_info->polarity_correction =
4274 M88E1000_PSCR_POLARITY_REVERSAL_SHIFT) ? 3209 ((phy_data & M88E1000_PSCR_POLARITY_REVERSAL) >>
4275 e1000_polarity_reversal_disabled : e1000_polarity_reversal_enabled; 3210 M88E1000_PSCR_POLARITY_REVERSAL_SHIFT) ?
4276 3211 e1000_polarity_reversal_disabled : e1000_polarity_reversal_enabled;
4277 /* Check polarity status */ 3212
4278 ret_val = e1000_check_polarity(hw, &polarity); 3213 /* Check polarity status */
4279 if (ret_val) 3214 ret_val = e1000_check_polarity(hw, &polarity);
4280 return ret_val; 3215 if (ret_val)
4281 phy_info->cable_polarity = polarity; 3216 return ret_val;
4282 3217 phy_info->cable_polarity = polarity;
4283 ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); 3218
4284 if (ret_val) 3219 ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
4285 return ret_val; 3220 if (ret_val)
4286 3221 return ret_val;
4287 phy_info->mdix_mode = (e1000_auto_x_mode)((phy_data & M88E1000_PSSR_MDIX) >> 3222
4288 M88E1000_PSSR_MDIX_SHIFT); 3223 phy_info->mdix_mode =
4289 3224 (e1000_auto_x_mode) ((phy_data & M88E1000_PSSR_MDIX) >>
4290 if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) { 3225 M88E1000_PSSR_MDIX_SHIFT);
4291 /* Cable Length Estimation and Local/Remote Receiver Information 3226
4292 * are only valid at 1000 Mbps. 3227 if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) {
4293 */ 3228 /* Cable Length Estimation and Local/Remote Receiver Information
4294 if (hw->phy_type != e1000_phy_gg82563) { 3229 * are only valid at 1000 Mbps.
4295 phy_info->cable_length = (e1000_cable_length)((phy_data & M88E1000_PSSR_CABLE_LENGTH) >> 3230 */
4296 M88E1000_PSSR_CABLE_LENGTH_SHIFT); 3231 phy_info->cable_length =
4297 } else { 3232 (e1000_cable_length) ((phy_data &
4298 ret_val = e1000_read_phy_reg(hw, GG82563_PHY_DSP_DISTANCE, 3233 M88E1000_PSSR_CABLE_LENGTH) >>
4299 &phy_data); 3234 M88E1000_PSSR_CABLE_LENGTH_SHIFT);
4300 if (ret_val) 3235
4301 return ret_val; 3236 ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data);
4302 3237 if (ret_val)
4303 phy_info->cable_length = (e1000_cable_length)(phy_data & GG82563_DSPD_CABLE_LENGTH); 3238 return ret_val;
4304 } 3239
4305 3240 phy_info->local_rx = ((phy_data & SR_1000T_LOCAL_RX_STATUS) >>
4306 ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data); 3241 SR_1000T_LOCAL_RX_STATUS_SHIFT) ?
4307 if (ret_val) 3242 e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok;
4308 return ret_val; 3243 phy_info->remote_rx = ((phy_data & SR_1000T_REMOTE_RX_STATUS) >>
4309 3244 SR_1000T_REMOTE_RX_STATUS_SHIFT) ?
4310 phy_info->local_rx = ((phy_data & SR_1000T_LOCAL_RX_STATUS) >> 3245 e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok;
4311 SR_1000T_LOCAL_RX_STATUS_SHIFT) ? 3246
4312 e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok; 3247 }
4313 phy_info->remote_rx = ((phy_data & SR_1000T_REMOTE_RX_STATUS) >> 3248
4314 SR_1000T_REMOTE_RX_STATUS_SHIFT) ? 3249 return E1000_SUCCESS;
4315 e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok;
4316
4317 }
4318
4319 return E1000_SUCCESS;
4320} 3250}
4321 3251
4322/****************************************************************************** 3252/**
4323* Get PHY information from various PHY registers 3253 * e1000_phy_get_info - request phy info
4324* 3254 * @hw: Struct containing variables accessed by shared code
4325* hw - Struct containing variables accessed by shared code 3255 * @phy_info: PHY information structure
4326* phy_info - PHY information structure 3256 *
4327******************************************************************************/ 3257 * Get PHY information from various PHY registers
3258 */
4328s32 e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info) 3259s32 e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info)
4329{ 3260{
4330 s32 ret_val; 3261 s32 ret_val;
4331 u16 phy_data; 3262 u16 phy_data;
4332 3263
4333 DEBUGFUNC("e1000_phy_get_info"); 3264 DEBUGFUNC("e1000_phy_get_info");
4334 3265
4335 phy_info->cable_length = e1000_cable_length_undefined; 3266 phy_info->cable_length = e1000_cable_length_undefined;
4336 phy_info->extended_10bt_distance = e1000_10bt_ext_dist_enable_undefined; 3267 phy_info->extended_10bt_distance = e1000_10bt_ext_dist_enable_undefined;
4337 phy_info->cable_polarity = e1000_rev_polarity_undefined; 3268 phy_info->cable_polarity = e1000_rev_polarity_undefined;
4338 phy_info->downshift = e1000_downshift_undefined; 3269 phy_info->downshift = e1000_downshift_undefined;
4339 phy_info->polarity_correction = e1000_polarity_reversal_undefined; 3270 phy_info->polarity_correction = e1000_polarity_reversal_undefined;
4340 phy_info->mdix_mode = e1000_auto_x_mode_undefined; 3271 phy_info->mdix_mode = e1000_auto_x_mode_undefined;
4341 phy_info->local_rx = e1000_1000t_rx_status_undefined; 3272 phy_info->local_rx = e1000_1000t_rx_status_undefined;
4342 phy_info->remote_rx = e1000_1000t_rx_status_undefined; 3273 phy_info->remote_rx = e1000_1000t_rx_status_undefined;
4343 3274
4344 if (hw->media_type != e1000_media_type_copper) { 3275 if (hw->media_type != e1000_media_type_copper) {
4345 DEBUGOUT("PHY info is only valid for copper media\n"); 3276 DEBUGOUT("PHY info is only valid for copper media\n");
4346 return -E1000_ERR_CONFIG; 3277 return -E1000_ERR_CONFIG;
4347 } 3278 }
4348 3279
4349 ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); 3280 ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
4350 if (ret_val) 3281 if (ret_val)
4351 return ret_val; 3282 return ret_val;
4352 3283
4353 ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); 3284 ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
4354 if (ret_val) 3285 if (ret_val)
4355 return ret_val; 3286 return ret_val;
4356 3287
4357 if ((phy_data & MII_SR_LINK_STATUS) != MII_SR_LINK_STATUS) { 3288 if ((phy_data & MII_SR_LINK_STATUS) != MII_SR_LINK_STATUS) {
4358 DEBUGOUT("PHY info is only valid if link is up\n"); 3289 DEBUGOUT("PHY info is only valid if link is up\n");
4359 return -E1000_ERR_CONFIG; 3290 return -E1000_ERR_CONFIG;
4360 } 3291 }
4361 3292
4362 if (hw->phy_type == e1000_phy_igp || 3293 if (hw->phy_type == e1000_phy_igp)
4363 hw->phy_type == e1000_phy_igp_3 || 3294 return e1000_phy_igp_get_info(hw, phy_info);
4364 hw->phy_type == e1000_phy_igp_2) 3295 else
4365 return e1000_phy_igp_get_info(hw, phy_info); 3296 return e1000_phy_m88_get_info(hw, phy_info);
4366 else if (hw->phy_type == e1000_phy_ife)
4367 return e1000_phy_ife_get_info(hw, phy_info);
4368 else
4369 return e1000_phy_m88_get_info(hw, phy_info);
4370} 3297}
4371 3298
4372s32 e1000_validate_mdi_setting(struct e1000_hw *hw) 3299s32 e1000_validate_mdi_setting(struct e1000_hw *hw)
4373{ 3300{
4374 DEBUGFUNC("e1000_validate_mdi_settings"); 3301 DEBUGFUNC("e1000_validate_mdi_settings");
4375
4376 if (!hw->autoneg && (hw->mdix == 0 || hw->mdix == 3)) {
4377 DEBUGOUT("Invalid MDI setting detected\n");
4378 hw->mdix = 1;
4379 return -E1000_ERR_CONFIG;
4380 }
4381 return E1000_SUCCESS;
4382}
4383 3302
3303 if (!hw->autoneg && (hw->mdix == 0 || hw->mdix == 3)) {
3304 DEBUGOUT("Invalid MDI setting detected\n");
3305 hw->mdix = 1;
3306 return -E1000_ERR_CONFIG;
3307 }
3308 return E1000_SUCCESS;
3309}
4384 3310
4385/****************************************************************************** 3311/**
4386 * Sets up eeprom variables in the hw struct. Must be called after mac_type 3312 * e1000_init_eeprom_params - initialize sw eeprom vars
4387 * is configured. Additionally, if this is ICH8, the flash controller GbE 3313 * @hw: Struct containing variables accessed by shared code
4388 * registers must be mapped, or this will crash.
4389 * 3314 *
4390 * hw - Struct containing variables accessed by shared code 3315 * Sets up eeprom variables in the hw struct. Must be called after mac_type
4391 *****************************************************************************/ 3316 * is configured.
3317 */
4392s32 e1000_init_eeprom_params(struct e1000_hw *hw) 3318s32 e1000_init_eeprom_params(struct e1000_hw *hw)
4393{ 3319{
4394 struct e1000_eeprom_info *eeprom = &hw->eeprom; 3320 struct e1000_eeprom_info *eeprom = &hw->eeprom;
4395 u32 eecd = er32(EECD); 3321 u32 eecd = er32(EECD);
4396 s32 ret_val = E1000_SUCCESS; 3322 s32 ret_val = E1000_SUCCESS;
4397 u16 eeprom_size; 3323 u16 eeprom_size;
4398 3324
4399 DEBUGFUNC("e1000_init_eeprom_params"); 3325 DEBUGFUNC("e1000_init_eeprom_params");
4400 3326
4401 switch (hw->mac_type) { 3327 switch (hw->mac_type) {
4402 case e1000_82542_rev2_0: 3328 case e1000_82542_rev2_0:
4403 case e1000_82542_rev2_1: 3329 case e1000_82542_rev2_1:
4404 case e1000_82543: 3330 case e1000_82543:
4405 case e1000_82544: 3331 case e1000_82544:
4406 eeprom->type = e1000_eeprom_microwire; 3332 eeprom->type = e1000_eeprom_microwire;
4407 eeprom->word_size = 64; 3333 eeprom->word_size = 64;
4408 eeprom->opcode_bits = 3; 3334 eeprom->opcode_bits = 3;
4409 eeprom->address_bits = 6; 3335 eeprom->address_bits = 6;
4410 eeprom->delay_usec = 50; 3336 eeprom->delay_usec = 50;
4411 eeprom->use_eerd = false; 3337 break;
4412 eeprom->use_eewr = false; 3338 case e1000_82540:
4413 break; 3339 case e1000_82545:
4414 case e1000_82540: 3340 case e1000_82545_rev_3:
4415 case e1000_82545: 3341 case e1000_82546:
4416 case e1000_82545_rev_3: 3342 case e1000_82546_rev_3:
4417 case e1000_82546: 3343 eeprom->type = e1000_eeprom_microwire;
4418 case e1000_82546_rev_3: 3344 eeprom->opcode_bits = 3;
4419 eeprom->type = e1000_eeprom_microwire; 3345 eeprom->delay_usec = 50;
4420 eeprom->opcode_bits = 3; 3346 if (eecd & E1000_EECD_SIZE) {
4421 eeprom->delay_usec = 50; 3347 eeprom->word_size = 256;
4422 if (eecd & E1000_EECD_SIZE) { 3348 eeprom->address_bits = 8;
4423 eeprom->word_size = 256; 3349 } else {
4424 eeprom->address_bits = 8; 3350 eeprom->word_size = 64;
4425 } else { 3351 eeprom->address_bits = 6;
4426 eeprom->word_size = 64; 3352 }
4427 eeprom->address_bits = 6; 3353 break;
4428 } 3354 case e1000_82541:
4429 eeprom->use_eerd = false; 3355 case e1000_82541_rev_2:
4430 eeprom->use_eewr = false; 3356 case e1000_82547:
4431 break; 3357 case e1000_82547_rev_2:
4432 case e1000_82541: 3358 if (eecd & E1000_EECD_TYPE) {
4433 case e1000_82541_rev_2: 3359 eeprom->type = e1000_eeprom_spi;
4434 case e1000_82547: 3360 eeprom->opcode_bits = 8;
4435 case e1000_82547_rev_2: 3361 eeprom->delay_usec = 1;
4436 if (eecd & E1000_EECD_TYPE) { 3362 if (eecd & E1000_EECD_ADDR_BITS) {
4437 eeprom->type = e1000_eeprom_spi; 3363 eeprom->page_size = 32;
4438 eeprom->opcode_bits = 8; 3364 eeprom->address_bits = 16;
4439 eeprom->delay_usec = 1; 3365 } else {
4440 if (eecd & E1000_EECD_ADDR_BITS) { 3366 eeprom->page_size = 8;
4441 eeprom->page_size = 32; 3367 eeprom->address_bits = 8;
4442 eeprom->address_bits = 16; 3368 }
4443 } else { 3369 } else {
4444 eeprom->page_size = 8; 3370 eeprom->type = e1000_eeprom_microwire;
4445 eeprom->address_bits = 8; 3371 eeprom->opcode_bits = 3;
4446 } 3372 eeprom->delay_usec = 50;
4447 } else { 3373 if (eecd & E1000_EECD_ADDR_BITS) {
4448 eeprom->type = e1000_eeprom_microwire; 3374 eeprom->word_size = 256;
4449 eeprom->opcode_bits = 3; 3375 eeprom->address_bits = 8;
4450 eeprom->delay_usec = 50; 3376 } else {
4451 if (eecd & E1000_EECD_ADDR_BITS) { 3377 eeprom->word_size = 64;
4452 eeprom->word_size = 256; 3378 eeprom->address_bits = 6;
4453 eeprom->address_bits = 8; 3379 }
4454 } else { 3380 }
4455 eeprom->word_size = 64; 3381 break;
4456 eeprom->address_bits = 6; 3382 default:
4457 } 3383 break;
4458 } 3384 }
4459 eeprom->use_eerd = false; 3385
4460 eeprom->use_eewr = false; 3386 if (eeprom->type == e1000_eeprom_spi) {
4461 break; 3387 /* eeprom_size will be an enum [0..8] that maps to eeprom sizes 128B to
4462 case e1000_82571: 3388 * 32KB (incremented by powers of 2).
4463 case e1000_82572: 3389 */
4464 eeprom->type = e1000_eeprom_spi; 3390 /* Set to default value for initial eeprom read. */
4465 eeprom->opcode_bits = 8; 3391 eeprom->word_size = 64;
4466 eeprom->delay_usec = 1; 3392 ret_val = e1000_read_eeprom(hw, EEPROM_CFG, 1, &eeprom_size);
4467 if (eecd & E1000_EECD_ADDR_BITS) { 3393 if (ret_val)
4468 eeprom->page_size = 32; 3394 return ret_val;
4469 eeprom->address_bits = 16; 3395 eeprom_size =
4470 } else { 3396 (eeprom_size & EEPROM_SIZE_MASK) >> EEPROM_SIZE_SHIFT;
4471 eeprom->page_size = 8; 3397 /* 256B eeprom size was not supported in earlier hardware, so we
4472 eeprom->address_bits = 8; 3398 * bump eeprom_size up one to ensure that "1" (which maps to 256B)
4473 } 3399 * is never the result used in the shifting logic below. */
4474 eeprom->use_eerd = false; 3400 if (eeprom_size)
4475 eeprom->use_eewr = false; 3401 eeprom_size++;
4476 break; 3402
4477 case e1000_82573: 3403 eeprom->word_size = 1 << (eeprom_size + EEPROM_WORD_SIZE_SHIFT);
4478 eeprom->type = e1000_eeprom_spi; 3404 }
4479 eeprom->opcode_bits = 8; 3405 return ret_val;
4480 eeprom->delay_usec = 1;
4481 if (eecd & E1000_EECD_ADDR_BITS) {
4482 eeprom->page_size = 32;
4483 eeprom->address_bits = 16;
4484 } else {
4485 eeprom->page_size = 8;
4486 eeprom->address_bits = 8;
4487 }
4488 eeprom->use_eerd = true;
4489 eeprom->use_eewr = true;
4490 if (!e1000_is_onboard_nvm_eeprom(hw)) {
4491 eeprom->type = e1000_eeprom_flash;
4492 eeprom->word_size = 2048;
4493
4494 /* Ensure that the Autonomous FLASH update bit is cleared due to
4495 * Flash update issue on parts which use a FLASH for NVM. */
4496 eecd &= ~E1000_EECD_AUPDEN;
4497 ew32(EECD, eecd);
4498 }
4499 break;
4500 case e1000_80003es2lan:
4501 eeprom->type = e1000_eeprom_spi;
4502 eeprom->opcode_bits = 8;
4503 eeprom->delay_usec = 1;
4504 if (eecd & E1000_EECD_ADDR_BITS) {
4505 eeprom->page_size = 32;
4506 eeprom->address_bits = 16;
4507 } else {
4508 eeprom->page_size = 8;
4509 eeprom->address_bits = 8;
4510 }
4511 eeprom->use_eerd = true;
4512 eeprom->use_eewr = false;
4513 break;
4514 case e1000_ich8lan:
4515 {
4516 s32 i = 0;
4517 u32 flash_size = E1000_READ_ICH_FLASH_REG(hw, ICH_FLASH_GFPREG);
4518
4519 eeprom->type = e1000_eeprom_ich8;
4520 eeprom->use_eerd = false;
4521 eeprom->use_eewr = false;
4522 eeprom->word_size = E1000_SHADOW_RAM_WORDS;
4523
4524 /* Zero the shadow RAM structure. But don't load it from NVM
4525 * so as to save time for driver init */
4526 if (hw->eeprom_shadow_ram != NULL) {
4527 for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
4528 hw->eeprom_shadow_ram[i].modified = false;
4529 hw->eeprom_shadow_ram[i].eeprom_word = 0xFFFF;
4530 }
4531 }
4532
4533 hw->flash_base_addr = (flash_size & ICH_GFPREG_BASE_MASK) *
4534 ICH_FLASH_SECTOR_SIZE;
4535
4536 hw->flash_bank_size = ((flash_size >> 16) & ICH_GFPREG_BASE_MASK) + 1;
4537 hw->flash_bank_size -= (flash_size & ICH_GFPREG_BASE_MASK);
4538
4539 hw->flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
4540
4541 hw->flash_bank_size /= 2 * sizeof(u16);
4542
4543 break;
4544 }
4545 default:
4546 break;
4547 }
4548
4549 if (eeprom->type == e1000_eeprom_spi) {
4550 /* eeprom_size will be an enum [0..8] that maps to eeprom sizes 128B to
4551 * 32KB (incremented by powers of 2).
4552 */
4553 if (hw->mac_type <= e1000_82547_rev_2) {
4554 /* Set to default value for initial eeprom read. */
4555 eeprom->word_size = 64;
4556 ret_val = e1000_read_eeprom(hw, EEPROM_CFG, 1, &eeprom_size);
4557 if (ret_val)
4558 return ret_val;
4559 eeprom_size = (eeprom_size & EEPROM_SIZE_MASK) >> EEPROM_SIZE_SHIFT;
4560 /* 256B eeprom size was not supported in earlier hardware, so we
4561 * bump eeprom_size up one to ensure that "1" (which maps to 256B)
4562 * is never the result used in the shifting logic below. */
4563 if (eeprom_size)
4564 eeprom_size++;
4565 } else {
4566 eeprom_size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
4567 E1000_EECD_SIZE_EX_SHIFT);
4568 }
4569
4570 eeprom->word_size = 1 << (eeprom_size + EEPROM_WORD_SIZE_SHIFT);
4571 }
4572 return ret_val;
4573} 3406}
4574 3407
4575/****************************************************************************** 3408/**
4576 * Raises the EEPROM's clock input. 3409 * e1000_raise_ee_clk - Raises the EEPROM's clock input.
4577 * 3410 * @hw: Struct containing variables accessed by shared code
4578 * hw - Struct containing variables accessed by shared code 3411 * @eecd: EECD's current value
4579 * eecd - EECD's current value 3412 */
4580 *****************************************************************************/
4581static void e1000_raise_ee_clk(struct e1000_hw *hw, u32 *eecd) 3413static void e1000_raise_ee_clk(struct e1000_hw *hw, u32 *eecd)
4582{ 3414{
4583 /* Raise the clock input to the EEPROM (by setting the SK bit), and then 3415 /* Raise the clock input to the EEPROM (by setting the SK bit), and then
4584 * wait <delay> microseconds. 3416 * wait <delay> microseconds.
4585 */ 3417 */
4586 *eecd = *eecd | E1000_EECD_SK; 3418 *eecd = *eecd | E1000_EECD_SK;
4587 ew32(EECD, *eecd); 3419 ew32(EECD, *eecd);
4588 E1000_WRITE_FLUSH(); 3420 E1000_WRITE_FLUSH();
4589 udelay(hw->eeprom.delay_usec); 3421 udelay(hw->eeprom.delay_usec);
4590} 3422}
4591 3423
4592/****************************************************************************** 3424/**
4593 * Lowers the EEPROM's clock input. 3425 * e1000_lower_ee_clk - Lowers the EEPROM's clock input.
4594 * 3426 * @hw: Struct containing variables accessed by shared code
4595 * hw - Struct containing variables accessed by shared code 3427 * @eecd: EECD's current value
4596 * eecd - EECD's current value 3428 */
4597 *****************************************************************************/
4598static void e1000_lower_ee_clk(struct e1000_hw *hw, u32 *eecd) 3429static void e1000_lower_ee_clk(struct e1000_hw *hw, u32 *eecd)
4599{ 3430{
4600 /* Lower the clock input to the EEPROM (by clearing the SK bit), and then 3431 /* Lower the clock input to the EEPROM (by clearing the SK bit), and then
4601 * wait 50 microseconds. 3432 * wait 50 microseconds.
4602 */ 3433 */
4603 *eecd = *eecd & ~E1000_EECD_SK; 3434 *eecd = *eecd & ~E1000_EECD_SK;
4604 ew32(EECD, *eecd); 3435 ew32(EECD, *eecd);
4605 E1000_WRITE_FLUSH(); 3436 E1000_WRITE_FLUSH();
4606 udelay(hw->eeprom.delay_usec); 3437 udelay(hw->eeprom.delay_usec);
4607} 3438}
4608 3439
4609/****************************************************************************** 3440/**
4610 * Shift data bits out to the EEPROM. 3441 * e1000_shift_out_ee_bits - Shift data bits out to the EEPROM.
4611 * 3442 * @hw: Struct containing variables accessed by shared code
4612 * hw - Struct containing variables accessed by shared code 3443 * @data: data to send to the EEPROM
4613 * data - data to send to the EEPROM 3444 * @count: number of bits to shift out
4614 * count - number of bits to shift out 3445 */
4615 *****************************************************************************/
4616static void e1000_shift_out_ee_bits(struct e1000_hw *hw, u16 data, u16 count) 3446static void e1000_shift_out_ee_bits(struct e1000_hw *hw, u16 data, u16 count)
4617{ 3447{
4618 struct e1000_eeprom_info *eeprom = &hw->eeprom; 3448 struct e1000_eeprom_info *eeprom = &hw->eeprom;
4619 u32 eecd; 3449 u32 eecd;
4620 u32 mask; 3450 u32 mask;
4621 3451
4622 /* We need to shift "count" bits out to the EEPROM. So, value in the 3452 /* We need to shift "count" bits out to the EEPROM. So, value in the
4623 * "data" parameter will be shifted out to the EEPROM one bit at a time. 3453 * "data" parameter will be shifted out to the EEPROM one bit at a time.
4624 * In order to do this, "data" must be broken down into bits. 3454 * In order to do this, "data" must be broken down into bits.
4625 */ 3455 */
4626 mask = 0x01 << (count - 1); 3456 mask = 0x01 << (count - 1);
4627 eecd = er32(EECD); 3457 eecd = er32(EECD);
4628 if (eeprom->type == e1000_eeprom_microwire) { 3458 if (eeprom->type == e1000_eeprom_microwire) {
4629 eecd &= ~E1000_EECD_DO; 3459 eecd &= ~E1000_EECD_DO;
4630 } else if (eeprom->type == e1000_eeprom_spi) { 3460 } else if (eeprom->type == e1000_eeprom_spi) {
4631 eecd |= E1000_EECD_DO; 3461 eecd |= E1000_EECD_DO;
4632 } 3462 }
4633 do { 3463 do {
4634 /* A "1" is shifted out to the EEPROM by setting bit "DI" to a "1", 3464 /* A "1" is shifted out to the EEPROM by setting bit "DI" to a "1",
4635 * and then raising and then lowering the clock (the SK bit controls 3465 * and then raising and then lowering the clock (the SK bit controls
4636 * the clock input to the EEPROM). A "0" is shifted out to the EEPROM 3466 * the clock input to the EEPROM). A "0" is shifted out to the EEPROM
4637 * by setting "DI" to "0" and then raising and then lowering the clock. 3467 * by setting "DI" to "0" and then raising and then lowering the clock.
4638 */ 3468 */
4639 eecd &= ~E1000_EECD_DI; 3469 eecd &= ~E1000_EECD_DI;
4640 3470
4641 if (data & mask) 3471 if (data & mask)
4642 eecd |= E1000_EECD_DI; 3472 eecd |= E1000_EECD_DI;
4643 3473
4644 ew32(EECD, eecd); 3474 ew32(EECD, eecd);
4645 E1000_WRITE_FLUSH(); 3475 E1000_WRITE_FLUSH();
4646 3476
4647 udelay(eeprom->delay_usec); 3477 udelay(eeprom->delay_usec);
4648 3478
4649 e1000_raise_ee_clk(hw, &eecd); 3479 e1000_raise_ee_clk(hw, &eecd);
4650 e1000_lower_ee_clk(hw, &eecd); 3480 e1000_lower_ee_clk(hw, &eecd);
4651 3481
4652 mask = mask >> 1; 3482 mask = mask >> 1;
4653 3483
4654 } while (mask); 3484 } while (mask);
4655 3485
4656 /* We leave the "DI" bit set to "0" when we leave this routine. */ 3486 /* We leave the "DI" bit set to "0" when we leave this routine. */
4657 eecd &= ~E1000_EECD_DI; 3487 eecd &= ~E1000_EECD_DI;
4658 ew32(EECD, eecd); 3488 ew32(EECD, eecd);
4659} 3489}
4660 3490
4661/****************************************************************************** 3491/**
4662 * Shift data bits in from the EEPROM 3492 * e1000_shift_in_ee_bits - Shift data bits in from the EEPROM
4663 * 3493 * @hw: Struct containing variables accessed by shared code
4664 * hw - Struct containing variables accessed by shared code 3494 * @count: number of bits to shift in
4665 *****************************************************************************/ 3495 */
4666static u16 e1000_shift_in_ee_bits(struct e1000_hw *hw, u16 count) 3496static u16 e1000_shift_in_ee_bits(struct e1000_hw *hw, u16 count)
4667{ 3497{
4668 u32 eecd; 3498 u32 eecd;
4669 u32 i; 3499 u32 i;
4670 u16 data; 3500 u16 data;
4671 3501
4672 /* In order to read a register from the EEPROM, we need to shift 'count' 3502 /* In order to read a register from the EEPROM, we need to shift 'count'
4673 * bits in from the EEPROM. Bits are "shifted in" by raising the clock 3503 * bits in from the EEPROM. Bits are "shifted in" by raising the clock
4674 * input to the EEPROM (setting the SK bit), and then reading the value of 3504 * input to the EEPROM (setting the SK bit), and then reading the value of
4675 * the "DO" bit. During this "shifting in" process the "DI" bit should 3505 * the "DO" bit. During this "shifting in" process the "DI" bit should
4676 * always be clear. 3506 * always be clear.
4677 */ 3507 */
4678 3508
4679 eecd = er32(EECD); 3509 eecd = er32(EECD);
4680 3510
4681 eecd &= ~(E1000_EECD_DO | E1000_EECD_DI); 3511 eecd &= ~(E1000_EECD_DO | E1000_EECD_DI);
4682 data = 0; 3512 data = 0;
4683 3513
4684 for (i = 0; i < count; i++) { 3514 for (i = 0; i < count; i++) {
4685 data = data << 1; 3515 data = data << 1;
4686 e1000_raise_ee_clk(hw, &eecd); 3516 e1000_raise_ee_clk(hw, &eecd);
4687 3517
4688 eecd = er32(EECD); 3518 eecd = er32(EECD);
4689 3519
4690 eecd &= ~(E1000_EECD_DI); 3520 eecd &= ~(E1000_EECD_DI);
4691 if (eecd & E1000_EECD_DO) 3521 if (eecd & E1000_EECD_DO)
4692 data |= 1; 3522 data |= 1;
4693 3523
4694 e1000_lower_ee_clk(hw, &eecd); 3524 e1000_lower_ee_clk(hw, &eecd);
4695 } 3525 }
4696 3526
4697 return data; 3527 return data;
4698} 3528}
4699 3529
4700/****************************************************************************** 3530/**
4701 * Prepares EEPROM for access 3531 * e1000_acquire_eeprom - Prepares EEPROM for access
4702 * 3532 * @hw: Struct containing variables accessed by shared code
4703 * hw - Struct containing variables accessed by shared code
4704 * 3533 *
4705 * Lowers EEPROM clock. Clears input pin. Sets the chip select pin. This 3534 * Lowers EEPROM clock. Clears input pin. Sets the chip select pin. This
4706 * function should be called before issuing a command to the EEPROM. 3535 * function should be called before issuing a command to the EEPROM.
4707 *****************************************************************************/ 3536 */
4708static s32 e1000_acquire_eeprom(struct e1000_hw *hw) 3537static s32 e1000_acquire_eeprom(struct e1000_hw *hw)
4709{ 3538{
4710 struct e1000_eeprom_info *eeprom = &hw->eeprom; 3539 struct e1000_eeprom_info *eeprom = &hw->eeprom;
4711 u32 eecd, i=0; 3540 u32 eecd, i = 0;
4712 3541
4713 DEBUGFUNC("e1000_acquire_eeprom"); 3542 DEBUGFUNC("e1000_acquire_eeprom");
4714 3543
4715 if (e1000_swfw_sync_acquire(hw, E1000_SWFW_EEP_SM)) 3544 eecd = er32(EECD);
4716 return -E1000_ERR_SWFW_SYNC; 3545
4717 eecd = er32(EECD); 3546 /* Request EEPROM Access */
4718 3547 if (hw->mac_type > e1000_82544) {
4719 if (hw->mac_type != e1000_82573) { 3548 eecd |= E1000_EECD_REQ;
4720 /* Request EEPROM Access */ 3549 ew32(EECD, eecd);
4721 if (hw->mac_type > e1000_82544) { 3550 eecd = er32(EECD);
4722 eecd |= E1000_EECD_REQ; 3551 while ((!(eecd & E1000_EECD_GNT)) &&
4723 ew32(EECD, eecd); 3552 (i < E1000_EEPROM_GRANT_ATTEMPTS)) {
4724 eecd = er32(EECD); 3553 i++;
4725 while ((!(eecd & E1000_EECD_GNT)) && 3554 udelay(5);
4726 (i < E1000_EEPROM_GRANT_ATTEMPTS)) { 3555 eecd = er32(EECD);
4727 i++; 3556 }
4728 udelay(5); 3557 if (!(eecd & E1000_EECD_GNT)) {
4729 eecd = er32(EECD); 3558 eecd &= ~E1000_EECD_REQ;
4730 } 3559 ew32(EECD, eecd);
4731 if (!(eecd & E1000_EECD_GNT)) { 3560 DEBUGOUT("Could not acquire EEPROM grant\n");
4732 eecd &= ~E1000_EECD_REQ; 3561 return -E1000_ERR_EEPROM;
4733 ew32(EECD, eecd); 3562 }
4734 DEBUGOUT("Could not acquire EEPROM grant\n"); 3563 }
4735 e1000_swfw_sync_release(hw, E1000_SWFW_EEP_SM); 3564
4736 return -E1000_ERR_EEPROM; 3565 /* Setup EEPROM for Read/Write */
4737 } 3566
4738 } 3567 if (eeprom->type == e1000_eeprom_microwire) {
4739 } 3568 /* Clear SK and DI */
4740 3569 eecd &= ~(E1000_EECD_DI | E1000_EECD_SK);
4741 /* Setup EEPROM for Read/Write */ 3570 ew32(EECD, eecd);
4742 3571
4743 if (eeprom->type == e1000_eeprom_microwire) { 3572 /* Set CS */
4744 /* Clear SK and DI */ 3573 eecd |= E1000_EECD_CS;
4745 eecd &= ~(E1000_EECD_DI | E1000_EECD_SK); 3574 ew32(EECD, eecd);
4746 ew32(EECD, eecd); 3575 } else if (eeprom->type == e1000_eeprom_spi) {
4747 3576 /* Clear SK and CS */
4748 /* Set CS */ 3577 eecd &= ~(E1000_EECD_CS | E1000_EECD_SK);
4749 eecd |= E1000_EECD_CS; 3578 ew32(EECD, eecd);
4750 ew32(EECD, eecd); 3579 udelay(1);
4751 } else if (eeprom->type == e1000_eeprom_spi) { 3580 }
4752 /* Clear SK and CS */ 3581
4753 eecd &= ~(E1000_EECD_CS | E1000_EECD_SK); 3582 return E1000_SUCCESS;
4754 ew32(EECD, eecd);
4755 udelay(1);
4756 }
4757
4758 return E1000_SUCCESS;
4759} 3583}
4760 3584
4761/****************************************************************************** 3585/**
4762 * Returns EEPROM to a "standby" state 3586 * e1000_standby_eeprom - Returns EEPROM to a "standby" state
4763 * 3587 * @hw: Struct containing variables accessed by shared code
4764 * hw - Struct containing variables accessed by shared code 3588 */
4765 *****************************************************************************/
4766static void e1000_standby_eeprom(struct e1000_hw *hw) 3589static void e1000_standby_eeprom(struct e1000_hw *hw)
4767{ 3590{
4768 struct e1000_eeprom_info *eeprom = &hw->eeprom; 3591 struct e1000_eeprom_info *eeprom = &hw->eeprom;
4769 u32 eecd; 3592 u32 eecd;
4770 3593
4771 eecd = er32(EECD); 3594 eecd = er32(EECD);
4772 3595
4773 if (eeprom->type == e1000_eeprom_microwire) { 3596 if (eeprom->type == e1000_eeprom_microwire) {
4774 eecd &= ~(E1000_EECD_CS | E1000_EECD_SK); 3597 eecd &= ~(E1000_EECD_CS | E1000_EECD_SK);
4775 ew32(EECD, eecd); 3598 ew32(EECD, eecd);
4776 E1000_WRITE_FLUSH(); 3599 E1000_WRITE_FLUSH();
4777 udelay(eeprom->delay_usec); 3600 udelay(eeprom->delay_usec);
4778 3601
4779 /* Clock high */ 3602 /* Clock high */
4780 eecd |= E1000_EECD_SK; 3603 eecd |= E1000_EECD_SK;
4781 ew32(EECD, eecd); 3604 ew32(EECD, eecd);
4782 E1000_WRITE_FLUSH(); 3605 E1000_WRITE_FLUSH();
4783 udelay(eeprom->delay_usec); 3606 udelay(eeprom->delay_usec);
4784 3607
4785 /* Select EEPROM */ 3608 /* Select EEPROM */
4786 eecd |= E1000_EECD_CS; 3609 eecd |= E1000_EECD_CS;
4787 ew32(EECD, eecd); 3610 ew32(EECD, eecd);
4788 E1000_WRITE_FLUSH(); 3611 E1000_WRITE_FLUSH();
4789 udelay(eeprom->delay_usec); 3612 udelay(eeprom->delay_usec);
4790 3613
4791 /* Clock low */ 3614 /* Clock low */
4792 eecd &= ~E1000_EECD_SK; 3615 eecd &= ~E1000_EECD_SK;
4793 ew32(EECD, eecd); 3616 ew32(EECD, eecd);
4794 E1000_WRITE_FLUSH(); 3617 E1000_WRITE_FLUSH();
4795 udelay(eeprom->delay_usec); 3618 udelay(eeprom->delay_usec);
4796 } else if (eeprom->type == e1000_eeprom_spi) { 3619 } else if (eeprom->type == e1000_eeprom_spi) {
4797 /* Toggle CS to flush commands */ 3620 /* Toggle CS to flush commands */
4798 eecd |= E1000_EECD_CS; 3621 eecd |= E1000_EECD_CS;
4799 ew32(EECD, eecd); 3622 ew32(EECD, eecd);
4800 E1000_WRITE_FLUSH(); 3623 E1000_WRITE_FLUSH();
4801 udelay(eeprom->delay_usec); 3624 udelay(eeprom->delay_usec);
4802 eecd &= ~E1000_EECD_CS; 3625 eecd &= ~E1000_EECD_CS;
4803 ew32(EECD, eecd); 3626 ew32(EECD, eecd);
4804 E1000_WRITE_FLUSH(); 3627 E1000_WRITE_FLUSH();
4805 udelay(eeprom->delay_usec); 3628 udelay(eeprom->delay_usec);
4806 } 3629 }
4807} 3630}
4808 3631
4809/****************************************************************************** 3632/**
4810 * Terminates a command by inverting the EEPROM's chip select pin 3633 * e1000_release_eeprom - drop chip select
3634 * @hw: Struct containing variables accessed by shared code
4811 * 3635 *
4812 * hw - Struct containing variables accessed by shared code 3636 * Terminates a command by inverting the EEPROM's chip select pin
4813 *****************************************************************************/ 3637 */
4814static void e1000_release_eeprom(struct e1000_hw *hw) 3638static void e1000_release_eeprom(struct e1000_hw *hw)
4815{ 3639{
4816 u32 eecd; 3640 u32 eecd;
4817
4818 DEBUGFUNC("e1000_release_eeprom");
4819 3641
4820 eecd = er32(EECD); 3642 DEBUGFUNC("e1000_release_eeprom");
4821 3643
4822 if (hw->eeprom.type == e1000_eeprom_spi) { 3644 eecd = er32(EECD);
4823 eecd |= E1000_EECD_CS; /* Pull CS high */
4824 eecd &= ~E1000_EECD_SK; /* Lower SCK */
4825 3645
4826 ew32(EECD, eecd); 3646 if (hw->eeprom.type == e1000_eeprom_spi) {
3647 eecd |= E1000_EECD_CS; /* Pull CS high */
3648 eecd &= ~E1000_EECD_SK; /* Lower SCK */
4827 3649
4828 udelay(hw->eeprom.delay_usec); 3650 ew32(EECD, eecd);
4829 } else if (hw->eeprom.type == e1000_eeprom_microwire) {
4830 /* cleanup eeprom */
4831 3651
4832 /* CS on Microwire is active-high */ 3652 udelay(hw->eeprom.delay_usec);
4833 eecd &= ~(E1000_EECD_CS | E1000_EECD_DI); 3653 } else if (hw->eeprom.type == e1000_eeprom_microwire) {
3654 /* cleanup eeprom */
4834 3655
4835 ew32(EECD, eecd); 3656 /* CS on Microwire is active-high */
3657 eecd &= ~(E1000_EECD_CS | E1000_EECD_DI);
4836 3658
4837 /* Rising edge of clock */ 3659 ew32(EECD, eecd);
4838 eecd |= E1000_EECD_SK;
4839 ew32(EECD, eecd);
4840 E1000_WRITE_FLUSH();
4841 udelay(hw->eeprom.delay_usec);
4842 3660
4843 /* Falling edge of clock */ 3661 /* Rising edge of clock */
4844 eecd &= ~E1000_EECD_SK; 3662 eecd |= E1000_EECD_SK;
4845 ew32(EECD, eecd); 3663 ew32(EECD, eecd);
4846 E1000_WRITE_FLUSH(); 3664 E1000_WRITE_FLUSH();
4847 udelay(hw->eeprom.delay_usec); 3665 udelay(hw->eeprom.delay_usec);
4848 }
4849 3666
4850 /* Stop requesting EEPROM access */ 3667 /* Falling edge of clock */
4851 if (hw->mac_type > e1000_82544) { 3668 eecd &= ~E1000_EECD_SK;
4852 eecd &= ~E1000_EECD_REQ; 3669 ew32(EECD, eecd);
4853 ew32(EECD, eecd); 3670 E1000_WRITE_FLUSH();
4854 } 3671 udelay(hw->eeprom.delay_usec);
3672 }
4855 3673
4856 e1000_swfw_sync_release(hw, E1000_SWFW_EEP_SM); 3674 /* Stop requesting EEPROM access */
3675 if (hw->mac_type > e1000_82544) {
3676 eecd &= ~E1000_EECD_REQ;
3677 ew32(EECD, eecd);
3678 }
4857} 3679}
4858 3680
4859/****************************************************************************** 3681/**
4860 * Reads a 16 bit word from the EEPROM. 3682 * e1000_spi_eeprom_ready - Reads a 16 bit word from the EEPROM.
4861 * 3683 * @hw: Struct containing variables accessed by shared code
4862 * hw - Struct containing variables accessed by shared code 3684 */
4863 *****************************************************************************/
4864static s32 e1000_spi_eeprom_ready(struct e1000_hw *hw) 3685static s32 e1000_spi_eeprom_ready(struct e1000_hw *hw)
4865{ 3686{
4866 u16 retry_count = 0; 3687 u16 retry_count = 0;
4867 u8 spi_stat_reg; 3688 u8 spi_stat_reg;
4868
4869 DEBUGFUNC("e1000_spi_eeprom_ready");
4870
4871 /* Read "Status Register" repeatedly until the LSB is cleared. The
4872 * EEPROM will signal that the command has been completed by clearing
4873 * bit 0 of the internal status register. If it's not cleared within
4874 * 5 milliseconds, then error out.
4875 */
4876 retry_count = 0;
4877 do {
4878 e1000_shift_out_ee_bits(hw, EEPROM_RDSR_OPCODE_SPI,
4879 hw->eeprom.opcode_bits);
4880 spi_stat_reg = (u8)e1000_shift_in_ee_bits(hw, 8);
4881 if (!(spi_stat_reg & EEPROM_STATUS_RDY_SPI))
4882 break;
4883
4884 udelay(5);
4885 retry_count += 5;
4886
4887 e1000_standby_eeprom(hw);
4888 } while (retry_count < EEPROM_MAX_RETRY_SPI);
4889
4890 /* ATMEL SPI write time could vary from 0-20mSec on 3.3V devices (and
4891 * only 0-5mSec on 5V devices)
4892 */
4893 if (retry_count >= EEPROM_MAX_RETRY_SPI) {
4894 DEBUGOUT("SPI EEPROM Status error\n");
4895 return -E1000_ERR_EEPROM;
4896 }
4897
4898 return E1000_SUCCESS;
4899}
4900
4901/******************************************************************************
4902 * Reads a 16 bit word from the EEPROM.
4903 *
4904 * hw - Struct containing variables accessed by shared code
4905 * offset - offset of word in the EEPROM to read
4906 * data - word read from the EEPROM
4907 * words - number of words to read
4908 *****************************************************************************/
4909s32 e1000_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
4910{
4911 s32 ret;
4912 spin_lock(&e1000_eeprom_lock);
4913 ret = e1000_do_read_eeprom(hw, offset, words, data);
4914 spin_unlock(&e1000_eeprom_lock);
4915 return ret;
4916}
4917
4918static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
4919{
4920 struct e1000_eeprom_info *eeprom = &hw->eeprom;
4921 u32 i = 0;
4922
4923 DEBUGFUNC("e1000_read_eeprom");
4924
4925 /* If eeprom is not yet detected, do so now */
4926 if (eeprom->word_size == 0)
4927 e1000_init_eeprom_params(hw);
4928
4929 /* A check for invalid values: offset too large, too many words, and not
4930 * enough words.
4931 */
4932 if ((offset >= eeprom->word_size) || (words > eeprom->word_size - offset) ||
4933 (words == 0)) {
4934 DEBUGOUT2("\"words\" parameter out of bounds. Words = %d, size = %d\n", offset, eeprom->word_size);
4935 return -E1000_ERR_EEPROM;
4936 }
4937
4938 /* EEPROM's that don't use EERD to read require us to bit-bang the SPI
4939 * directly. In this case, we need to acquire the EEPROM so that
4940 * FW or other port software does not interrupt.
4941 */
4942 if (e1000_is_onboard_nvm_eeprom(hw) && !hw->eeprom.use_eerd) {
4943 /* Prepare the EEPROM for bit-bang reading */
4944 if (e1000_acquire_eeprom(hw) != E1000_SUCCESS)
4945 return -E1000_ERR_EEPROM;
4946 }
4947
4948 /* Eerd register EEPROM access requires no eeprom aquire/release */
4949 if (eeprom->use_eerd)
4950 return e1000_read_eeprom_eerd(hw, offset, words, data);
4951
4952 /* ICH EEPROM access is done via the ICH flash controller */
4953 if (eeprom->type == e1000_eeprom_ich8)
4954 return e1000_read_eeprom_ich8(hw, offset, words, data);
4955
4956 /* Set up the SPI or Microwire EEPROM for bit-bang reading. We have
4957 * acquired the EEPROM at this point, so any returns should relase it */
4958 if (eeprom->type == e1000_eeprom_spi) {
4959 u16 word_in;
4960 u8 read_opcode = EEPROM_READ_OPCODE_SPI;
4961
4962 if (e1000_spi_eeprom_ready(hw)) {
4963 e1000_release_eeprom(hw);
4964 return -E1000_ERR_EEPROM;
4965 }
4966
4967 e1000_standby_eeprom(hw);
4968
4969 /* Some SPI eeproms use the 8th address bit embedded in the opcode */
4970 if ((eeprom->address_bits == 8) && (offset >= 128))
4971 read_opcode |= EEPROM_A8_OPCODE_SPI;
4972
4973 /* Send the READ command (opcode + addr) */
4974 e1000_shift_out_ee_bits(hw, read_opcode, eeprom->opcode_bits);
4975 e1000_shift_out_ee_bits(hw, (u16)(offset*2), eeprom->address_bits);
4976
4977 /* Read the data. The address of the eeprom internally increments with
4978 * each byte (spi) being read, saving on the overhead of eeprom setup
4979 * and tear-down. The address counter will roll over if reading beyond
4980 * the size of the eeprom, thus allowing the entire memory to be read
4981 * starting from any offset. */
4982 for (i = 0; i < words; i++) {
4983 word_in = e1000_shift_in_ee_bits(hw, 16);
4984 data[i] = (word_in >> 8) | (word_in << 8);
4985 }
4986 } else if (eeprom->type == e1000_eeprom_microwire) {
4987 for (i = 0; i < words; i++) {
4988 /* Send the READ command (opcode + addr) */
4989 e1000_shift_out_ee_bits(hw, EEPROM_READ_OPCODE_MICROWIRE,
4990 eeprom->opcode_bits);
4991 e1000_shift_out_ee_bits(hw, (u16)(offset + i),
4992 eeprom->address_bits);
4993
4994 /* Read the data. For microwire, each word requires the overhead
4995 * of eeprom setup and tear-down. */
4996 data[i] = e1000_shift_in_ee_bits(hw, 16);
4997 e1000_standby_eeprom(hw);
4998 }
4999 }
5000
5001 /* End this read operation */
5002 e1000_release_eeprom(hw);
5003
5004 return E1000_SUCCESS;
5005}
5006 3689
5007/****************************************************************************** 3690 DEBUGFUNC("e1000_spi_eeprom_ready");
5008 * Reads a 16 bit word from the EEPROM using the EERD register.
5009 *
5010 * hw - Struct containing variables accessed by shared code
5011 * offset - offset of word in the EEPROM to read
5012 * data - word read from the EEPROM
5013 * words - number of words to read
5014 *****************************************************************************/
5015static s32 e1000_read_eeprom_eerd(struct e1000_hw *hw, u16 offset, u16 words,
5016 u16 *data)
5017{
5018 u32 i, eerd = 0;
5019 s32 error = 0;
5020 3691
5021 for (i = 0; i < words; i++) { 3692 /* Read "Status Register" repeatedly until the LSB is cleared. The
5022 eerd = ((offset+i) << E1000_EEPROM_RW_ADDR_SHIFT) + 3693 * EEPROM will signal that the command has been completed by clearing
5023 E1000_EEPROM_RW_REG_START; 3694 * bit 0 of the internal status register. If it's not cleared within
3695 * 5 milliseconds, then error out.
3696 */
3697 retry_count = 0;
3698 do {
3699 e1000_shift_out_ee_bits(hw, EEPROM_RDSR_OPCODE_SPI,
3700 hw->eeprom.opcode_bits);
3701 spi_stat_reg = (u8) e1000_shift_in_ee_bits(hw, 8);
3702 if (!(spi_stat_reg & EEPROM_STATUS_RDY_SPI))
3703 break;
5024 3704
5025 ew32(EERD, eerd); 3705 udelay(5);
5026 error = e1000_poll_eerd_eewr_done(hw, E1000_EEPROM_POLL_READ); 3706 retry_count += 5;
5027 3707
5028 if (error) { 3708 e1000_standby_eeprom(hw);
5029 break; 3709 } while (retry_count < EEPROM_MAX_RETRY_SPI);
5030 }
5031 data[i] = (er32(EERD) >> E1000_EEPROM_RW_REG_DATA);
5032 3710
5033 } 3711 /* ATMEL SPI write time could vary from 0-20mSec on 3.3V devices (and
3712 * only 0-5mSec on 5V devices)
3713 */
3714 if (retry_count >= EEPROM_MAX_RETRY_SPI) {
3715 DEBUGOUT("SPI EEPROM Status error\n");
3716 return -E1000_ERR_EEPROM;
3717 }
5034 3718
5035 return error; 3719 return E1000_SUCCESS;
5036} 3720}
5037 3721
5038/****************************************************************************** 3722/**
5039 * Writes a 16 bit word from the EEPROM using the EEWR register. 3723 * e1000_read_eeprom - Reads a 16 bit word from the EEPROM.
5040 * 3724 * @hw: Struct containing variables accessed by shared code
5041 * hw - Struct containing variables accessed by shared code 3725 * @offset: offset of word in the EEPROM to read
5042 * offset - offset of word in the EEPROM to read 3726 * @data: word read from the EEPROM
5043 * data - word read from the EEPROM 3727 * @words: number of words to read
5044 * words - number of words to read 3728 */
5045 *****************************************************************************/ 3729s32 e1000_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
5046static s32 e1000_write_eeprom_eewr(struct e1000_hw *hw, u16 offset, u16 words,
5047 u16 *data)
5048{ 3730{
5049 u32 register_value = 0; 3731 s32 ret;
5050 u32 i = 0; 3732 spin_lock(&e1000_eeprom_lock);
5051 s32 error = 0; 3733 ret = e1000_do_read_eeprom(hw, offset, words, data);
5052 3734 spin_unlock(&e1000_eeprom_lock);
5053 if (e1000_swfw_sync_acquire(hw, E1000_SWFW_EEP_SM)) 3735 return ret;
5054 return -E1000_ERR_SWFW_SYNC;
5055
5056 for (i = 0; i < words; i++) {
5057 register_value = (data[i] << E1000_EEPROM_RW_REG_DATA) |
5058 ((offset+i) << E1000_EEPROM_RW_ADDR_SHIFT) |
5059 E1000_EEPROM_RW_REG_START;
5060
5061 error = e1000_poll_eerd_eewr_done(hw, E1000_EEPROM_POLL_WRITE);
5062 if (error) {
5063 break;
5064 }
5065
5066 ew32(EEWR, register_value);
5067
5068 error = e1000_poll_eerd_eewr_done(hw, E1000_EEPROM_POLL_WRITE);
5069
5070 if (error) {
5071 break;
5072 }
5073 }
5074
5075 e1000_swfw_sync_release(hw, E1000_SWFW_EEP_SM);
5076 return error;
5077} 3736}
5078 3737
5079/****************************************************************************** 3738static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words,
5080 * Polls the status bit (bit 1) of the EERD to determine when the read is done. 3739 u16 *data)
5081 *
5082 * hw - Struct containing variables accessed by shared code
5083 *****************************************************************************/
5084static s32 e1000_poll_eerd_eewr_done(struct e1000_hw *hw, int eerd)
5085{ 3740{
5086 u32 attempts = 100000; 3741 struct e1000_eeprom_info *eeprom = &hw->eeprom;
5087 u32 i, reg = 0; 3742 u32 i = 0;
5088 s32 done = E1000_ERR_EEPROM;
5089
5090 for (i = 0; i < attempts; i++) {
5091 if (eerd == E1000_EEPROM_POLL_READ)
5092 reg = er32(EERD);
5093 else
5094 reg = er32(EEWR);
5095
5096 if (reg & E1000_EEPROM_RW_REG_DONE) {
5097 done = E1000_SUCCESS;
5098 break;
5099 }
5100 udelay(5);
5101 }
5102
5103 return done;
5104}
5105 3743
5106/*************************************************************************** 3744 DEBUGFUNC("e1000_read_eeprom");
5107* Description: Determines if the onboard NVM is FLASH or EEPROM.
5108*
5109* hw - Struct containing variables accessed by shared code
5110****************************************************************************/
5111static bool e1000_is_onboard_nvm_eeprom(struct e1000_hw *hw)
5112{
5113 u32 eecd = 0;
5114 3745
5115 DEBUGFUNC("e1000_is_onboard_nvm_eeprom"); 3746 /* If eeprom is not yet detected, do so now */
3747 if (eeprom->word_size == 0)
3748 e1000_init_eeprom_params(hw);
3749
3750 /* A check for invalid values: offset too large, too many words, and not
3751 * enough words.
3752 */
3753 if ((offset >= eeprom->word_size)
3754 || (words > eeprom->word_size - offset) || (words == 0)) {
3755 DEBUGOUT2
3756 ("\"words\" parameter out of bounds. Words = %d, size = %d\n",
3757 offset, eeprom->word_size);
3758 return -E1000_ERR_EEPROM;
3759 }
5116 3760
5117 if (hw->mac_type == e1000_ich8lan) 3761 /* EEPROM's that don't use EERD to read require us to bit-bang the SPI
5118 return false; 3762 * directly. In this case, we need to acquire the EEPROM so that
3763 * FW or other port software does not interrupt.
3764 */
3765 /* Prepare the EEPROM for bit-bang reading */
3766 if (e1000_acquire_eeprom(hw) != E1000_SUCCESS)
3767 return -E1000_ERR_EEPROM;
3768
3769 /* Set up the SPI or Microwire EEPROM for bit-bang reading. We have
3770 * acquired the EEPROM at this point, so any returns should release it */
3771 if (eeprom->type == e1000_eeprom_spi) {
3772 u16 word_in;
3773 u8 read_opcode = EEPROM_READ_OPCODE_SPI;
3774
3775 if (e1000_spi_eeprom_ready(hw)) {
3776 e1000_release_eeprom(hw);
3777 return -E1000_ERR_EEPROM;
3778 }
5119 3779
5120 if (hw->mac_type == e1000_82573) { 3780 e1000_standby_eeprom(hw);
5121 eecd = er32(EECD); 3781
3782 /* Some SPI eeproms use the 8th address bit embedded in the opcode */
3783 if ((eeprom->address_bits == 8) && (offset >= 128))
3784 read_opcode |= EEPROM_A8_OPCODE_SPI;
3785
3786 /* Send the READ command (opcode + addr) */
3787 e1000_shift_out_ee_bits(hw, read_opcode, eeprom->opcode_bits);
3788 e1000_shift_out_ee_bits(hw, (u16) (offset * 2),
3789 eeprom->address_bits);
3790
3791 /* Read the data. The address of the eeprom internally increments with
3792 * each byte (spi) being read, saving on the overhead of eeprom setup
3793 * and tear-down. The address counter will roll over if reading beyond
3794 * the size of the eeprom, thus allowing the entire memory to be read
3795 * starting from any offset. */
3796 for (i = 0; i < words; i++) {
3797 word_in = e1000_shift_in_ee_bits(hw, 16);
3798 data[i] = (word_in >> 8) | (word_in << 8);
3799 }
3800 } else if (eeprom->type == e1000_eeprom_microwire) {
3801 for (i = 0; i < words; i++) {
3802 /* Send the READ command (opcode + addr) */
3803 e1000_shift_out_ee_bits(hw,
3804 EEPROM_READ_OPCODE_MICROWIRE,
3805 eeprom->opcode_bits);
3806 e1000_shift_out_ee_bits(hw, (u16) (offset + i),
3807 eeprom->address_bits);
3808
3809 /* Read the data. For microwire, each word requires the overhead
3810 * of eeprom setup and tear-down. */
3811 data[i] = e1000_shift_in_ee_bits(hw, 16);
3812 e1000_standby_eeprom(hw);
3813 }
3814 }
5122 3815
5123 /* Isolate bits 15 & 16 */ 3816 /* End this read operation */
5124 eecd = ((eecd >> 15) & 0x03); 3817 e1000_release_eeprom(hw);
5125 3818
5126 /* If both bits are set, device is Flash type */ 3819 return E1000_SUCCESS;
5127 if (eecd == 0x03) {
5128 return false;
5129 }
5130 }
5131 return true;
5132} 3820}
5133 3821
5134/****************************************************************************** 3822/**
5135 * Verifies that the EEPROM has a valid checksum 3823 * e1000_validate_eeprom_checksum - Verifies that the EEPROM has a valid checksum
5136 * 3824 * @hw: Struct containing variables accessed by shared code
5137 * hw - Struct containing variables accessed by shared code
5138 * 3825 *
5139 * Reads the first 64 16 bit words of the EEPROM and sums the values read. 3826 * Reads the first 64 16 bit words of the EEPROM and sums the values read.
5140 * If the the sum of the 64 16 bit words is 0xBABA, the EEPROM's checksum is 3827 * If the the sum of the 64 16 bit words is 0xBABA, the EEPROM's checksum is
5141 * valid. 3828 * valid.
5142 *****************************************************************************/ 3829 */
5143s32 e1000_validate_eeprom_checksum(struct e1000_hw *hw) 3830s32 e1000_validate_eeprom_checksum(struct e1000_hw *hw)
5144{ 3831{
5145 u16 checksum = 0; 3832 u16 checksum = 0;
5146 u16 i, eeprom_data; 3833 u16 i, eeprom_data;
5147 3834
5148 DEBUGFUNC("e1000_validate_eeprom_checksum"); 3835 DEBUGFUNC("e1000_validate_eeprom_checksum");
5149 3836
5150 if ((hw->mac_type == e1000_82573) && !e1000_is_onboard_nvm_eeprom(hw)) { 3837 for (i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++) {
5151 /* Check bit 4 of word 10h. If it is 0, firmware is done updating 3838 if (e1000_read_eeprom(hw, i, 1, &eeprom_data) < 0) {
5152 * 10h-12h. Checksum may need to be fixed. */ 3839 DEBUGOUT("EEPROM Read Error\n");
5153 e1000_read_eeprom(hw, 0x10, 1, &eeprom_data); 3840 return -E1000_ERR_EEPROM;
5154 if ((eeprom_data & 0x10) == 0) { 3841 }
5155 /* Read 0x23 and check bit 15. This bit is a 1 when the checksum 3842 checksum += eeprom_data;
5156 * has already been fixed. If the checksum is still wrong and this 3843 }
5157 * bit is a 1, we need to return bad checksum. Otherwise, we need 3844
5158 * to set this bit to a 1 and update the checksum. */ 3845 if (checksum == (u16) EEPROM_SUM)
5159 e1000_read_eeprom(hw, 0x23, 1, &eeprom_data); 3846 return E1000_SUCCESS;
5160 if ((eeprom_data & 0x8000) == 0) { 3847 else {
5161 eeprom_data |= 0x8000; 3848 DEBUGOUT("EEPROM Checksum Invalid\n");
5162 e1000_write_eeprom(hw, 0x23, 1, &eeprom_data); 3849 return -E1000_ERR_EEPROM;
5163 e1000_update_eeprom_checksum(hw); 3850 }
5164 }
5165 }
5166 }
5167
5168 if (hw->mac_type == e1000_ich8lan) {
5169 /* Drivers must allocate the shadow ram structure for the
5170 * EEPROM checksum to be updated. Otherwise, this bit as well
5171 * as the checksum must both be set correctly for this
5172 * validation to pass.
5173 */
5174 e1000_read_eeprom(hw, 0x19, 1, &eeprom_data);
5175 if ((eeprom_data & 0x40) == 0) {
5176 eeprom_data |= 0x40;
5177 e1000_write_eeprom(hw, 0x19, 1, &eeprom_data);
5178 e1000_update_eeprom_checksum(hw);
5179 }
5180 }
5181
5182 for (i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++) {
5183 if (e1000_read_eeprom(hw, i, 1, &eeprom_data) < 0) {
5184 DEBUGOUT("EEPROM Read Error\n");
5185 return -E1000_ERR_EEPROM;
5186 }
5187 checksum += eeprom_data;
5188 }
5189
5190 if (checksum == (u16)EEPROM_SUM)
5191 return E1000_SUCCESS;
5192 else {
5193 DEBUGOUT("EEPROM Checksum Invalid\n");
5194 return -E1000_ERR_EEPROM;
5195 }
5196} 3851}
5197 3852
5198/****************************************************************************** 3853/**
5199 * Calculates the EEPROM checksum and writes it to the EEPROM 3854 * e1000_update_eeprom_checksum - Calculates/writes the EEPROM checksum
5200 * 3855 * @hw: Struct containing variables accessed by shared code
5201 * hw - Struct containing variables accessed by shared code
5202 * 3856 *
5203 * Sums the first 63 16 bit words of the EEPROM. Subtracts the sum from 0xBABA. 3857 * Sums the first 63 16 bit words of the EEPROM. Subtracts the sum from 0xBABA.
5204 * Writes the difference to word offset 63 of the EEPROM. 3858 * Writes the difference to word offset 63 of the EEPROM.
5205 *****************************************************************************/ 3859 */
5206s32 e1000_update_eeprom_checksum(struct e1000_hw *hw) 3860s32 e1000_update_eeprom_checksum(struct e1000_hw *hw)
5207{ 3861{
5208 u32 ctrl_ext; 3862 u16 checksum = 0;
5209 u16 checksum = 0; 3863 u16 i, eeprom_data;
5210 u16 i, eeprom_data; 3864
5211 3865 DEBUGFUNC("e1000_update_eeprom_checksum");
5212 DEBUGFUNC("e1000_update_eeprom_checksum"); 3866
5213 3867 for (i = 0; i < EEPROM_CHECKSUM_REG; i++) {
5214 for (i = 0; i < EEPROM_CHECKSUM_REG; i++) { 3868 if (e1000_read_eeprom(hw, i, 1, &eeprom_data) < 0) {
5215 if (e1000_read_eeprom(hw, i, 1, &eeprom_data) < 0) { 3869 DEBUGOUT("EEPROM Read Error\n");
5216 DEBUGOUT("EEPROM Read Error\n"); 3870 return -E1000_ERR_EEPROM;
5217 return -E1000_ERR_EEPROM; 3871 }
5218 } 3872 checksum += eeprom_data;
5219 checksum += eeprom_data; 3873 }
5220 } 3874 checksum = (u16) EEPROM_SUM - checksum;
5221 checksum = (u16)EEPROM_SUM - checksum; 3875 if (e1000_write_eeprom(hw, EEPROM_CHECKSUM_REG, 1, &checksum) < 0) {
5222 if (e1000_write_eeprom(hw, EEPROM_CHECKSUM_REG, 1, &checksum) < 0) { 3876 DEBUGOUT("EEPROM Write Error\n");
5223 DEBUGOUT("EEPROM Write Error\n"); 3877 return -E1000_ERR_EEPROM;
5224 return -E1000_ERR_EEPROM; 3878 }
5225 } else if (hw->eeprom.type == e1000_eeprom_flash) { 3879 return E1000_SUCCESS;
5226 e1000_commit_shadow_ram(hw);
5227 } else if (hw->eeprom.type == e1000_eeprom_ich8) {
5228 e1000_commit_shadow_ram(hw);
5229 /* Reload the EEPROM, or else modifications will not appear
5230 * until after next adapter reset. */
5231 ctrl_ext = er32(CTRL_EXT);
5232 ctrl_ext |= E1000_CTRL_EXT_EE_RST;
5233 ew32(CTRL_EXT, ctrl_ext);
5234 msleep(10);
5235 }
5236 return E1000_SUCCESS;
5237} 3880}
5238 3881
5239/****************************************************************************** 3882/**
5240 * Parent function for writing words to the different EEPROM types. 3883 * e1000_write_eeprom - write words to the different EEPROM types.
5241 * 3884 * @hw: Struct containing variables accessed by shared code
5242 * hw - Struct containing variables accessed by shared code 3885 * @offset: offset within the EEPROM to be written to
5243 * offset - offset within the EEPROM to be written to 3886 * @words: number of words to write
5244 * words - number of words to write 3887 * @data: 16 bit word to be written to the EEPROM
5245 * data - 16 bit word to be written to the EEPROM
5246 * 3888 *
5247 * If e1000_update_eeprom_checksum is not called after this function, the 3889 * If e1000_update_eeprom_checksum is not called after this function, the
5248 * EEPROM will most likely contain an invalid checksum. 3890 * EEPROM will most likely contain an invalid checksum.
5249 *****************************************************************************/ 3891 */
5250s32 e1000_write_eeprom(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) 3892s32 e1000_write_eeprom(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
5251{ 3893{
5252 s32 ret; 3894 s32 ret;
5253 spin_lock(&e1000_eeprom_lock); 3895 spin_lock(&e1000_eeprom_lock);
5254 ret = e1000_do_write_eeprom(hw, offset, words, data); 3896 ret = e1000_do_write_eeprom(hw, offset, words, data);
5255 spin_unlock(&e1000_eeprom_lock); 3897 spin_unlock(&e1000_eeprom_lock);
5256 return ret; 3898 return ret;
5257} 3899}
5258 3900
5259 3901static s32 e1000_do_write_eeprom(struct e1000_hw *hw, u16 offset, u16 words,
5260static s32 e1000_do_write_eeprom(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) 3902 u16 *data)
5261{ 3903{
5262 struct e1000_eeprom_info *eeprom = &hw->eeprom; 3904 struct e1000_eeprom_info *eeprom = &hw->eeprom;
5263 s32 status = 0; 3905 s32 status = 0;
5264 3906
5265 DEBUGFUNC("e1000_write_eeprom"); 3907 DEBUGFUNC("e1000_write_eeprom");
5266 3908
5267 /* If eeprom is not yet detected, do so now */ 3909 /* If eeprom is not yet detected, do so now */
5268 if (eeprom->word_size == 0) 3910 if (eeprom->word_size == 0)
5269 e1000_init_eeprom_params(hw); 3911 e1000_init_eeprom_params(hw);
5270 3912
5271 /* A check for invalid values: offset too large, too many words, and not 3913 /* A check for invalid values: offset too large, too many words, and not
5272 * enough words. 3914 * enough words.
5273 */ 3915 */
5274 if ((offset >= eeprom->word_size) || (words > eeprom->word_size - offset) || 3916 if ((offset >= eeprom->word_size)
5275 (words == 0)) { 3917 || (words > eeprom->word_size - offset) || (words == 0)) {
5276 DEBUGOUT("\"words\" parameter out of bounds\n"); 3918 DEBUGOUT("\"words\" parameter out of bounds\n");
5277 return -E1000_ERR_EEPROM; 3919 return -E1000_ERR_EEPROM;
5278 } 3920 }
5279 3921
5280 /* 82573 writes only through eewr */ 3922 /* Prepare the EEPROM for writing */
5281 if (eeprom->use_eewr) 3923 if (e1000_acquire_eeprom(hw) != E1000_SUCCESS)
5282 return e1000_write_eeprom_eewr(hw, offset, words, data); 3924 return -E1000_ERR_EEPROM;
5283 3925
5284 if (eeprom->type == e1000_eeprom_ich8) 3926 if (eeprom->type == e1000_eeprom_microwire) {
5285 return e1000_write_eeprom_ich8(hw, offset, words, data); 3927 status = e1000_write_eeprom_microwire(hw, offset, words, data);
5286 3928 } else {
5287 /* Prepare the EEPROM for writing */ 3929 status = e1000_write_eeprom_spi(hw, offset, words, data);
5288 if (e1000_acquire_eeprom(hw) != E1000_SUCCESS) 3930 msleep(10);
5289 return -E1000_ERR_EEPROM; 3931 }
5290 3932
5291 if (eeprom->type == e1000_eeprom_microwire) { 3933 /* Done with writing */
5292 status = e1000_write_eeprom_microwire(hw, offset, words, data); 3934 e1000_release_eeprom(hw);
5293 } else { 3935
5294 status = e1000_write_eeprom_spi(hw, offset, words, data); 3936 return status;
5295 msleep(10);
5296 }
5297
5298 /* Done with writing */
5299 e1000_release_eeprom(hw);
5300
5301 return status;
5302} 3937}
5303 3938
5304/****************************************************************************** 3939/**
5305 * Writes a 16 bit word to a given offset in an SPI EEPROM. 3940 * e1000_write_eeprom_spi - Writes a 16 bit word to a given offset in an SPI EEPROM.
5306 * 3941 * @hw: Struct containing variables accessed by shared code
5307 * hw - Struct containing variables accessed by shared code 3942 * @offset: offset within the EEPROM to be written to
5308 * offset - offset within the EEPROM to be written to 3943 * @words: number of words to write
5309 * words - number of words to write 3944 * @data: pointer to array of 8 bit words to be written to the EEPROM
5310 * data - pointer to array of 8 bit words to be written to the EEPROM 3945 */
5311 *
5312 *****************************************************************************/
5313static s32 e1000_write_eeprom_spi(struct e1000_hw *hw, u16 offset, u16 words, 3946static s32 e1000_write_eeprom_spi(struct e1000_hw *hw, u16 offset, u16 words,
5314 u16 *data) 3947 u16 *data)
5315{ 3948{
5316 struct e1000_eeprom_info *eeprom = &hw->eeprom; 3949 struct e1000_eeprom_info *eeprom = &hw->eeprom;
5317 u16 widx = 0; 3950 u16 widx = 0;
5318 3951
5319 DEBUGFUNC("e1000_write_eeprom_spi"); 3952 DEBUGFUNC("e1000_write_eeprom_spi");
5320 3953
5321 while (widx < words) { 3954 while (widx < words) {
5322 u8 write_opcode = EEPROM_WRITE_OPCODE_SPI; 3955 u8 write_opcode = EEPROM_WRITE_OPCODE_SPI;
5323 3956
5324 if (e1000_spi_eeprom_ready(hw)) return -E1000_ERR_EEPROM; 3957 if (e1000_spi_eeprom_ready(hw))
3958 return -E1000_ERR_EEPROM;
5325 3959
5326 e1000_standby_eeprom(hw); 3960 e1000_standby_eeprom(hw);
5327 3961
5328 /* Send the WRITE ENABLE command (8 bit opcode ) */ 3962 /* Send the WRITE ENABLE command (8 bit opcode ) */
5329 e1000_shift_out_ee_bits(hw, EEPROM_WREN_OPCODE_SPI, 3963 e1000_shift_out_ee_bits(hw, EEPROM_WREN_OPCODE_SPI,
5330 eeprom->opcode_bits); 3964 eeprom->opcode_bits);
5331 3965
5332 e1000_standby_eeprom(hw); 3966 e1000_standby_eeprom(hw);
5333 3967
5334 /* Some SPI eeproms use the 8th address bit embedded in the opcode */ 3968 /* Some SPI eeproms use the 8th address bit embedded in the opcode */
5335 if ((eeprom->address_bits == 8) && (offset >= 128)) 3969 if ((eeprom->address_bits == 8) && (offset >= 128))
5336 write_opcode |= EEPROM_A8_OPCODE_SPI; 3970 write_opcode |= EEPROM_A8_OPCODE_SPI;
5337 3971
5338 /* Send the Write command (8-bit opcode + addr) */ 3972 /* Send the Write command (8-bit opcode + addr) */
5339 e1000_shift_out_ee_bits(hw, write_opcode, eeprom->opcode_bits); 3973 e1000_shift_out_ee_bits(hw, write_opcode, eeprom->opcode_bits);
5340 3974
5341 e1000_shift_out_ee_bits(hw, (u16)((offset + widx)*2), 3975 e1000_shift_out_ee_bits(hw, (u16) ((offset + widx) * 2),
5342 eeprom->address_bits); 3976 eeprom->address_bits);
5343 3977
5344 /* Send the data */ 3978 /* Send the data */
5345 3979
5346 /* Loop to allow for up to whole page write (32 bytes) of eeprom */ 3980 /* Loop to allow for up to whole page write (32 bytes) of eeprom */
5347 while (widx < words) { 3981 while (widx < words) {
5348 u16 word_out = data[widx]; 3982 u16 word_out = data[widx];
5349 word_out = (word_out >> 8) | (word_out << 8); 3983 word_out = (word_out >> 8) | (word_out << 8);
5350 e1000_shift_out_ee_bits(hw, word_out, 16); 3984 e1000_shift_out_ee_bits(hw, word_out, 16);
5351 widx++; 3985 widx++;
5352 3986
5353 /* Some larger eeprom sizes are capable of a 32-byte PAGE WRITE 3987 /* Some larger eeprom sizes are capable of a 32-byte PAGE WRITE
5354 * operation, while the smaller eeproms are capable of an 8-byte 3988 * operation, while the smaller eeproms are capable of an 8-byte
5355 * PAGE WRITE operation. Break the inner loop to pass new address 3989 * PAGE WRITE operation. Break the inner loop to pass new address
5356 */ 3990 */
5357 if ((((offset + widx)*2) % eeprom->page_size) == 0) { 3991 if ((((offset + widx) * 2) % eeprom->page_size) == 0) {
5358 e1000_standby_eeprom(hw); 3992 e1000_standby_eeprom(hw);
5359 break; 3993 break;
5360 } 3994 }
5361 } 3995 }
5362 } 3996 }
5363 3997
5364 return E1000_SUCCESS; 3998 return E1000_SUCCESS;
5365} 3999}
5366 4000
5367/****************************************************************************** 4001/**
5368 * Writes a 16 bit word to a given offset in a Microwire EEPROM. 4002 * e1000_write_eeprom_microwire - Writes a 16 bit word to a given offset in a Microwire EEPROM.
5369 * 4003 * @hw: Struct containing variables accessed by shared code
5370 * hw - Struct containing variables accessed by shared code 4004 * @offset: offset within the EEPROM to be written to
5371 * offset - offset within the EEPROM to be written to 4005 * @words: number of words to write
5372 * words - number of words to write 4006 * @data: pointer to array of 8 bit words to be written to the EEPROM
5373 * data - pointer to array of 16 bit words to be written to the EEPROM 4007 */
5374 *
5375 *****************************************************************************/
5376static s32 e1000_write_eeprom_microwire(struct e1000_hw *hw, u16 offset, 4008static s32 e1000_write_eeprom_microwire(struct e1000_hw *hw, u16 offset,
5377 u16 words, u16 *data) 4009 u16 words, u16 *data)
5378{ 4010{
5379 struct e1000_eeprom_info *eeprom = &hw->eeprom; 4011 struct e1000_eeprom_info *eeprom = &hw->eeprom;
5380 u32 eecd; 4012 u32 eecd;
5381 u16 words_written = 0; 4013 u16 words_written = 0;
5382 u16 i = 0; 4014 u16 i = 0;
5383
5384 DEBUGFUNC("e1000_write_eeprom_microwire");
5385
5386 /* Send the write enable command to the EEPROM (3-bit opcode plus
5387 * 6/8-bit dummy address beginning with 11). It's less work to include
5388 * the 11 of the dummy address as part of the opcode than it is to shift
5389 * it over the correct number of bits for the address. This puts the
5390 * EEPROM into write/erase mode.
5391 */
5392 e1000_shift_out_ee_bits(hw, EEPROM_EWEN_OPCODE_MICROWIRE,
5393 (u16)(eeprom->opcode_bits + 2));
5394
5395 e1000_shift_out_ee_bits(hw, 0, (u16)(eeprom->address_bits - 2));
5396
5397 /* Prepare the EEPROM */
5398 e1000_standby_eeprom(hw);
5399
5400 while (words_written < words) {
5401 /* Send the Write command (3-bit opcode + addr) */
5402 e1000_shift_out_ee_bits(hw, EEPROM_WRITE_OPCODE_MICROWIRE,
5403 eeprom->opcode_bits);
5404
5405 e1000_shift_out_ee_bits(hw, (u16)(offset + words_written),
5406 eeprom->address_bits);
5407
5408 /* Send the data */
5409 e1000_shift_out_ee_bits(hw, data[words_written], 16);
5410
5411 /* Toggle the CS line. This in effect tells the EEPROM to execute
5412 * the previous command.
5413 */
5414 e1000_standby_eeprom(hw);
5415
5416 /* Read DO repeatedly until it is high (equal to '1'). The EEPROM will
5417 * signal that the command has been completed by raising the DO signal.
5418 * If DO does not go high in 10 milliseconds, then error out.
5419 */
5420 for (i = 0; i < 200; i++) {
5421 eecd = er32(EECD);
5422 if (eecd & E1000_EECD_DO) break;
5423 udelay(50);
5424 }
5425 if (i == 200) {
5426 DEBUGOUT("EEPROM Write did not complete\n");
5427 return -E1000_ERR_EEPROM;
5428 }
5429
5430 /* Recover from write */
5431 e1000_standby_eeprom(hw);
5432
5433 words_written++;
5434 }
5435
5436 /* Send the write disable command to the EEPROM (3-bit opcode plus
5437 * 6/8-bit dummy address beginning with 10). It's less work to include
5438 * the 10 of the dummy address as part of the opcode than it is to shift
5439 * it over the correct number of bits for the address. This takes the
5440 * EEPROM out of write/erase mode.
5441 */
5442 e1000_shift_out_ee_bits(hw, EEPROM_EWDS_OPCODE_MICROWIRE,
5443 (u16)(eeprom->opcode_bits + 2));
5444
5445 e1000_shift_out_ee_bits(hw, 0, (u16)(eeprom->address_bits - 2));
5446
5447 return E1000_SUCCESS;
5448}
5449 4015
5450/****************************************************************************** 4016 DEBUGFUNC("e1000_write_eeprom_microwire");
5451 * Flushes the cached eeprom to NVM. This is done by saving the modified values 4017
5452 * in the eeprom cache and the non modified values in the currently active bank 4018 /* Send the write enable command to the EEPROM (3-bit opcode plus
5453 * to the new bank. 4019 * 6/8-bit dummy address beginning with 11). It's less work to include
5454 * 4020 * the 11 of the dummy address as part of the opcode than it is to shift
5455 * hw - Struct containing variables accessed by shared code 4021 * it over the correct number of bits for the address. This puts the
5456 * offset - offset of word in the EEPROM to read 4022 * EEPROM into write/erase mode.
5457 * data - word read from the EEPROM 4023 */
5458 * words - number of words to read 4024 e1000_shift_out_ee_bits(hw, EEPROM_EWEN_OPCODE_MICROWIRE,
5459 *****************************************************************************/ 4025 (u16) (eeprom->opcode_bits + 2));
5460static s32 e1000_commit_shadow_ram(struct e1000_hw *hw) 4026
5461{ 4027 e1000_shift_out_ee_bits(hw, 0, (u16) (eeprom->address_bits - 2));
5462 u32 attempts = 100000; 4028
5463 u32 eecd = 0; 4029 /* Prepare the EEPROM */
5464 u32 flop = 0; 4030 e1000_standby_eeprom(hw);
5465 u32 i = 0; 4031
5466 s32 error = E1000_SUCCESS; 4032 while (words_written < words) {
5467 u32 old_bank_offset = 0; 4033 /* Send the Write command (3-bit opcode + addr) */
5468 u32 new_bank_offset = 0; 4034 e1000_shift_out_ee_bits(hw, EEPROM_WRITE_OPCODE_MICROWIRE,
5469 u8 low_byte = 0; 4035 eeprom->opcode_bits);
5470 u8 high_byte = 0; 4036
5471 bool sector_write_failed = false; 4037 e1000_shift_out_ee_bits(hw, (u16) (offset + words_written),
5472 4038 eeprom->address_bits);
5473 if (hw->mac_type == e1000_82573) { 4039
5474 /* The flop register will be used to determine if flash type is STM */ 4040 /* Send the data */
5475 flop = er32(FLOP); 4041 e1000_shift_out_ee_bits(hw, data[words_written], 16);
5476 for (i=0; i < attempts; i++) { 4042
5477 eecd = er32(EECD); 4043 /* Toggle the CS line. This in effect tells the EEPROM to execute
5478 if ((eecd & E1000_EECD_FLUPD) == 0) { 4044 * the previous command.
5479 break; 4045 */
5480 } 4046 e1000_standby_eeprom(hw);
5481 udelay(5); 4047
5482 } 4048 /* Read DO repeatedly until it is high (equal to '1'). The EEPROM will
5483 4049 * signal that the command has been completed by raising the DO signal.
5484 if (i == attempts) { 4050 * If DO does not go high in 10 milliseconds, then error out.
5485 return -E1000_ERR_EEPROM; 4051 */
5486 } 4052 for (i = 0; i < 200; i++) {
5487 4053 eecd = er32(EECD);
5488 /* If STM opcode located in bits 15:8 of flop, reset firmware */ 4054 if (eecd & E1000_EECD_DO)
5489 if ((flop & 0xFF00) == E1000_STM_OPCODE) { 4055 break;
5490 ew32(HICR, E1000_HICR_FW_RESET); 4056 udelay(50);
5491 } 4057 }
5492 4058 if (i == 200) {
5493 /* Perform the flash update */ 4059 DEBUGOUT("EEPROM Write did not complete\n");
5494 ew32(EECD, eecd | E1000_EECD_FLUPD); 4060 return -E1000_ERR_EEPROM;
5495 4061 }
5496 for (i=0; i < attempts; i++) { 4062
5497 eecd = er32(EECD); 4063 /* Recover from write */
5498 if ((eecd & E1000_EECD_FLUPD) == 0) { 4064 e1000_standby_eeprom(hw);
5499 break; 4065
5500 } 4066 words_written++;
5501 udelay(5); 4067 }
5502 } 4068
5503 4069 /* Send the write disable command to the EEPROM (3-bit opcode plus
5504 if (i == attempts) { 4070 * 6/8-bit dummy address beginning with 10). It's less work to include
5505 return -E1000_ERR_EEPROM; 4071 * the 10 of the dummy address as part of the opcode than it is to shift
5506 } 4072 * it over the correct number of bits for the address. This takes the
5507 } 4073 * EEPROM out of write/erase mode.
5508 4074 */
5509 if (hw->mac_type == e1000_ich8lan && hw->eeprom_shadow_ram != NULL) { 4075 e1000_shift_out_ee_bits(hw, EEPROM_EWDS_OPCODE_MICROWIRE,
5510 /* We're writing to the opposite bank so if we're on bank 1, 4076 (u16) (eeprom->opcode_bits + 2));
5511 * write to bank 0 etc. We also need to erase the segment that 4077
5512 * is going to be written */ 4078 e1000_shift_out_ee_bits(hw, 0, (u16) (eeprom->address_bits - 2));
5513 if (!(er32(EECD) & E1000_EECD_SEC1VAL)) { 4079
5514 new_bank_offset = hw->flash_bank_size * 2; 4080 return E1000_SUCCESS;
5515 old_bank_offset = 0;
5516 e1000_erase_ich8_4k_segment(hw, 1);
5517 } else {
5518 old_bank_offset = hw->flash_bank_size * 2;
5519 new_bank_offset = 0;
5520 e1000_erase_ich8_4k_segment(hw, 0);
5521 }
5522
5523 sector_write_failed = false;
5524 /* Loop for every byte in the shadow RAM,
5525 * which is in units of words. */
5526 for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
5527 /* Determine whether to write the value stored
5528 * in the other NVM bank or a modified value stored
5529 * in the shadow RAM */
5530 if (hw->eeprom_shadow_ram[i].modified) {
5531 low_byte = (u8)hw->eeprom_shadow_ram[i].eeprom_word;
5532 udelay(100);
5533 error = e1000_verify_write_ich8_byte(hw,
5534 (i << 1) + new_bank_offset, low_byte);
5535
5536 if (error != E1000_SUCCESS)
5537 sector_write_failed = true;
5538 else {
5539 high_byte =
5540 (u8)(hw->eeprom_shadow_ram[i].eeprom_word >> 8);
5541 udelay(100);
5542 }
5543 } else {
5544 e1000_read_ich8_byte(hw, (i << 1) + old_bank_offset,
5545 &low_byte);
5546 udelay(100);
5547 error = e1000_verify_write_ich8_byte(hw,
5548 (i << 1) + new_bank_offset, low_byte);
5549
5550 if (error != E1000_SUCCESS)
5551 sector_write_failed = true;
5552 else {
5553 e1000_read_ich8_byte(hw, (i << 1) + old_bank_offset + 1,
5554 &high_byte);
5555 udelay(100);
5556 }
5557 }
5558
5559 /* If the write of the low byte was successful, go ahead and
5560 * write the high byte while checking to make sure that if it
5561 * is the signature byte, then it is handled properly */
5562 if (!sector_write_failed) {
5563 /* If the word is 0x13, then make sure the signature bits
5564 * (15:14) are 11b until the commit has completed.
5565 * This will allow us to write 10b which indicates the
5566 * signature is valid. We want to do this after the write
5567 * has completed so that we don't mark the segment valid
5568 * while the write is still in progress */
5569 if (i == E1000_ICH_NVM_SIG_WORD)
5570 high_byte = E1000_ICH_NVM_SIG_MASK | high_byte;
5571
5572 error = e1000_verify_write_ich8_byte(hw,
5573 (i << 1) + new_bank_offset + 1, high_byte);
5574 if (error != E1000_SUCCESS)
5575 sector_write_failed = true;
5576
5577 } else {
5578 /* If the write failed then break from the loop and
5579 * return an error */
5580 break;
5581 }
5582 }
5583
5584 /* Don't bother writing the segment valid bits if sector
5585 * programming failed. */
5586 if (!sector_write_failed) {
5587 /* Finally validate the new segment by setting bit 15:14
5588 * to 10b in word 0x13 , this can be done without an
5589 * erase as well since these bits are 11 to start with
5590 * and we need to change bit 14 to 0b */
5591 e1000_read_ich8_byte(hw,
5592 E1000_ICH_NVM_SIG_WORD * 2 + 1 + new_bank_offset,
5593 &high_byte);
5594 high_byte &= 0xBF;
5595 error = e1000_verify_write_ich8_byte(hw,
5596 E1000_ICH_NVM_SIG_WORD * 2 + 1 + new_bank_offset, high_byte);
5597 /* And invalidate the previously valid segment by setting
5598 * its signature word (0x13) high_byte to 0b. This can be
5599 * done without an erase because flash erase sets all bits
5600 * to 1's. We can write 1's to 0's without an erase */
5601 if (error == E1000_SUCCESS) {
5602 error = e1000_verify_write_ich8_byte(hw,
5603 E1000_ICH_NVM_SIG_WORD * 2 + 1 + old_bank_offset, 0);
5604 }
5605
5606 /* Clear the now not used entry in the cache */
5607 for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
5608 hw->eeprom_shadow_ram[i].modified = false;
5609 hw->eeprom_shadow_ram[i].eeprom_word = 0xFFFF;
5610 }
5611 }
5612 }
5613
5614 return error;
5615} 4081}
5616 4082
5617/****************************************************************************** 4083/**
4084 * e1000_read_mac_addr - read the adapters MAC from eeprom
4085 * @hw: Struct containing variables accessed by shared code
4086 *
5618 * Reads the adapter's MAC address from the EEPROM and inverts the LSB for the 4087 * Reads the adapter's MAC address from the EEPROM and inverts the LSB for the
5619 * second function of dual function devices 4088 * second function of dual function devices
5620 * 4089 */
5621 * hw - Struct containing variables accessed by shared code
5622 *****************************************************************************/
5623s32 e1000_read_mac_addr(struct e1000_hw *hw) 4090s32 e1000_read_mac_addr(struct e1000_hw *hw)
5624{ 4091{
5625 u16 offset; 4092 u16 offset;
5626 u16 eeprom_data, i; 4093 u16 eeprom_data, i;
5627 4094
5628 DEBUGFUNC("e1000_read_mac_addr"); 4095 DEBUGFUNC("e1000_read_mac_addr");
5629 4096
5630 for (i = 0; i < NODE_ADDRESS_SIZE; i += 2) { 4097 for (i = 0; i < NODE_ADDRESS_SIZE; i += 2) {
5631 offset = i >> 1; 4098 offset = i >> 1;
5632 if (e1000_read_eeprom(hw, offset, 1, &eeprom_data) < 0) { 4099 if (e1000_read_eeprom(hw, offset, 1, &eeprom_data) < 0) {
5633 DEBUGOUT("EEPROM Read Error\n"); 4100 DEBUGOUT("EEPROM Read Error\n");
5634 return -E1000_ERR_EEPROM; 4101 return -E1000_ERR_EEPROM;
5635 } 4102 }
5636 hw->perm_mac_addr[i] = (u8)(eeprom_data & 0x00FF); 4103 hw->perm_mac_addr[i] = (u8) (eeprom_data & 0x00FF);
5637 hw->perm_mac_addr[i+1] = (u8)(eeprom_data >> 8); 4104 hw->perm_mac_addr[i + 1] = (u8) (eeprom_data >> 8);
5638 } 4105 }
5639 4106
5640 switch (hw->mac_type) { 4107 switch (hw->mac_type) {
5641 default: 4108 default:
5642 break; 4109 break;
5643 case e1000_82546: 4110 case e1000_82546:
5644 case e1000_82546_rev_3: 4111 case e1000_82546_rev_3:
5645 case e1000_82571: 4112 if (er32(STATUS) & E1000_STATUS_FUNC_1)
5646 case e1000_80003es2lan: 4113 hw->perm_mac_addr[5] ^= 0x01;
5647 if (er32(STATUS) & E1000_STATUS_FUNC_1) 4114 break;
5648 hw->perm_mac_addr[5] ^= 0x01; 4115 }
5649 break; 4116
5650 } 4117 for (i = 0; i < NODE_ADDRESS_SIZE; i++)
5651 4118 hw->mac_addr[i] = hw->perm_mac_addr[i];
5652 for (i = 0; i < NODE_ADDRESS_SIZE; i++) 4119 return E1000_SUCCESS;
5653 hw->mac_addr[i] = hw->perm_mac_addr[i];
5654 return E1000_SUCCESS;
5655} 4120}
5656 4121
5657/****************************************************************************** 4122/**
5658 * Initializes receive address filters. 4123 * e1000_init_rx_addrs - Initializes receive address filters.
5659 * 4124 * @hw: Struct containing variables accessed by shared code
5660 * hw - Struct containing variables accessed by shared code
5661 * 4125 *
5662 * Places the MAC address in receive address register 0 and clears the rest 4126 * Places the MAC address in receive address register 0 and clears the rest
5663 * of the receive addresss registers. Clears the multicast table. Assumes 4127 * of the receive address registers. Clears the multicast table. Assumes
5664 * the receiver is in reset when the routine is called. 4128 * the receiver is in reset when the routine is called.
5665 *****************************************************************************/ 4129 */
5666static void e1000_init_rx_addrs(struct e1000_hw *hw) 4130static void e1000_init_rx_addrs(struct e1000_hw *hw)
5667{ 4131{
5668 u32 i; 4132 u32 i;
5669 u32 rar_num; 4133 u32 rar_num;
5670 4134
5671 DEBUGFUNC("e1000_init_rx_addrs"); 4135 DEBUGFUNC("e1000_init_rx_addrs");
5672 4136
5673 /* Setup the receive address. */ 4137 /* Setup the receive address. */
5674 DEBUGOUT("Programming MAC Address into RAR[0]\n"); 4138 DEBUGOUT("Programming MAC Address into RAR[0]\n");
5675 4139
5676 e1000_rar_set(hw, hw->mac_addr, 0); 4140 e1000_rar_set(hw, hw->mac_addr, 0);
5677 4141
5678 rar_num = E1000_RAR_ENTRIES; 4142 rar_num = E1000_RAR_ENTRIES;
5679 4143
5680 /* Reserve a spot for the Locally Administered Address to work around 4144 /* Zero out the other 15 receive addresses. */
5681 * an 82571 issue in which a reset on one port will reload the MAC on 4145 DEBUGOUT("Clearing RAR[1-15]\n");
5682 * the other port. */ 4146 for (i = 1; i < rar_num; i++) {
5683 if ((hw->mac_type == e1000_82571) && (hw->laa_is_present)) 4147 E1000_WRITE_REG_ARRAY(hw, RA, (i << 1), 0);
5684 rar_num -= 1; 4148 E1000_WRITE_FLUSH();
5685 if (hw->mac_type == e1000_ich8lan) 4149 E1000_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0);
5686 rar_num = E1000_RAR_ENTRIES_ICH8LAN; 4150 E1000_WRITE_FLUSH();
5687 4151 }
5688 /* Zero out the other 15 receive addresses. */
5689 DEBUGOUT("Clearing RAR[1-15]\n");
5690 for (i = 1; i < rar_num; i++) {
5691 E1000_WRITE_REG_ARRAY(hw, RA, (i << 1), 0);
5692 E1000_WRITE_FLUSH();
5693 E1000_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0);
5694 E1000_WRITE_FLUSH();
5695 }
5696} 4152}
5697 4153
5698/****************************************************************************** 4154/**
5699 * Hashes an address to determine its location in the multicast table 4155 * e1000_hash_mc_addr - Hashes an address to determine its location in the multicast table
5700 * 4156 * @hw: Struct containing variables accessed by shared code
5701 * hw - Struct containing variables accessed by shared code 4157 * @mc_addr: the multicast address to hash
5702 * mc_addr - the multicast address to hash 4158 */
5703 *****************************************************************************/
5704u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr) 4159u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
5705{ 4160{
5706 u32 hash_value = 0; 4161 u32 hash_value = 0;
5707 4162
5708 /* The portion of the address that is used for the hash table is 4163 /* The portion of the address that is used for the hash table is
5709 * determined by the mc_filter_type setting. 4164 * determined by the mc_filter_type setting.
5710 */ 4165 */
5711 switch (hw->mc_filter_type) { 4166 switch (hw->mc_filter_type) {
5712 /* [0] [1] [2] [3] [4] [5] 4167 /* [0] [1] [2] [3] [4] [5]
5713 * 01 AA 00 12 34 56 4168 * 01 AA 00 12 34 56
5714 * LSB MSB 4169 * LSB MSB
5715 */ 4170 */
5716 case 0: 4171 case 0:
5717 if (hw->mac_type == e1000_ich8lan) { 4172 /* [47:36] i.e. 0x563 for above example address */
5718 /* [47:38] i.e. 0x158 for above example address */ 4173 hash_value = ((mc_addr[4] >> 4) | (((u16) mc_addr[5]) << 4));
5719 hash_value = ((mc_addr[4] >> 6) | (((u16)mc_addr[5]) << 2)); 4174 break;
5720 } else { 4175 case 1:
5721 /* [47:36] i.e. 0x563 for above example address */ 4176 /* [46:35] i.e. 0xAC6 for above example address */
5722 hash_value = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4)); 4177 hash_value = ((mc_addr[4] >> 3) | (((u16) mc_addr[5]) << 5));
5723 } 4178 break;
5724 break; 4179 case 2:
5725 case 1: 4180 /* [45:34] i.e. 0x5D8 for above example address */
5726 if (hw->mac_type == e1000_ich8lan) { 4181 hash_value = ((mc_addr[4] >> 2) | (((u16) mc_addr[5]) << 6));
5727 /* [46:37] i.e. 0x2B1 for above example address */ 4182 break;
5728 hash_value = ((mc_addr[4] >> 5) | (((u16)mc_addr[5]) << 3)); 4183 case 3:
5729 } else { 4184 /* [43:32] i.e. 0x634 for above example address */
5730 /* [46:35] i.e. 0xAC6 for above example address */ 4185 hash_value = ((mc_addr[4]) | (((u16) mc_addr[5]) << 8));
5731 hash_value = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5)); 4186 break;
5732 } 4187 }
5733 break; 4188
5734 case 2: 4189 hash_value &= 0xFFF;
5735 if (hw->mac_type == e1000_ich8lan) { 4190 return hash_value;
5736 /*[45:36] i.e. 0x163 for above example address */
5737 hash_value = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
5738 } else {
5739 /* [45:34] i.e. 0x5D8 for above example address */
5740 hash_value = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
5741 }
5742 break;
5743 case 3:
5744 if (hw->mac_type == e1000_ich8lan) {
5745 /* [43:34] i.e. 0x18D for above example address */
5746 hash_value = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
5747 } else {
5748 /* [43:32] i.e. 0x634 for above example address */
5749 hash_value = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
5750 }
5751 break;
5752 }
5753
5754 hash_value &= 0xFFF;
5755 if (hw->mac_type == e1000_ich8lan)
5756 hash_value &= 0x3FF;
5757
5758 return hash_value;
5759} 4191}
5760 4192
5761/****************************************************************************** 4193/**
5762 * Puts an ethernet address into a receive address register. 4194 * e1000_rar_set - Puts an ethernet address into a receive address register.
5763 * 4195 * @hw: Struct containing variables accessed by shared code
5764 * hw - Struct containing variables accessed by shared code 4196 * @addr: Address to put into receive address register
5765 * addr - Address to put into receive address register 4197 * @index: Receive address register to write
5766 * index - Receive address register to write 4198 */
5767 *****************************************************************************/
5768void e1000_rar_set(struct e1000_hw *hw, u8 *addr, u32 index) 4199void e1000_rar_set(struct e1000_hw *hw, u8 *addr, u32 index)
5769{ 4200{
5770 u32 rar_low, rar_high; 4201 u32 rar_low, rar_high;
5771 4202
5772 /* HW expects these in little endian so we reverse the byte order 4203 /* HW expects these in little endian so we reverse the byte order
5773 * from network order (big endian) to little endian 4204 * from network order (big endian) to little endian
5774 */ 4205 */
5775 rar_low = ((u32)addr[0] | ((u32)addr[1] << 8) | 4206 rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
5776 ((u32)addr[2] << 16) | ((u32)addr[3] << 24)); 4207 ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
5777 rar_high = ((u32)addr[4] | ((u32)addr[5] << 8)); 4208 rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
5778 4209
5779 /* Disable Rx and flush all Rx frames before enabling RSS to avoid Rx 4210 /* Disable Rx and flush all Rx frames before enabling RSS to avoid Rx
5780 * unit hang. 4211 * unit hang.
5781 * 4212 *
5782 * Description: 4213 * Description:
5783 * If there are any Rx frames queued up or otherwise present in the HW 4214 * If there are any Rx frames queued up or otherwise present in the HW
5784 * before RSS is enabled, and then we enable RSS, the HW Rx unit will 4215 * before RSS is enabled, and then we enable RSS, the HW Rx unit will
5785 * hang. To work around this issue, we have to disable receives and 4216 * hang. To work around this issue, we have to disable receives and
5786 * flush out all Rx frames before we enable RSS. To do so, we modify we 4217 * flush out all Rx frames before we enable RSS. To do so, we modify we
5787 * redirect all Rx traffic to manageability and then reset the HW. 4218 * redirect all Rx traffic to manageability and then reset the HW.
5788 * This flushes away Rx frames, and (since the redirections to 4219 * This flushes away Rx frames, and (since the redirections to
5789 * manageability persists across resets) keeps new ones from coming in 4220 * manageability persists across resets) keeps new ones from coming in
5790 * while we work. Then, we clear the Address Valid AV bit for all MAC 4221 * while we work. Then, we clear the Address Valid AV bit for all MAC
5791 * addresses and undo the re-direction to manageability. 4222 * addresses and undo the re-direction to manageability.
5792 * Now, frames are coming in again, but the MAC won't accept them, so 4223 * Now, frames are coming in again, but the MAC won't accept them, so
5793 * far so good. We now proceed to initialize RSS (if necessary) and 4224 * far so good. We now proceed to initialize RSS (if necessary) and
5794 * configure the Rx unit. Last, we re-enable the AV bits and continue 4225 * configure the Rx unit. Last, we re-enable the AV bits and continue
5795 * on our merry way. 4226 * on our merry way.
5796 */ 4227 */
5797 switch (hw->mac_type) { 4228 switch (hw->mac_type) {
5798 case e1000_82571: 4229 default:
5799 case e1000_82572: 4230 /* Indicate to hardware the Address is Valid. */
5800 case e1000_80003es2lan: 4231 rar_high |= E1000_RAH_AV;
5801 if (hw->leave_av_bit_off) 4232 break;
5802 break; 4233 }
5803 default: 4234
5804 /* Indicate to hardware the Address is Valid. */ 4235 E1000_WRITE_REG_ARRAY(hw, RA, (index << 1), rar_low);
5805 rar_high |= E1000_RAH_AV; 4236 E1000_WRITE_FLUSH();
5806 break; 4237 E1000_WRITE_REG_ARRAY(hw, RA, ((index << 1) + 1), rar_high);
5807 } 4238 E1000_WRITE_FLUSH();
5808
5809 E1000_WRITE_REG_ARRAY(hw, RA, (index << 1), rar_low);
5810 E1000_WRITE_FLUSH();
5811 E1000_WRITE_REG_ARRAY(hw, RA, ((index << 1) + 1), rar_high);
5812 E1000_WRITE_FLUSH();
5813} 4239}
5814 4240
5815/****************************************************************************** 4241/**
5816 * Writes a value to the specified offset in the VLAN filter table. 4242 * e1000_write_vfta - Writes a value to the specified offset in the VLAN filter table.
5817 * 4243 * @hw: Struct containing variables accessed by shared code
5818 * hw - Struct containing variables accessed by shared code 4244 * @offset: Offset in VLAN filer table to write
5819 * offset - Offset in VLAN filer table to write 4245 * @value: Value to write into VLAN filter table
5820 * value - Value to write into VLAN filter table 4246 */
5821 *****************************************************************************/
5822void e1000_write_vfta(struct e1000_hw *hw, u32 offset, u32 value) 4247void e1000_write_vfta(struct e1000_hw *hw, u32 offset, u32 value)
5823{ 4248{
5824 u32 temp; 4249 u32 temp;
5825 4250
5826 if (hw->mac_type == e1000_ich8lan) 4251 if ((hw->mac_type == e1000_82544) && ((offset & 0x1) == 1)) {
5827 return; 4252 temp = E1000_READ_REG_ARRAY(hw, VFTA, (offset - 1));
5828 4253 E1000_WRITE_REG_ARRAY(hw, VFTA, offset, value);
5829 if ((hw->mac_type == e1000_82544) && ((offset & 0x1) == 1)) { 4254 E1000_WRITE_FLUSH();
5830 temp = E1000_READ_REG_ARRAY(hw, VFTA, (offset - 1)); 4255 E1000_WRITE_REG_ARRAY(hw, VFTA, (offset - 1), temp);
5831 E1000_WRITE_REG_ARRAY(hw, VFTA, offset, value); 4256 E1000_WRITE_FLUSH();
5832 E1000_WRITE_FLUSH(); 4257 } else {
5833 E1000_WRITE_REG_ARRAY(hw, VFTA, (offset - 1), temp); 4258 E1000_WRITE_REG_ARRAY(hw, VFTA, offset, value);
5834 E1000_WRITE_FLUSH(); 4259 E1000_WRITE_FLUSH();
5835 } else { 4260 }
5836 E1000_WRITE_REG_ARRAY(hw, VFTA, offset, value);
5837 E1000_WRITE_FLUSH();
5838 }
5839} 4261}
5840 4262
5841/****************************************************************************** 4263/**
5842 * Clears the VLAN filer table 4264 * e1000_clear_vfta - Clears the VLAN filer table
5843 * 4265 * @hw: Struct containing variables accessed by shared code
5844 * hw - Struct containing variables accessed by shared code 4266 */
5845 *****************************************************************************/
5846static void e1000_clear_vfta(struct e1000_hw *hw) 4267static void e1000_clear_vfta(struct e1000_hw *hw)
5847{ 4268{
5848 u32 offset; 4269 u32 offset;
5849 u32 vfta_value = 0; 4270 u32 vfta_value = 0;
5850 u32 vfta_offset = 0; 4271 u32 vfta_offset = 0;
5851 u32 vfta_bit_in_reg = 0; 4272 u32 vfta_bit_in_reg = 0;
5852 4273
5853 if (hw->mac_type == e1000_ich8lan) 4274 for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
5854 return; 4275 /* If the offset we want to clear is the same offset of the
5855 4276 * manageability VLAN ID, then clear all bits except that of the
5856 if (hw->mac_type == e1000_82573) { 4277 * manageability unit */
5857 if (hw->mng_cookie.vlan_id != 0) { 4278 vfta_value = (offset == vfta_offset) ? vfta_bit_in_reg : 0;
5858 /* The VFTA is a 4096b bit-field, each identifying a single VLAN 4279 E1000_WRITE_REG_ARRAY(hw, VFTA, offset, vfta_value);
5859 * ID. The following operations determine which 32b entry 4280 E1000_WRITE_FLUSH();
5860 * (i.e. offset) into the array we want to set the VLAN ID 4281 }
5861 * (i.e. bit) of the manageability unit. */
5862 vfta_offset = (hw->mng_cookie.vlan_id >>
5863 E1000_VFTA_ENTRY_SHIFT) &
5864 E1000_VFTA_ENTRY_MASK;
5865 vfta_bit_in_reg = 1 << (hw->mng_cookie.vlan_id &
5866 E1000_VFTA_ENTRY_BIT_SHIFT_MASK);
5867 }
5868 }
5869 for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
5870 /* If the offset we want to clear is the same offset of the
5871 * manageability VLAN ID, then clear all bits except that of the
5872 * manageability unit */
5873 vfta_value = (offset == vfta_offset) ? vfta_bit_in_reg : 0;
5874 E1000_WRITE_REG_ARRAY(hw, VFTA, offset, vfta_value);
5875 E1000_WRITE_FLUSH();
5876 }
5877} 4282}
5878 4283
5879static s32 e1000_id_led_init(struct e1000_hw *hw) 4284static s32 e1000_id_led_init(struct e1000_hw *hw)
5880{ 4285{
5881 u32 ledctl; 4286 u32 ledctl;
5882 const u32 ledctl_mask = 0x000000FF; 4287 const u32 ledctl_mask = 0x000000FF;
5883 const u32 ledctl_on = E1000_LEDCTL_MODE_LED_ON; 4288 const u32 ledctl_on = E1000_LEDCTL_MODE_LED_ON;
5884 const u32 ledctl_off = E1000_LEDCTL_MODE_LED_OFF; 4289 const u32 ledctl_off = E1000_LEDCTL_MODE_LED_OFF;
5885 u16 eeprom_data, i, temp; 4290 u16 eeprom_data, i, temp;
5886 const u16 led_mask = 0x0F; 4291 const u16 led_mask = 0x0F;
5887 4292
5888 DEBUGFUNC("e1000_id_led_init"); 4293 DEBUGFUNC("e1000_id_led_init");
5889 4294
5890 if (hw->mac_type < e1000_82540) { 4295 if (hw->mac_type < e1000_82540) {
5891 /* Nothing to do */ 4296 /* Nothing to do */
5892 return E1000_SUCCESS; 4297 return E1000_SUCCESS;
5893 } 4298 }
5894 4299
5895 ledctl = er32(LEDCTL); 4300 ledctl = er32(LEDCTL);
5896 hw->ledctl_default = ledctl; 4301 hw->ledctl_default = ledctl;
5897 hw->ledctl_mode1 = hw->ledctl_default; 4302 hw->ledctl_mode1 = hw->ledctl_default;
5898 hw->ledctl_mode2 = hw->ledctl_default; 4303 hw->ledctl_mode2 = hw->ledctl_default;
5899 4304
5900 if (e1000_read_eeprom(hw, EEPROM_ID_LED_SETTINGS, 1, &eeprom_data) < 0) { 4305 if (e1000_read_eeprom(hw, EEPROM_ID_LED_SETTINGS, 1, &eeprom_data) < 0) {
5901 DEBUGOUT("EEPROM Read Error\n"); 4306 DEBUGOUT("EEPROM Read Error\n");
5902 return -E1000_ERR_EEPROM; 4307 return -E1000_ERR_EEPROM;
5903 } 4308 }
5904 4309
5905 if ((hw->mac_type == e1000_82573) && 4310 if ((eeprom_data == ID_LED_RESERVED_0000) ||
5906 (eeprom_data == ID_LED_RESERVED_82573)) 4311 (eeprom_data == ID_LED_RESERVED_FFFF)) {
5907 eeprom_data = ID_LED_DEFAULT_82573; 4312 eeprom_data = ID_LED_DEFAULT;
5908 else if ((eeprom_data == ID_LED_RESERVED_0000) || 4313 }
5909 (eeprom_data == ID_LED_RESERVED_FFFF)) { 4314
5910 if (hw->mac_type == e1000_ich8lan) 4315 for (i = 0; i < 4; i++) {
5911 eeprom_data = ID_LED_DEFAULT_ICH8LAN; 4316 temp = (eeprom_data >> (i << 2)) & led_mask;
5912 else 4317 switch (temp) {
5913 eeprom_data = ID_LED_DEFAULT; 4318 case ID_LED_ON1_DEF2:
5914 } 4319 case ID_LED_ON1_ON2:
5915 4320 case ID_LED_ON1_OFF2:
5916 for (i = 0; i < 4; i++) { 4321 hw->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
5917 temp = (eeprom_data >> (i << 2)) & led_mask; 4322 hw->ledctl_mode1 |= ledctl_on << (i << 3);
5918 switch (temp) { 4323 break;
5919 case ID_LED_ON1_DEF2: 4324 case ID_LED_OFF1_DEF2:
5920 case ID_LED_ON1_ON2: 4325 case ID_LED_OFF1_ON2:
5921 case ID_LED_ON1_OFF2: 4326 case ID_LED_OFF1_OFF2:
5922 hw->ledctl_mode1 &= ~(ledctl_mask << (i << 3)); 4327 hw->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
5923 hw->ledctl_mode1 |= ledctl_on << (i << 3); 4328 hw->ledctl_mode1 |= ledctl_off << (i << 3);
5924 break; 4329 break;
5925 case ID_LED_OFF1_DEF2: 4330 default:
5926 case ID_LED_OFF1_ON2: 4331 /* Do nothing */
5927 case ID_LED_OFF1_OFF2: 4332 break;
5928 hw->ledctl_mode1 &= ~(ledctl_mask << (i << 3)); 4333 }
5929 hw->ledctl_mode1 |= ledctl_off << (i << 3); 4334 switch (temp) {
5930 break; 4335 case ID_LED_DEF1_ON2:
5931 default: 4336 case ID_LED_ON1_ON2:
5932 /* Do nothing */ 4337 case ID_LED_OFF1_ON2:
5933 break; 4338 hw->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
5934 } 4339 hw->ledctl_mode2 |= ledctl_on << (i << 3);
5935 switch (temp) { 4340 break;
5936 case ID_LED_DEF1_ON2: 4341 case ID_LED_DEF1_OFF2:
5937 case ID_LED_ON1_ON2: 4342 case ID_LED_ON1_OFF2:
5938 case ID_LED_OFF1_ON2: 4343 case ID_LED_OFF1_OFF2:
5939 hw->ledctl_mode2 &= ~(ledctl_mask << (i << 3)); 4344 hw->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
5940 hw->ledctl_mode2 |= ledctl_on << (i << 3); 4345 hw->ledctl_mode2 |= ledctl_off << (i << 3);
5941 break; 4346 break;
5942 case ID_LED_DEF1_OFF2: 4347 default:
5943 case ID_LED_ON1_OFF2: 4348 /* Do nothing */
5944 case ID_LED_OFF1_OFF2: 4349 break;
5945 hw->ledctl_mode2 &= ~(ledctl_mask << (i << 3)); 4350 }
5946 hw->ledctl_mode2 |= ledctl_off << (i << 3); 4351 }
5947 break; 4352 return E1000_SUCCESS;
5948 default:
5949 /* Do nothing */
5950 break;
5951 }
5952 }
5953 return E1000_SUCCESS;
5954} 4353}
5955 4354
5956/****************************************************************************** 4355/**
5957 * Prepares SW controlable LED for use and saves the current state of the LED. 4356 * e1000_setup_led
4357 * @hw: Struct containing variables accessed by shared code
5958 * 4358 *
5959 * hw - Struct containing variables accessed by shared code 4359 * Prepares SW controlable LED for use and saves the current state of the LED.
5960 *****************************************************************************/ 4360 */
5961s32 e1000_setup_led(struct e1000_hw *hw) 4361s32 e1000_setup_led(struct e1000_hw *hw)
5962{ 4362{
5963 u32 ledctl; 4363 u32 ledctl;
5964 s32 ret_val = E1000_SUCCESS; 4364 s32 ret_val = E1000_SUCCESS;
5965
5966 DEBUGFUNC("e1000_setup_led");
5967
5968 switch (hw->mac_type) {
5969 case e1000_82542_rev2_0:
5970 case e1000_82542_rev2_1:
5971 case e1000_82543:
5972 case e1000_82544:
5973 /* No setup necessary */
5974 break;
5975 case e1000_82541:
5976 case e1000_82547:
5977 case e1000_82541_rev_2:
5978 case e1000_82547_rev_2:
5979 /* Turn off PHY Smart Power Down (if enabled) */
5980 ret_val = e1000_read_phy_reg(hw, IGP01E1000_GMII_FIFO,
5981 &hw->phy_spd_default);
5982 if (ret_val)
5983 return ret_val;
5984 ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO,
5985 (u16)(hw->phy_spd_default &
5986 ~IGP01E1000_GMII_SPD));
5987 if (ret_val)
5988 return ret_val;
5989 /* Fall Through */
5990 default:
5991 if (hw->media_type == e1000_media_type_fiber) {
5992 ledctl = er32(LEDCTL);
5993 /* Save current LEDCTL settings */
5994 hw->ledctl_default = ledctl;
5995 /* Turn off LED0 */
5996 ledctl &= ~(E1000_LEDCTL_LED0_IVRT |
5997 E1000_LEDCTL_LED0_BLINK |
5998 E1000_LEDCTL_LED0_MODE_MASK);
5999 ledctl |= (E1000_LEDCTL_MODE_LED_OFF <<
6000 E1000_LEDCTL_LED0_MODE_SHIFT);
6001 ew32(LEDCTL, ledctl);
6002 } else if (hw->media_type == e1000_media_type_copper)
6003 ew32(LEDCTL, hw->ledctl_mode1);
6004 break;
6005 }
6006
6007 return E1000_SUCCESS;
6008}
6009 4365
4366 DEBUGFUNC("e1000_setup_led");
6010 4367
6011/****************************************************************************** 4368 switch (hw->mac_type) {
6012 * Used on 82571 and later Si that has LED blink bits. 4369 case e1000_82542_rev2_0:
6013 * Callers must use their own timer and should have already called 4370 case e1000_82542_rev2_1:
6014 * e1000_id_led_init() 4371 case e1000_82543:
6015 * Call e1000_cleanup led() to stop blinking 4372 case e1000_82544:
6016 * 4373 /* No setup necessary */
6017 * hw - Struct containing variables accessed by shared code 4374 break;
6018 *****************************************************************************/ 4375 case e1000_82541:
6019s32 e1000_blink_led_start(struct e1000_hw *hw) 4376 case e1000_82547:
6020{ 4377 case e1000_82541_rev_2:
6021 s16 i; 4378 case e1000_82547_rev_2:
6022 u32 ledctl_blink = 0; 4379 /* Turn off PHY Smart Power Down (if enabled) */
6023 4380 ret_val = e1000_read_phy_reg(hw, IGP01E1000_GMII_FIFO,
6024 DEBUGFUNC("e1000_id_led_blink_on"); 4381 &hw->phy_spd_default);
6025 4382 if (ret_val)
6026 if (hw->mac_type < e1000_82571) { 4383 return ret_val;
6027 /* Nothing to do */ 4384 ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO,
6028 return E1000_SUCCESS; 4385 (u16) (hw->phy_spd_default &
6029 } 4386 ~IGP01E1000_GMII_SPD));
6030 if (hw->media_type == e1000_media_type_fiber) { 4387 if (ret_val)
6031 /* always blink LED0 for PCI-E fiber */ 4388 return ret_val;
6032 ledctl_blink = E1000_LEDCTL_LED0_BLINK | 4389 /* Fall Through */
6033 (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED0_MODE_SHIFT); 4390 default:
6034 } else { 4391 if (hw->media_type == e1000_media_type_fiber) {
6035 /* set the blink bit for each LED that's "on" (0x0E) in ledctl_mode2 */ 4392 ledctl = er32(LEDCTL);
6036 ledctl_blink = hw->ledctl_mode2; 4393 /* Save current LEDCTL settings */
6037 for (i=0; i < 4; i++) 4394 hw->ledctl_default = ledctl;
6038 if (((hw->ledctl_mode2 >> (i * 8)) & 0xFF) == 4395 /* Turn off LED0 */
6039 E1000_LEDCTL_MODE_LED_ON) 4396 ledctl &= ~(E1000_LEDCTL_LED0_IVRT |
6040 ledctl_blink |= (E1000_LEDCTL_LED0_BLINK << (i * 8)); 4397 E1000_LEDCTL_LED0_BLINK |
6041 } 4398 E1000_LEDCTL_LED0_MODE_MASK);
6042 4399 ledctl |= (E1000_LEDCTL_MODE_LED_OFF <<
6043 ew32(LEDCTL, ledctl_blink); 4400 E1000_LEDCTL_LED0_MODE_SHIFT);
6044 4401 ew32(LEDCTL, ledctl);
6045 return E1000_SUCCESS; 4402 } else if (hw->media_type == e1000_media_type_copper)
4403 ew32(LEDCTL, hw->ledctl_mode1);
4404 break;
4405 }
4406
4407 return E1000_SUCCESS;
6046} 4408}
6047 4409
6048/****************************************************************************** 4410/**
6049 * Restores the saved state of the SW controlable LED. 4411 * e1000_cleanup_led - Restores the saved state of the SW controlable LED.
6050 * 4412 * @hw: Struct containing variables accessed by shared code
6051 * hw - Struct containing variables accessed by shared code 4413 */
6052 *****************************************************************************/
6053s32 e1000_cleanup_led(struct e1000_hw *hw) 4414s32 e1000_cleanup_led(struct e1000_hw *hw)
6054{ 4415{
6055 s32 ret_val = E1000_SUCCESS; 4416 s32 ret_val = E1000_SUCCESS;
6056 4417
6057 DEBUGFUNC("e1000_cleanup_led"); 4418 DEBUGFUNC("e1000_cleanup_led");
6058 4419
6059 switch (hw->mac_type) { 4420 switch (hw->mac_type) {
6060 case e1000_82542_rev2_0: 4421 case e1000_82542_rev2_0:
6061 case e1000_82542_rev2_1: 4422 case e1000_82542_rev2_1:
6062 case e1000_82543: 4423 case e1000_82543:
6063 case e1000_82544: 4424 case e1000_82544:
6064 /* No cleanup necessary */ 4425 /* No cleanup necessary */
6065 break; 4426 break;
6066 case e1000_82541: 4427 case e1000_82541:
6067 case e1000_82547: 4428 case e1000_82547:
6068 case e1000_82541_rev_2: 4429 case e1000_82541_rev_2:
6069 case e1000_82547_rev_2: 4430 case e1000_82547_rev_2:
6070 /* Turn on PHY Smart Power Down (if previously enabled) */ 4431 /* Turn on PHY Smart Power Down (if previously enabled) */
6071 ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO, 4432 ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO,
6072 hw->phy_spd_default); 4433 hw->phy_spd_default);
6073 if (ret_val) 4434 if (ret_val)
6074 return ret_val; 4435 return ret_val;
6075 /* Fall Through */ 4436 /* Fall Through */
6076 default: 4437 default:
6077 if (hw->phy_type == e1000_phy_ife) { 4438 /* Restore LEDCTL settings */
6078 e1000_write_phy_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED, 0); 4439 ew32(LEDCTL, hw->ledctl_default);
6079 break; 4440 break;
6080 } 4441 }
6081 /* Restore LEDCTL settings */ 4442
6082 ew32(LEDCTL, hw->ledctl_default); 4443 return E1000_SUCCESS;
6083 break;
6084 }
6085
6086 return E1000_SUCCESS;
6087} 4444}
6088 4445
6089/****************************************************************************** 4446/**
6090 * Turns on the software controllable LED 4447 * e1000_led_on - Turns on the software controllable LED
6091 * 4448 * @hw: Struct containing variables accessed by shared code
6092 * hw - Struct containing variables accessed by shared code 4449 */
6093 *****************************************************************************/
6094s32 e1000_led_on(struct e1000_hw *hw) 4450s32 e1000_led_on(struct e1000_hw *hw)
6095{ 4451{
6096 u32 ctrl = er32(CTRL); 4452 u32 ctrl = er32(CTRL);
6097 4453
6098 DEBUGFUNC("e1000_led_on"); 4454 DEBUGFUNC("e1000_led_on");
6099 4455
6100 switch (hw->mac_type) { 4456 switch (hw->mac_type) {
6101 case e1000_82542_rev2_0: 4457 case e1000_82542_rev2_0:
6102 case e1000_82542_rev2_1: 4458 case e1000_82542_rev2_1:
6103 case e1000_82543: 4459 case e1000_82543:
6104 /* Set SW Defineable Pin 0 to turn on the LED */ 4460 /* Set SW Defineable Pin 0 to turn on the LED */
6105 ctrl |= E1000_CTRL_SWDPIN0; 4461 ctrl |= E1000_CTRL_SWDPIN0;
6106 ctrl |= E1000_CTRL_SWDPIO0; 4462 ctrl |= E1000_CTRL_SWDPIO0;
6107 break; 4463 break;
6108 case e1000_82544: 4464 case e1000_82544:
6109 if (hw->media_type == e1000_media_type_fiber) { 4465 if (hw->media_type == e1000_media_type_fiber) {
6110 /* Set SW Defineable Pin 0 to turn on the LED */ 4466 /* Set SW Defineable Pin 0 to turn on the LED */
6111 ctrl |= E1000_CTRL_SWDPIN0; 4467 ctrl |= E1000_CTRL_SWDPIN0;
6112 ctrl |= E1000_CTRL_SWDPIO0; 4468 ctrl |= E1000_CTRL_SWDPIO0;
6113 } else { 4469 } else {
6114 /* Clear SW Defineable Pin 0 to turn on the LED */ 4470 /* Clear SW Defineable Pin 0 to turn on the LED */
6115 ctrl &= ~E1000_CTRL_SWDPIN0; 4471 ctrl &= ~E1000_CTRL_SWDPIN0;
6116 ctrl |= E1000_CTRL_SWDPIO0; 4472 ctrl |= E1000_CTRL_SWDPIO0;
6117 } 4473 }
6118 break; 4474 break;
6119 default: 4475 default:
6120 if (hw->media_type == e1000_media_type_fiber) { 4476 if (hw->media_type == e1000_media_type_fiber) {
6121 /* Clear SW Defineable Pin 0 to turn on the LED */ 4477 /* Clear SW Defineable Pin 0 to turn on the LED */
6122 ctrl &= ~E1000_CTRL_SWDPIN0; 4478 ctrl &= ~E1000_CTRL_SWDPIN0;
6123 ctrl |= E1000_CTRL_SWDPIO0; 4479 ctrl |= E1000_CTRL_SWDPIO0;
6124 } else if (hw->phy_type == e1000_phy_ife) { 4480 } else if (hw->media_type == e1000_media_type_copper) {
6125 e1000_write_phy_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED, 4481 ew32(LEDCTL, hw->ledctl_mode2);
6126 (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_ON)); 4482 return E1000_SUCCESS;
6127 } else if (hw->media_type == e1000_media_type_copper) { 4483 }
6128 ew32(LEDCTL, hw->ledctl_mode2); 4484 break;
6129 return E1000_SUCCESS; 4485 }
6130 } 4486
6131 break; 4487 ew32(CTRL, ctrl);
6132 } 4488
6133 4489 return E1000_SUCCESS;
6134 ew32(CTRL, ctrl);
6135
6136 return E1000_SUCCESS;
6137} 4490}
6138 4491
6139/****************************************************************************** 4492/**
6140 * Turns off the software controllable LED 4493 * e1000_led_off - Turns off the software controllable LED
6141 * 4494 * @hw: Struct containing variables accessed by shared code
6142 * hw - Struct containing variables accessed by shared code 4495 */
6143 *****************************************************************************/
6144s32 e1000_led_off(struct e1000_hw *hw) 4496s32 e1000_led_off(struct e1000_hw *hw)
6145{ 4497{
6146 u32 ctrl = er32(CTRL); 4498 u32 ctrl = er32(CTRL);
6147 4499
6148 DEBUGFUNC("e1000_led_off"); 4500 DEBUGFUNC("e1000_led_off");
6149 4501
6150 switch (hw->mac_type) { 4502 switch (hw->mac_type) {
6151 case e1000_82542_rev2_0: 4503 case e1000_82542_rev2_0:
6152 case e1000_82542_rev2_1: 4504 case e1000_82542_rev2_1:
6153 case e1000_82543: 4505 case e1000_82543:
6154 /* Clear SW Defineable Pin 0 to turn off the LED */ 4506 /* Clear SW Defineable Pin 0 to turn off the LED */
6155 ctrl &= ~E1000_CTRL_SWDPIN0; 4507 ctrl &= ~E1000_CTRL_SWDPIN0;
6156 ctrl |= E1000_CTRL_SWDPIO0; 4508 ctrl |= E1000_CTRL_SWDPIO0;
6157 break; 4509 break;
6158 case e1000_82544: 4510 case e1000_82544:
6159 if (hw->media_type == e1000_media_type_fiber) { 4511 if (hw->media_type == e1000_media_type_fiber) {
6160 /* Clear SW Defineable Pin 0 to turn off the LED */ 4512 /* Clear SW Defineable Pin 0 to turn off the LED */
6161 ctrl &= ~E1000_CTRL_SWDPIN0; 4513 ctrl &= ~E1000_CTRL_SWDPIN0;
6162 ctrl |= E1000_CTRL_SWDPIO0; 4514 ctrl |= E1000_CTRL_SWDPIO0;
6163 } else { 4515 } else {
6164 /* Set SW Defineable Pin 0 to turn off the LED */ 4516 /* Set SW Defineable Pin 0 to turn off the LED */
6165 ctrl |= E1000_CTRL_SWDPIN0; 4517 ctrl |= E1000_CTRL_SWDPIN0;
6166 ctrl |= E1000_CTRL_SWDPIO0; 4518 ctrl |= E1000_CTRL_SWDPIO0;
6167 } 4519 }
6168 break; 4520 break;
6169 default: 4521 default:
6170 if (hw->media_type == e1000_media_type_fiber) { 4522 if (hw->media_type == e1000_media_type_fiber) {
6171 /* Set SW Defineable Pin 0 to turn off the LED */ 4523 /* Set SW Defineable Pin 0 to turn off the LED */
6172 ctrl |= E1000_CTRL_SWDPIN0; 4524 ctrl |= E1000_CTRL_SWDPIN0;
6173 ctrl |= E1000_CTRL_SWDPIO0; 4525 ctrl |= E1000_CTRL_SWDPIO0;
6174 } else if (hw->phy_type == e1000_phy_ife) { 4526 } else if (hw->media_type == e1000_media_type_copper) {
6175 e1000_write_phy_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED, 4527 ew32(LEDCTL, hw->ledctl_mode1);
6176 (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_OFF)); 4528 return E1000_SUCCESS;
6177 } else if (hw->media_type == e1000_media_type_copper) { 4529 }
6178 ew32(LEDCTL, hw->ledctl_mode1); 4530 break;
6179 return E1000_SUCCESS; 4531 }
6180 } 4532
6181 break; 4533 ew32(CTRL, ctrl);
6182 } 4534
6183 4535 return E1000_SUCCESS;
6184 ew32(CTRL, ctrl);
6185
6186 return E1000_SUCCESS;
6187} 4536}
6188 4537
6189/****************************************************************************** 4538/**
6190 * Clears all hardware statistics counters. 4539 * e1000_clear_hw_cntrs - Clears all hardware statistics counters.
6191 * 4540 * @hw: Struct containing variables accessed by shared code
6192 * hw - Struct containing variables accessed by shared code 4541 */
6193 *****************************************************************************/
6194static void e1000_clear_hw_cntrs(struct e1000_hw *hw) 4542static void e1000_clear_hw_cntrs(struct e1000_hw *hw)
6195{ 4543{
6196 volatile u32 temp; 4544 volatile u32 temp;
6197 4545
6198 temp = er32(CRCERRS); 4546 temp = er32(CRCERRS);
6199 temp = er32(SYMERRS); 4547 temp = er32(SYMERRS);
6200 temp = er32(MPC); 4548 temp = er32(MPC);
6201 temp = er32(SCC); 4549 temp = er32(SCC);
6202 temp = er32(ECOL); 4550 temp = er32(ECOL);
6203 temp = er32(MCC); 4551 temp = er32(MCC);
6204 temp = er32(LATECOL); 4552 temp = er32(LATECOL);
6205 temp = er32(COLC); 4553 temp = er32(COLC);
6206 temp = er32(DC); 4554 temp = er32(DC);
6207 temp = er32(SEC); 4555 temp = er32(SEC);
6208 temp = er32(RLEC); 4556 temp = er32(RLEC);
6209 temp = er32(XONRXC); 4557 temp = er32(XONRXC);
6210 temp = er32(XONTXC); 4558 temp = er32(XONTXC);
6211 temp = er32(XOFFRXC); 4559 temp = er32(XOFFRXC);
6212 temp = er32(XOFFTXC); 4560 temp = er32(XOFFTXC);
6213 temp = er32(FCRUC); 4561 temp = er32(FCRUC);
6214 4562
6215 if (hw->mac_type != e1000_ich8lan) { 4563 temp = er32(PRC64);
6216 temp = er32(PRC64); 4564 temp = er32(PRC127);
6217 temp = er32(PRC127); 4565 temp = er32(PRC255);
6218 temp = er32(PRC255); 4566 temp = er32(PRC511);
6219 temp = er32(PRC511); 4567 temp = er32(PRC1023);
6220 temp = er32(PRC1023); 4568 temp = er32(PRC1522);
6221 temp = er32(PRC1522); 4569
6222 } 4570 temp = er32(GPRC);
6223 4571 temp = er32(BPRC);
6224 temp = er32(GPRC); 4572 temp = er32(MPRC);
6225 temp = er32(BPRC); 4573 temp = er32(GPTC);
6226 temp = er32(MPRC); 4574 temp = er32(GORCL);
6227 temp = er32(GPTC); 4575 temp = er32(GORCH);
6228 temp = er32(GORCL); 4576 temp = er32(GOTCL);
6229 temp = er32(GORCH); 4577 temp = er32(GOTCH);
6230 temp = er32(GOTCL); 4578 temp = er32(RNBC);
6231 temp = er32(GOTCH); 4579 temp = er32(RUC);
6232 temp = er32(RNBC); 4580 temp = er32(RFC);
6233 temp = er32(RUC); 4581 temp = er32(ROC);
6234 temp = er32(RFC); 4582 temp = er32(RJC);
6235 temp = er32(ROC); 4583 temp = er32(TORL);
6236 temp = er32(RJC); 4584 temp = er32(TORH);
6237 temp = er32(TORL); 4585 temp = er32(TOTL);
6238 temp = er32(TORH); 4586 temp = er32(TOTH);
6239 temp = er32(TOTL); 4587 temp = er32(TPR);
6240 temp = er32(TOTH); 4588 temp = er32(TPT);
6241 temp = er32(TPR); 4589
6242 temp = er32(TPT); 4590 temp = er32(PTC64);
6243 4591 temp = er32(PTC127);
6244 if (hw->mac_type != e1000_ich8lan) { 4592 temp = er32(PTC255);
6245 temp = er32(PTC64); 4593 temp = er32(PTC511);
6246 temp = er32(PTC127); 4594 temp = er32(PTC1023);
6247 temp = er32(PTC255); 4595 temp = er32(PTC1522);
6248 temp = er32(PTC511); 4596
6249 temp = er32(PTC1023); 4597 temp = er32(MPTC);
6250 temp = er32(PTC1522); 4598 temp = er32(BPTC);
6251 } 4599
6252 4600 if (hw->mac_type < e1000_82543)
6253 temp = er32(MPTC); 4601 return;
6254 temp = er32(BPTC); 4602
6255 4603 temp = er32(ALGNERRC);
6256 if (hw->mac_type < e1000_82543) return; 4604 temp = er32(RXERRC);
6257 4605 temp = er32(TNCRS);
6258 temp = er32(ALGNERRC); 4606 temp = er32(CEXTERR);
6259 temp = er32(RXERRC); 4607 temp = er32(TSCTC);
6260 temp = er32(TNCRS); 4608 temp = er32(TSCTFC);
6261 temp = er32(CEXTERR); 4609
6262 temp = er32(TSCTC); 4610 if (hw->mac_type <= e1000_82544)
6263 temp = er32(TSCTFC); 4611 return;
6264 4612
6265 if (hw->mac_type <= e1000_82544) return; 4613 temp = er32(MGTPRC);
6266 4614 temp = er32(MGTPDC);
6267 temp = er32(MGTPRC); 4615 temp = er32(MGTPTC);
6268 temp = er32(MGTPDC); 4616}
6269 temp = er32(MGTPTC); 4617
6270 4618/**
6271 if (hw->mac_type <= e1000_82547_rev_2) return; 4619 * e1000_reset_adaptive - Resets Adaptive IFS to its default state.
6272 4620 * @hw: Struct containing variables accessed by shared code
6273 temp = er32(IAC);
6274 temp = er32(ICRXOC);
6275
6276 if (hw->mac_type == e1000_ich8lan) return;
6277
6278 temp = er32(ICRXPTC);
6279 temp = er32(ICRXATC);
6280 temp = er32(ICTXPTC);
6281 temp = er32(ICTXATC);
6282 temp = er32(ICTXQEC);
6283 temp = er32(ICTXQMTC);
6284 temp = er32(ICRXDMTC);
6285}
6286
6287/******************************************************************************
6288 * Resets Adaptive IFS to its default state.
6289 *
6290 * hw - Struct containing variables accessed by shared code
6291 * 4621 *
6292 * Call this after e1000_init_hw. You may override the IFS defaults by setting 4622 * Call this after e1000_init_hw. You may override the IFS defaults by setting
6293 * hw->ifs_params_forced to true. However, you must initialize hw-> 4623 * hw->ifs_params_forced to true. However, you must initialize hw->
6294 * current_ifs_val, ifs_min_val, ifs_max_val, ifs_step_size, and ifs_ratio 4624 * current_ifs_val, ifs_min_val, ifs_max_val, ifs_step_size, and ifs_ratio
6295 * before calling this function. 4625 * before calling this function.
6296 *****************************************************************************/ 4626 */
6297void e1000_reset_adaptive(struct e1000_hw *hw) 4627void e1000_reset_adaptive(struct e1000_hw *hw)
6298{ 4628{
6299 DEBUGFUNC("e1000_reset_adaptive"); 4629 DEBUGFUNC("e1000_reset_adaptive");
6300 4630
6301 if (hw->adaptive_ifs) { 4631 if (hw->adaptive_ifs) {
6302 if (!hw->ifs_params_forced) { 4632 if (!hw->ifs_params_forced) {
6303 hw->current_ifs_val = 0; 4633 hw->current_ifs_val = 0;
6304 hw->ifs_min_val = IFS_MIN; 4634 hw->ifs_min_val = IFS_MIN;
6305 hw->ifs_max_val = IFS_MAX; 4635 hw->ifs_max_val = IFS_MAX;
6306 hw->ifs_step_size = IFS_STEP; 4636 hw->ifs_step_size = IFS_STEP;
6307 hw->ifs_ratio = IFS_RATIO; 4637 hw->ifs_ratio = IFS_RATIO;
6308 } 4638 }
6309 hw->in_ifs_mode = false; 4639 hw->in_ifs_mode = false;
6310 ew32(AIT, 0); 4640 ew32(AIT, 0);
6311 } else { 4641 } else {
6312 DEBUGOUT("Not in Adaptive IFS mode!\n"); 4642 DEBUGOUT("Not in Adaptive IFS mode!\n");
6313 } 4643 }
6314} 4644}
6315 4645
6316/****************************************************************************** 4646/**
4647 * e1000_update_adaptive - update adaptive IFS
4648 * @hw: Struct containing variables accessed by shared code
4649 * @tx_packets: Number of transmits since last callback
4650 * @total_collisions: Number of collisions since last callback
4651 *
6317 * Called during the callback/watchdog routine to update IFS value based on 4652 * Called during the callback/watchdog routine to update IFS value based on
6318 * the ratio of transmits to collisions. 4653 * the ratio of transmits to collisions.
6319 * 4654 */
6320 * hw - Struct containing variables accessed by shared code
6321 * tx_packets - Number of transmits since last callback
6322 * total_collisions - Number of collisions since last callback
6323 *****************************************************************************/
6324void e1000_update_adaptive(struct e1000_hw *hw) 4655void e1000_update_adaptive(struct e1000_hw *hw)
6325{ 4656{
6326 DEBUGFUNC("e1000_update_adaptive"); 4657 DEBUGFUNC("e1000_update_adaptive");
6327 4658
6328 if (hw->adaptive_ifs) { 4659 if (hw->adaptive_ifs) {
6329 if ((hw->collision_delta * hw->ifs_ratio) > hw->tx_packet_delta) { 4660 if ((hw->collision_delta *hw->ifs_ratio) > hw->tx_packet_delta) {
6330 if (hw->tx_packet_delta > MIN_NUM_XMITS) { 4661 if (hw->tx_packet_delta > MIN_NUM_XMITS) {
6331 hw->in_ifs_mode = true; 4662 hw->in_ifs_mode = true;
6332 if (hw->current_ifs_val < hw->ifs_max_val) { 4663 if (hw->current_ifs_val < hw->ifs_max_val) {
6333 if (hw->current_ifs_val == 0) 4664 if (hw->current_ifs_val == 0)
6334 hw->current_ifs_val = hw->ifs_min_val; 4665 hw->current_ifs_val =
6335 else 4666 hw->ifs_min_val;
6336 hw->current_ifs_val += hw->ifs_step_size; 4667 else
6337 ew32(AIT, hw->current_ifs_val); 4668 hw->current_ifs_val +=
6338 } 4669 hw->ifs_step_size;
6339 } 4670 ew32(AIT, hw->current_ifs_val);
6340 } else { 4671 }
6341 if (hw->in_ifs_mode && (hw->tx_packet_delta <= MIN_NUM_XMITS)) { 4672 }
6342 hw->current_ifs_val = 0; 4673 } else {
6343 hw->in_ifs_mode = false; 4674 if (hw->in_ifs_mode
6344 ew32(AIT, 0); 4675 && (hw->tx_packet_delta <= MIN_NUM_XMITS)) {
6345 } 4676 hw->current_ifs_val = 0;
6346 } 4677 hw->in_ifs_mode = false;
6347 } else { 4678 ew32(AIT, 0);
6348 DEBUGOUT("Not in Adaptive IFS mode!\n"); 4679 }
6349 } 4680 }
4681 } else {
4682 DEBUGOUT("Not in Adaptive IFS mode!\n");
4683 }
6350} 4684}
6351 4685
6352/****************************************************************************** 4686/**
6353 * Adjusts the statistic counters when a frame is accepted by TBI_ACCEPT 4687 * e1000_tbi_adjust_stats
4688 * @hw: Struct containing variables accessed by shared code
4689 * @frame_len: The length of the frame in question
4690 * @mac_addr: The Ethernet destination address of the frame in question
6354 * 4691 *
6355 * hw - Struct containing variables accessed by shared code 4692 * Adjusts the statistic counters when a frame is accepted by TBI_ACCEPT
6356 * frame_len - The length of the frame in question 4693 */
6357 * mac_addr - The Ethernet destination address of the frame in question
6358 *****************************************************************************/
6359void e1000_tbi_adjust_stats(struct e1000_hw *hw, struct e1000_hw_stats *stats, 4694void e1000_tbi_adjust_stats(struct e1000_hw *hw, struct e1000_hw_stats *stats,
6360 u32 frame_len, u8 *mac_addr) 4695 u32 frame_len, u8 *mac_addr)
6361{ 4696{
6362 u64 carry_bit; 4697 u64 carry_bit;
6363 4698
6364 /* First adjust the frame length. */ 4699 /* First adjust the frame length. */
6365 frame_len--; 4700 frame_len--;
6366 /* We need to adjust the statistics counters, since the hardware 4701 /* We need to adjust the statistics counters, since the hardware
6367 * counters overcount this packet as a CRC error and undercount 4702 * counters overcount this packet as a CRC error and undercount
6368 * the packet as a good packet 4703 * the packet as a good packet
6369 */ 4704 */
6370 /* This packet should not be counted as a CRC error. */ 4705 /* This packet should not be counted as a CRC error. */
6371 stats->crcerrs--; 4706 stats->crcerrs--;
6372 /* This packet does count as a Good Packet Received. */ 4707 /* This packet does count as a Good Packet Received. */
6373 stats->gprc++; 4708 stats->gprc++;
6374 4709
6375 /* Adjust the Good Octets received counters */ 4710 /* Adjust the Good Octets received counters */
6376 carry_bit = 0x80000000 & stats->gorcl; 4711 carry_bit = 0x80000000 & stats->gorcl;
6377 stats->gorcl += frame_len; 4712 stats->gorcl += frame_len;
6378 /* If the high bit of Gorcl (the low 32 bits of the Good Octets 4713 /* If the high bit of Gorcl (the low 32 bits of the Good Octets
6379 * Received Count) was one before the addition, 4714 * Received Count) was one before the addition,
6380 * AND it is zero after, then we lost the carry out, 4715 * AND it is zero after, then we lost the carry out,
6381 * need to add one to Gorch (Good Octets Received Count High). 4716 * need to add one to Gorch (Good Octets Received Count High).
6382 * This could be simplified if all environments supported 4717 * This could be simplified if all environments supported
6383 * 64-bit integers. 4718 * 64-bit integers.
6384 */ 4719 */
6385 if (carry_bit && ((stats->gorcl & 0x80000000) == 0)) 4720 if (carry_bit && ((stats->gorcl & 0x80000000) == 0))
6386 stats->gorch++; 4721 stats->gorch++;
6387 /* Is this a broadcast or multicast? Check broadcast first, 4722 /* Is this a broadcast or multicast? Check broadcast first,
6388 * since the test for a multicast frame will test positive on 4723 * since the test for a multicast frame will test positive on
6389 * a broadcast frame. 4724 * a broadcast frame.
6390 */ 4725 */
6391 if ((mac_addr[0] == (u8)0xff) && (mac_addr[1] == (u8)0xff)) 4726 if ((mac_addr[0] == (u8) 0xff) && (mac_addr[1] == (u8) 0xff))
6392 /* Broadcast packet */ 4727 /* Broadcast packet */
6393 stats->bprc++; 4728 stats->bprc++;
6394 else if (*mac_addr & 0x01) 4729 else if (*mac_addr & 0x01)
6395 /* Multicast packet */ 4730 /* Multicast packet */
6396 stats->mprc++; 4731 stats->mprc++;
6397 4732
6398 if (frame_len == hw->max_frame_size) { 4733 if (frame_len == hw->max_frame_size) {
6399 /* In this case, the hardware has overcounted the number of 4734 /* In this case, the hardware has overcounted the number of
6400 * oversize frames. 4735 * oversize frames.
6401 */ 4736 */
6402 if (stats->roc > 0) 4737 if (stats->roc > 0)
6403 stats->roc--; 4738 stats->roc--;
6404 } 4739 }
6405 4740
6406 /* Adjust the bin counters when the extra byte put the frame in the 4741 /* Adjust the bin counters when the extra byte put the frame in the
6407 * wrong bin. Remember that the frame_len was adjusted above. 4742 * wrong bin. Remember that the frame_len was adjusted above.
6408 */ 4743 */
6409 if (frame_len == 64) { 4744 if (frame_len == 64) {
6410 stats->prc64++; 4745 stats->prc64++;
6411 stats->prc127--; 4746 stats->prc127--;
6412 } else if (frame_len == 127) { 4747 } else if (frame_len == 127) {
6413 stats->prc127++; 4748 stats->prc127++;
6414 stats->prc255--; 4749 stats->prc255--;
6415 } else if (frame_len == 255) { 4750 } else if (frame_len == 255) {
6416 stats->prc255++; 4751 stats->prc255++;
6417 stats->prc511--; 4752 stats->prc511--;
6418 } else if (frame_len == 511) { 4753 } else if (frame_len == 511) {
6419 stats->prc511++; 4754 stats->prc511++;
6420 stats->prc1023--; 4755 stats->prc1023--;
6421 } else if (frame_len == 1023) { 4756 } else if (frame_len == 1023) {
6422 stats->prc1023++; 4757 stats->prc1023++;
6423 stats->prc1522--; 4758 stats->prc1522--;
6424 } else if (frame_len == 1522) { 4759 } else if (frame_len == 1522) {
6425 stats->prc1522++; 4760 stats->prc1522++;
6426 } 4761 }
6427} 4762}
6428 4763
6429/****************************************************************************** 4764/**
6430 * Gets the current PCI bus type, speed, and width of the hardware 4765 * e1000_get_bus_info
4766 * @hw: Struct containing variables accessed by shared code
6431 * 4767 *
6432 * hw - Struct containing variables accessed by shared code 4768 * Gets the current PCI bus type, speed, and width of the hardware
6433 *****************************************************************************/ 4769 */
6434void e1000_get_bus_info(struct e1000_hw *hw) 4770void e1000_get_bus_info(struct e1000_hw *hw)
6435{ 4771{
6436 s32 ret_val; 4772 u32 status;
6437 u16 pci_ex_link_status; 4773
6438 u32 status; 4774 switch (hw->mac_type) {
6439 4775 case e1000_82542_rev2_0:
6440 switch (hw->mac_type) { 4776 case e1000_82542_rev2_1:
6441 case e1000_82542_rev2_0: 4777 hw->bus_type = e1000_bus_type_pci;
6442 case e1000_82542_rev2_1: 4778 hw->bus_speed = e1000_bus_speed_unknown;
6443 hw->bus_type = e1000_bus_type_pci; 4779 hw->bus_width = e1000_bus_width_unknown;
6444 hw->bus_speed = e1000_bus_speed_unknown; 4780 break;
6445 hw->bus_width = e1000_bus_width_unknown; 4781 default:
6446 break; 4782 status = er32(STATUS);
6447 case e1000_82571: 4783 hw->bus_type = (status & E1000_STATUS_PCIX_MODE) ?
6448 case e1000_82572: 4784 e1000_bus_type_pcix : e1000_bus_type_pci;
6449 case e1000_82573: 4785
6450 case e1000_80003es2lan: 4786 if (hw->device_id == E1000_DEV_ID_82546EB_QUAD_COPPER) {
6451 hw->bus_type = e1000_bus_type_pci_express; 4787 hw->bus_speed = (hw->bus_type == e1000_bus_type_pci) ?
6452 hw->bus_speed = e1000_bus_speed_2500; 4788 e1000_bus_speed_66 : e1000_bus_speed_120;
6453 ret_val = e1000_read_pcie_cap_reg(hw, 4789 } else if (hw->bus_type == e1000_bus_type_pci) {
6454 PCI_EX_LINK_STATUS, 4790 hw->bus_speed = (status & E1000_STATUS_PCI66) ?
6455 &pci_ex_link_status); 4791 e1000_bus_speed_66 : e1000_bus_speed_33;
6456 if (ret_val) 4792 } else {
6457 hw->bus_width = e1000_bus_width_unknown; 4793 switch (status & E1000_STATUS_PCIX_SPEED) {
6458 else 4794 case E1000_STATUS_PCIX_SPEED_66:
6459 hw->bus_width = (pci_ex_link_status & PCI_EX_LINK_WIDTH_MASK) >> 4795 hw->bus_speed = e1000_bus_speed_66;
6460 PCI_EX_LINK_WIDTH_SHIFT; 4796 break;
6461 break; 4797 case E1000_STATUS_PCIX_SPEED_100:
6462 case e1000_ich8lan: 4798 hw->bus_speed = e1000_bus_speed_100;
6463 hw->bus_type = e1000_bus_type_pci_express; 4799 break;
6464 hw->bus_speed = e1000_bus_speed_2500; 4800 case E1000_STATUS_PCIX_SPEED_133:
6465 hw->bus_width = e1000_bus_width_pciex_1; 4801 hw->bus_speed = e1000_bus_speed_133;
6466 break; 4802 break;
6467 default: 4803 default:
6468 status = er32(STATUS); 4804 hw->bus_speed = e1000_bus_speed_reserved;
6469 hw->bus_type = (status & E1000_STATUS_PCIX_MODE) ? 4805 break;
6470 e1000_bus_type_pcix : e1000_bus_type_pci; 4806 }
6471 4807 }
6472 if (hw->device_id == E1000_DEV_ID_82546EB_QUAD_COPPER) { 4808 hw->bus_width = (status & E1000_STATUS_BUS64) ?
6473 hw->bus_speed = (hw->bus_type == e1000_bus_type_pci) ? 4809 e1000_bus_width_64 : e1000_bus_width_32;
6474 e1000_bus_speed_66 : e1000_bus_speed_120; 4810 break;
6475 } else if (hw->bus_type == e1000_bus_type_pci) { 4811 }
6476 hw->bus_speed = (status & E1000_STATUS_PCI66) ?
6477 e1000_bus_speed_66 : e1000_bus_speed_33;
6478 } else {
6479 switch (status & E1000_STATUS_PCIX_SPEED) {
6480 case E1000_STATUS_PCIX_SPEED_66:
6481 hw->bus_speed = e1000_bus_speed_66;
6482 break;
6483 case E1000_STATUS_PCIX_SPEED_100:
6484 hw->bus_speed = e1000_bus_speed_100;
6485 break;
6486 case E1000_STATUS_PCIX_SPEED_133:
6487 hw->bus_speed = e1000_bus_speed_133;
6488 break;
6489 default:
6490 hw->bus_speed = e1000_bus_speed_reserved;
6491 break;
6492 }
6493 }
6494 hw->bus_width = (status & E1000_STATUS_BUS64) ?
6495 e1000_bus_width_64 : e1000_bus_width_32;
6496 break;
6497 }
6498} 4812}
6499 4813
6500/****************************************************************************** 4814/**
4815 * e1000_write_reg_io
4816 * @hw: Struct containing variables accessed by shared code
4817 * @offset: offset to write to
4818 * @value: value to write
4819 *
6501 * Writes a value to one of the devices registers using port I/O (as opposed to 4820 * Writes a value to one of the devices registers using port I/O (as opposed to
6502 * memory mapped I/O). Only 82544 and newer devices support port I/O. 4821 * memory mapped I/O). Only 82544 and newer devices support port I/O.
6503 * 4822 */
6504 * hw - Struct containing variables accessed by shared code
6505 * offset - offset to write to
6506 * value - value to write
6507 *****************************************************************************/
6508static void e1000_write_reg_io(struct e1000_hw *hw, u32 offset, u32 value) 4823static void e1000_write_reg_io(struct e1000_hw *hw, u32 offset, u32 value)
6509{ 4824{
6510 unsigned long io_addr = hw->io_base; 4825 unsigned long io_addr = hw->io_base;
6511 unsigned long io_data = hw->io_base + 4; 4826 unsigned long io_data = hw->io_base + 4;
6512 4827
6513 e1000_io_write(hw, io_addr, offset); 4828 e1000_io_write(hw, io_addr, offset);
6514 e1000_io_write(hw, io_data, value); 4829 e1000_io_write(hw, io_data, value);
6515} 4830}
6516 4831
6517/****************************************************************************** 4832/**
6518 * Estimates the cable length. 4833 * e1000_get_cable_length - Estimates the cable length.
6519 * 4834 * @hw: Struct containing variables accessed by shared code
6520 * hw - Struct containing variables accessed by shared code 4835 * @min_length: The estimated minimum length
6521 * min_length - The estimated minimum length 4836 * @max_length: The estimated maximum length
6522 * max_length - The estimated maximum length
6523 * 4837 *
6524 * returns: - E1000_ERR_XXX 4838 * returns: - E1000_ERR_XXX
6525 * E1000_SUCCESS 4839 * E1000_SUCCESS
@@ -6528,185 +4842,115 @@ static void e1000_write_reg_io(struct e1000_hw *hw, u32 offset, u32 value)
6528 * So for M88 phy's, this function interprets the one value returned from the 4842 * So for M88 phy's, this function interprets the one value returned from the
6529 * register to the minimum and maximum range. 4843 * register to the minimum and maximum range.
6530 * For IGP phy's, the function calculates the range by the AGC registers. 4844 * For IGP phy's, the function calculates the range by the AGC registers.
6531 *****************************************************************************/ 4845 */
6532static s32 e1000_get_cable_length(struct e1000_hw *hw, u16 *min_length, 4846static s32 e1000_get_cable_length(struct e1000_hw *hw, u16 *min_length,
6533 u16 *max_length) 4847 u16 *max_length)
6534{ 4848{
6535 s32 ret_val; 4849 s32 ret_val;
6536 u16 agc_value = 0; 4850 u16 agc_value = 0;
6537 u16 i, phy_data; 4851 u16 i, phy_data;
6538 u16 cable_length; 4852 u16 cable_length;
6539 4853
6540 DEBUGFUNC("e1000_get_cable_length"); 4854 DEBUGFUNC("e1000_get_cable_length");
6541 4855
6542 *min_length = *max_length = 0; 4856 *min_length = *max_length = 0;
6543 4857
6544 /* Use old method for Phy older than IGP */ 4858 /* Use old method for Phy older than IGP */
6545 if (hw->phy_type == e1000_phy_m88) { 4859 if (hw->phy_type == e1000_phy_m88) {
6546 4860
6547 ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, 4861 ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS,
6548 &phy_data); 4862 &phy_data);
6549 if (ret_val) 4863 if (ret_val)
6550 return ret_val; 4864 return ret_val;
6551 cable_length = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >> 4865 cable_length = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
6552 M88E1000_PSSR_CABLE_LENGTH_SHIFT; 4866 M88E1000_PSSR_CABLE_LENGTH_SHIFT;
6553 4867
6554 /* Convert the enum value to ranged values */ 4868 /* Convert the enum value to ranged values */
6555 switch (cable_length) { 4869 switch (cable_length) {
6556 case e1000_cable_length_50: 4870 case e1000_cable_length_50:
6557 *min_length = 0; 4871 *min_length = 0;
6558 *max_length = e1000_igp_cable_length_50; 4872 *max_length = e1000_igp_cable_length_50;
6559 break; 4873 break;
6560 case e1000_cable_length_50_80: 4874 case e1000_cable_length_50_80:
6561 *min_length = e1000_igp_cable_length_50; 4875 *min_length = e1000_igp_cable_length_50;
6562 *max_length = e1000_igp_cable_length_80; 4876 *max_length = e1000_igp_cable_length_80;
6563 break; 4877 break;
6564 case e1000_cable_length_80_110: 4878 case e1000_cable_length_80_110:
6565 *min_length = e1000_igp_cable_length_80; 4879 *min_length = e1000_igp_cable_length_80;
6566 *max_length = e1000_igp_cable_length_110; 4880 *max_length = e1000_igp_cable_length_110;
6567 break; 4881 break;
6568 case e1000_cable_length_110_140: 4882 case e1000_cable_length_110_140:
6569 *min_length = e1000_igp_cable_length_110; 4883 *min_length = e1000_igp_cable_length_110;
6570 *max_length = e1000_igp_cable_length_140; 4884 *max_length = e1000_igp_cable_length_140;
6571 break; 4885 break;
6572 case e1000_cable_length_140: 4886 case e1000_cable_length_140:
6573 *min_length = e1000_igp_cable_length_140; 4887 *min_length = e1000_igp_cable_length_140;
6574 *max_length = e1000_igp_cable_length_170; 4888 *max_length = e1000_igp_cable_length_170;
6575 break; 4889 break;
6576 default: 4890 default:
6577 return -E1000_ERR_PHY; 4891 return -E1000_ERR_PHY;
6578 break; 4892 break;
6579 } 4893 }
6580 } else if (hw->phy_type == e1000_phy_gg82563) { 4894 } else if (hw->phy_type == e1000_phy_igp) { /* For IGP PHY */
6581 ret_val = e1000_read_phy_reg(hw, GG82563_PHY_DSP_DISTANCE, 4895 u16 cur_agc_value;
6582 &phy_data); 4896 u16 min_agc_value = IGP01E1000_AGC_LENGTH_TABLE_SIZE;
6583 if (ret_val) 4897 u16 agc_reg_array[IGP01E1000_PHY_CHANNEL_NUM] =
6584 return ret_val; 4898 { IGP01E1000_PHY_AGC_A,
6585 cable_length = phy_data & GG82563_DSPD_CABLE_LENGTH; 4899 IGP01E1000_PHY_AGC_B,
6586 4900 IGP01E1000_PHY_AGC_C,
6587 switch (cable_length) { 4901 IGP01E1000_PHY_AGC_D
6588 case e1000_gg_cable_length_60: 4902 };
6589 *min_length = 0; 4903 /* Read the AGC registers for all channels */
6590 *max_length = e1000_igp_cable_length_60; 4904 for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) {
6591 break; 4905
6592 case e1000_gg_cable_length_60_115: 4906 ret_val =
6593 *min_length = e1000_igp_cable_length_60; 4907 e1000_read_phy_reg(hw, agc_reg_array[i], &phy_data);
6594 *max_length = e1000_igp_cable_length_115; 4908 if (ret_val)
6595 break; 4909 return ret_val;
6596 case e1000_gg_cable_length_115_150: 4910
6597 *min_length = e1000_igp_cable_length_115; 4911 cur_agc_value = phy_data >> IGP01E1000_AGC_LENGTH_SHIFT;
6598 *max_length = e1000_igp_cable_length_150; 4912
6599 break; 4913 /* Value bound check. */
6600 case e1000_gg_cable_length_150: 4914 if ((cur_agc_value >=
6601 *min_length = e1000_igp_cable_length_150; 4915 IGP01E1000_AGC_LENGTH_TABLE_SIZE - 1)
6602 *max_length = e1000_igp_cable_length_180; 4916 || (cur_agc_value == 0))
6603 break; 4917 return -E1000_ERR_PHY;
6604 default: 4918
6605 return -E1000_ERR_PHY; 4919 agc_value += cur_agc_value;
6606 break; 4920
6607 } 4921 /* Update minimal AGC value. */
6608 } else if (hw->phy_type == e1000_phy_igp) { /* For IGP PHY */ 4922 if (min_agc_value > cur_agc_value)
6609 u16 cur_agc_value; 4923 min_agc_value = cur_agc_value;
6610 u16 min_agc_value = IGP01E1000_AGC_LENGTH_TABLE_SIZE; 4924 }
6611 u16 agc_reg_array[IGP01E1000_PHY_CHANNEL_NUM] = 4925
6612 {IGP01E1000_PHY_AGC_A, 4926 /* Remove the minimal AGC result for length < 50m */
6613 IGP01E1000_PHY_AGC_B, 4927 if (agc_value <
6614 IGP01E1000_PHY_AGC_C, 4928 IGP01E1000_PHY_CHANNEL_NUM * e1000_igp_cable_length_50) {
6615 IGP01E1000_PHY_AGC_D}; 4929 agc_value -= min_agc_value;
6616 /* Read the AGC registers for all channels */ 4930
6617 for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) { 4931 /* Get the average length of the remaining 3 channels */
6618 4932 agc_value /= (IGP01E1000_PHY_CHANNEL_NUM - 1);
6619 ret_val = e1000_read_phy_reg(hw, agc_reg_array[i], &phy_data); 4933 } else {
6620 if (ret_val) 4934 /* Get the average length of all the 4 channels. */
6621 return ret_val; 4935 agc_value /= IGP01E1000_PHY_CHANNEL_NUM;
6622 4936 }
6623 cur_agc_value = phy_data >> IGP01E1000_AGC_LENGTH_SHIFT; 4937
6624 4938 /* Set the range of the calculated length. */
6625 /* Value bound check. */ 4939 *min_length = ((e1000_igp_cable_length_table[agc_value] -
6626 if ((cur_agc_value >= IGP01E1000_AGC_LENGTH_TABLE_SIZE - 1) || 4940 IGP01E1000_AGC_RANGE) > 0) ?
6627 (cur_agc_value == 0)) 4941 (e1000_igp_cable_length_table[agc_value] -
6628 return -E1000_ERR_PHY; 4942 IGP01E1000_AGC_RANGE) : 0;
6629 4943 *max_length = e1000_igp_cable_length_table[agc_value] +
6630 agc_value += cur_agc_value; 4944 IGP01E1000_AGC_RANGE;
6631 4945 }
6632 /* Update minimal AGC value. */ 4946
6633 if (min_agc_value > cur_agc_value) 4947 return E1000_SUCCESS;
6634 min_agc_value = cur_agc_value;
6635 }
6636
6637 /* Remove the minimal AGC result for length < 50m */
6638 if (agc_value < IGP01E1000_PHY_CHANNEL_NUM * e1000_igp_cable_length_50) {
6639 agc_value -= min_agc_value;
6640
6641 /* Get the average length of the remaining 3 channels */
6642 agc_value /= (IGP01E1000_PHY_CHANNEL_NUM - 1);
6643 } else {
6644 /* Get the average length of all the 4 channels. */
6645 agc_value /= IGP01E1000_PHY_CHANNEL_NUM;
6646 }
6647
6648 /* Set the range of the calculated length. */
6649 *min_length = ((e1000_igp_cable_length_table[agc_value] -
6650 IGP01E1000_AGC_RANGE) > 0) ?
6651 (e1000_igp_cable_length_table[agc_value] -
6652 IGP01E1000_AGC_RANGE) : 0;
6653 *max_length = e1000_igp_cable_length_table[agc_value] +
6654 IGP01E1000_AGC_RANGE;
6655 } else if (hw->phy_type == e1000_phy_igp_2 ||
6656 hw->phy_type == e1000_phy_igp_3) {
6657 u16 cur_agc_index, max_agc_index = 0;
6658 u16 min_agc_index = IGP02E1000_AGC_LENGTH_TABLE_SIZE - 1;
6659 u16 agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] =
6660 {IGP02E1000_PHY_AGC_A,
6661 IGP02E1000_PHY_AGC_B,
6662 IGP02E1000_PHY_AGC_C,
6663 IGP02E1000_PHY_AGC_D};
6664 /* Read the AGC registers for all channels */
6665 for (i = 0; i < IGP02E1000_PHY_CHANNEL_NUM; i++) {
6666 ret_val = e1000_read_phy_reg(hw, agc_reg_array[i], &phy_data);
6667 if (ret_val)
6668 return ret_val;
6669
6670 /* Getting bits 15:9, which represent the combination of course and
6671 * fine gain values. The result is a number that can be put into
6672 * the lookup table to obtain the approximate cable length. */
6673 cur_agc_index = (phy_data >> IGP02E1000_AGC_LENGTH_SHIFT) &
6674 IGP02E1000_AGC_LENGTH_MASK;
6675
6676 /* Array index bound check. */
6677 if ((cur_agc_index >= IGP02E1000_AGC_LENGTH_TABLE_SIZE) ||
6678 (cur_agc_index == 0))
6679 return -E1000_ERR_PHY;
6680
6681 /* Remove min & max AGC values from calculation. */
6682 if (e1000_igp_2_cable_length_table[min_agc_index] >
6683 e1000_igp_2_cable_length_table[cur_agc_index])
6684 min_agc_index = cur_agc_index;
6685 if (e1000_igp_2_cable_length_table[max_agc_index] <
6686 e1000_igp_2_cable_length_table[cur_agc_index])
6687 max_agc_index = cur_agc_index;
6688
6689 agc_value += e1000_igp_2_cable_length_table[cur_agc_index];
6690 }
6691
6692 agc_value -= (e1000_igp_2_cable_length_table[min_agc_index] +
6693 e1000_igp_2_cable_length_table[max_agc_index]);
6694 agc_value /= (IGP02E1000_PHY_CHANNEL_NUM - 2);
6695
6696 /* Calculate cable length with the error range of +/- 10 meters. */
6697 *min_length = ((agc_value - IGP02E1000_AGC_RANGE) > 0) ?
6698 (agc_value - IGP02E1000_AGC_RANGE) : 0;
6699 *max_length = agc_value + IGP02E1000_AGC_RANGE;
6700 }
6701
6702 return E1000_SUCCESS;
6703} 4948}
6704 4949
6705/****************************************************************************** 4950/**
6706 * Check the cable polarity 4951 * e1000_check_polarity - Check the cable polarity
6707 * 4952 * @hw: Struct containing variables accessed by shared code
6708 * hw - Struct containing variables accessed by shared code 4953 * @polarity: output parameter : 0 - Polarity is not reversed
6709 * polarity - output parameter : 0 - Polarity is not reversed
6710 * 1 - Polarity is reversed. 4954 * 1 - Polarity is reversed.
6711 * 4955 *
6712 * returns: - E1000_ERR_XXX 4956 * returns: - E1000_ERR_XXX
@@ -6717,73 +4961,65 @@ static s32 e1000_get_cable_length(struct e1000_hw *hw, u16 *min_length,
6717 * 10 Mbps. If the link speed is 100 Mbps there is no polarity so this bit will 4961 * 10 Mbps. If the link speed is 100 Mbps there is no polarity so this bit will
6718 * return 0. If the link speed is 1000 Mbps the polarity status is in the 4962 * return 0. If the link speed is 1000 Mbps the polarity status is in the
6719 * IGP01E1000_PHY_PCS_INIT_REG. 4963 * IGP01E1000_PHY_PCS_INIT_REG.
6720 *****************************************************************************/ 4964 */
6721static s32 e1000_check_polarity(struct e1000_hw *hw, 4965static s32 e1000_check_polarity(struct e1000_hw *hw,
6722 e1000_rev_polarity *polarity) 4966 e1000_rev_polarity *polarity)
6723{ 4967{
6724 s32 ret_val; 4968 s32 ret_val;
6725 u16 phy_data; 4969 u16 phy_data;
6726 4970
6727 DEBUGFUNC("e1000_check_polarity"); 4971 DEBUGFUNC("e1000_check_polarity");
6728 4972
6729 if ((hw->phy_type == e1000_phy_m88) || 4973 if (hw->phy_type == e1000_phy_m88) {
6730 (hw->phy_type == e1000_phy_gg82563)) { 4974 /* return the Polarity bit in the Status register. */
6731 /* return the Polarity bit in the Status register. */ 4975 ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS,
6732 ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, 4976 &phy_data);
6733 &phy_data); 4977 if (ret_val)
6734 if (ret_val) 4978 return ret_val;
6735 return ret_val; 4979 *polarity = ((phy_data & M88E1000_PSSR_REV_POLARITY) >>
6736 *polarity = ((phy_data & M88E1000_PSSR_REV_POLARITY) >> 4980 M88E1000_PSSR_REV_POLARITY_SHIFT) ?
6737 M88E1000_PSSR_REV_POLARITY_SHIFT) ? 4981 e1000_rev_polarity_reversed : e1000_rev_polarity_normal;
6738 e1000_rev_polarity_reversed : e1000_rev_polarity_normal; 4982
6739 4983 } else if (hw->phy_type == e1000_phy_igp) {
6740 } else if (hw->phy_type == e1000_phy_igp || 4984 /* Read the Status register to check the speed */
6741 hw->phy_type == e1000_phy_igp_3 || 4985 ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS,
6742 hw->phy_type == e1000_phy_igp_2) { 4986 &phy_data);
6743 /* Read the Status register to check the speed */ 4987 if (ret_val)
6744 ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS, 4988 return ret_val;
6745 &phy_data); 4989
6746 if (ret_val) 4990 /* If speed is 1000 Mbps, must read the IGP01E1000_PHY_PCS_INIT_REG to
6747 return ret_val; 4991 * find the polarity status */
6748 4992 if ((phy_data & IGP01E1000_PSSR_SPEED_MASK) ==
6749 /* If speed is 1000 Mbps, must read the IGP01E1000_PHY_PCS_INIT_REG to 4993 IGP01E1000_PSSR_SPEED_1000MBPS) {
6750 * find the polarity status */ 4994
6751 if ((phy_data & IGP01E1000_PSSR_SPEED_MASK) == 4995 /* Read the GIG initialization PCS register (0x00B4) */
6752 IGP01E1000_PSSR_SPEED_1000MBPS) { 4996 ret_val =
6753 4997 e1000_read_phy_reg(hw, IGP01E1000_PHY_PCS_INIT_REG,
6754 /* Read the GIG initialization PCS register (0x00B4) */ 4998 &phy_data);
6755 ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PCS_INIT_REG, 4999 if (ret_val)
6756 &phy_data); 5000 return ret_val;
6757 if (ret_val) 5001
6758 return ret_val; 5002 /* Check the polarity bits */
6759 5003 *polarity = (phy_data & IGP01E1000_PHY_POLARITY_MASK) ?
6760 /* Check the polarity bits */ 5004 e1000_rev_polarity_reversed :
6761 *polarity = (phy_data & IGP01E1000_PHY_POLARITY_MASK) ? 5005 e1000_rev_polarity_normal;
6762 e1000_rev_polarity_reversed : e1000_rev_polarity_normal; 5006 } else {
6763 } else { 5007 /* For 10 Mbps, read the polarity bit in the status register. (for
6764 /* For 10 Mbps, read the polarity bit in the status register. (for 5008 * 100 Mbps this bit is always 0) */
6765 * 100 Mbps this bit is always 0) */ 5009 *polarity =
6766 *polarity = (phy_data & IGP01E1000_PSSR_POLARITY_REVERSED) ? 5010 (phy_data & IGP01E1000_PSSR_POLARITY_REVERSED) ?
6767 e1000_rev_polarity_reversed : e1000_rev_polarity_normal; 5011 e1000_rev_polarity_reversed :
6768 } 5012 e1000_rev_polarity_normal;
6769 } else if (hw->phy_type == e1000_phy_ife) { 5013 }
6770 ret_val = e1000_read_phy_reg(hw, IFE_PHY_EXTENDED_STATUS_CONTROL, 5014 }
6771 &phy_data); 5015 return E1000_SUCCESS;
6772 if (ret_val)
6773 return ret_val;
6774 *polarity = ((phy_data & IFE_PESC_POLARITY_REVERSED) >>
6775 IFE_PESC_POLARITY_REVERSED_SHIFT) ?
6776 e1000_rev_polarity_reversed : e1000_rev_polarity_normal;
6777 }
6778 return E1000_SUCCESS;
6779} 5016}
6780 5017
6781/****************************************************************************** 5018/**
6782 * Check if Downshift occured 5019 * e1000_check_downshift - Check if Downshift occurred
6783 * 5020 * @hw: Struct containing variables accessed by shared code
6784 * hw - Struct containing variables accessed by shared code 5021 * @downshift: output parameter : 0 - No Downshift occurred.
6785 * downshift - output parameter : 0 - No Downshift ocured. 5022 * 1 - Downshift occurred.
6786 * 1 - Downshift ocured.
6787 * 5023 *
6788 * returns: - E1000_ERR_XXX 5024 * returns: - E1000_ERR_XXX
6789 * E1000_SUCCESS 5025 * E1000_SUCCESS
@@ -6792,2041 +5028,607 @@ static s32 e1000_check_polarity(struct e1000_hw *hw,
6792 * Specific Status register. For IGP phy's, it reads the Downgrade bit in the 5028 * Specific Status register. For IGP phy's, it reads the Downgrade bit in the
6793 * Link Health register. In IGP this bit is latched high, so the driver must 5029 * Link Health register. In IGP this bit is latched high, so the driver must
6794 * read it immediately after link is established. 5030 * read it immediately after link is established.
6795 *****************************************************************************/ 5031 */
6796static s32 e1000_check_downshift(struct e1000_hw *hw) 5032static s32 e1000_check_downshift(struct e1000_hw *hw)
6797{ 5033{
6798 s32 ret_val; 5034 s32 ret_val;
6799 u16 phy_data; 5035 u16 phy_data;
6800
6801 DEBUGFUNC("e1000_check_downshift");
6802
6803 if (hw->phy_type == e1000_phy_igp ||
6804 hw->phy_type == e1000_phy_igp_3 ||
6805 hw->phy_type == e1000_phy_igp_2) {
6806 ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_LINK_HEALTH,
6807 &phy_data);
6808 if (ret_val)
6809 return ret_val;
6810
6811 hw->speed_downgraded = (phy_data & IGP01E1000_PLHR_SS_DOWNGRADE) ? 1 : 0;
6812 } else if ((hw->phy_type == e1000_phy_m88) ||
6813 (hw->phy_type == e1000_phy_gg82563)) {
6814 ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS,
6815 &phy_data);
6816 if (ret_val)
6817 return ret_val;
6818
6819 hw->speed_downgraded = (phy_data & M88E1000_PSSR_DOWNSHIFT) >>
6820 M88E1000_PSSR_DOWNSHIFT_SHIFT;
6821 } else if (hw->phy_type == e1000_phy_ife) {
6822 /* e1000_phy_ife supports 10/100 speed only */
6823 hw->speed_downgraded = false;
6824 }
6825
6826 return E1000_SUCCESS;
6827}
6828 5036
6829/***************************************************************************** 5037 DEBUGFUNC("e1000_check_downshift");
6830 *
6831 * 82541_rev_2 & 82547_rev_2 have the capability to configure the DSP when a
6832 * gigabit link is achieved to improve link quality.
6833 *
6834 * hw: Struct containing variables accessed by shared code
6835 *
6836 * returns: - E1000_ERR_PHY if fail to read/write the PHY
6837 * E1000_SUCCESS at any other case.
6838 *
6839 ****************************************************************************/
6840 5038
6841static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up) 5039 if (hw->phy_type == e1000_phy_igp) {
6842{ 5040 ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_LINK_HEALTH,
6843 s32 ret_val; 5041 &phy_data);
6844 u16 phy_data, phy_saved_data, speed, duplex, i; 5042 if (ret_val)
6845 u16 dsp_reg_array[IGP01E1000_PHY_CHANNEL_NUM] = 5043 return ret_val;
6846 {IGP01E1000_PHY_AGC_PARAM_A,
6847 IGP01E1000_PHY_AGC_PARAM_B,
6848 IGP01E1000_PHY_AGC_PARAM_C,
6849 IGP01E1000_PHY_AGC_PARAM_D};
6850 u16 min_length, max_length;
6851
6852 DEBUGFUNC("e1000_config_dsp_after_link_change");
6853
6854 if (hw->phy_type != e1000_phy_igp)
6855 return E1000_SUCCESS;
6856
6857 if (link_up) {
6858 ret_val = e1000_get_speed_and_duplex(hw, &speed, &duplex);
6859 if (ret_val) {
6860 DEBUGOUT("Error getting link speed and duplex\n");
6861 return ret_val;
6862 }
6863
6864 if (speed == SPEED_1000) {
6865
6866 ret_val = e1000_get_cable_length(hw, &min_length, &max_length);
6867 if (ret_val)
6868 return ret_val;
6869
6870 if ((hw->dsp_config_state == e1000_dsp_config_enabled) &&
6871 min_length >= e1000_igp_cable_length_50) {
6872
6873 for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) {
6874 ret_val = e1000_read_phy_reg(hw, dsp_reg_array[i],
6875 &phy_data);
6876 if (ret_val)
6877 return ret_val;
6878
6879 phy_data &= ~IGP01E1000_PHY_EDAC_MU_INDEX;
6880
6881 ret_val = e1000_write_phy_reg(hw, dsp_reg_array[i],
6882 phy_data);
6883 if (ret_val)
6884 return ret_val;
6885 }
6886 hw->dsp_config_state = e1000_dsp_config_activated;
6887 }
6888
6889 if ((hw->ffe_config_state == e1000_ffe_config_enabled) &&
6890 (min_length < e1000_igp_cable_length_50)) {
6891
6892 u16 ffe_idle_err_timeout = FFE_IDLE_ERR_COUNT_TIMEOUT_20;
6893 u32 idle_errs = 0;
6894
6895 /* clear previous idle error counts */
6896 ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS,
6897 &phy_data);
6898 if (ret_val)
6899 return ret_val;
6900
6901 for (i = 0; i < ffe_idle_err_timeout; i++) {
6902 udelay(1000);
6903 ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS,
6904 &phy_data);
6905 if (ret_val)
6906 return ret_val;
6907
6908 idle_errs += (phy_data & SR_1000T_IDLE_ERROR_CNT);
6909 if (idle_errs > SR_1000T_PHY_EXCESSIVE_IDLE_ERR_COUNT) {
6910 hw->ffe_config_state = e1000_ffe_config_active;
6911
6912 ret_val = e1000_write_phy_reg(hw,
6913 IGP01E1000_PHY_DSP_FFE,
6914 IGP01E1000_PHY_DSP_FFE_CM_CP);
6915 if (ret_val)
6916 return ret_val;
6917 break;
6918 }
6919
6920 if (idle_errs)
6921 ffe_idle_err_timeout = FFE_IDLE_ERR_COUNT_TIMEOUT_100;
6922 }
6923 }
6924 }
6925 } else {
6926 if (hw->dsp_config_state == e1000_dsp_config_activated) {
6927 /* Save off the current value of register 0x2F5B to be restored at
6928 * the end of the routines. */
6929 ret_val = e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data);
6930
6931 if (ret_val)
6932 return ret_val;
6933
6934 /* Disable the PHY transmitter */
6935 ret_val = e1000_write_phy_reg(hw, 0x2F5B, 0x0003);
6936
6937 if (ret_val)
6938 return ret_val;
6939
6940 mdelay(20);
6941
6942 ret_val = e1000_write_phy_reg(hw, 0x0000,
6943 IGP01E1000_IEEE_FORCE_GIGA);
6944 if (ret_val)
6945 return ret_val;
6946 for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) {
6947 ret_val = e1000_read_phy_reg(hw, dsp_reg_array[i], &phy_data);
6948 if (ret_val)
6949 return ret_val;
6950
6951 phy_data &= ~IGP01E1000_PHY_EDAC_MU_INDEX;
6952 phy_data |= IGP01E1000_PHY_EDAC_SIGN_EXT_9_BITS;
6953
6954 ret_val = e1000_write_phy_reg(hw,dsp_reg_array[i], phy_data);
6955 if (ret_val)
6956 return ret_val;
6957 }
6958
6959 ret_val = e1000_write_phy_reg(hw, 0x0000,
6960 IGP01E1000_IEEE_RESTART_AUTONEG);
6961 if (ret_val)
6962 return ret_val;
6963
6964 mdelay(20);
6965
6966 /* Now enable the transmitter */
6967 ret_val = e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data);
6968
6969 if (ret_val)
6970 return ret_val;
6971
6972 hw->dsp_config_state = e1000_dsp_config_enabled;
6973 }
6974
6975 if (hw->ffe_config_state == e1000_ffe_config_active) {
6976 /* Save off the current value of register 0x2F5B to be restored at
6977 * the end of the routines. */
6978 ret_val = e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data);
6979
6980 if (ret_val)
6981 return ret_val;
6982
6983 /* Disable the PHY transmitter */
6984 ret_val = e1000_write_phy_reg(hw, 0x2F5B, 0x0003);
6985
6986 if (ret_val)
6987 return ret_val;
6988
6989 mdelay(20);
6990
6991 ret_val = e1000_write_phy_reg(hw, 0x0000,
6992 IGP01E1000_IEEE_FORCE_GIGA);
6993 if (ret_val)
6994 return ret_val;
6995 ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_DSP_FFE,
6996 IGP01E1000_PHY_DSP_FFE_DEFAULT);
6997 if (ret_val)
6998 return ret_val;
6999
7000 ret_val = e1000_write_phy_reg(hw, 0x0000,
7001 IGP01E1000_IEEE_RESTART_AUTONEG);
7002 if (ret_val)
7003 return ret_val;
7004
7005 mdelay(20);
7006
7007 /* Now enable the transmitter */
7008 ret_val = e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data);
7009
7010 if (ret_val)
7011 return ret_val;
7012
7013 hw->ffe_config_state = e1000_ffe_config_enabled;
7014 }
7015 }
7016 return E1000_SUCCESS;
7017}
7018 5044
7019/***************************************************************************** 5045 hw->speed_downgraded =
7020 * Set PHY to class A mode 5046 (phy_data & IGP01E1000_PLHR_SS_DOWNGRADE) ? 1 : 0;
7021 * Assumes the following operations will follow to enable the new class mode. 5047 } else if (hw->phy_type == e1000_phy_m88) {
7022 * 1. Do a PHY soft reset 5048 ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS,
7023 * 2. Restart auto-negotiation or force link. 5049 &phy_data);
7024 * 5050 if (ret_val)
7025 * hw - Struct containing variables accessed by shared code 5051 return ret_val;
7026 ****************************************************************************/
7027static s32 e1000_set_phy_mode(struct e1000_hw *hw)
7028{
7029 s32 ret_val;
7030 u16 eeprom_data;
7031
7032 DEBUGFUNC("e1000_set_phy_mode");
7033
7034 if ((hw->mac_type == e1000_82545_rev_3) &&
7035 (hw->media_type == e1000_media_type_copper)) {
7036 ret_val = e1000_read_eeprom(hw, EEPROM_PHY_CLASS_WORD, 1, &eeprom_data);
7037 if (ret_val) {
7038 return ret_val;
7039 }
7040
7041 if ((eeprom_data != EEPROM_RESERVED_WORD) &&
7042 (eeprom_data & EEPROM_PHY_CLASS_A)) {
7043 ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x000B);
7044 if (ret_val)
7045 return ret_val;
7046 ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0x8104);
7047 if (ret_val)
7048 return ret_val;
7049
7050 hw->phy_reset_disable = false;
7051 }
7052 }
7053
7054 return E1000_SUCCESS;
7055}
7056 5052
7057/***************************************************************************** 5053 hw->speed_downgraded = (phy_data & M88E1000_PSSR_DOWNSHIFT) >>
7058 * 5054 M88E1000_PSSR_DOWNSHIFT_SHIFT;
7059 * This function sets the lplu state according to the active flag. When 5055 }
7060 * activating lplu this function also disables smart speed and vise versa.
7061 * lplu will not be activated unless the device autonegotiation advertisment
7062 * meets standards of either 10 or 10/100 or 10/100/1000 at all duplexes.
7063 * hw: Struct containing variables accessed by shared code
7064 * active - true to enable lplu false to disable lplu.
7065 *
7066 * returns: - E1000_ERR_PHY if fail to read/write the PHY
7067 * E1000_SUCCESS at any other case.
7068 *
7069 ****************************************************************************/
7070 5056
7071static s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active) 5057 return E1000_SUCCESS;
7072{
7073 u32 phy_ctrl = 0;
7074 s32 ret_val;
7075 u16 phy_data;
7076 DEBUGFUNC("e1000_set_d3_lplu_state");
7077
7078 if (hw->phy_type != e1000_phy_igp && hw->phy_type != e1000_phy_igp_2
7079 && hw->phy_type != e1000_phy_igp_3)
7080 return E1000_SUCCESS;
7081
7082 /* During driver activity LPLU should not be used or it will attain link
7083 * from the lowest speeds starting from 10Mbps. The capability is used for
7084 * Dx transitions and states */
7085 if (hw->mac_type == e1000_82541_rev_2 || hw->mac_type == e1000_82547_rev_2) {
7086 ret_val = e1000_read_phy_reg(hw, IGP01E1000_GMII_FIFO, &phy_data);
7087 if (ret_val)
7088 return ret_val;
7089 } else if (hw->mac_type == e1000_ich8lan) {
7090 /* MAC writes into PHY register based on the state transition
7091 * and start auto-negotiation. SW driver can overwrite the settings
7092 * in CSR PHY power control E1000_PHY_CTRL register. */
7093 phy_ctrl = er32(PHY_CTRL);
7094 } else {
7095 ret_val = e1000_read_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, &phy_data);
7096 if (ret_val)
7097 return ret_val;
7098 }
7099
7100 if (!active) {
7101 if (hw->mac_type == e1000_82541_rev_2 ||
7102 hw->mac_type == e1000_82547_rev_2) {
7103 phy_data &= ~IGP01E1000_GMII_FLEX_SPD;
7104 ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO, phy_data);
7105 if (ret_val)
7106 return ret_val;
7107 } else {
7108 if (hw->mac_type == e1000_ich8lan) {
7109 phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU;
7110 ew32(PHY_CTRL, phy_ctrl);
7111 } else {
7112 phy_data &= ~IGP02E1000_PM_D3_LPLU;
7113 ret_val = e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT,
7114 phy_data);
7115 if (ret_val)
7116 return ret_val;
7117 }
7118 }
7119
7120 /* LPLU and SmartSpeed are mutually exclusive. LPLU is used during
7121 * Dx states where the power conservation is most important. During
7122 * driver activity we should enable SmartSpeed, so performance is
7123 * maintained. */
7124 if (hw->smart_speed == e1000_smart_speed_on) {
7125 ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
7126 &phy_data);
7127 if (ret_val)
7128 return ret_val;
7129
7130 phy_data |= IGP01E1000_PSCFR_SMART_SPEED;
7131 ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
7132 phy_data);
7133 if (ret_val)
7134 return ret_val;
7135 } else if (hw->smart_speed == e1000_smart_speed_off) {
7136 ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
7137 &phy_data);
7138 if (ret_val)
7139 return ret_val;
7140
7141 phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED;
7142 ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
7143 phy_data);
7144 if (ret_val)
7145 return ret_val;
7146 }
7147
7148 } else if ((hw->autoneg_advertised == AUTONEG_ADVERTISE_SPEED_DEFAULT) ||
7149 (hw->autoneg_advertised == AUTONEG_ADVERTISE_10_ALL ) ||
7150 (hw->autoneg_advertised == AUTONEG_ADVERTISE_10_100_ALL)) {
7151
7152 if (hw->mac_type == e1000_82541_rev_2 ||
7153 hw->mac_type == e1000_82547_rev_2) {
7154 phy_data |= IGP01E1000_GMII_FLEX_SPD;
7155 ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO, phy_data);
7156 if (ret_val)
7157 return ret_val;
7158 } else {
7159 if (hw->mac_type == e1000_ich8lan) {
7160 phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU;
7161 ew32(PHY_CTRL, phy_ctrl);
7162 } else {
7163 phy_data |= IGP02E1000_PM_D3_LPLU;
7164 ret_val = e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT,
7165 phy_data);
7166 if (ret_val)
7167 return ret_val;
7168 }
7169 }
7170
7171 /* When LPLU is enabled we should disable SmartSpeed */
7172 ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, &phy_data);
7173 if (ret_val)
7174 return ret_val;
7175
7176 phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED;
7177 ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, phy_data);
7178 if (ret_val)
7179 return ret_val;
7180
7181 }
7182 return E1000_SUCCESS;
7183} 5058}
7184 5059
7185/***************************************************************************** 5060/**
7186 * 5061 * e1000_config_dsp_after_link_change
7187 * This function sets the lplu d0 state according to the active flag. When 5062 * @hw: Struct containing variables accessed by shared code
7188 * activating lplu this function also disables smart speed and vise versa. 5063 * @link_up: was link up at the time this was called
7189 * lplu will not be activated unless the device autonegotiation advertisment
7190 * meets standards of either 10 or 10/100 or 10/100/1000 at all duplexes.
7191 * hw: Struct containing variables accessed by shared code
7192 * active - true to enable lplu false to disable lplu.
7193 * 5064 *
7194 * returns: - E1000_ERR_PHY if fail to read/write the PHY 5065 * returns: - E1000_ERR_PHY if fail to read/write the PHY
7195 * E1000_SUCCESS at any other case. 5066 * E1000_SUCCESS at any other case.
7196 * 5067 *
7197 ****************************************************************************/ 5068 * 82541_rev_2 & 82547_rev_2 have the capability to configure the DSP when a
7198 5069 * gigabit link is achieved to improve link quality.
7199static s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active) 5070 */
7200{
7201 u32 phy_ctrl = 0;
7202 s32 ret_val;
7203 u16 phy_data;
7204 DEBUGFUNC("e1000_set_d0_lplu_state");
7205
7206 if (hw->mac_type <= e1000_82547_rev_2)
7207 return E1000_SUCCESS;
7208
7209 if (hw->mac_type == e1000_ich8lan) {
7210 phy_ctrl = er32(PHY_CTRL);
7211 } else {
7212 ret_val = e1000_read_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, &phy_data);
7213 if (ret_val)
7214 return ret_val;
7215 }
7216
7217 if (!active) {
7218 if (hw->mac_type == e1000_ich8lan) {
7219 phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU;
7220 ew32(PHY_CTRL, phy_ctrl);
7221 } else {
7222 phy_data &= ~IGP02E1000_PM_D0_LPLU;
7223 ret_val = e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, phy_data);
7224 if (ret_val)
7225 return ret_val;
7226 }
7227
7228 /* LPLU and SmartSpeed are mutually exclusive. LPLU is used during
7229 * Dx states where the power conservation is most important. During
7230 * driver activity we should enable SmartSpeed, so performance is
7231 * maintained. */
7232 if (hw->smart_speed == e1000_smart_speed_on) {
7233 ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
7234 &phy_data);
7235 if (ret_val)
7236 return ret_val;
7237
7238 phy_data |= IGP01E1000_PSCFR_SMART_SPEED;
7239 ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
7240 phy_data);
7241 if (ret_val)
7242 return ret_val;
7243 } else if (hw->smart_speed == e1000_smart_speed_off) {
7244 ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
7245 &phy_data);
7246 if (ret_val)
7247 return ret_val;
7248
7249 phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED;
7250 ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
7251 phy_data);
7252 if (ret_val)
7253 return ret_val;
7254 }
7255
7256
7257 } else {
7258
7259 if (hw->mac_type == e1000_ich8lan) {
7260 phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU;
7261 ew32(PHY_CTRL, phy_ctrl);
7262 } else {
7263 phy_data |= IGP02E1000_PM_D0_LPLU;
7264 ret_val = e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, phy_data);
7265 if (ret_val)
7266 return ret_val;
7267 }
7268
7269 /* When LPLU is enabled we should disable SmartSpeed */
7270 ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, &phy_data);
7271 if (ret_val)
7272 return ret_val;
7273
7274 phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED;
7275 ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, phy_data);
7276 if (ret_val)
7277 return ret_val;
7278
7279 }
7280 return E1000_SUCCESS;
7281}
7282 5071
7283/****************************************************************************** 5072static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up)
7284 * Change VCO speed register to improve Bit Error Rate performance of SERDES.
7285 *
7286 * hw - Struct containing variables accessed by shared code
7287 *****************************************************************************/
7288static s32 e1000_set_vco_speed(struct e1000_hw *hw)
7289{ 5073{
7290 s32 ret_val; 5074 s32 ret_val;
7291 u16 default_page = 0; 5075 u16 phy_data, phy_saved_data, speed, duplex, i;
7292 u16 phy_data; 5076 u16 dsp_reg_array[IGP01E1000_PHY_CHANNEL_NUM] =
7293 5077 { IGP01E1000_PHY_AGC_PARAM_A,
7294 DEBUGFUNC("e1000_set_vco_speed"); 5078 IGP01E1000_PHY_AGC_PARAM_B,
5079 IGP01E1000_PHY_AGC_PARAM_C,
5080 IGP01E1000_PHY_AGC_PARAM_D
5081 };
5082 u16 min_length, max_length;
5083
5084 DEBUGFUNC("e1000_config_dsp_after_link_change");
5085
5086 if (hw->phy_type != e1000_phy_igp)
5087 return E1000_SUCCESS;
5088
5089 if (link_up) {
5090 ret_val = e1000_get_speed_and_duplex(hw, &speed, &duplex);
5091 if (ret_val) {
5092 DEBUGOUT("Error getting link speed and duplex\n");
5093 return ret_val;
5094 }
7295 5095
7296 switch (hw->mac_type) { 5096 if (speed == SPEED_1000) {
7297 case e1000_82545_rev_3: 5097
7298 case e1000_82546_rev_3: 5098 ret_val =
7299 break; 5099 e1000_get_cable_length(hw, &min_length,
7300 default: 5100 &max_length);
7301 return E1000_SUCCESS; 5101 if (ret_val)
7302 } 5102 return ret_val;
5103
5104 if ((hw->dsp_config_state == e1000_dsp_config_enabled)
5105 && min_length >= e1000_igp_cable_length_50) {
5106
5107 for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) {
5108 ret_val =
5109 e1000_read_phy_reg(hw,
5110 dsp_reg_array[i],
5111 &phy_data);
5112 if (ret_val)
5113 return ret_val;
5114
5115 phy_data &=
5116 ~IGP01E1000_PHY_EDAC_MU_INDEX;
5117
5118 ret_val =
5119 e1000_write_phy_reg(hw,
5120 dsp_reg_array
5121 [i], phy_data);
5122 if (ret_val)
5123 return ret_val;
5124 }
5125 hw->dsp_config_state =
5126 e1000_dsp_config_activated;
5127 }
5128
5129 if ((hw->ffe_config_state == e1000_ffe_config_enabled)
5130 && (min_length < e1000_igp_cable_length_50)) {
5131
5132 u16 ffe_idle_err_timeout =
5133 FFE_IDLE_ERR_COUNT_TIMEOUT_20;
5134 u32 idle_errs = 0;
5135
5136 /* clear previous idle error counts */
5137 ret_val =
5138 e1000_read_phy_reg(hw, PHY_1000T_STATUS,
5139 &phy_data);
5140 if (ret_val)
5141 return ret_val;
5142
5143 for (i = 0; i < ffe_idle_err_timeout; i++) {
5144 udelay(1000);
5145 ret_val =
5146 e1000_read_phy_reg(hw,
5147 PHY_1000T_STATUS,
5148 &phy_data);
5149 if (ret_val)
5150 return ret_val;
5151
5152 idle_errs +=
5153 (phy_data &
5154 SR_1000T_IDLE_ERROR_CNT);
5155 if (idle_errs >
5156 SR_1000T_PHY_EXCESSIVE_IDLE_ERR_COUNT)
5157 {
5158 hw->ffe_config_state =
5159 e1000_ffe_config_active;
5160
5161 ret_val =
5162 e1000_write_phy_reg(hw,
5163 IGP01E1000_PHY_DSP_FFE,
5164 IGP01E1000_PHY_DSP_FFE_CM_CP);
5165 if (ret_val)
5166 return ret_val;
5167 break;
5168 }
5169
5170 if (idle_errs)
5171 ffe_idle_err_timeout =
5172 FFE_IDLE_ERR_COUNT_TIMEOUT_100;
5173 }
5174 }
5175 }
5176 } else {
5177 if (hw->dsp_config_state == e1000_dsp_config_activated) {
5178 /* Save off the current value of register 0x2F5B to be restored at
5179 * the end of the routines. */
5180 ret_val =
5181 e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data);
5182
5183 if (ret_val)
5184 return ret_val;
5185
5186 /* Disable the PHY transmitter */
5187 ret_val = e1000_write_phy_reg(hw, 0x2F5B, 0x0003);
5188
5189 if (ret_val)
5190 return ret_val;
5191
5192 mdelay(20);
5193
5194 ret_val = e1000_write_phy_reg(hw, 0x0000,
5195 IGP01E1000_IEEE_FORCE_GIGA);
5196 if (ret_val)
5197 return ret_val;
5198 for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) {
5199 ret_val =
5200 e1000_read_phy_reg(hw, dsp_reg_array[i],
5201 &phy_data);
5202 if (ret_val)
5203 return ret_val;
5204
5205 phy_data &= ~IGP01E1000_PHY_EDAC_MU_INDEX;
5206 phy_data |= IGP01E1000_PHY_EDAC_SIGN_EXT_9_BITS;
5207
5208 ret_val =
5209 e1000_write_phy_reg(hw, dsp_reg_array[i],
5210 phy_data);
5211 if (ret_val)
5212 return ret_val;
5213 }
5214
5215 ret_val = e1000_write_phy_reg(hw, 0x0000,
5216 IGP01E1000_IEEE_RESTART_AUTONEG);
5217 if (ret_val)
5218 return ret_val;
5219
5220 mdelay(20);
5221
5222 /* Now enable the transmitter */
5223 ret_val =
5224 e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data);
5225
5226 if (ret_val)
5227 return ret_val;
5228
5229 hw->dsp_config_state = e1000_dsp_config_enabled;
5230 }
7303 5231
7304 /* Set PHY register 30, page 5, bit 8 to 0 */ 5232 if (hw->ffe_config_state == e1000_ffe_config_active) {
5233 /* Save off the current value of register 0x2F5B to be restored at
5234 * the end of the routines. */
5235 ret_val =
5236 e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data);
7305 5237
7306 ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, &default_page); 5238 if (ret_val)
7307 if (ret_val) 5239 return ret_val;
7308 return ret_val;
7309 5240
7310 ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0005); 5241 /* Disable the PHY transmitter */
7311 if (ret_val) 5242 ret_val = e1000_write_phy_reg(hw, 0x2F5B, 0x0003);
7312 return ret_val;
7313 5243
7314 ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, &phy_data); 5244 if (ret_val)
7315 if (ret_val) 5245 return ret_val;
7316 return ret_val;
7317 5246
7318 phy_data &= ~M88E1000_PHY_VCO_REG_BIT8; 5247 mdelay(20);
7319 ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, phy_data);
7320 if (ret_val)
7321 return ret_val;
7322 5248
7323 /* Set PHY register 30, page 4, bit 11 to 1 */ 5249 ret_val = e1000_write_phy_reg(hw, 0x0000,
5250 IGP01E1000_IEEE_FORCE_GIGA);
5251 if (ret_val)
5252 return ret_val;
5253 ret_val =
5254 e1000_write_phy_reg(hw, IGP01E1000_PHY_DSP_FFE,
5255 IGP01E1000_PHY_DSP_FFE_DEFAULT);
5256 if (ret_val)
5257 return ret_val;
7324 5258
7325 ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0004); 5259 ret_val = e1000_write_phy_reg(hw, 0x0000,
7326 if (ret_val) 5260 IGP01E1000_IEEE_RESTART_AUTONEG);
7327 return ret_val; 5261 if (ret_val)
5262 return ret_val;
7328 5263
7329 ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, &phy_data); 5264 mdelay(20);
7330 if (ret_val)
7331 return ret_val;
7332 5265
7333 phy_data |= M88E1000_PHY_VCO_REG_BIT11; 5266 /* Now enable the transmitter */
7334 ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, phy_data); 5267 ret_val =
7335 if (ret_val) 5268 e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data);
7336 return ret_val;
7337 5269
7338 ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, default_page); 5270 if (ret_val)
7339 if (ret_val) 5271 return ret_val;
7340 return ret_val;
7341 5272
7342 return E1000_SUCCESS; 5273 hw->ffe_config_state = e1000_ffe_config_enabled;
5274 }
5275 }
5276 return E1000_SUCCESS;
7343} 5277}
7344 5278
7345 5279/**
7346/***************************************************************************** 5280 * e1000_set_phy_mode - Set PHY to class A mode
7347 * This function reads the cookie from ARC ram. 5281 * @hw: Struct containing variables accessed by shared code
7348 * 5282 *
7349 * returns: - E1000_SUCCESS . 5283 * Assumes the following operations will follow to enable the new class mode.
7350 ****************************************************************************/ 5284 * 1. Do a PHY soft reset
7351static s32 e1000_host_if_read_cookie(struct e1000_hw *hw, u8 *buffer) 5285 * 2. Restart auto-negotiation or force link.
5286 */
5287static s32 e1000_set_phy_mode(struct e1000_hw *hw)
7352{ 5288{
7353 u8 i; 5289 s32 ret_val;
7354 u32 offset = E1000_MNG_DHCP_COOKIE_OFFSET; 5290 u16 eeprom_data;
7355 u8 length = E1000_MNG_DHCP_COOKIE_LENGTH;
7356
7357 length = (length >> 2);
7358 offset = (offset >> 2);
7359
7360 for (i = 0; i < length; i++) {
7361 *((u32 *)buffer + i) =
7362 E1000_READ_REG_ARRAY_DWORD(hw, HOST_IF, offset + i);
7363 }
7364 return E1000_SUCCESS;
7365}
7366 5291
5292 DEBUGFUNC("e1000_set_phy_mode");
7367 5293
7368/***************************************************************************** 5294 if ((hw->mac_type == e1000_82545_rev_3) &&
7369 * This function checks whether the HOST IF is enabled for command operaton 5295 (hw->media_type == e1000_media_type_copper)) {
7370 * and also checks whether the previous command is completed. 5296 ret_val =
7371 * It busy waits in case of previous command is not completed. 5297 e1000_read_eeprom(hw, EEPROM_PHY_CLASS_WORD, 1,
7372 * 5298 &eeprom_data);
7373 * returns: - E1000_ERR_HOST_INTERFACE_COMMAND in case if is not ready or 5299 if (ret_val) {
7374 * timeout 5300 return ret_val;
7375 * - E1000_SUCCESS for success. 5301 }
7376 ****************************************************************************/
7377static s32 e1000_mng_enable_host_if(struct e1000_hw *hw)
7378{
7379 u32 hicr;
7380 u8 i;
7381
7382 /* Check that the host interface is enabled. */
7383 hicr = er32(HICR);
7384 if ((hicr & E1000_HICR_EN) == 0) {
7385 DEBUGOUT("E1000_HOST_EN bit disabled.\n");
7386 return -E1000_ERR_HOST_INTERFACE_COMMAND;
7387 }
7388 /* check the previous command is completed */
7389 for (i = 0; i < E1000_MNG_DHCP_COMMAND_TIMEOUT; i++) {
7390 hicr = er32(HICR);
7391 if (!(hicr & E1000_HICR_C))
7392 break;
7393 mdelay(1);
7394 }
7395
7396 if (i == E1000_MNG_DHCP_COMMAND_TIMEOUT) {
7397 DEBUGOUT("Previous command timeout failed .\n");
7398 return -E1000_ERR_HOST_INTERFACE_COMMAND;
7399 }
7400 return E1000_SUCCESS;
7401}
7402 5302
7403/***************************************************************************** 5303 if ((eeprom_data != EEPROM_RESERVED_WORD) &&
7404 * This function writes the buffer content at the offset given on the host if. 5304 (eeprom_data & EEPROM_PHY_CLASS_A)) {
7405 * It also does alignment considerations to do the writes in most efficient way. 5305 ret_val =
7406 * Also fills up the sum of the buffer in *buffer parameter. 5306 e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT,
7407 * 5307 0x000B);
7408 * returns - E1000_SUCCESS for success. 5308 if (ret_val)
7409 ****************************************************************************/ 5309 return ret_val;
7410static s32 e1000_mng_host_if_write(struct e1000_hw *hw, u8 *buffer, u16 length, 5310 ret_val =
7411 u16 offset, u8 *sum) 5311 e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL,
7412{ 5312 0x8104);
7413 u8 *tmp; 5313 if (ret_val)
7414 u8 *bufptr = buffer; 5314 return ret_val;
7415 u32 data = 0; 5315
7416 u16 remaining, i, j, prev_bytes; 5316 hw->phy_reset_disable = false;
7417 5317 }
7418 /* sum = only sum of the data and it is not checksum */ 5318 }
7419
7420 if (length == 0 || offset + length > E1000_HI_MAX_MNG_DATA_LENGTH) {
7421 return -E1000_ERR_PARAM;
7422 }
7423
7424 tmp = (u8 *)&data;
7425 prev_bytes = offset & 0x3;
7426 offset &= 0xFFFC;
7427 offset >>= 2;
7428
7429 if (prev_bytes) {
7430 data = E1000_READ_REG_ARRAY_DWORD(hw, HOST_IF, offset);
7431 for (j = prev_bytes; j < sizeof(u32); j++) {
7432 *(tmp + j) = *bufptr++;
7433 *sum += *(tmp + j);
7434 }
7435 E1000_WRITE_REG_ARRAY_DWORD(hw, HOST_IF, offset, data);
7436 length -= j - prev_bytes;
7437 offset++;
7438 }
7439
7440 remaining = length & 0x3;
7441 length -= remaining;
7442
7443 /* Calculate length in DWORDs */
7444 length >>= 2;
7445
7446 /* The device driver writes the relevant command block into the
7447 * ram area. */
7448 for (i = 0; i < length; i++) {
7449 for (j = 0; j < sizeof(u32); j++) {
7450 *(tmp + j) = *bufptr++;
7451 *sum += *(tmp + j);
7452 }
7453
7454 E1000_WRITE_REG_ARRAY_DWORD(hw, HOST_IF, offset + i, data);
7455 }
7456 if (remaining) {
7457 for (j = 0; j < sizeof(u32); j++) {
7458 if (j < remaining)
7459 *(tmp + j) = *bufptr++;
7460 else
7461 *(tmp + j) = 0;
7462
7463 *sum += *(tmp + j);
7464 }
7465 E1000_WRITE_REG_ARRAY_DWORD(hw, HOST_IF, offset + i, data);
7466 }
7467
7468 return E1000_SUCCESS;
7469}
7470 5319
5320 return E1000_SUCCESS;
5321}
7471 5322
7472/***************************************************************************** 5323/**
7473 * This function writes the command header after does the checksum calculation. 5324 * e1000_set_d3_lplu_state - set d3 link power state
5325 * @hw: Struct containing variables accessed by shared code
5326 * @active: true to enable lplu false to disable lplu.
5327 *
5328 * This function sets the lplu state according to the active flag. When
5329 * activating lplu this function also disables smart speed and vise versa.
5330 * lplu will not be activated unless the device autonegotiation advertisement
5331 * meets standards of either 10 or 10/100 or 10/100/1000 at all duplexes.
7474 * 5332 *
7475 * returns - E1000_SUCCESS for success. 5333 * returns: - E1000_ERR_PHY if fail to read/write the PHY
7476 ****************************************************************************/ 5334 * E1000_SUCCESS at any other case.
7477static s32 e1000_mng_write_cmd_header(struct e1000_hw *hw, 5335 */
7478 struct e1000_host_mng_command_header *hdr) 5336static s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active)
7479{ 5337{
7480 u16 i; 5338 s32 ret_val;
7481 u8 sum; 5339 u16 phy_data;
7482 u8 *buffer; 5340 DEBUGFUNC("e1000_set_d3_lplu_state");
7483 5341
7484 /* Write the whole command header structure which includes sum of 5342 if (hw->phy_type != e1000_phy_igp)
7485 * the buffer */ 5343 return E1000_SUCCESS;
7486 5344
7487 u16 length = sizeof(struct e1000_host_mng_command_header); 5345 /* During driver activity LPLU should not be used or it will attain link
5346 * from the lowest speeds starting from 10Mbps. The capability is used for
5347 * Dx transitions and states */
5348 if (hw->mac_type == e1000_82541_rev_2
5349 || hw->mac_type == e1000_82547_rev_2) {
5350 ret_val =
5351 e1000_read_phy_reg(hw, IGP01E1000_GMII_FIFO, &phy_data);
5352 if (ret_val)
5353 return ret_val;
5354 }
7488 5355
7489 sum = hdr->checksum; 5356 if (!active) {
7490 hdr->checksum = 0; 5357 if (hw->mac_type == e1000_82541_rev_2 ||
5358 hw->mac_type == e1000_82547_rev_2) {
5359 phy_data &= ~IGP01E1000_GMII_FLEX_SPD;
5360 ret_val =
5361 e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO,
5362 phy_data);
5363 if (ret_val)
5364 return ret_val;
5365 }
7491 5366
7492 buffer = (u8 *)hdr; 5367 /* LPLU and SmartSpeed are mutually exclusive. LPLU is used during
7493 i = length; 5368 * Dx states where the power conservation is most important. During
7494 while (i--) 5369 * driver activity we should enable SmartSpeed, so performance is
7495 sum += buffer[i]; 5370 * maintained. */
5371 if (hw->smart_speed == e1000_smart_speed_on) {
5372 ret_val =
5373 e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
5374 &phy_data);
5375 if (ret_val)
5376 return ret_val;
5377
5378 phy_data |= IGP01E1000_PSCFR_SMART_SPEED;
5379 ret_val =
5380 e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
5381 phy_data);
5382 if (ret_val)
5383 return ret_val;
5384 } else if (hw->smart_speed == e1000_smart_speed_off) {
5385 ret_val =
5386 e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
5387 &phy_data);
5388 if (ret_val)
5389 return ret_val;
5390
5391 phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED;
5392 ret_val =
5393 e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
5394 phy_data);
5395 if (ret_val)
5396 return ret_val;
5397 }
5398 } else if ((hw->autoneg_advertised == AUTONEG_ADVERTISE_SPEED_DEFAULT)
5399 || (hw->autoneg_advertised == AUTONEG_ADVERTISE_10_ALL)
5400 || (hw->autoneg_advertised ==
5401 AUTONEG_ADVERTISE_10_100_ALL)) {
5402
5403 if (hw->mac_type == e1000_82541_rev_2 ||
5404 hw->mac_type == e1000_82547_rev_2) {
5405 phy_data |= IGP01E1000_GMII_FLEX_SPD;
5406 ret_val =
5407 e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO,
5408 phy_data);
5409 if (ret_val)
5410 return ret_val;
5411 }
7496 5412
7497 hdr->checksum = 0 - sum; 5413 /* When LPLU is enabled we should disable SmartSpeed */
5414 ret_val =
5415 e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
5416 &phy_data);
5417 if (ret_val)
5418 return ret_val;
7498 5419
7499 length >>= 2; 5420 phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED;
7500 /* The device driver writes the relevant command block into the ram area. */ 5421 ret_val =
7501 for (i = 0; i < length; i++) { 5422 e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
7502 E1000_WRITE_REG_ARRAY_DWORD(hw, HOST_IF, i, *((u32 *)hdr + i)); 5423 phy_data);
7503 E1000_WRITE_FLUSH(); 5424 if (ret_val)
7504 } 5425 return ret_val;
7505 5426
7506 return E1000_SUCCESS; 5427 }
5428 return E1000_SUCCESS;
7507} 5429}
7508 5430
7509 5431/**
7510/***************************************************************************** 5432 * e1000_set_vco_speed
7511 * This function indicates to ARC that a new command is pending which completes 5433 * @hw: Struct containing variables accessed by shared code
7512 * one write operation by the driver.
7513 * 5434 *
7514 * returns - E1000_SUCCESS for success. 5435 * Change VCO speed register to improve Bit Error Rate performance of SERDES.
7515 ****************************************************************************/ 5436 */
7516static s32 e1000_mng_write_commit(struct e1000_hw *hw) 5437static s32 e1000_set_vco_speed(struct e1000_hw *hw)
7517{ 5438{
7518 u32 hicr; 5439 s32 ret_val;
5440 u16 default_page = 0;
5441 u16 phy_data;
7519 5442
7520 hicr = er32(HICR); 5443 DEBUGFUNC("e1000_set_vco_speed");
7521 /* Setting this bit tells the ARC that a new command is pending. */
7522 ew32(HICR, hicr | E1000_HICR_C);
7523 5444
7524 return E1000_SUCCESS; 5445 switch (hw->mac_type) {
7525} 5446 case e1000_82545_rev_3:
5447 case e1000_82546_rev_3:
5448 break;
5449 default:
5450 return E1000_SUCCESS;
5451 }
7526 5452
5453 /* Set PHY register 30, page 5, bit 8 to 0 */
7527 5454
7528/***************************************************************************** 5455 ret_val =
7529 * This function checks the mode of the firmware. 5456 e1000_read_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, &default_page);
7530 * 5457 if (ret_val)
7531 * returns - true when the mode is IAMT or false. 5458 return ret_val;
7532 ****************************************************************************/
7533bool e1000_check_mng_mode(struct e1000_hw *hw)
7534{
7535 u32 fwsm;
7536
7537 fwsm = er32(FWSM);
7538 5459
7539 if (hw->mac_type == e1000_ich8lan) { 5460 ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0005);
7540 if ((fwsm & E1000_FWSM_MODE_MASK) == 5461 if (ret_val)
7541 (E1000_MNG_ICH_IAMT_MODE << E1000_FWSM_MODE_SHIFT)) 5462 return ret_val;
7542 return true;
7543 } else if ((fwsm & E1000_FWSM_MODE_MASK) ==
7544 (E1000_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT))
7545 return true;
7546 5463
7547 return false; 5464 ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, &phy_data);
7548} 5465 if (ret_val)
5466 return ret_val;
7549 5467
5468 phy_data &= ~M88E1000_PHY_VCO_REG_BIT8;
5469 ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, phy_data);
5470 if (ret_val)
5471 return ret_val;
7550 5472
7551/***************************************************************************** 5473 /* Set PHY register 30, page 4, bit 11 to 1 */
7552 * This function writes the dhcp info .
7553 ****************************************************************************/
7554s32 e1000_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer, u16 length)
7555{
7556 s32 ret_val;
7557 struct e1000_host_mng_command_header hdr;
7558
7559 hdr.command_id = E1000_MNG_DHCP_TX_PAYLOAD_CMD;
7560 hdr.command_length = length;
7561 hdr.reserved1 = 0;
7562 hdr.reserved2 = 0;
7563 hdr.checksum = 0;
7564
7565 ret_val = e1000_mng_enable_host_if(hw);
7566 if (ret_val == E1000_SUCCESS) {
7567 ret_val = e1000_mng_host_if_write(hw, buffer, length, sizeof(hdr),
7568 &(hdr.checksum));
7569 if (ret_val == E1000_SUCCESS) {
7570 ret_val = e1000_mng_write_cmd_header(hw, &hdr);
7571 if (ret_val == E1000_SUCCESS)
7572 ret_val = e1000_mng_write_commit(hw);
7573 }
7574 }
7575 return ret_val;
7576}
7577 5474
5475 ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0004);
5476 if (ret_val)
5477 return ret_val;
7578 5478
7579/***************************************************************************** 5479 ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, &phy_data);
7580 * This function calculates the checksum. 5480 if (ret_val)
7581 * 5481 return ret_val;
7582 * returns - checksum of buffer contents.
7583 ****************************************************************************/
7584static u8 e1000_calculate_mng_checksum(char *buffer, u32 length)
7585{
7586 u8 sum = 0;
7587 u32 i;
7588 5482
7589 if (!buffer) 5483 phy_data |= M88E1000_PHY_VCO_REG_BIT11;
7590 return 0; 5484 ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, phy_data);
5485 if (ret_val)
5486 return ret_val;
7591 5487
7592 for (i=0; i < length; i++) 5488 ret_val =
7593 sum += buffer[i]; 5489 e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, default_page);
5490 if (ret_val)
5491 return ret_val;
7594 5492
7595 return (u8)(0 - sum); 5493 return E1000_SUCCESS;
7596} 5494}
7597 5495
7598/*****************************************************************************
7599 * This function checks whether tx pkt filtering needs to be enabled or not.
7600 *
7601 * returns - true for packet filtering or false.
7602 ****************************************************************************/
7603bool e1000_enable_tx_pkt_filtering(struct e1000_hw *hw)
7604{
7605 /* called in init as well as watchdog timer functions */
7606
7607 s32 ret_val, checksum;
7608 bool tx_filter = false;
7609 struct e1000_host_mng_dhcp_cookie *hdr = &(hw->mng_cookie);
7610 u8 *buffer = (u8 *) &(hw->mng_cookie);
7611
7612 if (e1000_check_mng_mode(hw)) {
7613 ret_val = e1000_mng_enable_host_if(hw);
7614 if (ret_val == E1000_SUCCESS) {
7615 ret_val = e1000_host_if_read_cookie(hw, buffer);
7616 if (ret_val == E1000_SUCCESS) {
7617 checksum = hdr->checksum;
7618 hdr->checksum = 0;
7619 if ((hdr->signature == E1000_IAMT_SIGNATURE) &&
7620 checksum == e1000_calculate_mng_checksum((char *)buffer,
7621 E1000_MNG_DHCP_COOKIE_LENGTH)) {
7622 if (hdr->status &
7623 E1000_MNG_DHCP_COOKIE_STATUS_PARSING_SUPPORT)
7624 tx_filter = true;
7625 } else
7626 tx_filter = true;
7627 } else
7628 tx_filter = true;
7629 }
7630 }
7631
7632 hw->tx_pkt_filtering = tx_filter;
7633 return tx_filter;
7634}
7635 5496
7636/****************************************************************************** 5497/**
7637 * Verifies the hardware needs to allow ARPs to be processed by the host 5498 * e1000_enable_mng_pass_thru - check for bmc pass through
7638 * 5499 * @hw: Struct containing variables accessed by shared code
7639 * hw - Struct containing variables accessed by shared code
7640 * 5500 *
5501 * Verifies the hardware needs to allow ARPs to be processed by the host
7641 * returns: - true/false 5502 * returns: - true/false
7642 * 5503 */
7643 *****************************************************************************/
7644u32 e1000_enable_mng_pass_thru(struct e1000_hw *hw) 5504u32 e1000_enable_mng_pass_thru(struct e1000_hw *hw)
7645{ 5505{
7646 u32 manc; 5506 u32 manc;
7647 u32 fwsm, factps;
7648
7649 if (hw->asf_firmware_present) {
7650 manc = er32(MANC);
7651
7652 if (!(manc & E1000_MANC_RCV_TCO_EN) ||
7653 !(manc & E1000_MANC_EN_MAC_ADDR_FILTER))
7654 return false;
7655 if (e1000_arc_subsystem_valid(hw)) {
7656 fwsm = er32(FWSM);
7657 factps = er32(FACTPS);
7658
7659 if ((((fwsm & E1000_FWSM_MODE_MASK) >> E1000_FWSM_MODE_SHIFT) ==
7660 e1000_mng_mode_pt) && !(factps & E1000_FACTPS_MNGCG))
7661 return true;
7662 } else
7663 if ((manc & E1000_MANC_SMBUS_EN) && !(manc & E1000_MANC_ASF_EN))
7664 return true;
7665 }
7666 return false;
7667}
7668 5507
7669static s32 e1000_polarity_reversal_workaround(struct e1000_hw *hw) 5508 if (hw->asf_firmware_present) {
7670{ 5509 manc = er32(MANC);
7671 s32 ret_val; 5510
7672 u16 mii_status_reg; 5511 if (!(manc & E1000_MANC_RCV_TCO_EN) ||
7673 u16 i; 5512 !(manc & E1000_MANC_EN_MAC_ADDR_FILTER))
7674 5513 return false;
7675 /* Polarity reversal workaround for forced 10F/10H links. */ 5514 if ((manc & E1000_MANC_SMBUS_EN) && !(manc & E1000_MANC_ASF_EN))
7676 5515 return true;
7677 /* Disable the transmitter on the PHY */ 5516 }
7678 5517 return false;
7679 ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0019);
7680 if (ret_val)
7681 return ret_val;
7682 ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xFFFF);
7683 if (ret_val)
7684 return ret_val;
7685
7686 ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0000);
7687 if (ret_val)
7688 return ret_val;
7689
7690 /* This loop will early-out if the NO link condition has been met. */
7691 for (i = PHY_FORCE_TIME; i > 0; i--) {
7692 /* Read the MII Status Register and wait for Link Status bit
7693 * to be clear.
7694 */
7695
7696 ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
7697 if (ret_val)
7698 return ret_val;
7699
7700 ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
7701 if (ret_val)
7702 return ret_val;
7703
7704 if ((mii_status_reg & ~MII_SR_LINK_STATUS) == 0) break;
7705 mdelay(100);
7706 }
7707
7708 /* Recommended delay time after link has been lost */
7709 mdelay(1000);
7710
7711 /* Now we will re-enable th transmitter on the PHY */
7712
7713 ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0019);
7714 if (ret_val)
7715 return ret_val;
7716 mdelay(50);
7717 ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xFFF0);
7718 if (ret_val)
7719 return ret_val;
7720 mdelay(50);
7721 ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xFF00);
7722 if (ret_val)
7723 return ret_val;
7724 mdelay(50);
7725 ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0x0000);
7726 if (ret_val)
7727 return ret_val;
7728
7729 ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0000);
7730 if (ret_val)
7731 return ret_val;
7732
7733 /* This loop will early-out if the link condition has been met. */
7734 for (i = PHY_FORCE_TIME; i > 0; i--) {
7735 /* Read the MII Status Register and wait for Link Status bit
7736 * to be set.
7737 */
7738
7739 ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
7740 if (ret_val)
7741 return ret_val;
7742
7743 ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
7744 if (ret_val)
7745 return ret_val;
7746
7747 if (mii_status_reg & MII_SR_LINK_STATUS) break;
7748 mdelay(100);
7749 }
7750 return E1000_SUCCESS;
7751} 5518}
7752 5519
7753/*************************************************************************** 5520static s32 e1000_polarity_reversal_workaround(struct e1000_hw *hw)
7754 *
7755 * Disables PCI-Express master access.
7756 *
7757 * hw: Struct containing variables accessed by shared code
7758 *
7759 * returns: - none.
7760 *
7761 ***************************************************************************/
7762static void e1000_set_pci_express_master_disable(struct e1000_hw *hw)
7763{ 5521{
7764 u32 ctrl; 5522 s32 ret_val;
5523 u16 mii_status_reg;
5524 u16 i;
7765 5525
7766 DEBUGFUNC("e1000_set_pci_express_master_disable"); 5526 /* Polarity reversal workaround for forced 10F/10H links. */
7767 5527
7768 if (hw->bus_type != e1000_bus_type_pci_express) 5528 /* Disable the transmitter on the PHY */
7769 return;
7770 5529
7771 ctrl = er32(CTRL); 5530 ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0019);
7772 ctrl |= E1000_CTRL_GIO_MASTER_DISABLE; 5531 if (ret_val)
7773 ew32(CTRL, ctrl); 5532 return ret_val;
7774} 5533 ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xFFFF);
5534 if (ret_val)
5535 return ret_val;
7775 5536
7776/******************************************************************************* 5537 ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0000);
7777 * 5538 if (ret_val)
7778 * Disables PCI-Express master access and verifies there are no pending requests 5539 return ret_val;
7779 *
7780 * hw: Struct containing variables accessed by shared code
7781 *
7782 * returns: - E1000_ERR_MASTER_REQUESTS_PENDING if master disable bit hasn't
7783 * caused the master requests to be disabled.
7784 * E1000_SUCCESS master requests disabled.
7785 *
7786 ******************************************************************************/
7787s32 e1000_disable_pciex_master(struct e1000_hw *hw)
7788{
7789 s32 timeout = MASTER_DISABLE_TIMEOUT; /* 80ms */
7790 5540
7791 DEBUGFUNC("e1000_disable_pciex_master"); 5541 /* This loop will early-out if the NO link condition has been met. */
5542 for (i = PHY_FORCE_TIME; i > 0; i--) {
5543 /* Read the MII Status Register and wait for Link Status bit
5544 * to be clear.
5545 */
7792 5546
7793 if (hw->bus_type != e1000_bus_type_pci_express) 5547 ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
7794 return E1000_SUCCESS; 5548 if (ret_val)
5549 return ret_val;
7795 5550
7796 e1000_set_pci_express_master_disable(hw); 5551 ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
5552 if (ret_val)
5553 return ret_val;
7797 5554
7798 while (timeout) { 5555 if ((mii_status_reg & ~MII_SR_LINK_STATUS) == 0)
7799 if (!(er32(STATUS) & E1000_STATUS_GIO_MASTER_ENABLE)) 5556 break;
7800 break; 5557 mdelay(100);
7801 else 5558 }
7802 udelay(100);
7803 timeout--;
7804 }
7805
7806 if (!timeout) {
7807 DEBUGOUT("Master requests are pending.\n");
7808 return -E1000_ERR_MASTER_REQUESTS_PENDING;
7809 }
7810 5559
7811 return E1000_SUCCESS; 5560 /* Recommended delay time after link has been lost */
5561 mdelay(1000);
5562
5563 /* Now we will re-enable th transmitter on the PHY */
5564
5565 ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0019);
5566 if (ret_val)
5567 return ret_val;
5568 mdelay(50);
5569 ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xFFF0);
5570 if (ret_val)
5571 return ret_val;
5572 mdelay(50);
5573 ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xFF00);
5574 if (ret_val)
5575 return ret_val;
5576 mdelay(50);
5577 ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0x0000);
5578 if (ret_val)
5579 return ret_val;
5580
5581 ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0000);
5582 if (ret_val)
5583 return ret_val;
5584
5585 /* This loop will early-out if the link condition has been met. */
5586 for (i = PHY_FORCE_TIME; i > 0; i--) {
5587 /* Read the MII Status Register and wait for Link Status bit
5588 * to be set.
5589 */
5590
5591 ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
5592 if (ret_val)
5593 return ret_val;
5594
5595 ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
5596 if (ret_val)
5597 return ret_val;
5598
5599 if (mii_status_reg & MII_SR_LINK_STATUS)
5600 break;
5601 mdelay(100);
5602 }
5603 return E1000_SUCCESS;
7812} 5604}
7813 5605
7814/******************************************************************************* 5606/**
5607 * e1000_get_auto_rd_done
5608 * @hw: Struct containing variables accessed by shared code
7815 * 5609 *
7816 * Check for EEPROM Auto Read bit done. 5610 * Check for EEPROM Auto Read bit done.
7817 *
7818 * hw: Struct containing variables accessed by shared code
7819 *
7820 * returns: - E1000_ERR_RESET if fail to reset MAC 5611 * returns: - E1000_ERR_RESET if fail to reset MAC
7821 * E1000_SUCCESS at any other case. 5612 * E1000_SUCCESS at any other case.
7822 * 5613 */
7823 ******************************************************************************/
7824static s32 e1000_get_auto_rd_done(struct e1000_hw *hw) 5614static s32 e1000_get_auto_rd_done(struct e1000_hw *hw)
7825{ 5615{
7826 s32 timeout = AUTO_READ_DONE_TIMEOUT; 5616 DEBUGFUNC("e1000_get_auto_rd_done");
7827 5617 msleep(5);
7828 DEBUGFUNC("e1000_get_auto_rd_done"); 5618 return E1000_SUCCESS;
7829
7830 switch (hw->mac_type) {
7831 default:
7832 msleep(5);
7833 break;
7834 case e1000_82571:
7835 case e1000_82572:
7836 case e1000_82573:
7837 case e1000_80003es2lan:
7838 case e1000_ich8lan:
7839 while (timeout) {
7840 if (er32(EECD) & E1000_EECD_AUTO_RD)
7841 break;
7842 else msleep(1);
7843 timeout--;
7844 }
7845
7846 if (!timeout) {
7847 DEBUGOUT("Auto read by HW from EEPROM has not completed.\n");
7848 return -E1000_ERR_RESET;
7849 }
7850 break;
7851 }
7852
7853 /* PHY configuration from NVM just starts after EECD_AUTO_RD sets to high.
7854 * Need to wait for PHY configuration completion before accessing NVM
7855 * and PHY. */
7856 if (hw->mac_type == e1000_82573)
7857 msleep(25);
7858
7859 return E1000_SUCCESS;
7860} 5619}
7861 5620
7862/*************************************************************************** 5621/**
7863 * Checks if the PHY configuration is done 5622 * e1000_get_phy_cfg_done
7864 * 5623 * @hw: Struct containing variables accessed by shared code
7865 * hw: Struct containing variables accessed by shared code
7866 * 5624 *
5625 * Checks if the PHY configuration is done
7867 * returns: - E1000_ERR_RESET if fail to reset MAC 5626 * returns: - E1000_ERR_RESET if fail to reset MAC
7868 * E1000_SUCCESS at any other case. 5627 * E1000_SUCCESS at any other case.
7869 * 5628 */
7870 ***************************************************************************/
7871static s32 e1000_get_phy_cfg_done(struct e1000_hw *hw) 5629static s32 e1000_get_phy_cfg_done(struct e1000_hw *hw)
7872{ 5630{
7873 s32 timeout = PHY_CFG_TIMEOUT; 5631 DEBUGFUNC("e1000_get_phy_cfg_done");
7874 u32 cfg_mask = E1000_EEPROM_CFG_DONE; 5632 mdelay(10);
7875 5633 return E1000_SUCCESS;
7876 DEBUGFUNC("e1000_get_phy_cfg_done");
7877
7878 switch (hw->mac_type) {
7879 default:
7880 mdelay(10);
7881 break;
7882 case e1000_80003es2lan:
7883 /* Separate *_CFG_DONE_* bit for each port */
7884 if (er32(STATUS) & E1000_STATUS_FUNC_1)
7885 cfg_mask = E1000_EEPROM_CFG_DONE_PORT_1;
7886 /* Fall Through */
7887 case e1000_82571:
7888 case e1000_82572:
7889 while (timeout) {
7890 if (er32(EEMNGCTL) & cfg_mask)
7891 break;
7892 else
7893 msleep(1);
7894 timeout--;
7895 }
7896 if (!timeout) {
7897 DEBUGOUT("MNG configuration cycle has not completed.\n");
7898 return -E1000_ERR_RESET;
7899 }
7900 break;
7901 }
7902
7903 return E1000_SUCCESS;
7904}
7905
/***************************************************************************
 *
 * Using the combination of SMBI and SWESMBI semaphore bits when resetting
 * adapter or Eeprom access.
 *
 * hw: Struct containing variables accessed by shared code
 *
 * returns: - E1000_ERR_EEPROM if fail to access EEPROM.
 *            E1000_SUCCESS at any other case.
 *
 ***************************************************************************/
static s32 e1000_get_hw_eeprom_semaphore(struct e1000_hw *hw)
{
	s32 timeout;
	u32 swsm;

	DEBUGFUNC("e1000_get_hw_eeprom_semaphore");

	/* Parts without a HW EEPROM semaphore need no arbitration. */
	if (!hw->eeprom_semaphore_present)
		return E1000_SUCCESS;

	if (hw->mac_type == e1000_80003es2lan) {
		/* Get the SW semaphore. */
		if (e1000_get_software_semaphore(hw) != E1000_SUCCESS)
			return -E1000_ERR_EEPROM;
	}

	/* Get the FW semaphore. */
	timeout = hw->eeprom.word_size + 1;
	while (timeout) {
		swsm = er32(SWSM);
		swsm |= E1000_SWSM_SWESMBI;
		ew32(SWSM, swsm);
		/* if we managed to set the bit we got the semaphore. */
		swsm = er32(SWSM);
		if (swsm & E1000_SWSM_SWESMBI)
			break;

		/* Bit did not stick; back off briefly and retry. */
		udelay(50);
		timeout--;
	}

	if (!timeout) {
		/* Release semaphores */
		e1000_put_hw_eeprom_semaphore(hw);
		DEBUGOUT("Driver can't access the Eeprom - SWESMBI bit is set.\n");
		return -E1000_ERR_EEPROM;
	}

	return E1000_SUCCESS;
}
7957
7958/***************************************************************************
7959 * This function clears HW semaphore bits.
7960 *
7961 * hw: Struct containing variables accessed by shared code
7962 *
7963 * returns: - None.
7964 *
7965 ***************************************************************************/
7966static void e1000_put_hw_eeprom_semaphore(struct e1000_hw *hw)
7967{
7968 u32 swsm;
7969
7970 DEBUGFUNC("e1000_put_hw_eeprom_semaphore");
7971
7972 if (!hw->eeprom_semaphore_present)
7973 return;
7974
7975 swsm = er32(SWSM);
7976 if (hw->mac_type == e1000_80003es2lan) {
7977 /* Release both semaphores. */
7978 swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI);
7979 } else
7980 swsm &= ~(E1000_SWSM_SWESMBI);
7981 ew32(SWSM, swsm);
7982}
7983
7984/***************************************************************************
7985 *
7986 * Obtaining software semaphore bit (SMBI) before resetting PHY.
7987 *
7988 * hw: Struct containing variables accessed by shared code
7989 *
7990 * returns: - E1000_ERR_RESET if fail to obtain semaphore.
7991 * E1000_SUCCESS at any other case.
7992 *
7993 ***************************************************************************/
7994static s32 e1000_get_software_semaphore(struct e1000_hw *hw)
7995{
7996 s32 timeout = hw->eeprom.word_size + 1;
7997 u32 swsm;
7998
7999 DEBUGFUNC("e1000_get_software_semaphore");
8000
8001 if (hw->mac_type != e1000_80003es2lan) {
8002 return E1000_SUCCESS;
8003 }
8004
8005 while (timeout) {
8006 swsm = er32(SWSM);
8007 /* If SMBI bit cleared, it is now set and we hold the semaphore */
8008 if (!(swsm & E1000_SWSM_SMBI))
8009 break;
8010 mdelay(1);
8011 timeout--;
8012 }
8013
8014 if (!timeout) {
8015 DEBUGOUT("Driver can't access device - SMBI bit is set.\n");
8016 return -E1000_ERR_RESET;
8017 }
8018
8019 return E1000_SUCCESS;
8020}
8021
8022/***************************************************************************
8023 *
8024 * Release semaphore bit (SMBI).
8025 *
8026 * hw: Struct containing variables accessed by shared code
8027 *
8028 ***************************************************************************/
8029static void e1000_release_software_semaphore(struct e1000_hw *hw)
8030{
8031 u32 swsm;
8032
8033 DEBUGFUNC("e1000_release_software_semaphore");
8034
8035 if (hw->mac_type != e1000_80003es2lan) {
8036 return;
8037 }
8038
8039 swsm = er32(SWSM);
8040 /* Release the SW semaphores.*/
8041 swsm &= ~E1000_SWSM_SMBI;
8042 ew32(SWSM, swsm);
8043}
8044
8045/******************************************************************************
8046 * Checks if PHY reset is blocked due to SOL/IDER session, for example.
8047 * Returning E1000_BLK_PHY_RESET isn't necessarily an error. But it's up to
8048 * the caller to figure out how to deal with it.
8049 *
8050 * hw - Struct containing variables accessed by shared code
8051 *
8052 * returns: - E1000_BLK_PHY_RESET
8053 * E1000_SUCCESS
8054 *
8055 *****************************************************************************/
8056s32 e1000_check_phy_reset_block(struct e1000_hw *hw)
8057{
8058 u32 manc = 0;
8059 u32 fwsm = 0;
8060
8061 if (hw->mac_type == e1000_ich8lan) {
8062 fwsm = er32(FWSM);
8063 return (fwsm & E1000_FWSM_RSPCIPHY) ? E1000_SUCCESS
8064 : E1000_BLK_PHY_RESET;
8065 }
8066
8067 if (hw->mac_type > e1000_82547_rev_2)
8068 manc = er32(MANC);
8069 return (manc & E1000_MANC_BLK_PHY_RST_ON_IDE) ?
8070 E1000_BLK_PHY_RESET : E1000_SUCCESS;
8071}
8072
8073static u8 e1000_arc_subsystem_valid(struct e1000_hw *hw)
8074{
8075 u32 fwsm;
8076
8077 /* On 8257x silicon, registers in the range of 0x8800 - 0x8FFC
8078 * may not be provided a DMA clock when no manageability features are
8079 * enabled. We do not want to perform any reads/writes to these registers
8080 * if this is the case. We read FWSM to determine the manageability mode.
8081 */
8082 switch (hw->mac_type) {
8083 case e1000_82571:
8084 case e1000_82572:
8085 case e1000_82573:
8086 case e1000_80003es2lan:
8087 fwsm = er32(FWSM);
8088 if ((fwsm & E1000_FWSM_MODE_MASK) != 0)
8089 return true;
8090 break;
8091 case e1000_ich8lan:
8092 return true;
8093 default:
8094 break;
8095 }
8096 return false;
8097}
8098
8099
/******************************************************************************
 * Configure PCI-Ex no-snoop
 *
 * hw - Struct containing variables accessed by shared code.
 * no_snoop - Bitmap of no-snoop events.
 *
 * returns: E1000_SUCCESS
 *
 *****************************************************************************/
static s32 e1000_set_pci_ex_no_snoop(struct e1000_hw *hw, u32 no_snoop)
{
	u32 gcr_reg = 0;

	DEBUGFUNC("e1000_set_pci_ex_no_snoop");

	/* Bus type may not have been probed yet; do it now so the
	 * PCI-Express check below is meaningful. */
	if (hw->bus_type == e1000_bus_type_unknown)
		e1000_get_bus_info(hw);

	/* No-snoop configuration only applies to PCI-Express parts. */
	if (hw->bus_type != e1000_bus_type_pci_express)
		return E1000_SUCCESS;

	if (no_snoop) {
		/* Replace the current no-snoop event bits in GCR with the
		 * caller-supplied bitmap. */
		gcr_reg = er32(GCR);
		gcr_reg &= ~(PCI_EX_NO_SNOOP_ALL);
		gcr_reg |= no_snoop;
		ew32(GCR, gcr_reg);
	}
	if (hw->mac_type == e1000_ich8lan) {
		u32 ctrl_ext;

		/* ICH8 (82566): force snooping for all events and set the
		 * relaxed-ordering disable bit (E1000_CTRL_EXT_RO_DIS). */
		ew32(GCR, PCI_EX_82566_SNOOP_ALL);

		ctrl_ext = er32(CTRL_EXT);
		ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
		ew32(CTRL_EXT, ctrl_ext);
	}

	return E1000_SUCCESS;
}
8139
/***************************************************************************
 *
 * Get software semaphore FLAG bit (SWFLAG).
 * SWFLAG is used to synchronize the access to all shared resource between
 * SW, FW and HW.
 *
 * hw: Struct containing variables accessed by shared code
 *
 * returns: - E1000_ERR_CONFIG if the flag cannot be acquired in time.
 *            E1000_SUCCESS at any other case.
 *
 ***************************************************************************/
static s32 e1000_get_software_flag(struct e1000_hw *hw)
{
	s32 timeout = PHY_CFG_TIMEOUT;
	u32 extcnf_ctrl;

	DEBUGFUNC("e1000_get_software_flag");

	/* Only ICH8 implements the EXTCNF_CTRL SWFLAG handshake; everything
	 * else needs no arbitration and succeeds immediately. */
	if (hw->mac_type == e1000_ich8lan) {
		while (timeout) {
			/* Request ownership by setting SWFLAG... */
			extcnf_ctrl = er32(EXTCNF_CTRL);
			extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
			ew32(EXTCNF_CTRL, extcnf_ctrl);

			/* ...then read it back: the bit only sticks once
			 * FW/HW have released the resource. */
			extcnf_ctrl = er32(EXTCNF_CTRL);
			if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
				break;
			mdelay(1);
			timeout--;
		}

		if (!timeout) {
			DEBUGOUT("FW or HW locks the resource too long.\n");
			return -E1000_ERR_CONFIG;
		}
	}

	return E1000_SUCCESS;
}
8177
8178/***************************************************************************
8179 *
8180 * Release software semaphore FLAG bit (SWFLAG).
8181 * SWFLAG is used to synchronize the access to all shared resource between
8182 * SW, FW and HW.
8183 *
8184 * hw: Struct containing variables accessed by shared code
8185 *
8186 ***************************************************************************/
8187static void e1000_release_software_flag(struct e1000_hw *hw)
8188{
8189 u32 extcnf_ctrl;
8190
8191 DEBUGFUNC("e1000_release_software_flag");
8192
8193 if (hw->mac_type == e1000_ich8lan) {
8194 extcnf_ctrl= er32(EXTCNF_CTRL);
8195 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
8196 ew32(EXTCNF_CTRL, extcnf_ctrl);
8197 }
8198
8199 return;
8200}
8201
/******************************************************************************
 * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
 * register.
 *
 * hw - Struct containing variables accessed by shared code
 * offset - offset of word in the EEPROM to read
 * data - word read from the EEPROM
 * words - number of words to read
 *
 * returns: E1000_SUCCESS, or the first error from the software flag
 *          acquisition or the underlying flash-word read.
 *****************************************************************************/
static s32 e1000_read_eeprom_ich8(struct e1000_hw *hw, u16 offset, u16 words,
				  u16 *data)
{
	s32 error = E1000_SUCCESS;
	u32 flash_bank = 0;
	u32 act_offset = 0;
	u32 bank_offset = 0;
	u16 word = 0;
	u16 i = 0;

	/* We need to know which is the valid flash bank.  In the event
	 * that we didn't allocate eeprom_shadow_ram, we may not be
	 * managing flash_bank.  So it cannot be trusted and needs
	 * to be updated with each read.
	 */
	/* Value of bit 22 corresponds to the flash bank we're on. */
	flash_bank = (er32(EECD) & E1000_EECD_SEC1VAL) ? 1 : 0;

	/* Adjust offset appropriately if we're on bank 1 - adjust for word size */
	bank_offset = flash_bank * (hw->flash_bank_size * 2);

	/* Serialize flash access against FW/HW before touching the NVM. */
	error = e1000_get_software_flag(hw);
	if (error != E1000_SUCCESS)
		return error;

	for (i = 0; i < words; i++) {
		/* A pending (modified) shadow-RAM word takes precedence over
		 * the flash copy, which may be stale until committed. */
		if (hw->eeprom_shadow_ram != NULL &&
		    hw->eeprom_shadow_ram[offset+i].modified) {
			data[i] = hw->eeprom_shadow_ram[offset+i].eeprom_word;
		} else {
			/* The NVM part needs a byte offset, hence * 2 */
			act_offset = bank_offset + ((offset + i) * 2);
			error = e1000_read_ich8_word(hw, act_offset, &word);
			if (error != E1000_SUCCESS)
				break;
			data[i] = word;
		}
	}

	e1000_release_software_flag(hw);

	return error;
}
8254
/******************************************************************************
 * Writes a 16 bit word or words to the EEPROM using the ICH8's flash access
 * register.  Actually, writes are written to the shadow ram cache in the hw
 * structure hw->e1000_shadow_ram.  e1000_commit_shadow_ram flushes this to
 * the NVM, which occurs when the NVM checksum is updated.
 *
 * hw - Struct containing variables accessed by shared code
 * offset - offset of word in the EEPROM to write
 * words - number of words to write
 * data - words to write to the EEPROM
 *
 * returns: E1000_SUCCESS, or -E1000_ERR_EEPROM when no shadow RAM is
 *          allocated or a word falls outside the shadow RAM bounds.
 *****************************************************************************/
static s32 e1000_write_eeprom_ich8(struct e1000_hw *hw, u16 offset, u16 words,
				   u16 *data)
{
	u32 i = 0;
	s32 error = E1000_SUCCESS;

	/* Serialize access against FW/HW even though only the cache is
	 * written here, keeping the flag discipline uniform. */
	error = e1000_get_software_flag(hw);
	if (error != E1000_SUCCESS)
		return error;

	/* A driver can write to the NVM only if it has eeprom_shadow_ram
	 * allocated.  Subsequent reads to the modified words are read from
	 * this cached structure as well.  Writes will only go into this
	 * cached structure unless it's followed by a call to
	 * e1000_update_eeprom_checksum() where it will commit the changes
	 * and clear the "modified" field.
	 */
	if (hw->eeprom_shadow_ram != NULL) {
		for (i = 0; i < words; i++) {
			if ((offset + i) < E1000_SHADOW_RAM_WORDS) {
				hw->eeprom_shadow_ram[offset+i].modified = true;
				hw->eeprom_shadow_ram[offset+i].eeprom_word = data[i];
			} else {
				/* Word lies beyond the shadow RAM; abort. */
				error = -E1000_ERR_EEPROM;
				break;
			}
		}
	} else {
		/* Drivers have the option to not allocate eeprom_shadow_ram as long
		 * as they don't perform any NVM writes.  An attempt in doing so
		 * will result in this error.
		 */
		error = -E1000_ERR_EEPROM;
	}

	e1000_release_software_flag(hw);

	return error;
}
8305
/******************************************************************************
 * This function does initial flash setup so that a new read/write/erase cycle
 * can be started.
 *
 * hw - The pointer to the hw structure
 *
 * Returns E1000_SUCCESS when the controller is ready to start a new cycle,
 * or E1000_ERR_EEPROM (positive, as used by the ICH8 helpers in this file)
 * when the flash descriptor is invalid or a prior cycle never completes.
 ****************************************************************************/
static s32 e1000_ich8_cycle_init(struct e1000_hw *hw)
{
	union ich8_hws_flash_status hsfsts;
	s32 error = E1000_ERR_EEPROM;
	s32 i = 0;

	DEBUGFUNC("e1000_ich8_cycle_init");

	hsfsts.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFSTS);

	/* Check the Flash Descriptor Valid bit in the HW status register;
	 * hardware sequencing (used below) requires a valid descriptor. */
	if (hsfsts.hsf_status.fldesvalid == 0) {
		DEBUGOUT("Flash descriptor invalid. SW Sequencing must be used.");
		return error;
	}

	/* Clear FCERR (flash cycle error) and DAEL (direct-access error log)
	 * in the HW status register; both are write-1-to-clear. */
	hsfsts.hsf_status.flcerr = 1;
	hsfsts.hsf_status.dael = 1;

	E1000_WRITE_ICH_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);

	/* Either we should have a hardware SPI cycle in progress bit to check
	 * against, in order to start a new cycle or FDONE bit should be changed
	 * in the hardware so that it is 1 after hardware reset, which can then be
	 * used as an indication whether a cycle is in progress or has been
	 * completed .. we should also have some software semaphore mechanism to
	 * guard FDONE or the cycle in progress bit so that two threads' access to
	 * those bits can be serialized, or a way so that 2 threads don't
	 * start the cycle at the same time */

	if (hsfsts.hsf_status.flcinprog == 0) {
		/* There is no cycle running at present, so we can start a cycle */
		/* Begin by setting Flash Cycle Done. */
		hsfsts.hsf_status.flcdone = 1;
		E1000_WRITE_ICH_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
		error = E1000_SUCCESS;
	} else {
		/* Otherwise poll for some time so the current cycle has a chance
		 * to end before giving up. */
		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
			hsfsts.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
			if (hsfsts.hsf_status.flcinprog == 0) {
				error = E1000_SUCCESS;
				break;
			}
			udelay(1);
		}
		if (error == E1000_SUCCESS) {
			/* The previous cycle finished within the wait window;
			 * now set Flash Cycle Done to arm the next one. */
			hsfsts.hsf_status.flcdone = 1;
			E1000_WRITE_ICH_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
		} else {
			DEBUGOUT("Flash controller busy, cannot get access");
		}
	}
	return error;
}
8372
8373/******************************************************************************
8374 * This function starts a flash cycle and waits for its completion
8375 *
8376 * hw - The pointer to the hw structure
8377 ****************************************************************************/
8378static s32 e1000_ich8_flash_cycle(struct e1000_hw *hw, u32 timeout)
8379{
8380 union ich8_hws_flash_ctrl hsflctl;
8381 union ich8_hws_flash_status hsfsts;
8382 s32 error = E1000_ERR_EEPROM;
8383 u32 i = 0;
8384
8385 /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
8386 hsflctl.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
8387 hsflctl.hsf_ctrl.flcgo = 1;
8388 E1000_WRITE_ICH_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
8389
8390 /* wait till FDONE bit is set to 1 */
8391 do {
8392 hsfsts.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
8393 if (hsfsts.hsf_status.flcdone == 1)
8394 break;
8395 udelay(1);
8396 i++;
8397 } while (i < timeout);
8398 if (hsfsts.hsf_status.flcdone == 1 && hsfsts.hsf_status.flcerr == 0) {
8399 error = E1000_SUCCESS;
8400 }
8401 return error;
8402}
8403
/******************************************************************************
 * Reads a byte or word from the NVM using the ICH8 flash access registers.
 *
 * hw - The pointer to the hw structure
 * index - The index of the byte or word to read.
 * size - Size of data to read, 1=byte 2=word
 * data - Pointer to the word to store the value read.
 *
 * Returns E1000_SUCCESS when a flash cycle completed and *data is valid,
 * otherwise a (negative or positive) error code from the cycle helpers.
 *****************************************************************************/
static s32 e1000_read_ich8_data(struct e1000_hw *hw, u32 index, u32 size,
				u16 *data)
{
	union ich8_hws_flash_status hsfsts;
	union ich8_hws_flash_ctrl hsflctl;
	u32 flash_linear_address;
	u32 flash_data = 0;
	s32 error = -E1000_ERR_EEPROM;
	s32 count = 0;

	DEBUGFUNC("e1000_read_ich8_data");

	/* Reject unsupported sizes, a NULL output pointer, and indices
	 * outside the 24-bit flash linear address space. */
	if (size < 1 || size > 2 || data == NULL ||
	    index > ICH_FLASH_LINEAR_ADDR_MASK)
		return error;

	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
			       hw->flash_base_addr;

	do {
		udelay(1);
		/* Prepare the controller for a new cycle (clears stale
		 * error bits, waits out any in-progress cycle). */
		error = e1000_ich8_cycle_init(hw);
		if (error != E1000_SUCCESS)
			break;

		hsflctl.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
		/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
		hsflctl.hsf_ctrl.fldbcount = size - 1;
		hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;
		E1000_WRITE_ICH_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);

		/* Write the last 24 bits of index into the Flash Linear
		 * address field in Flash Address */
		/* TODO: TBD maybe check the index against the size of flash */

		E1000_WRITE_ICH_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_address);

		error = e1000_ich8_flash_cycle(hw, ICH_FLASH_COMMAND_TIMEOUT);

		/* Check if FCERR is set to 1; if so, clear it and retry the
		 * whole sequence a few more times, else read in (shift in)
		 * Flash Data0 — least significant byte first. */
		if (error == E1000_SUCCESS) {
			flash_data = E1000_READ_ICH_FLASH_REG(hw, ICH_FLASH_FDATA0);
			if (size == 1) {
				*data = (u8)(flash_data & 0x000000FF);
			} else if (size == 2) {
				*data = (u16)(flash_data & 0x0000FFFF);
			}
			break;
		} else {
			/* If we've gotten here, then things are probably completely hosed,
			 * but if the error condition is detected, it won't hurt to give
			 * it another try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
			 */
			hsfsts.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
			if (hsfsts.hsf_status.flcerr == 1) {
				/* Cycle error: repeat for some time before giving up. */
				continue;
			} else if (hsfsts.hsf_status.flcdone == 0) {
				DEBUGOUT("Timeout error - flash cycle did not complete.");
				break;
			}
		}
	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);

	return error;
}
8481
8482/******************************************************************************
8483 * Writes One /two bytes to the NVM using the ICH8 flash access registers.
8484 *
8485 * hw - The pointer to the hw structure
8486 * index - The index of the byte/word to read.
8487 * size - Size of data to read, 1=byte 2=word
8488 * data - The byte(s) to write to the NVM.
8489 *****************************************************************************/
8490static s32 e1000_write_ich8_data(struct e1000_hw *hw, u32 index, u32 size,
8491 u16 data)
8492{
8493 union ich8_hws_flash_status hsfsts;
8494 union ich8_hws_flash_ctrl hsflctl;
8495 u32 flash_linear_address;
8496 u32 flash_data = 0;
8497 s32 error = -E1000_ERR_EEPROM;
8498 s32 count = 0;
8499
8500 DEBUGFUNC("e1000_write_ich8_data");
8501
8502 if (size < 1 || size > 2 || data > size * 0xff ||
8503 index > ICH_FLASH_LINEAR_ADDR_MASK)
8504 return error;
8505
8506 flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
8507 hw->flash_base_addr;
8508
8509 do {
8510 udelay(1);
8511 /* Steps */
8512 error = e1000_ich8_cycle_init(hw);
8513 if (error != E1000_SUCCESS)
8514 break;
8515
8516 hsflctl.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
8517 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
8518 hsflctl.hsf_ctrl.fldbcount = size -1;
8519 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
8520 E1000_WRITE_ICH_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
8521
8522 /* Write the last 24 bits of index into Flash Linear address field in
8523 * Flash Address */
8524 E1000_WRITE_ICH_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_address);
8525
8526 if (size == 1)
8527 flash_data = (u32)data & 0x00FF;
8528 else
8529 flash_data = (u32)data;
8530
8531 E1000_WRITE_ICH_FLASH_REG(hw, ICH_FLASH_FDATA0, flash_data);
8532
8533 /* check if FCERR is set to 1 , if set to 1, clear it and try the whole
8534 * sequence a few more times else done */
8535 error = e1000_ich8_flash_cycle(hw, ICH_FLASH_COMMAND_TIMEOUT);
8536 if (error == E1000_SUCCESS) {
8537 break;
8538 } else {
8539 /* If we're here, then things are most likely completely hosed,
8540 * but if the error condition is detected, it won't hurt to give
8541 * it another try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
8542 */
8543 hsfsts.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
8544 if (hsfsts.hsf_status.flcerr == 1) {
8545 /* Repeat for some time before giving up. */
8546 continue;
8547 } else if (hsfsts.hsf_status.flcdone == 0) {
8548 DEBUGOUT("Timeout error - flash cycle did not complete.");
8549 break;
8550 }
8551 }
8552 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
8553
8554 return error;
8555}
8556
8557/******************************************************************************
8558 * Reads a single byte from the NVM using the ICH8 flash access registers.
8559 *
8560 * hw - pointer to e1000_hw structure
8561 * index - The index of the byte to read.
8562 * data - Pointer to a byte to store the value read.
8563 *****************************************************************************/
8564static s32 e1000_read_ich8_byte(struct e1000_hw *hw, u32 index, u8 *data)
8565{
8566 s32 status = E1000_SUCCESS;
8567 u16 word = 0;
8568
8569 status = e1000_read_ich8_data(hw, index, 1, &word);
8570 if (status == E1000_SUCCESS) {
8571 *data = (u8)word;
8572 }
8573
8574 return status;
8575}
8576
/******************************************************************************
 * Writes a single byte to the NVM using the ICH8 flash access registers,
 * retrying on failure before giving up.
 *
 * NOTE(review): despite this helper's historical description mentioning
 * read-back verification, the code below only retries the write when
 * e1000_write_ich8_byte() reports an error — it never reads the byte back
 * to compare.  Confirm whether read-back verification was intended.
 *
 * hw - pointer to e1000_hw structure
 * index - The index of the byte to write.
 * byte - The byte to write to the NVM.
 *****************************************************************************/
static s32 e1000_verify_write_ich8_byte(struct e1000_hw *hw, u32 index, u8 byte)
{
	s32 error = E1000_SUCCESS;
	s32 program_retries = 0;

	DEBUGOUT2("Byte := %2.2X Offset := %d\n", byte, index);

	/* First attempt; on failure, retry up to 100 times below. */
	error = e1000_write_ich8_byte(hw, index, byte);

	if (error != E1000_SUCCESS) {
		for (program_retries = 0; program_retries < 100; program_retries++) {
			DEBUGOUT2("Retrying \t Byte := %2.2X Offset := %d\n", byte, index);
			error = e1000_write_ich8_byte(hw, index, byte);
			udelay(100);
			if (error == E1000_SUCCESS)
				break;
		}
	}

	/* All retries exhausted.
	 * NOTE(review): this reports a positive E1000_ERR_EEPROM while the
	 * underlying write path uses -E1000_ERR_EEPROM — confirm callers only
	 * test against E1000_SUCCESS. */
	if (program_retries == 100)
		error = E1000_ERR_EEPROM;

	return error;
}
8610
8611/******************************************************************************
8612 * Writes a single byte to the NVM using the ICH8 flash access registers.
8613 *
8614 * hw - pointer to e1000_hw structure
8615 * index - The index of the byte to read.
8616 * data - The byte to write to the NVM.
8617 *****************************************************************************/
8618static s32 e1000_write_ich8_byte(struct e1000_hw *hw, u32 index, u8 data)
8619{
8620 s32 status = E1000_SUCCESS;
8621 u16 word = (u16)data;
8622
8623 status = e1000_write_ich8_data(hw, index, 1, word);
8624
8625 return status;
8626}
8627
8628/******************************************************************************
8629 * Reads a word from the NVM using the ICH8 flash access registers.
8630 *
8631 * hw - pointer to e1000_hw structure
8632 * index - The starting byte index of the word to read.
8633 * data - Pointer to a word to store the value read.
8634 *****************************************************************************/
8635static s32 e1000_read_ich8_word(struct e1000_hw *hw, u32 index, u16 *data)
8636{
8637 s32 status = E1000_SUCCESS;
8638 status = e1000_read_ich8_data(hw, index, 2, data);
8639 return status;
8640}
8641
/******************************************************************************
 * Erases the bank specified. Each bank may be a 4, 8 or 64k block. Banks are 0
 * based.
 *
 * hw - pointer to e1000_hw structure
 * bank - 0 for first bank, 1 for second bank
 *
 * Note that this function may actually erase as much as 8 or 64 KBytes. The
 * amount of NVM used in each bank is a *minimum* of 4 KBytes, but in fact the
 * bank size may be 4, 8 or 64 KBytes
 *****************************************************************************/
static s32 e1000_erase_ich8_4k_segment(struct e1000_hw *hw, u32 bank)
{
	union ich8_hws_flash_status hsfsts;
	union ich8_hws_flash_ctrl hsflctl;
	u32 flash_linear_address;
	s32 count = 0;
	s32 error = E1000_ERR_EEPROM;
	s32 iteration;
	s32 sub_sector_size = 0;
	s32 bank_size;
	s32 j = 0;
	s32 error_flag = 0;

	hsfsts.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFSTS);

	/* Determine HW sector size: read the BERASE bits of the HW flash
	 * status register.
	 * 00: The HW sector is 256 bytes, hence we need to erase 16
	 *     consecutive sectors.  The start index for the nth HW sector is
	 *     bank * 4096 + n * 256.
	 * 01: The HW sector is 4K bytes, hence we need to erase 1 sector.
	 *     The start index for the nth HW sector is bank * 4096.
	 * 10: The HW sector is 8K bytes.
	 * 11: The HW sector size is 64K bytes.
	 * NOTE(review): the 8K case (berasesz == 0x2) is documented above but
	 * falls through to the error return below — confirm this is
	 * intentional for the supported parts. */
	if (hsfsts.hsf_status.berasesz == 0x0) {
		/* HW sector size 256: erase 16 sub-sectors per 4K bank */
		sub_sector_size = ICH_FLASH_SEG_SIZE_256;
		bank_size = ICH_FLASH_SECTOR_SIZE;
		iteration = ICH_FLASH_SECTOR_SIZE / ICH_FLASH_SEG_SIZE_256;
	} else if (hsfsts.hsf_status.berasesz == 0x1) {
		bank_size = ICH_FLASH_SEG_SIZE_4K;
		iteration = 1;
	} else if (hsfsts.hsf_status.berasesz == 0x3) {
		bank_size = ICH_FLASH_SEG_SIZE_64K;
		iteration = 1;
	} else {
		return error;
	}

	for (j = 0; j < iteration; j++) {
		do {
			count++;
			/* Prepare the controller for a new cycle (clears
			 * stale error bits, waits out any running cycle). */
			error = e1000_ich8_cycle_init(hw);
			if (error != E1000_SUCCESS) {
				error_flag = 1;
				break;
			}

			/* Write value 11 (Block Erase) into the Flash Cycle
			 * field of HW Flash Control */
			hsflctl.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
			hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE;
			E1000_WRITE_ICH_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);

			/* Write the last 24 bits of an index within the block into
			 * the Flash Linear address field in Flash Address. This is
			 * computed from the on-chip erase sector size and
			 * the software bank size (4, 8 or 64 KBytes). */
			flash_linear_address = bank * bank_size + j * sub_sector_size;
			flash_linear_address += hw->flash_base_addr;
			flash_linear_address &= ICH_FLASH_LINEAR_ADDR_MASK;

			E1000_WRITE_ICH_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_address);

			error = e1000_ich8_flash_cycle(hw, ICH_FLASH_ERASE_TIMEOUT);
			/* Check if FCERR is set to 1.  If 1, clear it and retry the
			 * whole sequence a few more times, else done. */
			if (error == E1000_SUCCESS) {
				break;
			} else {
				hsfsts.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
				if (hsfsts.hsf_status.flcerr == 1) {
					/* Cycle error: retry before giving up */
					continue;
				} else if (hsfsts.hsf_status.flcdone == 0) {
					/* Neither error nor done: the cycle
					 * timed out — give up on this bank. */
					error_flag = 1;
					break;
				}
			}
		} while ((count < ICH_FLASH_CYCLE_REPEAT_COUNT) && !error_flag);
		if (error_flag == 1)
			break;
	}
	if (error_flag != 1)
		error = E1000_SUCCESS;
	return error;
}
8741
8742static s32 e1000_init_lcd_from_nvm_config_region(struct e1000_hw *hw,
8743 u32 cnf_base_addr,
8744 u32 cnf_size)
8745{
8746 u32 ret_val = E1000_SUCCESS;
8747 u16 word_addr, reg_data, reg_addr;
8748 u16 i;
8749
8750 /* cnf_base_addr is in DWORD */
8751 word_addr = (u16)(cnf_base_addr << 1);
8752
8753 /* cnf_size is returned in size of dwords */
8754 for (i = 0; i < cnf_size; i++) {
8755 ret_val = e1000_read_eeprom(hw, (word_addr + i*2), 1, &reg_data);
8756 if (ret_val)
8757 return ret_val;
8758
8759 ret_val = e1000_read_eeprom(hw, (word_addr + i*2 + 1), 1, &reg_addr);
8760 if (ret_val)
8761 return ret_val;
8762
8763 ret_val = e1000_get_software_flag(hw);
8764 if (ret_val != E1000_SUCCESS)
8765 return ret_val;
8766
8767 ret_val = e1000_write_phy_reg_ex(hw, (u32)reg_addr, reg_data);
8768
8769 e1000_release_software_flag(hw);
8770 }
8771
8772 return ret_val;
8773}
8774
8775
/******************************************************************************
 * This function initializes the PHY from the NVM on ICH8 platforms. This
 * is needed due to an issue where the NVM configuration is not properly
 * autoloaded after power transitions. Therefore, after each PHY reset, we
 * will load the configuration data out of the NVM manually.
 *
 * hw: Struct containing variables accessed by shared code
 *
 * Returns E1000_SUCCESS (also when no SW configuration is required), or an
 * error propagated from e1000_init_lcd_from_nvm_config_region().
 *****************************************************************************/
static s32 e1000_init_lcd_from_nvm(struct e1000_hw *hw)
{
	u32 reg_data, cnf_base_addr, cnf_size, ret_val, loop;

	/* Only the IGP3 PHY needs this workaround */
	if (hw->phy_type != e1000_phy_igp_3)
		return E1000_SUCCESS;

	/* Check if SW needs to configure the PHY */
	reg_data = er32(FEXTNVM);
	if (!(reg_data & FEXTNVM_SW_CONFIG))
		return E1000_SUCCESS;

	/* Wait for basic configuration to complete before proceeding
	 * (poll LAN_INIT_DONE up to 50 * 100us; proceed either way) */
	loop = 0;
	do {
		reg_data = er32(STATUS) & E1000_STATUS_LAN_INIT_DONE;
		udelay(100);
		loop++;
	} while ((!reg_data) && (loop < 50));

	/* Clear the Init Done bit for the next init event */
	reg_data = er32(STATUS);
	reg_data &= ~E1000_STATUS_LAN_INIT_DONE;
	ew32(STATUS, reg_data);

	/* Make sure HW does not configure LCD from PHY extended configuration
	 * before SW configuration */
	reg_data = er32(EXTCNF_CTRL);
	if ((reg_data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE) == 0x0000) {
		/* Extract the extended-config region length (in DWORDs) */
		reg_data = er32(EXTCNF_SIZE);
		cnf_size = reg_data & E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH;
		cnf_size >>= 16;
		if (cnf_size) {
			reg_data = er32(EXTCNF_CTRL);
			cnf_base_addr = reg_data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER;
			/* cnf_base_addr is in DWORDs */
			cnf_base_addr >>= 16;

			/* Configure LCD from extended configuration region. */
			ret_val = e1000_init_lcd_from_nvm_config_region(hw, cnf_base_addr,
									cnf_size);
			if (ret_val)
				return ret_val;
		}
	}

	return E1000_SUCCESS;
}
8832
diff --git a/drivers/net/e1000/e1000_hw.h b/drivers/net/e1000/e1000_hw.h
index a8866bdbb671..9acfddb0dafb 100644
--- a/drivers/net/e1000/e1000_hw.h
+++ b/drivers/net/e1000/e1000_hw.h
@@ -35,7 +35,6 @@
35 35
36#include "e1000_osdep.h" 36#include "e1000_osdep.h"
37 37
38
39/* Forward declarations of structures used by the shared code */ 38/* Forward declarations of structures used by the shared code */
40struct e1000_hw; 39struct e1000_hw;
41struct e1000_hw_stats; 40struct e1000_hw_stats;
@@ -43,252 +42,231 @@ struct e1000_hw_stats;
43/* Enumerated types specific to the e1000 hardware */ 42/* Enumerated types specific to the e1000 hardware */
44/* Media Access Controlers */ 43/* Media Access Controlers */
45typedef enum { 44typedef enum {
46 e1000_undefined = 0, 45 e1000_undefined = 0,
47 e1000_82542_rev2_0, 46 e1000_82542_rev2_0,
48 e1000_82542_rev2_1, 47 e1000_82542_rev2_1,
49 e1000_82543, 48 e1000_82543,
50 e1000_82544, 49 e1000_82544,
51 e1000_82540, 50 e1000_82540,
52 e1000_82545, 51 e1000_82545,
53 e1000_82545_rev_3, 52 e1000_82545_rev_3,
54 e1000_82546, 53 e1000_82546,
55 e1000_82546_rev_3, 54 e1000_82546_rev_3,
56 e1000_82541, 55 e1000_82541,
57 e1000_82541_rev_2, 56 e1000_82541_rev_2,
58 e1000_82547, 57 e1000_82547,
59 e1000_82547_rev_2, 58 e1000_82547_rev_2,
60 e1000_82571, 59 e1000_num_macs
61 e1000_82572,
62 e1000_82573,
63 e1000_80003es2lan,
64 e1000_ich8lan,
65 e1000_num_macs
66} e1000_mac_type; 60} e1000_mac_type;
67 61
68typedef enum { 62typedef enum {
69 e1000_eeprom_uninitialized = 0, 63 e1000_eeprom_uninitialized = 0,
70 e1000_eeprom_spi, 64 e1000_eeprom_spi,
71 e1000_eeprom_microwire, 65 e1000_eeprom_microwire,
72 e1000_eeprom_flash, 66 e1000_eeprom_flash,
73 e1000_eeprom_ich8, 67 e1000_eeprom_none, /* No NVM support */
74 e1000_eeprom_none, /* No NVM support */ 68 e1000_num_eeprom_types
75 e1000_num_eeprom_types
76} e1000_eeprom_type; 69} e1000_eeprom_type;
77 70
78/* Media Types */ 71/* Media Types */
79typedef enum { 72typedef enum {
80 e1000_media_type_copper = 0, 73 e1000_media_type_copper = 0,
81 e1000_media_type_fiber = 1, 74 e1000_media_type_fiber = 1,
82 e1000_media_type_internal_serdes = 2, 75 e1000_media_type_internal_serdes = 2,
83 e1000_num_media_types 76 e1000_num_media_types
84} e1000_media_type; 77} e1000_media_type;
85 78
86typedef enum { 79typedef enum {
87 e1000_10_half = 0, 80 e1000_10_half = 0,
88 e1000_10_full = 1, 81 e1000_10_full = 1,
89 e1000_100_half = 2, 82 e1000_100_half = 2,
90 e1000_100_full = 3 83 e1000_100_full = 3
91} e1000_speed_duplex_type; 84} e1000_speed_duplex_type;
92 85
93/* Flow Control Settings */ 86/* Flow Control Settings */
94typedef enum { 87typedef enum {
95 E1000_FC_NONE = 0, 88 E1000_FC_NONE = 0,
96 E1000_FC_RX_PAUSE = 1, 89 E1000_FC_RX_PAUSE = 1,
97 E1000_FC_TX_PAUSE = 2, 90 E1000_FC_TX_PAUSE = 2,
98 E1000_FC_FULL = 3, 91 E1000_FC_FULL = 3,
99 E1000_FC_DEFAULT = 0xFF 92 E1000_FC_DEFAULT = 0xFF
100} e1000_fc_type; 93} e1000_fc_type;
101 94
102struct e1000_shadow_ram { 95struct e1000_shadow_ram {
103 u16 eeprom_word; 96 u16 eeprom_word;
104 bool modified; 97 bool modified;
105}; 98};
106 99
107/* PCI bus types */ 100/* PCI bus types */
108typedef enum { 101typedef enum {
109 e1000_bus_type_unknown = 0, 102 e1000_bus_type_unknown = 0,
110 e1000_bus_type_pci, 103 e1000_bus_type_pci,
111 e1000_bus_type_pcix, 104 e1000_bus_type_pcix,
112 e1000_bus_type_pci_express, 105 e1000_bus_type_reserved
113 e1000_bus_type_reserved
114} e1000_bus_type; 106} e1000_bus_type;
115 107
116/* PCI bus speeds */ 108/* PCI bus speeds */
117typedef enum { 109typedef enum {
118 e1000_bus_speed_unknown = 0, 110 e1000_bus_speed_unknown = 0,
119 e1000_bus_speed_33, 111 e1000_bus_speed_33,
120 e1000_bus_speed_66, 112 e1000_bus_speed_66,
121 e1000_bus_speed_100, 113 e1000_bus_speed_100,
122 e1000_bus_speed_120, 114 e1000_bus_speed_120,
123 e1000_bus_speed_133, 115 e1000_bus_speed_133,
124 e1000_bus_speed_2500, 116 e1000_bus_speed_reserved
125 e1000_bus_speed_reserved
126} e1000_bus_speed; 117} e1000_bus_speed;
127 118
128/* PCI bus widths */ 119/* PCI bus widths */
129typedef enum { 120typedef enum {
130 e1000_bus_width_unknown = 0, 121 e1000_bus_width_unknown = 0,
131 /* These PCIe values should literally match the possible return values 122 e1000_bus_width_32,
132 * from config space */ 123 e1000_bus_width_64,
133 e1000_bus_width_pciex_1 = 1, 124 e1000_bus_width_reserved
134 e1000_bus_width_pciex_2 = 2,
135 e1000_bus_width_pciex_4 = 4,
136 e1000_bus_width_32,
137 e1000_bus_width_64,
138 e1000_bus_width_reserved
139} e1000_bus_width; 125} e1000_bus_width;
140 126
141/* PHY status info structure and supporting enums */ 127/* PHY status info structure and supporting enums */
142typedef enum { 128typedef enum {
143 e1000_cable_length_50 = 0, 129 e1000_cable_length_50 = 0,
144 e1000_cable_length_50_80, 130 e1000_cable_length_50_80,
145 e1000_cable_length_80_110, 131 e1000_cable_length_80_110,
146 e1000_cable_length_110_140, 132 e1000_cable_length_110_140,
147 e1000_cable_length_140, 133 e1000_cable_length_140,
148 e1000_cable_length_undefined = 0xFF 134 e1000_cable_length_undefined = 0xFF
149} e1000_cable_length; 135} e1000_cable_length;
150 136
151typedef enum { 137typedef enum {
152 e1000_gg_cable_length_60 = 0, 138 e1000_gg_cable_length_60 = 0,
153 e1000_gg_cable_length_60_115 = 1, 139 e1000_gg_cable_length_60_115 = 1,
154 e1000_gg_cable_length_115_150 = 2, 140 e1000_gg_cable_length_115_150 = 2,
155 e1000_gg_cable_length_150 = 4 141 e1000_gg_cable_length_150 = 4
156} e1000_gg_cable_length; 142} e1000_gg_cable_length;
157 143
158typedef enum { 144typedef enum {
159 e1000_igp_cable_length_10 = 10, 145 e1000_igp_cable_length_10 = 10,
160 e1000_igp_cable_length_20 = 20, 146 e1000_igp_cable_length_20 = 20,
161 e1000_igp_cable_length_30 = 30, 147 e1000_igp_cable_length_30 = 30,
162 e1000_igp_cable_length_40 = 40, 148 e1000_igp_cable_length_40 = 40,
163 e1000_igp_cable_length_50 = 50, 149 e1000_igp_cable_length_50 = 50,
164 e1000_igp_cable_length_60 = 60, 150 e1000_igp_cable_length_60 = 60,
165 e1000_igp_cable_length_70 = 70, 151 e1000_igp_cable_length_70 = 70,
166 e1000_igp_cable_length_80 = 80, 152 e1000_igp_cable_length_80 = 80,
167 e1000_igp_cable_length_90 = 90, 153 e1000_igp_cable_length_90 = 90,
168 e1000_igp_cable_length_100 = 100, 154 e1000_igp_cable_length_100 = 100,
169 e1000_igp_cable_length_110 = 110, 155 e1000_igp_cable_length_110 = 110,
170 e1000_igp_cable_length_115 = 115, 156 e1000_igp_cable_length_115 = 115,
171 e1000_igp_cable_length_120 = 120, 157 e1000_igp_cable_length_120 = 120,
172 e1000_igp_cable_length_130 = 130, 158 e1000_igp_cable_length_130 = 130,
173 e1000_igp_cable_length_140 = 140, 159 e1000_igp_cable_length_140 = 140,
174 e1000_igp_cable_length_150 = 150, 160 e1000_igp_cable_length_150 = 150,
175 e1000_igp_cable_length_160 = 160, 161 e1000_igp_cable_length_160 = 160,
176 e1000_igp_cable_length_170 = 170, 162 e1000_igp_cable_length_170 = 170,
177 e1000_igp_cable_length_180 = 180 163 e1000_igp_cable_length_180 = 180
178} e1000_igp_cable_length; 164} e1000_igp_cable_length;
179 165
180typedef enum { 166typedef enum {
181 e1000_10bt_ext_dist_enable_normal = 0, 167 e1000_10bt_ext_dist_enable_normal = 0,
182 e1000_10bt_ext_dist_enable_lower, 168 e1000_10bt_ext_dist_enable_lower,
183 e1000_10bt_ext_dist_enable_undefined = 0xFF 169 e1000_10bt_ext_dist_enable_undefined = 0xFF
184} e1000_10bt_ext_dist_enable; 170} e1000_10bt_ext_dist_enable;
185 171
186typedef enum { 172typedef enum {
187 e1000_rev_polarity_normal = 0, 173 e1000_rev_polarity_normal = 0,
188 e1000_rev_polarity_reversed, 174 e1000_rev_polarity_reversed,
189 e1000_rev_polarity_undefined = 0xFF 175 e1000_rev_polarity_undefined = 0xFF
190} e1000_rev_polarity; 176} e1000_rev_polarity;
191 177
192typedef enum { 178typedef enum {
193 e1000_downshift_normal = 0, 179 e1000_downshift_normal = 0,
194 e1000_downshift_activated, 180 e1000_downshift_activated,
195 e1000_downshift_undefined = 0xFF 181 e1000_downshift_undefined = 0xFF
196} e1000_downshift; 182} e1000_downshift;
197 183
198typedef enum { 184typedef enum {
199 e1000_smart_speed_default = 0, 185 e1000_smart_speed_default = 0,
200 e1000_smart_speed_on, 186 e1000_smart_speed_on,
201 e1000_smart_speed_off 187 e1000_smart_speed_off
202} e1000_smart_speed; 188} e1000_smart_speed;
203 189
204typedef enum { 190typedef enum {
205 e1000_polarity_reversal_enabled = 0, 191 e1000_polarity_reversal_enabled = 0,
206 e1000_polarity_reversal_disabled, 192 e1000_polarity_reversal_disabled,
207 e1000_polarity_reversal_undefined = 0xFF 193 e1000_polarity_reversal_undefined = 0xFF
208} e1000_polarity_reversal; 194} e1000_polarity_reversal;
209 195
210typedef enum { 196typedef enum {
211 e1000_auto_x_mode_manual_mdi = 0, 197 e1000_auto_x_mode_manual_mdi = 0,
212 e1000_auto_x_mode_manual_mdix, 198 e1000_auto_x_mode_manual_mdix,
213 e1000_auto_x_mode_auto1, 199 e1000_auto_x_mode_auto1,
214 e1000_auto_x_mode_auto2, 200 e1000_auto_x_mode_auto2,
215 e1000_auto_x_mode_undefined = 0xFF 201 e1000_auto_x_mode_undefined = 0xFF
216} e1000_auto_x_mode; 202} e1000_auto_x_mode;
217 203
218typedef enum { 204typedef enum {
219 e1000_1000t_rx_status_not_ok = 0, 205 e1000_1000t_rx_status_not_ok = 0,
220 e1000_1000t_rx_status_ok, 206 e1000_1000t_rx_status_ok,
221 e1000_1000t_rx_status_undefined = 0xFF 207 e1000_1000t_rx_status_undefined = 0xFF
222} e1000_1000t_rx_status; 208} e1000_1000t_rx_status;
223 209
224typedef enum { 210typedef enum {
225 e1000_phy_m88 = 0, 211 e1000_phy_m88 = 0,
226 e1000_phy_igp, 212 e1000_phy_igp,
227 e1000_phy_igp_2,
228 e1000_phy_gg82563,
229 e1000_phy_igp_3,
230 e1000_phy_ife,
231 e1000_phy_undefined = 0xFF 213 e1000_phy_undefined = 0xFF
232} e1000_phy_type; 214} e1000_phy_type;
233 215
234typedef enum { 216typedef enum {
235 e1000_ms_hw_default = 0, 217 e1000_ms_hw_default = 0,
236 e1000_ms_force_master, 218 e1000_ms_force_master,
237 e1000_ms_force_slave, 219 e1000_ms_force_slave,
238 e1000_ms_auto 220 e1000_ms_auto
239} e1000_ms_type; 221} e1000_ms_type;
240 222
241typedef enum { 223typedef enum {
242 e1000_ffe_config_enabled = 0, 224 e1000_ffe_config_enabled = 0,
243 e1000_ffe_config_active, 225 e1000_ffe_config_active,
244 e1000_ffe_config_blocked 226 e1000_ffe_config_blocked
245} e1000_ffe_config; 227} e1000_ffe_config;
246 228
247typedef enum { 229typedef enum {
248 e1000_dsp_config_disabled = 0, 230 e1000_dsp_config_disabled = 0,
249 e1000_dsp_config_enabled, 231 e1000_dsp_config_enabled,
250 e1000_dsp_config_activated, 232 e1000_dsp_config_activated,
251 e1000_dsp_config_undefined = 0xFF 233 e1000_dsp_config_undefined = 0xFF
252} e1000_dsp_config; 234} e1000_dsp_config;
253 235
254struct e1000_phy_info { 236struct e1000_phy_info {
255 e1000_cable_length cable_length; 237 e1000_cable_length cable_length;
256 e1000_10bt_ext_dist_enable extended_10bt_distance; 238 e1000_10bt_ext_dist_enable extended_10bt_distance;
257 e1000_rev_polarity cable_polarity; 239 e1000_rev_polarity cable_polarity;
258 e1000_downshift downshift; 240 e1000_downshift downshift;
259 e1000_polarity_reversal polarity_correction; 241 e1000_polarity_reversal polarity_correction;
260 e1000_auto_x_mode mdix_mode; 242 e1000_auto_x_mode mdix_mode;
261 e1000_1000t_rx_status local_rx; 243 e1000_1000t_rx_status local_rx;
262 e1000_1000t_rx_status remote_rx; 244 e1000_1000t_rx_status remote_rx;
263}; 245};
264 246
265struct e1000_phy_stats { 247struct e1000_phy_stats {
266 u32 idle_errors; 248 u32 idle_errors;
267 u32 receive_errors; 249 u32 receive_errors;
268}; 250};
269 251
270struct e1000_eeprom_info { 252struct e1000_eeprom_info {
271 e1000_eeprom_type type; 253 e1000_eeprom_type type;
272 u16 word_size; 254 u16 word_size;
273 u16 opcode_bits; 255 u16 opcode_bits;
274 u16 address_bits; 256 u16 address_bits;
275 u16 delay_usec; 257 u16 delay_usec;
276 u16 page_size; 258 u16 page_size;
277 bool use_eerd;
278 bool use_eewr;
279}; 259};
280 260
281/* Flex ASF Information */ 261/* Flex ASF Information */
282#define E1000_HOST_IF_MAX_SIZE 2048 262#define E1000_HOST_IF_MAX_SIZE 2048
283 263
284typedef enum { 264typedef enum {
285 e1000_byte_align = 0, 265 e1000_byte_align = 0,
286 e1000_word_align = 1, 266 e1000_word_align = 1,
287 e1000_dword_align = 2 267 e1000_dword_align = 2
288} e1000_align_type; 268} e1000_align_type;
289 269
290
291
292/* Error Codes */ 270/* Error Codes */
293#define E1000_SUCCESS 0 271#define E1000_SUCCESS 0
294#define E1000_ERR_EEPROM 1 272#define E1000_ERR_EEPROM 1
@@ -301,7 +279,6 @@ typedef enum {
301#define E1000_ERR_MASTER_REQUESTS_PENDING 10 279#define E1000_ERR_MASTER_REQUESTS_PENDING 10
302#define E1000_ERR_HOST_INTERFACE_COMMAND 11 280#define E1000_ERR_HOST_INTERFACE_COMMAND 11
303#define E1000_BLK_PHY_RESET 12 281#define E1000_BLK_PHY_RESET 12
304#define E1000_ERR_SWFW_SYNC 13
305 282
306#define E1000_BYTE_SWAP_WORD(_value) ((((_value) & 0x00ff) << 8) | \ 283#define E1000_BYTE_SWAP_WORD(_value) ((((_value) & 0x00ff) << 8) | \
307 (((_value) & 0xff00) >> 8)) 284 (((_value) & 0xff00) >> 8))
@@ -318,19 +295,17 @@ s32 e1000_setup_link(struct e1000_hw *hw);
318s32 e1000_phy_setup_autoneg(struct e1000_hw *hw); 295s32 e1000_phy_setup_autoneg(struct e1000_hw *hw);
319void e1000_config_collision_dist(struct e1000_hw *hw); 296void e1000_config_collision_dist(struct e1000_hw *hw);
320s32 e1000_check_for_link(struct e1000_hw *hw); 297s32 e1000_check_for_link(struct e1000_hw *hw);
321s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed, u16 *duplex); 298s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 * speed, u16 * duplex);
322s32 e1000_force_mac_fc(struct e1000_hw *hw); 299s32 e1000_force_mac_fc(struct e1000_hw *hw);
323 300
324/* PHY */ 301/* PHY */
325s32 e1000_read_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 *phy_data); 302s32 e1000_read_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 * phy_data);
326s32 e1000_write_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 data); 303s32 e1000_write_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 data);
327s32 e1000_phy_hw_reset(struct e1000_hw *hw); 304s32 e1000_phy_hw_reset(struct e1000_hw *hw);
328s32 e1000_phy_reset(struct e1000_hw *hw); 305s32 e1000_phy_reset(struct e1000_hw *hw);
329s32 e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info); 306s32 e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info);
330s32 e1000_validate_mdi_setting(struct e1000_hw *hw); 307s32 e1000_validate_mdi_setting(struct e1000_hw *hw);
331 308
332void e1000_phy_powerdown_workaround(struct e1000_hw *hw);
333
334/* EEPROM Functions */ 309/* EEPROM Functions */
335s32 e1000_init_eeprom_params(struct e1000_hw *hw); 310s32 e1000_init_eeprom_params(struct e1000_hw *hw);
336 311
@@ -338,66 +313,63 @@ s32 e1000_init_eeprom_params(struct e1000_hw *hw);
338u32 e1000_enable_mng_pass_thru(struct e1000_hw *hw); 313u32 e1000_enable_mng_pass_thru(struct e1000_hw *hw);
339 314
340#define E1000_MNG_DHCP_TX_PAYLOAD_CMD 64 315#define E1000_MNG_DHCP_TX_PAYLOAD_CMD 64
341#define E1000_HI_MAX_MNG_DATA_LENGTH 0x6F8 /* Host Interface data length */ 316#define E1000_HI_MAX_MNG_DATA_LENGTH 0x6F8 /* Host Interface data length */
342 317
343#define E1000_MNG_DHCP_COMMAND_TIMEOUT 10 /* Time in ms to process MNG command */ 318#define E1000_MNG_DHCP_COMMAND_TIMEOUT 10 /* Time in ms to process MNG command */
344#define E1000_MNG_DHCP_COOKIE_OFFSET 0x6F0 /* Cookie offset */ 319#define E1000_MNG_DHCP_COOKIE_OFFSET 0x6F0 /* Cookie offset */
345#define E1000_MNG_DHCP_COOKIE_LENGTH 0x10 /* Cookie length */ 320#define E1000_MNG_DHCP_COOKIE_LENGTH 0x10 /* Cookie length */
346#define E1000_MNG_IAMT_MODE 0x3 321#define E1000_MNG_IAMT_MODE 0x3
347#define E1000_MNG_ICH_IAMT_MODE 0x2 322#define E1000_MNG_ICH_IAMT_MODE 0x2
348#define E1000_IAMT_SIGNATURE 0x544D4149 /* Intel(R) Active Management Technology signature */ 323#define E1000_IAMT_SIGNATURE 0x544D4149 /* Intel(R) Active Management Technology signature */
349 324
350#define E1000_MNG_DHCP_COOKIE_STATUS_PARSING_SUPPORT 0x1 /* DHCP parsing enabled */ 325#define E1000_MNG_DHCP_COOKIE_STATUS_PARSING_SUPPORT 0x1 /* DHCP parsing enabled */
351#define E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT 0x2 /* DHCP parsing enabled */ 326#define E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT 0x2 /* DHCP parsing enabled */
352#define E1000_VFTA_ENTRY_SHIFT 0x5 327#define E1000_VFTA_ENTRY_SHIFT 0x5
353#define E1000_VFTA_ENTRY_MASK 0x7F 328#define E1000_VFTA_ENTRY_MASK 0x7F
354#define E1000_VFTA_ENTRY_BIT_SHIFT_MASK 0x1F 329#define E1000_VFTA_ENTRY_BIT_SHIFT_MASK 0x1F
355 330
356struct e1000_host_mng_command_header { 331struct e1000_host_mng_command_header {
357 u8 command_id; 332 u8 command_id;
358 u8 checksum; 333 u8 checksum;
359 u16 reserved1; 334 u16 reserved1;
360 u16 reserved2; 335 u16 reserved2;
361 u16 command_length; 336 u16 command_length;
362}; 337};
363 338
364struct e1000_host_mng_command_info { 339struct e1000_host_mng_command_info {
365 struct e1000_host_mng_command_header command_header; /* Command Head/Command Result Head has 4 bytes */ 340 struct e1000_host_mng_command_header command_header; /* Command Head/Command Result Head has 4 bytes */
366 u8 command_data[E1000_HI_MAX_MNG_DATA_LENGTH]; /* Command data can length 0..0x658*/ 341 u8 command_data[E1000_HI_MAX_MNG_DATA_LENGTH]; /* Command data can length 0..0x658 */
367}; 342};
368#ifdef __BIG_ENDIAN 343#ifdef __BIG_ENDIAN
369struct e1000_host_mng_dhcp_cookie{ 344struct e1000_host_mng_dhcp_cookie {
370 u32 signature; 345 u32 signature;
371 u16 vlan_id; 346 u16 vlan_id;
372 u8 reserved0; 347 u8 reserved0;
373 u8 status; 348 u8 status;
374 u32 reserved1; 349 u32 reserved1;
375 u8 checksum; 350 u8 checksum;
376 u8 reserved3; 351 u8 reserved3;
377 u16 reserved2; 352 u16 reserved2;
378}; 353};
379#else 354#else
380struct e1000_host_mng_dhcp_cookie{ 355struct e1000_host_mng_dhcp_cookie {
381 u32 signature; 356 u32 signature;
382 u8 status; 357 u8 status;
383 u8 reserved0; 358 u8 reserved0;
384 u16 vlan_id; 359 u16 vlan_id;
385 u32 reserved1; 360 u32 reserved1;
386 u16 reserved2; 361 u16 reserved2;
387 u8 reserved3; 362 u8 reserved3;
388 u8 checksum; 363 u8 checksum;
389}; 364};
390#endif 365#endif
391 366
392s32 e1000_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer,
393 u16 length);
394bool e1000_check_mng_mode(struct e1000_hw *hw); 367bool e1000_check_mng_mode(struct e1000_hw *hw);
395bool e1000_enable_tx_pkt_filtering(struct e1000_hw *hw); 368s32 e1000_read_eeprom(struct e1000_hw *hw, u16 reg, u16 words, u16 * data);
396s32 e1000_read_eeprom(struct e1000_hw *hw, u16 reg, u16 words, u16 *data);
397s32 e1000_validate_eeprom_checksum(struct e1000_hw *hw); 369s32 e1000_validate_eeprom_checksum(struct e1000_hw *hw);
398s32 e1000_update_eeprom_checksum(struct e1000_hw *hw); 370s32 e1000_update_eeprom_checksum(struct e1000_hw *hw);
399s32 e1000_write_eeprom(struct e1000_hw *hw, u16 reg, u16 words, u16 *data); 371s32 e1000_write_eeprom(struct e1000_hw *hw, u16 reg, u16 words, u16 * data);
400s32 e1000_read_mac_addr(struct e1000_hw * hw); 372s32 e1000_read_mac_addr(struct e1000_hw *hw);
401 373
402/* Filters (multicast, vlan, receive) */ 374/* Filters (multicast, vlan, receive) */
403u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 * mc_addr); 375u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 * mc_addr);
@@ -417,18 +389,15 @@ s32 e1000_blink_led_start(struct e1000_hw *hw);
417/* Everything else */ 389/* Everything else */
418void e1000_reset_adaptive(struct e1000_hw *hw); 390void e1000_reset_adaptive(struct e1000_hw *hw);
419void e1000_update_adaptive(struct e1000_hw *hw); 391void e1000_update_adaptive(struct e1000_hw *hw);
420void e1000_tbi_adjust_stats(struct e1000_hw *hw, struct e1000_hw_stats *stats, u32 frame_len, u8 * mac_addr); 392void e1000_tbi_adjust_stats(struct e1000_hw *hw, struct e1000_hw_stats *stats,
393 u32 frame_len, u8 * mac_addr);
421void e1000_get_bus_info(struct e1000_hw *hw); 394void e1000_get_bus_info(struct e1000_hw *hw);
422void e1000_pci_set_mwi(struct e1000_hw *hw); 395void e1000_pci_set_mwi(struct e1000_hw *hw);
423void e1000_pci_clear_mwi(struct e1000_hw *hw); 396void e1000_pci_clear_mwi(struct e1000_hw *hw);
424s32 e1000_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
425void e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc); 397void e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc);
426int e1000_pcix_get_mmrbc(struct e1000_hw *hw); 398int e1000_pcix_get_mmrbc(struct e1000_hw *hw);
427/* Port I/O is only supported on 82544 and newer */ 399/* Port I/O is only supported on 82544 and newer */
428void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value); 400void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value);
429s32 e1000_disable_pciex_master(struct e1000_hw *hw);
430s32 e1000_check_phy_reset_block(struct e1000_hw *hw);
431
432 401
433#define E1000_READ_REG_IO(a, reg) \ 402#define E1000_READ_REG_IO(a, reg) \
434 e1000_read_reg_io((a), E1000_##reg) 403 e1000_read_reg_io((a), E1000_##reg)
@@ -471,36 +440,7 @@ s32 e1000_check_phy_reset_block(struct e1000_hw *hw);
471#define E1000_DEV_ID_82546GB_QUAD_COPPER 0x1099 440#define E1000_DEV_ID_82546GB_QUAD_COPPER 0x1099
472#define E1000_DEV_ID_82547EI 0x1019 441#define E1000_DEV_ID_82547EI 0x1019
473#define E1000_DEV_ID_82547EI_MOBILE 0x101A 442#define E1000_DEV_ID_82547EI_MOBILE 0x101A
474#define E1000_DEV_ID_82571EB_COPPER 0x105E
475#define E1000_DEV_ID_82571EB_FIBER 0x105F
476#define E1000_DEV_ID_82571EB_SERDES 0x1060
477#define E1000_DEV_ID_82571EB_QUAD_COPPER 0x10A4
478#define E1000_DEV_ID_82571PT_QUAD_COPPER 0x10D5
479#define E1000_DEV_ID_82571EB_QUAD_FIBER 0x10A5
480#define E1000_DEV_ID_82571EB_QUAD_COPPER_LOWPROFILE 0x10BC
481#define E1000_DEV_ID_82571EB_SERDES_DUAL 0x10D9
482#define E1000_DEV_ID_82571EB_SERDES_QUAD 0x10DA
483#define E1000_DEV_ID_82572EI_COPPER 0x107D
484#define E1000_DEV_ID_82572EI_FIBER 0x107E
485#define E1000_DEV_ID_82572EI_SERDES 0x107F
486#define E1000_DEV_ID_82572EI 0x10B9
487#define E1000_DEV_ID_82573E 0x108B
488#define E1000_DEV_ID_82573E_IAMT 0x108C
489#define E1000_DEV_ID_82573L 0x109A
490#define E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3 0x10B5 443#define E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3 0x10B5
491#define E1000_DEV_ID_80003ES2LAN_COPPER_DPT 0x1096
492#define E1000_DEV_ID_80003ES2LAN_SERDES_DPT 0x1098
493#define E1000_DEV_ID_80003ES2LAN_COPPER_SPT 0x10BA
494#define E1000_DEV_ID_80003ES2LAN_SERDES_SPT 0x10BB
495
496#define E1000_DEV_ID_ICH8_IGP_M_AMT 0x1049
497#define E1000_DEV_ID_ICH8_IGP_AMT 0x104A
498#define E1000_DEV_ID_ICH8_IGP_C 0x104B
499#define E1000_DEV_ID_ICH8_IFE 0x104C
500#define E1000_DEV_ID_ICH8_IFE_GT 0x10C4
501#define E1000_DEV_ID_ICH8_IFE_G 0x10C5
502#define E1000_DEV_ID_ICH8_IGP_M 0x104D
503
504 444
505#define NODE_ADDRESS_SIZE 6 445#define NODE_ADDRESS_SIZE 6
506#define ETH_LENGTH_OF_ADDRESS 6 446#define ETH_LENGTH_OF_ADDRESS 6
@@ -523,21 +463,20 @@ s32 e1000_check_phy_reset_block(struct e1000_hw *hw);
523 463
524/* The sizes (in bytes) of a ethernet packet */ 464/* The sizes (in bytes) of a ethernet packet */
525#define ENET_HEADER_SIZE 14 465#define ENET_HEADER_SIZE 14
526#define MINIMUM_ETHERNET_FRAME_SIZE 64 /* With FCS */ 466#define MINIMUM_ETHERNET_FRAME_SIZE 64 /* With FCS */
527#define ETHERNET_FCS_SIZE 4 467#define ETHERNET_FCS_SIZE 4
528#define MINIMUM_ETHERNET_PACKET_SIZE \ 468#define MINIMUM_ETHERNET_PACKET_SIZE \
529 (MINIMUM_ETHERNET_FRAME_SIZE - ETHERNET_FCS_SIZE) 469 (MINIMUM_ETHERNET_FRAME_SIZE - ETHERNET_FCS_SIZE)
530#define CRC_LENGTH ETHERNET_FCS_SIZE 470#define CRC_LENGTH ETHERNET_FCS_SIZE
531#define MAX_JUMBO_FRAME_SIZE 0x3F00 471#define MAX_JUMBO_FRAME_SIZE 0x3F00
532 472
533
534/* 802.1q VLAN Packet Sizes */ 473/* 802.1q VLAN Packet Sizes */
535#define VLAN_TAG_SIZE 4 /* 802.3ac tag (not DMAed) */ 474#define VLAN_TAG_SIZE 4 /* 802.3ac tag (not DMAed) */
536 475
537/* Ethertype field values */ 476/* Ethertype field values */
538#define ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.3ac packet */ 477#define ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.3ac packet */
539#define ETHERNET_IP_TYPE 0x0800 /* IP packets */ 478#define ETHERNET_IP_TYPE 0x0800 /* IP packets */
540#define ETHERNET_ARP_TYPE 0x0806 /* Address Resolution Protocol (ARP) */ 479#define ETHERNET_ARP_TYPE 0x0806 /* Address Resolution Protocol (ARP) */
541 480
542/* Packet Header defines */ 481/* Packet Header defines */
543#define IP_PROTOCOL_TCP 6 482#define IP_PROTOCOL_TCP 6
@@ -567,15 +506,6 @@ s32 e1000_check_phy_reset_block(struct e1000_hw *hw);
567 E1000_IMS_RXSEQ | \ 506 E1000_IMS_RXSEQ | \
568 E1000_IMS_LSC) 507 E1000_IMS_LSC)
569 508
570/* Additional interrupts need to be handled for e1000_ich8lan:
571 DSW = The FW changed the status of the DISSW bit in FWSM
572 PHYINT = The LAN connected device generates an interrupt
573 EPRST = Manageability reset event */
574#define IMS_ICH8LAN_ENABLE_MASK (\
575 E1000_IMS_DSW | \
576 E1000_IMS_PHYINT | \
577 E1000_IMS_EPRST)
578
579/* Number of high/low register pairs in the RAR. The RAR (Receive Address 509/* Number of high/low register pairs in the RAR. The RAR (Receive Address
580 * Registers) holds the directed and multicast addresses that we monitor. We 510 * Registers) holds the directed and multicast addresses that we monitor. We
581 * reserve one of these spots for our directed address, allowing us room for 511 * reserve one of these spots for our directed address, allowing us room for
@@ -583,100 +513,98 @@ s32 e1000_check_phy_reset_block(struct e1000_hw *hw);
583 */ 513 */
584#define E1000_RAR_ENTRIES 15 514#define E1000_RAR_ENTRIES 15
585 515
586#define E1000_RAR_ENTRIES_ICH8LAN 6
587
588#define MIN_NUMBER_OF_DESCRIPTORS 8 516#define MIN_NUMBER_OF_DESCRIPTORS 8
589#define MAX_NUMBER_OF_DESCRIPTORS 0xFFF8 517#define MAX_NUMBER_OF_DESCRIPTORS 0xFFF8
590 518
591/* Receive Descriptor */ 519/* Receive Descriptor */
592struct e1000_rx_desc { 520struct e1000_rx_desc {
593 __le64 buffer_addr; /* Address of the descriptor's data buffer */ 521 __le64 buffer_addr; /* Address of the descriptor's data buffer */
594 __le16 length; /* Length of data DMAed into data buffer */ 522 __le16 length; /* Length of data DMAed into data buffer */
595 __le16 csum; /* Packet checksum */ 523 __le16 csum; /* Packet checksum */
596 u8 status; /* Descriptor status */ 524 u8 status; /* Descriptor status */
597 u8 errors; /* Descriptor Errors */ 525 u8 errors; /* Descriptor Errors */
598 __le16 special; 526 __le16 special;
599}; 527};
600 528
601/* Receive Descriptor - Extended */ 529/* Receive Descriptor - Extended */
602union e1000_rx_desc_extended { 530union e1000_rx_desc_extended {
603 struct { 531 struct {
604 __le64 buffer_addr; 532 __le64 buffer_addr;
605 __le64 reserved; 533 __le64 reserved;
606 } read; 534 } read;
607 struct { 535 struct {
608 struct { 536 struct {
609 __le32 mrq; /* Multiple Rx Queues */ 537 __le32 mrq; /* Multiple Rx Queues */
610 union { 538 union {
611 __le32 rss; /* RSS Hash */ 539 __le32 rss; /* RSS Hash */
612 struct { 540 struct {
613 __le16 ip_id; /* IP id */ 541 __le16 ip_id; /* IP id */
614 __le16 csum; /* Packet Checksum */ 542 __le16 csum; /* Packet Checksum */
615 } csum_ip; 543 } csum_ip;
616 } hi_dword; 544 } hi_dword;
617 } lower; 545 } lower;
618 struct { 546 struct {
619 __le32 status_error; /* ext status/error */ 547 __le32 status_error; /* ext status/error */
620 __le16 length; 548 __le16 length;
621 __le16 vlan; /* VLAN tag */ 549 __le16 vlan; /* VLAN tag */
622 } upper; 550 } upper;
623 } wb; /* writeback */ 551 } wb; /* writeback */
624}; 552};
625 553
626#define MAX_PS_BUFFERS 4 554#define MAX_PS_BUFFERS 4
627/* Receive Descriptor - Packet Split */ 555/* Receive Descriptor - Packet Split */
628union e1000_rx_desc_packet_split { 556union e1000_rx_desc_packet_split {
629 struct { 557 struct {
630 /* one buffer for protocol header(s), three data buffers */ 558 /* one buffer for protocol header(s), three data buffers */
631 __le64 buffer_addr[MAX_PS_BUFFERS]; 559 __le64 buffer_addr[MAX_PS_BUFFERS];
632 } read; 560 } read;
633 struct { 561 struct {
634 struct { 562 struct {
635 __le32 mrq; /* Multiple Rx Queues */ 563 __le32 mrq; /* Multiple Rx Queues */
636 union { 564 union {
637 __le32 rss; /* RSS Hash */ 565 __le32 rss; /* RSS Hash */
638 struct { 566 struct {
639 __le16 ip_id; /* IP id */ 567 __le16 ip_id; /* IP id */
640 __le16 csum; /* Packet Checksum */ 568 __le16 csum; /* Packet Checksum */
641 } csum_ip; 569 } csum_ip;
642 } hi_dword; 570 } hi_dword;
643 } lower; 571 } lower;
644 struct { 572 struct {
645 __le32 status_error; /* ext status/error */ 573 __le32 status_error; /* ext status/error */
646 __le16 length0; /* length of buffer 0 */ 574 __le16 length0; /* length of buffer 0 */
647 __le16 vlan; /* VLAN tag */ 575 __le16 vlan; /* VLAN tag */
648 } middle; 576 } middle;
649 struct { 577 struct {
650 __le16 header_status; 578 __le16 header_status;
651 __le16 length[3]; /* length of buffers 1-3 */ 579 __le16 length[3]; /* length of buffers 1-3 */
652 } upper; 580 } upper;
653 __le64 reserved; 581 __le64 reserved;
654 } wb; /* writeback */ 582 } wb; /* writeback */
655}; 583};
656 584
657/* Receive Decriptor bit definitions */ 585/* Receive Descriptor bit definitions */
658#define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */ 586#define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */
659#define E1000_RXD_STAT_EOP 0x02 /* End of Packet */ 587#define E1000_RXD_STAT_EOP 0x02 /* End of Packet */
660#define E1000_RXD_STAT_IXSM 0x04 /* Ignore checksum */ 588#define E1000_RXD_STAT_IXSM 0x04 /* Ignore checksum */
661#define E1000_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */ 589#define E1000_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */
662#define E1000_RXD_STAT_UDPCS 0x10 /* UDP xsum caculated */ 590#define E1000_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */
663#define E1000_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */ 591#define E1000_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */
664#define E1000_RXD_STAT_IPCS 0x40 /* IP xsum calculated */ 592#define E1000_RXD_STAT_IPCS 0x40 /* IP xsum calculated */
665#define E1000_RXD_STAT_PIF 0x80 /* passed in-exact filter */ 593#define E1000_RXD_STAT_PIF 0x80 /* passed in-exact filter */
666#define E1000_RXD_STAT_IPIDV 0x200 /* IP identification valid */ 594#define E1000_RXD_STAT_IPIDV 0x200 /* IP identification valid */
667#define E1000_RXD_STAT_UDPV 0x400 /* Valid UDP checksum */ 595#define E1000_RXD_STAT_UDPV 0x400 /* Valid UDP checksum */
668#define E1000_RXD_STAT_ACK 0x8000 /* ACK Packet indication */ 596#define E1000_RXD_STAT_ACK 0x8000 /* ACK Packet indication */
669#define E1000_RXD_ERR_CE 0x01 /* CRC Error */ 597#define E1000_RXD_ERR_CE 0x01 /* CRC Error */
670#define E1000_RXD_ERR_SE 0x02 /* Symbol Error */ 598#define E1000_RXD_ERR_SE 0x02 /* Symbol Error */
671#define E1000_RXD_ERR_SEQ 0x04 /* Sequence Error */ 599#define E1000_RXD_ERR_SEQ 0x04 /* Sequence Error */
672#define E1000_RXD_ERR_CXE 0x10 /* Carrier Extension Error */ 600#define E1000_RXD_ERR_CXE 0x10 /* Carrier Extension Error */
673#define E1000_RXD_ERR_TCPE 0x20 /* TCP/UDP Checksum Error */ 601#define E1000_RXD_ERR_TCPE 0x20 /* TCP/UDP Checksum Error */
674#define E1000_RXD_ERR_IPE 0x40 /* IP Checksum Error */ 602#define E1000_RXD_ERR_IPE 0x40 /* IP Checksum Error */
675#define E1000_RXD_ERR_RXE 0x80 /* Rx Data Error */ 603#define E1000_RXD_ERR_RXE 0x80 /* Rx Data Error */
676#define E1000_RXD_SPC_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */ 604#define E1000_RXD_SPC_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */
677#define E1000_RXD_SPC_PRI_MASK 0xE000 /* Priority is in upper 3 bits */ 605#define E1000_RXD_SPC_PRI_MASK 0xE000 /* Priority is in upper 3 bits */
678#define E1000_RXD_SPC_PRI_SHIFT 13 606#define E1000_RXD_SPC_PRI_SHIFT 13
679#define E1000_RXD_SPC_CFI_MASK 0x1000 /* CFI is bit 12 */ 607#define E1000_RXD_SPC_CFI_MASK 0x1000 /* CFI is bit 12 */
680#define E1000_RXD_SPC_CFI_SHIFT 12 608#define E1000_RXD_SPC_CFI_SHIFT 12
681 609
682#define E1000_RXDEXT_STATERR_CE 0x01000000 610#define E1000_RXDEXT_STATERR_CE 0x01000000
@@ -698,7 +626,6 @@ union e1000_rx_desc_packet_split {
698 E1000_RXD_ERR_CXE | \ 626 E1000_RXD_ERR_CXE | \
699 E1000_RXD_ERR_RXE) 627 E1000_RXD_ERR_RXE)
700 628
701
702/* Same mask, but for extended and packet split descriptors */ 629/* Same mask, but for extended and packet split descriptors */
703#define E1000_RXDEXT_ERR_FRAME_ERR_MASK ( \ 630#define E1000_RXDEXT_ERR_FRAME_ERR_MASK ( \
704 E1000_RXDEXT_STATERR_CE | \ 631 E1000_RXDEXT_STATERR_CE | \
@@ -707,152 +634,145 @@ union e1000_rx_desc_packet_split {
707 E1000_RXDEXT_STATERR_CXE | \ 634 E1000_RXDEXT_STATERR_CXE | \
708 E1000_RXDEXT_STATERR_RXE) 635 E1000_RXDEXT_STATERR_RXE)
709 636
710
711/* Transmit Descriptor */ 637/* Transmit Descriptor */
712struct e1000_tx_desc { 638struct e1000_tx_desc {
713 __le64 buffer_addr; /* Address of the descriptor's data buffer */ 639 __le64 buffer_addr; /* Address of the descriptor's data buffer */
714 union { 640 union {
715 __le32 data; 641 __le32 data;
716 struct { 642 struct {
717 __le16 length; /* Data buffer length */ 643 __le16 length; /* Data buffer length */
718 u8 cso; /* Checksum offset */ 644 u8 cso; /* Checksum offset */
719 u8 cmd; /* Descriptor control */ 645 u8 cmd; /* Descriptor control */
720 } flags; 646 } flags;
721 } lower; 647 } lower;
722 union { 648 union {
723 __le32 data; 649 __le32 data;
724 struct { 650 struct {
725 u8 status; /* Descriptor status */ 651 u8 status; /* Descriptor status */
726 u8 css; /* Checksum start */ 652 u8 css; /* Checksum start */
727 __le16 special; 653 __le16 special;
728 } fields; 654 } fields;
729 } upper; 655 } upper;
730}; 656};
731 657
732/* Transmit Descriptor bit definitions */ 658/* Transmit Descriptor bit definitions */
733#define E1000_TXD_DTYP_D 0x00100000 /* Data Descriptor */ 659#define E1000_TXD_DTYP_D 0x00100000 /* Data Descriptor */
734#define E1000_TXD_DTYP_C 0x00000000 /* Context Descriptor */ 660#define E1000_TXD_DTYP_C 0x00000000 /* Context Descriptor */
735#define E1000_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */ 661#define E1000_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */
736#define E1000_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */ 662#define E1000_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */
737#define E1000_TXD_CMD_EOP 0x01000000 /* End of Packet */ 663#define E1000_TXD_CMD_EOP 0x01000000 /* End of Packet */
738#define E1000_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ 664#define E1000_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */
739#define E1000_TXD_CMD_IC 0x04000000 /* Insert Checksum */ 665#define E1000_TXD_CMD_IC 0x04000000 /* Insert Checksum */
740#define E1000_TXD_CMD_RS 0x08000000 /* Report Status */ 666#define E1000_TXD_CMD_RS 0x08000000 /* Report Status */
741#define E1000_TXD_CMD_RPS 0x10000000 /* Report Packet Sent */ 667#define E1000_TXD_CMD_RPS 0x10000000 /* Report Packet Sent */
742#define E1000_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */ 668#define E1000_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */
743#define E1000_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */ 669#define E1000_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */
744#define E1000_TXD_CMD_IDE 0x80000000 /* Enable Tidv register */ 670#define E1000_TXD_CMD_IDE 0x80000000 /* Enable Tidv register */
745#define E1000_TXD_STAT_DD 0x00000001 /* Descriptor Done */ 671#define E1000_TXD_STAT_DD 0x00000001 /* Descriptor Done */
746#define E1000_TXD_STAT_EC 0x00000002 /* Excess Collisions */ 672#define E1000_TXD_STAT_EC 0x00000002 /* Excess Collisions */
747#define E1000_TXD_STAT_LC 0x00000004 /* Late Collisions */ 673#define E1000_TXD_STAT_LC 0x00000004 /* Late Collisions */
748#define E1000_TXD_STAT_TU 0x00000008 /* Transmit underrun */ 674#define E1000_TXD_STAT_TU 0x00000008 /* Transmit underrun */
749#define E1000_TXD_CMD_TCP 0x01000000 /* TCP packet */ 675#define E1000_TXD_CMD_TCP 0x01000000 /* TCP packet */
750#define E1000_TXD_CMD_IP 0x02000000 /* IP packet */ 676#define E1000_TXD_CMD_IP 0x02000000 /* IP packet */
751#define E1000_TXD_CMD_TSE 0x04000000 /* TCP Seg enable */ 677#define E1000_TXD_CMD_TSE 0x04000000 /* TCP Seg enable */
752#define E1000_TXD_STAT_TC 0x00000004 /* Tx Underrun */ 678#define E1000_TXD_STAT_TC 0x00000004 /* Tx Underrun */
753 679
754/* Offload Context Descriptor */ 680/* Offload Context Descriptor */
755struct e1000_context_desc { 681struct e1000_context_desc {
756 union { 682 union {
757 __le32 ip_config; 683 __le32 ip_config;
758 struct { 684 struct {
759 u8 ipcss; /* IP checksum start */ 685 u8 ipcss; /* IP checksum start */
760 u8 ipcso; /* IP checksum offset */ 686 u8 ipcso; /* IP checksum offset */
761 __le16 ipcse; /* IP checksum end */ 687 __le16 ipcse; /* IP checksum end */
762 } ip_fields; 688 } ip_fields;
763 } lower_setup; 689 } lower_setup;
764 union { 690 union {
765 __le32 tcp_config; 691 __le32 tcp_config;
766 struct { 692 struct {
767 u8 tucss; /* TCP checksum start */ 693 u8 tucss; /* TCP checksum start */
768 u8 tucso; /* TCP checksum offset */ 694 u8 tucso; /* TCP checksum offset */
769 __le16 tucse; /* TCP checksum end */ 695 __le16 tucse; /* TCP checksum end */
770 } tcp_fields; 696 } tcp_fields;
771 } upper_setup; 697 } upper_setup;
772 __le32 cmd_and_length; /* */ 698 __le32 cmd_and_length; /* */
773 union { 699 union {
774 __le32 data; 700 __le32 data;
775 struct { 701 struct {
776 u8 status; /* Descriptor status */ 702 u8 status; /* Descriptor status */
777 u8 hdr_len; /* Header length */ 703 u8 hdr_len; /* Header length */
778 __le16 mss; /* Maximum segment size */ 704 __le16 mss; /* Maximum segment size */
779 } fields; 705 } fields;
780 } tcp_seg_setup; 706 } tcp_seg_setup;
781}; 707};
782 708
783/* Offload data descriptor */ 709/* Offload data descriptor */
784struct e1000_data_desc { 710struct e1000_data_desc {
785 __le64 buffer_addr; /* Address of the descriptor's buffer address */ 711 __le64 buffer_addr; /* Address of the descriptor's buffer address */
786 union { 712 union {
787 __le32 data; 713 __le32 data;
788 struct { 714 struct {
789 __le16 length; /* Data buffer length */ 715 __le16 length; /* Data buffer length */
790 u8 typ_len_ext; /* */ 716 u8 typ_len_ext; /* */
791 u8 cmd; /* */ 717 u8 cmd; /* */
792 } flags; 718 } flags;
793 } lower; 719 } lower;
794 union { 720 union {
795 __le32 data; 721 __le32 data;
796 struct { 722 struct {
797 u8 status; /* Descriptor status */ 723 u8 status; /* Descriptor status */
798 u8 popts; /* Packet Options */ 724 u8 popts; /* Packet Options */
799 __le16 special; /* */ 725 __le16 special; /* */
800 } fields; 726 } fields;
801 } upper; 727 } upper;
802}; 728};
803 729
804/* Filters */ 730/* Filters */
805#define E1000_NUM_UNICAST 16 /* Unicast filter entries */ 731#define E1000_NUM_UNICAST 16 /* Unicast filter entries */
806#define E1000_MC_TBL_SIZE 128 /* Multicast Filter Table (4096 bits) */ 732#define E1000_MC_TBL_SIZE 128 /* Multicast Filter Table (4096 bits) */
807#define E1000_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */ 733#define E1000_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */
808
809#define E1000_NUM_UNICAST_ICH8LAN 7
810#define E1000_MC_TBL_SIZE_ICH8LAN 32
811
812 734
813/* Receive Address Register */ 735/* Receive Address Register */
814struct e1000_rar { 736struct e1000_rar {
815 volatile __le32 low; /* receive address low */ 737 volatile __le32 low; /* receive address low */
816 volatile __le32 high; /* receive address high */ 738 volatile __le32 high; /* receive address high */
817}; 739};
818 740
819/* Number of entries in the Multicast Table Array (MTA). */ 741/* Number of entries in the Multicast Table Array (MTA). */
820#define E1000_NUM_MTA_REGISTERS 128 742#define E1000_NUM_MTA_REGISTERS 128
821#define E1000_NUM_MTA_REGISTERS_ICH8LAN 32
822 743
823/* IPv4 Address Table Entry */ 744/* IPv4 Address Table Entry */
824struct e1000_ipv4_at_entry { 745struct e1000_ipv4_at_entry {
825 volatile u32 ipv4_addr; /* IP Address (RW) */ 746 volatile u32 ipv4_addr; /* IP Address (RW) */
826 volatile u32 reserved; 747 volatile u32 reserved;
827}; 748};
828 749
829/* Four wakeup IP addresses are supported */ 750/* Four wakeup IP addresses are supported */
830#define E1000_WAKEUP_IP_ADDRESS_COUNT_MAX 4 751#define E1000_WAKEUP_IP_ADDRESS_COUNT_MAX 4
831#define E1000_IP4AT_SIZE E1000_WAKEUP_IP_ADDRESS_COUNT_MAX 752#define E1000_IP4AT_SIZE E1000_WAKEUP_IP_ADDRESS_COUNT_MAX
832#define E1000_IP4AT_SIZE_ICH8LAN 3
833#define E1000_IP6AT_SIZE 1 753#define E1000_IP6AT_SIZE 1
834 754
835/* IPv6 Address Table Entry */ 755/* IPv6 Address Table Entry */
836struct e1000_ipv6_at_entry { 756struct e1000_ipv6_at_entry {
837 volatile u8 ipv6_addr[16]; 757 volatile u8 ipv6_addr[16];
838}; 758};
839 759
840/* Flexible Filter Length Table Entry */ 760/* Flexible Filter Length Table Entry */
841struct e1000_fflt_entry { 761struct e1000_fflt_entry {
842 volatile u32 length; /* Flexible Filter Length (RW) */ 762 volatile u32 length; /* Flexible Filter Length (RW) */
843 volatile u32 reserved; 763 volatile u32 reserved;
844}; 764};
845 765
846/* Flexible Filter Mask Table Entry */ 766/* Flexible Filter Mask Table Entry */
847struct e1000_ffmt_entry { 767struct e1000_ffmt_entry {
848 volatile u32 mask; /* Flexible Filter Mask (RW) */ 768 volatile u32 mask; /* Flexible Filter Mask (RW) */
849 volatile u32 reserved; 769 volatile u32 reserved;
850}; 770};
851 771
852/* Flexible Filter Value Table Entry */ 772/* Flexible Filter Value Table Entry */
853struct e1000_ffvt_entry { 773struct e1000_ffvt_entry {
854 volatile u32 value; /* Flexible Filter Value (RW) */ 774 volatile u32 value; /* Flexible Filter Value (RW) */
855 volatile u32 reserved; 775 volatile u32 reserved;
856}; 776};
857 777
858/* Four Flexible Filters are supported */ 778/* Four Flexible Filters are supported */
@@ -879,211 +799,211 @@ struct e1000_ffvt_entry {
879 * R/clr - register is read only and is cleared when read 799 * R/clr - register is read only and is cleared when read
880 * A - register array 800 * A - register array
881 */ 801 */
882#define E1000_CTRL 0x00000 /* Device Control - RW */ 802#define E1000_CTRL 0x00000 /* Device Control - RW */
883#define E1000_CTRL_DUP 0x00004 /* Device Control Duplicate (Shadow) - RW */ 803#define E1000_CTRL_DUP 0x00004 /* Device Control Duplicate (Shadow) - RW */
884#define E1000_STATUS 0x00008 /* Device Status - RO */ 804#define E1000_STATUS 0x00008 /* Device Status - RO */
885#define E1000_EECD 0x00010 /* EEPROM/Flash Control - RW */ 805#define E1000_EECD 0x00010 /* EEPROM/Flash Control - RW */
886#define E1000_EERD 0x00014 /* EEPROM Read - RW */ 806#define E1000_EERD 0x00014 /* EEPROM Read - RW */
887#define E1000_CTRL_EXT 0x00018 /* Extended Device Control - RW */ 807#define E1000_CTRL_EXT 0x00018 /* Extended Device Control - RW */
888#define E1000_FLA 0x0001C /* Flash Access - RW */ 808#define E1000_FLA 0x0001C /* Flash Access - RW */
889#define E1000_MDIC 0x00020 /* MDI Control - RW */ 809#define E1000_MDIC 0x00020 /* MDI Control - RW */
890#define E1000_SCTL 0x00024 /* SerDes Control - RW */ 810#define E1000_SCTL 0x00024 /* SerDes Control - RW */
891#define E1000_FEXTNVM 0x00028 /* Future Extended NVM register */ 811#define E1000_FEXTNVM 0x00028 /* Future Extended NVM register */
892#define E1000_FCAL 0x00028 /* Flow Control Address Low - RW */ 812#define E1000_FCAL 0x00028 /* Flow Control Address Low - RW */
893#define E1000_FCAH 0x0002C /* Flow Control Address High -RW */ 813#define E1000_FCAH 0x0002C /* Flow Control Address High -RW */
894#define E1000_FCT 0x00030 /* Flow Control Type - RW */ 814#define E1000_FCT 0x00030 /* Flow Control Type - RW */
895#define E1000_VET 0x00038 /* VLAN Ether Type - RW */ 815#define E1000_VET 0x00038 /* VLAN Ether Type - RW */
896#define E1000_ICR 0x000C0 /* Interrupt Cause Read - R/clr */ 816#define E1000_ICR 0x000C0 /* Interrupt Cause Read - R/clr */
897#define E1000_ITR 0x000C4 /* Interrupt Throttling Rate - RW */ 817#define E1000_ITR 0x000C4 /* Interrupt Throttling Rate - RW */
898#define E1000_ICS 0x000C8 /* Interrupt Cause Set - WO */ 818#define E1000_ICS 0x000C8 /* Interrupt Cause Set - WO */
899#define E1000_IMS 0x000D0 /* Interrupt Mask Set - RW */ 819#define E1000_IMS 0x000D0 /* Interrupt Mask Set - RW */
900#define E1000_IMC 0x000D8 /* Interrupt Mask Clear - WO */ 820#define E1000_IMC 0x000D8 /* Interrupt Mask Clear - WO */
901#define E1000_IAM 0x000E0 /* Interrupt Acknowledge Auto Mask */ 821#define E1000_IAM 0x000E0 /* Interrupt Acknowledge Auto Mask */
902#define E1000_RCTL 0x00100 /* RX Control - RW */ 822#define E1000_RCTL 0x00100 /* RX Control - RW */
903#define E1000_RDTR1 0x02820 /* RX Delay Timer (1) - RW */ 823#define E1000_RDTR1 0x02820 /* RX Delay Timer (1) - RW */
904#define E1000_RDBAL1 0x02900 /* RX Descriptor Base Address Low (1) - RW */ 824#define E1000_RDBAL1 0x02900 /* RX Descriptor Base Address Low (1) - RW */
905#define E1000_RDBAH1 0x02904 /* RX Descriptor Base Address High (1) - RW */ 825#define E1000_RDBAH1 0x02904 /* RX Descriptor Base Address High (1) - RW */
906#define E1000_RDLEN1 0x02908 /* RX Descriptor Length (1) - RW */ 826#define E1000_RDLEN1 0x02908 /* RX Descriptor Length (1) - RW */
907#define E1000_RDH1 0x02910 /* RX Descriptor Head (1) - RW */ 827#define E1000_RDH1 0x02910 /* RX Descriptor Head (1) - RW */
908#define E1000_RDT1 0x02918 /* RX Descriptor Tail (1) - RW */ 828#define E1000_RDT1 0x02918 /* RX Descriptor Tail (1) - RW */
909#define E1000_FCTTV 0x00170 /* Flow Control Transmit Timer Value - RW */ 829#define E1000_FCTTV 0x00170 /* Flow Control Transmit Timer Value - RW */
910#define E1000_TXCW 0x00178 /* TX Configuration Word - RW */ 830#define E1000_TXCW 0x00178 /* TX Configuration Word - RW */
911#define E1000_RXCW 0x00180 /* RX Configuration Word - RO */ 831#define E1000_RXCW 0x00180 /* RX Configuration Word - RO */
912#define E1000_TCTL 0x00400 /* TX Control - RW */ 832#define E1000_TCTL 0x00400 /* TX Control - RW */
913#define E1000_TCTL_EXT 0x00404 /* Extended TX Control - RW */ 833#define E1000_TCTL_EXT 0x00404 /* Extended TX Control - RW */
914#define E1000_TIPG 0x00410 /* TX Inter-packet gap -RW */ 834#define E1000_TIPG 0x00410 /* TX Inter-packet gap -RW */
915#define E1000_TBT 0x00448 /* TX Burst Timer - RW */ 835#define E1000_TBT 0x00448 /* TX Burst Timer - RW */
916#define E1000_AIT 0x00458 /* Adaptive Interframe Spacing Throttle - RW */ 836#define E1000_AIT 0x00458 /* Adaptive Interframe Spacing Throttle - RW */
917#define E1000_LEDCTL 0x00E00 /* LED Control - RW */ 837#define E1000_LEDCTL 0x00E00 /* LED Control - RW */
918#define E1000_EXTCNF_CTRL 0x00F00 /* Extended Configuration Control */ 838#define E1000_EXTCNF_CTRL 0x00F00 /* Extended Configuration Control */
919#define E1000_EXTCNF_SIZE 0x00F08 /* Extended Configuration Size */ 839#define E1000_EXTCNF_SIZE 0x00F08 /* Extended Configuration Size */
920#define E1000_PHY_CTRL 0x00F10 /* PHY Control Register in CSR */ 840#define E1000_PHY_CTRL 0x00F10 /* PHY Control Register in CSR */
921#define FEXTNVM_SW_CONFIG 0x0001 841#define FEXTNVM_SW_CONFIG 0x0001
922#define E1000_PBA 0x01000 /* Packet Buffer Allocation - RW */ 842#define E1000_PBA 0x01000 /* Packet Buffer Allocation - RW */
923#define E1000_PBS 0x01008 /* Packet Buffer Size */ 843#define E1000_PBS 0x01008 /* Packet Buffer Size */
924#define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */ 844#define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */
925#define E1000_FLASH_UPDATES 1000 845#define E1000_FLASH_UPDATES 1000
926#define E1000_EEARBC 0x01024 /* EEPROM Auto Read Bus Control */ 846#define E1000_EEARBC 0x01024 /* EEPROM Auto Read Bus Control */
927#define E1000_FLASHT 0x01028 /* FLASH Timer Register */ 847#define E1000_FLASHT 0x01028 /* FLASH Timer Register */
928#define E1000_EEWR 0x0102C /* EEPROM Write Register - RW */ 848#define E1000_EEWR 0x0102C /* EEPROM Write Register - RW */
929#define E1000_FLSWCTL 0x01030 /* FLASH control register */ 849#define E1000_FLSWCTL 0x01030 /* FLASH control register */
930#define E1000_FLSWDATA 0x01034 /* FLASH data register */ 850#define E1000_FLSWDATA 0x01034 /* FLASH data register */
931#define E1000_FLSWCNT 0x01038 /* FLASH Access Counter */ 851#define E1000_FLSWCNT 0x01038 /* FLASH Access Counter */
932#define E1000_FLOP 0x0103C /* FLASH Opcode Register */ 852#define E1000_FLOP 0x0103C /* FLASH Opcode Register */
933#define E1000_ERT 0x02008 /* Early Rx Threshold - RW */ 853#define E1000_ERT 0x02008 /* Early Rx Threshold - RW */
934#define E1000_FCRTL 0x02160 /* Flow Control Receive Threshold Low - RW */ 854#define E1000_FCRTL 0x02160 /* Flow Control Receive Threshold Low - RW */
935#define E1000_FCRTH 0x02168 /* Flow Control Receive Threshold High - RW */ 855#define E1000_FCRTH 0x02168 /* Flow Control Receive Threshold High - RW */
936#define E1000_PSRCTL 0x02170 /* Packet Split Receive Control - RW */ 856#define E1000_PSRCTL 0x02170 /* Packet Split Receive Control - RW */
937#define E1000_RDBAL 0x02800 /* RX Descriptor Base Address Low - RW */ 857#define E1000_RDBAL 0x02800 /* RX Descriptor Base Address Low - RW */
938#define E1000_RDBAH 0x02804 /* RX Descriptor Base Address High - RW */ 858#define E1000_RDBAH 0x02804 /* RX Descriptor Base Address High - RW */
939#define E1000_RDLEN 0x02808 /* RX Descriptor Length - RW */ 859#define E1000_RDLEN 0x02808 /* RX Descriptor Length - RW */
940#define E1000_RDH 0x02810 /* RX Descriptor Head - RW */ 860#define E1000_RDH 0x02810 /* RX Descriptor Head - RW */
941#define E1000_RDT 0x02818 /* RX Descriptor Tail - RW */ 861#define E1000_RDT 0x02818 /* RX Descriptor Tail - RW */
942#define E1000_RDTR 0x02820 /* RX Delay Timer - RW */ 862#define E1000_RDTR 0x02820 /* RX Delay Timer - RW */
943#define E1000_RDBAL0 E1000_RDBAL /* RX Desc Base Address Low (0) - RW */ 863#define E1000_RDBAL0 E1000_RDBAL /* RX Desc Base Address Low (0) - RW */
944#define E1000_RDBAH0 E1000_RDBAH /* RX Desc Base Address High (0) - RW */ 864#define E1000_RDBAH0 E1000_RDBAH /* RX Desc Base Address High (0) - RW */
945#define E1000_RDLEN0 E1000_RDLEN /* RX Desc Length (0) - RW */ 865#define E1000_RDLEN0 E1000_RDLEN /* RX Desc Length (0) - RW */
946#define E1000_RDH0 E1000_RDH /* RX Desc Head (0) - RW */ 866#define E1000_RDH0 E1000_RDH /* RX Desc Head (0) - RW */
947#define E1000_RDT0 E1000_RDT /* RX Desc Tail (0) - RW */ 867#define E1000_RDT0 E1000_RDT /* RX Desc Tail (0) - RW */
948#define E1000_RDTR0 E1000_RDTR /* RX Delay Timer (0) - RW */ 868#define E1000_RDTR0 E1000_RDTR /* RX Delay Timer (0) - RW */
949#define E1000_RXDCTL 0x02828 /* RX Descriptor Control queue 0 - RW */ 869#define E1000_RXDCTL 0x02828 /* RX Descriptor Control queue 0 - RW */
950#define E1000_RXDCTL1 0x02928 /* RX Descriptor Control queue 1 - RW */ 870#define E1000_RXDCTL1 0x02928 /* RX Descriptor Control queue 1 - RW */
951#define E1000_RADV 0x0282C /* RX Interrupt Absolute Delay Timer - RW */ 871#define E1000_RADV 0x0282C /* RX Interrupt Absolute Delay Timer - RW */
952#define E1000_RSRPD 0x02C00 /* RX Small Packet Detect - RW */ 872#define E1000_RSRPD 0x02C00 /* RX Small Packet Detect - RW */
953#define E1000_RAID 0x02C08 /* Receive Ack Interrupt Delay - RW */ 873#define E1000_RAID 0x02C08 /* Receive Ack Interrupt Delay - RW */
954#define E1000_TXDMAC 0x03000 /* TX DMA Control - RW */ 874#define E1000_TXDMAC 0x03000 /* TX DMA Control - RW */
955#define E1000_KABGTXD 0x03004 /* AFE Band Gap Transmit Ref Data */ 875#define E1000_KABGTXD 0x03004 /* AFE Band Gap Transmit Ref Data */
956#define E1000_TDFH 0x03410 /* TX Data FIFO Head - RW */ 876#define E1000_TDFH 0x03410 /* TX Data FIFO Head - RW */
957#define E1000_TDFT 0x03418 /* TX Data FIFO Tail - RW */ 877#define E1000_TDFT 0x03418 /* TX Data FIFO Tail - RW */
958#define E1000_TDFHS 0x03420 /* TX Data FIFO Head Saved - RW */ 878#define E1000_TDFHS 0x03420 /* TX Data FIFO Head Saved - RW */
959#define E1000_TDFTS 0x03428 /* TX Data FIFO Tail Saved - RW */ 879#define E1000_TDFTS 0x03428 /* TX Data FIFO Tail Saved - RW */
960#define E1000_TDFPC 0x03430 /* TX Data FIFO Packet Count - RW */ 880#define E1000_TDFPC 0x03430 /* TX Data FIFO Packet Count - RW */
961#define E1000_TDBAL 0x03800 /* TX Descriptor Base Address Low - RW */ 881#define E1000_TDBAL 0x03800 /* TX Descriptor Base Address Low - RW */
962#define E1000_TDBAH 0x03804 /* TX Descriptor Base Address High - RW */ 882#define E1000_TDBAH 0x03804 /* TX Descriptor Base Address High - RW */
963#define E1000_TDLEN 0x03808 /* TX Descriptor Length - RW */ 883#define E1000_TDLEN 0x03808 /* TX Descriptor Length - RW */
964#define E1000_TDH 0x03810 /* TX Descriptor Head - RW */ 884#define E1000_TDH 0x03810 /* TX Descriptor Head - RW */
965#define E1000_TDT 0x03818 /* TX Descripotr Tail - RW */ 885#define E1000_TDT 0x03818 /* TX Descripotr Tail - RW */
966#define E1000_TIDV 0x03820 /* TX Interrupt Delay Value - RW */ 886#define E1000_TIDV 0x03820 /* TX Interrupt Delay Value - RW */
967#define E1000_TXDCTL 0x03828 /* TX Descriptor Control - RW */ 887#define E1000_TXDCTL 0x03828 /* TX Descriptor Control - RW */
968#define E1000_TADV 0x0382C /* TX Interrupt Absolute Delay Val - RW */ 888#define E1000_TADV 0x0382C /* TX Interrupt Absolute Delay Val - RW */
969#define E1000_TSPMT 0x03830 /* TCP Segmentation PAD & Min Threshold - RW */ 889#define E1000_TSPMT 0x03830 /* TCP Segmentation PAD & Min Threshold - RW */
970#define E1000_TARC0 0x03840 /* TX Arbitration Count (0) */ 890#define E1000_TARC0 0x03840 /* TX Arbitration Count (0) */
971#define E1000_TDBAL1 0x03900 /* TX Desc Base Address Low (1) - RW */ 891#define E1000_TDBAL1 0x03900 /* TX Desc Base Address Low (1) - RW */
972#define E1000_TDBAH1 0x03904 /* TX Desc Base Address High (1) - RW */ 892#define E1000_TDBAH1 0x03904 /* TX Desc Base Address High (1) - RW */
973#define E1000_TDLEN1 0x03908 /* TX Desc Length (1) - RW */ 893#define E1000_TDLEN1 0x03908 /* TX Desc Length (1) - RW */
974#define E1000_TDH1 0x03910 /* TX Desc Head (1) - RW */ 894#define E1000_TDH1 0x03910 /* TX Desc Head (1) - RW */
975#define E1000_TDT1 0x03918 /* TX Desc Tail (1) - RW */ 895#define E1000_TDT1 0x03918 /* TX Desc Tail (1) - RW */
976#define E1000_TXDCTL1 0x03928 /* TX Descriptor Control (1) - RW */ 896#define E1000_TXDCTL1 0x03928 /* TX Descriptor Control (1) - RW */
977#define E1000_TARC1 0x03940 /* TX Arbitration Count (1) */ 897#define E1000_TARC1 0x03940 /* TX Arbitration Count (1) */
978#define E1000_CRCERRS 0x04000 /* CRC Error Count - R/clr */ 898#define E1000_CRCERRS 0x04000 /* CRC Error Count - R/clr */
979#define E1000_ALGNERRC 0x04004 /* Alignment Error Count - R/clr */ 899#define E1000_ALGNERRC 0x04004 /* Alignment Error Count - R/clr */
980#define E1000_SYMERRS 0x04008 /* Symbol Error Count - R/clr */ 900#define E1000_SYMERRS 0x04008 /* Symbol Error Count - R/clr */
981#define E1000_RXERRC 0x0400C /* Receive Error Count - R/clr */ 901#define E1000_RXERRC 0x0400C /* Receive Error Count - R/clr */
982#define E1000_MPC 0x04010 /* Missed Packet Count - R/clr */ 902#define E1000_MPC 0x04010 /* Missed Packet Count - R/clr */
983#define E1000_SCC 0x04014 /* Single Collision Count - R/clr */ 903#define E1000_SCC 0x04014 /* Single Collision Count - R/clr */
984#define E1000_ECOL 0x04018 /* Excessive Collision Count - R/clr */ 904#define E1000_ECOL 0x04018 /* Excessive Collision Count - R/clr */
985#define E1000_MCC 0x0401C /* Multiple Collision Count - R/clr */ 905#define E1000_MCC 0x0401C /* Multiple Collision Count - R/clr */
986#define E1000_LATECOL 0x04020 /* Late Collision Count - R/clr */ 906#define E1000_LATECOL 0x04020 /* Late Collision Count - R/clr */
987#define E1000_COLC 0x04028 /* Collision Count - R/clr */ 907#define E1000_COLC 0x04028 /* Collision Count - R/clr */
988#define E1000_DC 0x04030 /* Defer Count - R/clr */ 908#define E1000_DC 0x04030 /* Defer Count - R/clr */
989#define E1000_TNCRS 0x04034 /* TX-No CRS - R/clr */ 909#define E1000_TNCRS 0x04034 /* TX-No CRS - R/clr */
990#define E1000_SEC 0x04038 /* Sequence Error Count - R/clr */ 910#define E1000_SEC 0x04038 /* Sequence Error Count - R/clr */
991#define E1000_CEXTERR 0x0403C /* Carrier Extension Error Count - R/clr */ 911#define E1000_CEXTERR 0x0403C /* Carrier Extension Error Count - R/clr */
992#define E1000_RLEC 0x04040 /* Receive Length Error Count - R/clr */ 912#define E1000_RLEC 0x04040 /* Receive Length Error Count - R/clr */
993#define E1000_XONRXC 0x04048 /* XON RX Count - R/clr */ 913#define E1000_XONRXC 0x04048 /* XON RX Count - R/clr */
994#define E1000_XONTXC 0x0404C /* XON TX Count - R/clr */ 914#define E1000_XONTXC 0x0404C /* XON TX Count - R/clr */
995#define E1000_XOFFRXC 0x04050 /* XOFF RX Count - R/clr */ 915#define E1000_XOFFRXC 0x04050 /* XOFF RX Count - R/clr */
996#define E1000_XOFFTXC 0x04054 /* XOFF TX Count - R/clr */ 916#define E1000_XOFFTXC 0x04054 /* XOFF TX Count - R/clr */
997#define E1000_FCRUC 0x04058 /* Flow Control RX Unsupported Count- R/clr */ 917#define E1000_FCRUC 0x04058 /* Flow Control RX Unsupported Count- R/clr */
998#define E1000_PRC64 0x0405C /* Packets RX (64 bytes) - R/clr */ 918#define E1000_PRC64 0x0405C /* Packets RX (64 bytes) - R/clr */
999#define E1000_PRC127 0x04060 /* Packets RX (65-127 bytes) - R/clr */ 919#define E1000_PRC127 0x04060 /* Packets RX (65-127 bytes) - R/clr */
1000#define E1000_PRC255 0x04064 /* Packets RX (128-255 bytes) - R/clr */ 920#define E1000_PRC255 0x04064 /* Packets RX (128-255 bytes) - R/clr */
1001#define E1000_PRC511 0x04068 /* Packets RX (255-511 bytes) - R/clr */ 921#define E1000_PRC511 0x04068 /* Packets RX (255-511 bytes) - R/clr */
1002#define E1000_PRC1023 0x0406C /* Packets RX (512-1023 bytes) - R/clr */ 922#define E1000_PRC1023 0x0406C /* Packets RX (512-1023 bytes) - R/clr */
1003#define E1000_PRC1522 0x04070 /* Packets RX (1024-1522 bytes) - R/clr */ 923#define E1000_PRC1522 0x04070 /* Packets RX (1024-1522 bytes) - R/clr */
1004#define E1000_GPRC 0x04074 /* Good Packets RX Count - R/clr */ 924#define E1000_GPRC 0x04074 /* Good Packets RX Count - R/clr */
1005#define E1000_BPRC 0x04078 /* Broadcast Packets RX Count - R/clr */ 925#define E1000_BPRC 0x04078 /* Broadcast Packets RX Count - R/clr */
1006#define E1000_MPRC 0x0407C /* Multicast Packets RX Count - R/clr */ 926#define E1000_MPRC 0x0407C /* Multicast Packets RX Count - R/clr */
1007#define E1000_GPTC 0x04080 /* Good Packets TX Count - R/clr */ 927#define E1000_GPTC 0x04080 /* Good Packets TX Count - R/clr */
1008#define E1000_GORCL 0x04088 /* Good Octets RX Count Low - R/clr */ 928#define E1000_GORCL 0x04088 /* Good Octets RX Count Low - R/clr */
1009#define E1000_GORCH 0x0408C /* Good Octets RX Count High - R/clr */ 929#define E1000_GORCH 0x0408C /* Good Octets RX Count High - R/clr */
1010#define E1000_GOTCL 0x04090 /* Good Octets TX Count Low - R/clr */ 930#define E1000_GOTCL 0x04090 /* Good Octets TX Count Low - R/clr */
1011#define E1000_GOTCH 0x04094 /* Good Octets TX Count High - R/clr */ 931#define E1000_GOTCH 0x04094 /* Good Octets TX Count High - R/clr */
1012#define E1000_RNBC 0x040A0 /* RX No Buffers Count - R/clr */ 932#define E1000_RNBC 0x040A0 /* RX No Buffers Count - R/clr */
1013#define E1000_RUC 0x040A4 /* RX Undersize Count - R/clr */ 933#define E1000_RUC 0x040A4 /* RX Undersize Count - R/clr */
1014#define E1000_RFC 0x040A8 /* RX Fragment Count - R/clr */ 934#define E1000_RFC 0x040A8 /* RX Fragment Count - R/clr */
1015#define E1000_ROC 0x040AC /* RX Oversize Count - R/clr */ 935#define E1000_ROC 0x040AC /* RX Oversize Count - R/clr */
1016#define E1000_RJC 0x040B0 /* RX Jabber Count - R/clr */ 936#define E1000_RJC 0x040B0 /* RX Jabber Count - R/clr */
1017#define E1000_MGTPRC 0x040B4 /* Management Packets RX Count - R/clr */ 937#define E1000_MGTPRC 0x040B4 /* Management Packets RX Count - R/clr */
1018#define E1000_MGTPDC 0x040B8 /* Management Packets Dropped Count - R/clr */ 938#define E1000_MGTPDC 0x040B8 /* Management Packets Dropped Count - R/clr */
1019#define E1000_MGTPTC 0x040BC /* Management Packets TX Count - R/clr */ 939#define E1000_MGTPTC 0x040BC /* Management Packets TX Count - R/clr */
1020#define E1000_TORL 0x040C0 /* Total Octets RX Low - R/clr */ 940#define E1000_TORL 0x040C0 /* Total Octets RX Low - R/clr */
1021#define E1000_TORH 0x040C4 /* Total Octets RX High - R/clr */ 941#define E1000_TORH 0x040C4 /* Total Octets RX High - R/clr */
1022#define E1000_TOTL 0x040C8 /* Total Octets TX Low - R/clr */ 942#define E1000_TOTL 0x040C8 /* Total Octets TX Low - R/clr */
1023#define E1000_TOTH 0x040CC /* Total Octets TX High - R/clr */ 943#define E1000_TOTH 0x040CC /* Total Octets TX High - R/clr */
1024#define E1000_TPR 0x040D0 /* Total Packets RX - R/clr */ 944#define E1000_TPR 0x040D0 /* Total Packets RX - R/clr */
1025#define E1000_TPT 0x040D4 /* Total Packets TX - R/clr */ 945#define E1000_TPT 0x040D4 /* Total Packets TX - R/clr */
1026#define E1000_PTC64 0x040D8 /* Packets TX (64 bytes) - R/clr */ 946#define E1000_PTC64 0x040D8 /* Packets TX (64 bytes) - R/clr */
1027#define E1000_PTC127 0x040DC /* Packets TX (65-127 bytes) - R/clr */ 947#define E1000_PTC127 0x040DC /* Packets TX (65-127 bytes) - R/clr */
1028#define E1000_PTC255 0x040E0 /* Packets TX (128-255 bytes) - R/clr */ 948#define E1000_PTC255 0x040E0 /* Packets TX (128-255 bytes) - R/clr */
1029#define E1000_PTC511 0x040E4 /* Packets TX (256-511 bytes) - R/clr */ 949#define E1000_PTC511 0x040E4 /* Packets TX (256-511 bytes) - R/clr */
1030#define E1000_PTC1023 0x040E8 /* Packets TX (512-1023 bytes) - R/clr */ 950#define E1000_PTC1023 0x040E8 /* Packets TX (512-1023 bytes) - R/clr */
1031#define E1000_PTC1522 0x040EC /* Packets TX (1024-1522 Bytes) - R/clr */ 951#define E1000_PTC1522 0x040EC /* Packets TX (1024-1522 Bytes) - R/clr */
1032#define E1000_MPTC 0x040F0 /* Multicast Packets TX Count - R/clr */ 952#define E1000_MPTC 0x040F0 /* Multicast Packets TX Count - R/clr */
1033#define E1000_BPTC 0x040F4 /* Broadcast Packets TX Count - R/clr */ 953#define E1000_BPTC 0x040F4 /* Broadcast Packets TX Count - R/clr */
1034#define E1000_TSCTC 0x040F8 /* TCP Segmentation Context TX - R/clr */ 954#define E1000_TSCTC 0x040F8 /* TCP Segmentation Context TX - R/clr */
1035#define E1000_TSCTFC 0x040FC /* TCP Segmentation Context TX Fail - R/clr */ 955#define E1000_TSCTFC 0x040FC /* TCP Segmentation Context TX Fail - R/clr */
1036#define E1000_IAC 0x04100 /* Interrupt Assertion Count */ 956#define E1000_IAC 0x04100 /* Interrupt Assertion Count */
1037#define E1000_ICRXPTC 0x04104 /* Interrupt Cause Rx Packet Timer Expire Count */ 957#define E1000_ICRXPTC 0x04104 /* Interrupt Cause Rx Packet Timer Expire Count */
1038#define E1000_ICRXATC 0x04108 /* Interrupt Cause Rx Absolute Timer Expire Count */ 958#define E1000_ICRXATC 0x04108 /* Interrupt Cause Rx Absolute Timer Expire Count */
1039#define E1000_ICTXPTC 0x0410C /* Interrupt Cause Tx Packet Timer Expire Count */ 959#define E1000_ICTXPTC 0x0410C /* Interrupt Cause Tx Packet Timer Expire Count */
1040#define E1000_ICTXATC 0x04110 /* Interrupt Cause Tx Absolute Timer Expire Count */ 960#define E1000_ICTXATC 0x04110 /* Interrupt Cause Tx Absolute Timer Expire Count */
1041#define E1000_ICTXQEC 0x04118 /* Interrupt Cause Tx Queue Empty Count */ 961#define E1000_ICTXQEC 0x04118 /* Interrupt Cause Tx Queue Empty Count */
1042#define E1000_ICTXQMTC 0x0411C /* Interrupt Cause Tx Queue Minimum Threshold Count */ 962#define E1000_ICTXQMTC 0x0411C /* Interrupt Cause Tx Queue Minimum Threshold Count */
1043#define E1000_ICRXDMTC 0x04120 /* Interrupt Cause Rx Descriptor Minimum Threshold Count */ 963#define E1000_ICRXDMTC 0x04120 /* Interrupt Cause Rx Descriptor Minimum Threshold Count */
1044#define E1000_ICRXOC 0x04124 /* Interrupt Cause Receiver Overrun Count */ 964#define E1000_ICRXOC 0x04124 /* Interrupt Cause Receiver Overrun Count */
1045#define E1000_RXCSUM 0x05000 /* RX Checksum Control - RW */ 965#define E1000_RXCSUM 0x05000 /* RX Checksum Control - RW */
1046#define E1000_RFCTL 0x05008 /* Receive Filter Control*/ 966#define E1000_RFCTL 0x05008 /* Receive Filter Control */
1047#define E1000_MTA 0x05200 /* Multicast Table Array - RW Array */ 967#define E1000_MTA 0x05200 /* Multicast Table Array - RW Array */
1048#define E1000_RA 0x05400 /* Receive Address - RW Array */ 968#define E1000_RA 0x05400 /* Receive Address - RW Array */
1049#define E1000_VFTA 0x05600 /* VLAN Filter Table Array - RW Array */ 969#define E1000_VFTA 0x05600 /* VLAN Filter Table Array - RW Array */
1050#define E1000_WUC 0x05800 /* Wakeup Control - RW */ 970#define E1000_WUC 0x05800 /* Wakeup Control - RW */
1051#define E1000_WUFC 0x05808 /* Wakeup Filter Control - RW */ 971#define E1000_WUFC 0x05808 /* Wakeup Filter Control - RW */
1052#define E1000_WUS 0x05810 /* Wakeup Status - RO */ 972#define E1000_WUS 0x05810 /* Wakeup Status - RO */
1053#define E1000_MANC 0x05820 /* Management Control - RW */ 973#define E1000_MANC 0x05820 /* Management Control - RW */
1054#define E1000_IPAV 0x05838 /* IP Address Valid - RW */ 974#define E1000_IPAV 0x05838 /* IP Address Valid - RW */
1055#define E1000_IP4AT 0x05840 /* IPv4 Address Table - RW Array */ 975#define E1000_IP4AT 0x05840 /* IPv4 Address Table - RW Array */
1056#define E1000_IP6AT 0x05880 /* IPv6 Address Table - RW Array */ 976#define E1000_IP6AT 0x05880 /* IPv6 Address Table - RW Array */
1057#define E1000_WUPL 0x05900 /* Wakeup Packet Length - RW */ 977#define E1000_WUPL 0x05900 /* Wakeup Packet Length - RW */
1058#define E1000_WUPM 0x05A00 /* Wakeup Packet Memory - RO A */ 978#define E1000_WUPM 0x05A00 /* Wakeup Packet Memory - RO A */
1059#define E1000_FFLT 0x05F00 /* Flexible Filter Length Table - RW Array */ 979#define E1000_FFLT 0x05F00 /* Flexible Filter Length Table - RW Array */
1060#define E1000_HOST_IF 0x08800 /* Host Interface */ 980#define E1000_HOST_IF 0x08800 /* Host Interface */
1061#define E1000_FFMT 0x09000 /* Flexible Filter Mask Table - RW Array */ 981#define E1000_FFMT 0x09000 /* Flexible Filter Mask Table - RW Array */
1062#define E1000_FFVT 0x09800 /* Flexible Filter Value Table - RW Array */ 982#define E1000_FFVT 0x09800 /* Flexible Filter Value Table - RW Array */
1063 983
1064#define E1000_KUMCTRLSTA 0x00034 /* MAC-PHY interface - RW */ 984#define E1000_KUMCTRLSTA 0x00034 /* MAC-PHY interface - RW */
1065#define E1000_MDPHYA 0x0003C /* PHY address - RW */ 985#define E1000_MDPHYA 0x0003C /* PHY address - RW */
1066#define E1000_MANC2H 0x05860 /* Managment Control To Host - RW */ 986#define E1000_MANC2H 0x05860 /* Managment Control To Host - RW */
1067#define E1000_SW_FW_SYNC 0x05B5C /* Software-Firmware Synchronization - RW */ 987#define E1000_SW_FW_SYNC 0x05B5C /* Software-Firmware Synchronization - RW */
1068 988
1069#define E1000_GCR 0x05B00 /* PCI-Ex Control */ 989#define E1000_GCR 0x05B00 /* PCI-Ex Control */
1070#define E1000_GSCL_1 0x05B10 /* PCI-Ex Statistic Control #1 */ 990#define E1000_GSCL_1 0x05B10 /* PCI-Ex Statistic Control #1 */
1071#define E1000_GSCL_2 0x05B14 /* PCI-Ex Statistic Control #2 */ 991#define E1000_GSCL_2 0x05B14 /* PCI-Ex Statistic Control #2 */
1072#define E1000_GSCL_3 0x05B18 /* PCI-Ex Statistic Control #3 */ 992#define E1000_GSCL_3 0x05B18 /* PCI-Ex Statistic Control #3 */
1073#define E1000_GSCL_4 0x05B1C /* PCI-Ex Statistic Control #4 */ 993#define E1000_GSCL_4 0x05B1C /* PCI-Ex Statistic Control #4 */
1074#define E1000_FACTPS 0x05B30 /* Function Active and Power State to MNG */ 994#define E1000_FACTPS 0x05B30 /* Function Active and Power State to MNG */
1075#define E1000_SWSM 0x05B50 /* SW Semaphore */ 995#define E1000_SWSM 0x05B50 /* SW Semaphore */
1076#define E1000_FWSM 0x05B54 /* FW Semaphore */ 996#define E1000_FWSM 0x05B54 /* FW Semaphore */
1077#define E1000_FFLT_DBG 0x05F04 /* Debug Register */ 997#define E1000_FFLT_DBG 0x05F04 /* Debug Register */
1078#define E1000_HICR 0x08F00 /* Host Inteface Control */ 998#define E1000_HICR 0x08F00 /* Host Interface Control */
1079 999
1080/* RSS registers */ 1000/* RSS registers */
1081#define E1000_CPUVEC 0x02C10 /* CPU Vector Register - RW */ 1001#define E1000_CPUVEC 0x02C10 /* CPU Vector Register - RW */
1082#define E1000_MRQC 0x05818 /* Multiple Receive Control - RW */ 1002#define E1000_MRQC 0x05818 /* Multiple Receive Control - RW */
1083#define E1000_RETA 0x05C00 /* Redirection Table - RW Array */ 1003#define E1000_RETA 0x05C00 /* Redirection Table - RW Array */
1084#define E1000_RSSRK 0x05C80 /* RSS Random Key - RW Array */ 1004#define E1000_RSSRK 0x05C80 /* RSS Random Key - RW Array */
1085#define E1000_RSSIM 0x05864 /* RSS Interrupt Mask */ 1005#define E1000_RSSIM 0x05864 /* RSS Interrupt Mask */
1086#define E1000_RSSIR 0x05868 /* RSS Interrupt Request */ 1006#define E1000_RSSIR 0x05868 /* RSS Interrupt Request */
1087/* Register Set (82542) 1007/* Register Set (82542)
1088 * 1008 *
1089 * Some of the 82542 registers are located at different offsets than they are 1009 * Some of the 82542 registers are located at different offsets than they are
@@ -1123,19 +1043,19 @@ struct e1000_ffvt_entry {
1123#define E1000_82542_RDLEN0 E1000_82542_RDLEN 1043#define E1000_82542_RDLEN0 E1000_82542_RDLEN
1124#define E1000_82542_RDH0 E1000_82542_RDH 1044#define E1000_82542_RDH0 E1000_82542_RDH
1125#define E1000_82542_RDT0 E1000_82542_RDT 1045#define E1000_82542_RDT0 E1000_82542_RDT
1126#define E1000_82542_SRRCTL(_n) (0x280C + ((_n) << 8)) /* Split and Replication 1046#define E1000_82542_SRRCTL(_n) (0x280C + ((_n) << 8)) /* Split and Replication
1127 * RX Control - RW */ 1047 * RX Control - RW */
1128#define E1000_82542_DCA_RXCTRL(_n) (0x02814 + ((_n) << 8)) 1048#define E1000_82542_DCA_RXCTRL(_n) (0x02814 + ((_n) << 8))
1129#define E1000_82542_RDBAH3 0x02B04 /* RX Desc Base High Queue 3 - RW */ 1049#define E1000_82542_RDBAH3 0x02B04 /* RX Desc Base High Queue 3 - RW */
1130#define E1000_82542_RDBAL3 0x02B00 /* RX Desc Low Queue 3 - RW */ 1050#define E1000_82542_RDBAL3 0x02B00 /* RX Desc Low Queue 3 - RW */
1131#define E1000_82542_RDLEN3 0x02B08 /* RX Desc Length Queue 3 - RW */ 1051#define E1000_82542_RDLEN3 0x02B08 /* RX Desc Length Queue 3 - RW */
1132#define E1000_82542_RDH3 0x02B10 /* RX Desc Head Queue 3 - RW */ 1052#define E1000_82542_RDH3 0x02B10 /* RX Desc Head Queue 3 - RW */
1133#define E1000_82542_RDT3 0x02B18 /* RX Desc Tail Queue 3 - RW */ 1053#define E1000_82542_RDT3 0x02B18 /* RX Desc Tail Queue 3 - RW */
1134#define E1000_82542_RDBAL2 0x02A00 /* RX Desc Base Low Queue 2 - RW */ 1054#define E1000_82542_RDBAL2 0x02A00 /* RX Desc Base Low Queue 2 - RW */
1135#define E1000_82542_RDBAH2 0x02A04 /* RX Desc Base High Queue 2 - RW */ 1055#define E1000_82542_RDBAH2 0x02A04 /* RX Desc Base High Queue 2 - RW */
1136#define E1000_82542_RDLEN2 0x02A08 /* RX Desc Length Queue 2 - RW */ 1056#define E1000_82542_RDLEN2 0x02A08 /* RX Desc Length Queue 2 - RW */
1137#define E1000_82542_RDH2 0x02A10 /* RX Desc Head Queue 2 - RW */ 1057#define E1000_82542_RDH2 0x02A10 /* RX Desc Head Queue 2 - RW */
1138#define E1000_82542_RDT2 0x02A18 /* RX Desc Tail Queue 2 - RW */ 1058#define E1000_82542_RDT2 0x02A18 /* RX Desc Tail Queue 2 - RW */
1139#define E1000_82542_RDTR1 0x00130 1059#define E1000_82542_RDTR1 0x00130
1140#define E1000_82542_RDBAL1 0x00138 1060#define E1000_82542_RDBAL1 0x00138
1141#define E1000_82542_RDBAH1 0x0013C 1061#define E1000_82542_RDBAH1 0x0013C
@@ -1302,288 +1222,281 @@ struct e1000_ffvt_entry {
1302#define E1000_82542_RSSIR E1000_RSSIR 1222#define E1000_82542_RSSIR E1000_RSSIR
1303#define E1000_82542_KUMCTRLSTA E1000_KUMCTRLSTA 1223#define E1000_82542_KUMCTRLSTA E1000_KUMCTRLSTA
1304#define E1000_82542_SW_FW_SYNC E1000_SW_FW_SYNC 1224#define E1000_82542_SW_FW_SYNC E1000_SW_FW_SYNC
1305#define E1000_82542_MANC2H E1000_MANC2H
1306 1225
1307/* Statistics counters collected by the MAC */ 1226/* Statistics counters collected by the MAC */
1308struct e1000_hw_stats { 1227struct e1000_hw_stats {
1309 u64 crcerrs; 1228 u64 crcerrs;
1310 u64 algnerrc; 1229 u64 algnerrc;
1311 u64 symerrs; 1230 u64 symerrs;
1312 u64 rxerrc; 1231 u64 rxerrc;
1313 u64 txerrc; 1232 u64 txerrc;
1314 u64 mpc; 1233 u64 mpc;
1315 u64 scc; 1234 u64 scc;
1316 u64 ecol; 1235 u64 ecol;
1317 u64 mcc; 1236 u64 mcc;
1318 u64 latecol; 1237 u64 latecol;
1319 u64 colc; 1238 u64 colc;
1320 u64 dc; 1239 u64 dc;
1321 u64 tncrs; 1240 u64 tncrs;
1322 u64 sec; 1241 u64 sec;
1323 u64 cexterr; 1242 u64 cexterr;
1324 u64 rlec; 1243 u64 rlec;
1325 u64 xonrxc; 1244 u64 xonrxc;
1326 u64 xontxc; 1245 u64 xontxc;
1327 u64 xoffrxc; 1246 u64 xoffrxc;
1328 u64 xofftxc; 1247 u64 xofftxc;
1329 u64 fcruc; 1248 u64 fcruc;
1330 u64 prc64; 1249 u64 prc64;
1331 u64 prc127; 1250 u64 prc127;
1332 u64 prc255; 1251 u64 prc255;
1333 u64 prc511; 1252 u64 prc511;
1334 u64 prc1023; 1253 u64 prc1023;
1335 u64 prc1522; 1254 u64 prc1522;
1336 u64 gprc; 1255 u64 gprc;
1337 u64 bprc; 1256 u64 bprc;
1338 u64 mprc; 1257 u64 mprc;
1339 u64 gptc; 1258 u64 gptc;
1340 u64 gorcl; 1259 u64 gorcl;
1341 u64 gorch; 1260 u64 gorch;
1342 u64 gotcl; 1261 u64 gotcl;
1343 u64 gotch; 1262 u64 gotch;
1344 u64 rnbc; 1263 u64 rnbc;
1345 u64 ruc; 1264 u64 ruc;
1346 u64 rfc; 1265 u64 rfc;
1347 u64 roc; 1266 u64 roc;
1348 u64 rlerrc; 1267 u64 rlerrc;
1349 u64 rjc; 1268 u64 rjc;
1350 u64 mgprc; 1269 u64 mgprc;
1351 u64 mgpdc; 1270 u64 mgpdc;
1352 u64 mgptc; 1271 u64 mgptc;
1353 u64 torl; 1272 u64 torl;
1354 u64 torh; 1273 u64 torh;
1355 u64 totl; 1274 u64 totl;
1356 u64 toth; 1275 u64 toth;
1357 u64 tpr; 1276 u64 tpr;
1358 u64 tpt; 1277 u64 tpt;
1359 u64 ptc64; 1278 u64 ptc64;
1360 u64 ptc127; 1279 u64 ptc127;
1361 u64 ptc255; 1280 u64 ptc255;
1362 u64 ptc511; 1281 u64 ptc511;
1363 u64 ptc1023; 1282 u64 ptc1023;
1364 u64 ptc1522; 1283 u64 ptc1522;
1365 u64 mptc; 1284 u64 mptc;
1366 u64 bptc; 1285 u64 bptc;
1367 u64 tsctc; 1286 u64 tsctc;
1368 u64 tsctfc; 1287 u64 tsctfc;
1369 u64 iac; 1288 u64 iac;
1370 u64 icrxptc; 1289 u64 icrxptc;
1371 u64 icrxatc; 1290 u64 icrxatc;
1372 u64 ictxptc; 1291 u64 ictxptc;
1373 u64 ictxatc; 1292 u64 ictxatc;
1374 u64 ictxqec; 1293 u64 ictxqec;
1375 u64 ictxqmtc; 1294 u64 ictxqmtc;
1376 u64 icrxdmtc; 1295 u64 icrxdmtc;
1377 u64 icrxoc; 1296 u64 icrxoc;
1378}; 1297};
1379 1298
1380/* Structure containing variables used by the shared code (e1000_hw.c) */ 1299/* Structure containing variables used by the shared code (e1000_hw.c) */
1381struct e1000_hw { 1300struct e1000_hw {
1382 u8 __iomem *hw_addr; 1301 u8 __iomem *hw_addr;
1383 u8 __iomem *flash_address; 1302 u8 __iomem *flash_address;
1384 e1000_mac_type mac_type; 1303 e1000_mac_type mac_type;
1385 e1000_phy_type phy_type; 1304 e1000_phy_type phy_type;
1386 u32 phy_init_script; 1305 u32 phy_init_script;
1387 e1000_media_type media_type; 1306 e1000_media_type media_type;
1388 void *back; 1307 void *back;
1389 struct e1000_shadow_ram *eeprom_shadow_ram; 1308 struct e1000_shadow_ram *eeprom_shadow_ram;
1390 u32 flash_bank_size; 1309 u32 flash_bank_size;
1391 u32 flash_base_addr; 1310 u32 flash_base_addr;
1392 e1000_fc_type fc; 1311 e1000_fc_type fc;
1393 e1000_bus_speed bus_speed; 1312 e1000_bus_speed bus_speed;
1394 e1000_bus_width bus_width; 1313 e1000_bus_width bus_width;
1395 e1000_bus_type bus_type; 1314 e1000_bus_type bus_type;
1396 struct e1000_eeprom_info eeprom; 1315 struct e1000_eeprom_info eeprom;
1397 e1000_ms_type master_slave; 1316 e1000_ms_type master_slave;
1398 e1000_ms_type original_master_slave; 1317 e1000_ms_type original_master_slave;
1399 e1000_ffe_config ffe_config_state; 1318 e1000_ffe_config ffe_config_state;
1400 u32 asf_firmware_present; 1319 u32 asf_firmware_present;
1401 u32 eeprom_semaphore_present; 1320 u32 eeprom_semaphore_present;
1402 u32 swfw_sync_present; 1321 unsigned long io_base;
1403 u32 swfwhw_semaphore_present; 1322 u32 phy_id;
1404 unsigned long io_base; 1323 u32 phy_revision;
1405 u32 phy_id; 1324 u32 phy_addr;
1406 u32 phy_revision; 1325 u32 original_fc;
1407 u32 phy_addr; 1326 u32 txcw;
1408 u32 original_fc; 1327 u32 autoneg_failed;
1409 u32 txcw; 1328 u32 max_frame_size;
1410 u32 autoneg_failed; 1329 u32 min_frame_size;
1411 u32 max_frame_size; 1330 u32 mc_filter_type;
1412 u32 min_frame_size; 1331 u32 num_mc_addrs;
1413 u32 mc_filter_type; 1332 u32 collision_delta;
1414 u32 num_mc_addrs; 1333 u32 tx_packet_delta;
1415 u32 collision_delta; 1334 u32 ledctl_default;
1416 u32 tx_packet_delta; 1335 u32 ledctl_mode1;
1417 u32 ledctl_default; 1336 u32 ledctl_mode2;
1418 u32 ledctl_mode1; 1337 bool tx_pkt_filtering;
1419 u32 ledctl_mode2;
1420 bool tx_pkt_filtering;
1421 struct e1000_host_mng_dhcp_cookie mng_cookie; 1338 struct e1000_host_mng_dhcp_cookie mng_cookie;
1422 u16 phy_spd_default; 1339 u16 phy_spd_default;
1423 u16 autoneg_advertised; 1340 u16 autoneg_advertised;
1424 u16 pci_cmd_word; 1341 u16 pci_cmd_word;
1425 u16 fc_high_water; 1342 u16 fc_high_water;
1426 u16 fc_low_water; 1343 u16 fc_low_water;
1427 u16 fc_pause_time; 1344 u16 fc_pause_time;
1428 u16 current_ifs_val; 1345 u16 current_ifs_val;
1429 u16 ifs_min_val; 1346 u16 ifs_min_val;
1430 u16 ifs_max_val; 1347 u16 ifs_max_val;
1431 u16 ifs_step_size; 1348 u16 ifs_step_size;
1432 u16 ifs_ratio; 1349 u16 ifs_ratio;
1433 u16 device_id; 1350 u16 device_id;
1434 u16 vendor_id; 1351 u16 vendor_id;
1435 u16 subsystem_id; 1352 u16 subsystem_id;
1436 u16 subsystem_vendor_id; 1353 u16 subsystem_vendor_id;
1437 u8 revision_id; 1354 u8 revision_id;
1438 u8 autoneg; 1355 u8 autoneg;
1439 u8 mdix; 1356 u8 mdix;
1440 u8 forced_speed_duplex; 1357 u8 forced_speed_duplex;
1441 u8 wait_autoneg_complete; 1358 u8 wait_autoneg_complete;
1442 u8 dma_fairness; 1359 u8 dma_fairness;
1443 u8 mac_addr[NODE_ADDRESS_SIZE]; 1360 u8 mac_addr[NODE_ADDRESS_SIZE];
1444 u8 perm_mac_addr[NODE_ADDRESS_SIZE]; 1361 u8 perm_mac_addr[NODE_ADDRESS_SIZE];
1445 bool disable_polarity_correction; 1362 bool disable_polarity_correction;
1446 bool speed_downgraded; 1363 bool speed_downgraded;
1447 e1000_smart_speed smart_speed; 1364 e1000_smart_speed smart_speed;
1448 e1000_dsp_config dsp_config_state; 1365 e1000_dsp_config dsp_config_state;
1449 bool get_link_status; 1366 bool get_link_status;
1450 bool serdes_link_down; 1367 bool serdes_has_link;
1451 bool tbi_compatibility_en; 1368 bool tbi_compatibility_en;
1452 bool tbi_compatibility_on; 1369 bool tbi_compatibility_on;
1453 bool laa_is_present; 1370 bool laa_is_present;
1454 bool phy_reset_disable; 1371 bool phy_reset_disable;
1455 bool initialize_hw_bits_disable; 1372 bool initialize_hw_bits_disable;
1456 bool fc_send_xon; 1373 bool fc_send_xon;
1457 bool fc_strict_ieee; 1374 bool fc_strict_ieee;
1458 bool report_tx_early; 1375 bool report_tx_early;
1459 bool adaptive_ifs; 1376 bool adaptive_ifs;
1460 bool ifs_params_forced; 1377 bool ifs_params_forced;
1461 bool in_ifs_mode; 1378 bool in_ifs_mode;
1462 bool mng_reg_access_disabled; 1379 bool mng_reg_access_disabled;
1463 bool leave_av_bit_off; 1380 bool leave_av_bit_off;
1464 bool kmrn_lock_loss_workaround_disabled; 1381 bool bad_tx_carr_stats_fd;
1465 bool bad_tx_carr_stats_fd; 1382 bool has_smbus;
1466 bool has_manc2h;
1467 bool rx_needs_kicking;
1468 bool has_smbus;
1469}; 1383};
1470 1384
1471 1385#define E1000_EEPROM_SWDPIN0 0x0001 /* SWDPIN 0 EEPROM Value */
1472#define E1000_EEPROM_SWDPIN0 0x0001 /* SWDPIN 0 EEPROM Value */ 1386#define E1000_EEPROM_LED_LOGIC 0x0020 /* Led Logic Word */
1473#define E1000_EEPROM_LED_LOGIC 0x0020 /* Led Logic Word */ 1387#define E1000_EEPROM_RW_REG_DATA 16 /* Offset to data in EEPROM read/write registers */
1474#define E1000_EEPROM_RW_REG_DATA 16 /* Offset to data in EEPROM read/write registers */ 1388#define E1000_EEPROM_RW_REG_DONE 2 /* Offset to READ/WRITE done bit */
1475#define E1000_EEPROM_RW_REG_DONE 2 /* Offset to READ/WRITE done bit */ 1389#define E1000_EEPROM_RW_REG_START 1 /* First bit for telling part to start operation */
1476#define E1000_EEPROM_RW_REG_START 1 /* First bit for telling part to start operation */ 1390#define E1000_EEPROM_RW_ADDR_SHIFT 2 /* Shift to the address bits */
1477#define E1000_EEPROM_RW_ADDR_SHIFT 2 /* Shift to the address bits */ 1391#define E1000_EEPROM_POLL_WRITE 1 /* Flag for polling for write complete */
1478#define E1000_EEPROM_POLL_WRITE 1 /* Flag for polling for write complete */ 1392#define E1000_EEPROM_POLL_READ 0 /* Flag for polling for read complete */
1479#define E1000_EEPROM_POLL_READ 0 /* Flag for polling for read complete */
1480/* Register Bit Masks */ 1393/* Register Bit Masks */
1481/* Device Control */ 1394/* Device Control */
1482#define E1000_CTRL_FD 0x00000001 /* Full duplex.0=half; 1=full */ 1395#define E1000_CTRL_FD 0x00000001 /* Full duplex.0=half; 1=full */
1483#define E1000_CTRL_BEM 0x00000002 /* Endian Mode.0=little,1=big */ 1396#define E1000_CTRL_BEM 0x00000002 /* Endian Mode.0=little,1=big */
1484#define E1000_CTRL_PRIOR 0x00000004 /* Priority on PCI. 0=rx,1=fair */ 1397#define E1000_CTRL_PRIOR 0x00000004 /* Priority on PCI. 0=rx,1=fair */
1485#define E1000_CTRL_GIO_MASTER_DISABLE 0x00000004 /*Blocks new Master requests */ 1398#define E1000_CTRL_GIO_MASTER_DISABLE 0x00000004 /*Blocks new Master requests */
1486#define E1000_CTRL_LRST 0x00000008 /* Link reset. 0=normal,1=reset */ 1399#define E1000_CTRL_LRST 0x00000008 /* Link reset. 0=normal,1=reset */
1487#define E1000_CTRL_TME 0x00000010 /* Test mode. 0=normal,1=test */ 1400#define E1000_CTRL_TME 0x00000010 /* Test mode. 0=normal,1=test */
1488#define E1000_CTRL_SLE 0x00000020 /* Serial Link on 0=dis,1=en */ 1401#define E1000_CTRL_SLE 0x00000020 /* Serial Link on 0=dis,1=en */
1489#define E1000_CTRL_ASDE 0x00000020 /* Auto-speed detect enable */ 1402#define E1000_CTRL_ASDE 0x00000020 /* Auto-speed detect enable */
1490#define E1000_CTRL_SLU 0x00000040 /* Set link up (Force Link) */ 1403#define E1000_CTRL_SLU 0x00000040 /* Set link up (Force Link) */
1491#define E1000_CTRL_ILOS 0x00000080 /* Invert Loss-Of Signal */ 1404#define E1000_CTRL_ILOS 0x00000080 /* Invert Loss-Of Signal */
1492#define E1000_CTRL_SPD_SEL 0x00000300 /* Speed Select Mask */ 1405#define E1000_CTRL_SPD_SEL 0x00000300 /* Speed Select Mask */
1493#define E1000_CTRL_SPD_10 0x00000000 /* Force 10Mb */ 1406#define E1000_CTRL_SPD_10 0x00000000 /* Force 10Mb */
1494#define E1000_CTRL_SPD_100 0x00000100 /* Force 100Mb */ 1407#define E1000_CTRL_SPD_100 0x00000100 /* Force 100Mb */
1495#define E1000_CTRL_SPD_1000 0x00000200 /* Force 1Gb */ 1408#define E1000_CTRL_SPD_1000 0x00000200 /* Force 1Gb */
1496#define E1000_CTRL_BEM32 0x00000400 /* Big Endian 32 mode */ 1409#define E1000_CTRL_BEM32 0x00000400 /* Big Endian 32 mode */
1497#define E1000_CTRL_FRCSPD 0x00000800 /* Force Speed */ 1410#define E1000_CTRL_FRCSPD 0x00000800 /* Force Speed */
1498#define E1000_CTRL_FRCDPX 0x00001000 /* Force Duplex */ 1411#define E1000_CTRL_FRCDPX 0x00001000 /* Force Duplex */
1499#define E1000_CTRL_D_UD_EN 0x00002000 /* Dock/Undock enable */ 1412#define E1000_CTRL_D_UD_EN 0x00002000 /* Dock/Undock enable */
1500#define E1000_CTRL_D_UD_POLARITY 0x00004000 /* Defined polarity of Dock/Undock indication in SDP[0] */ 1413#define E1000_CTRL_D_UD_POLARITY 0x00004000 /* Defined polarity of Dock/Undock indication in SDP[0] */
1501#define E1000_CTRL_FORCE_PHY_RESET 0x00008000 /* Reset both PHY ports, through PHYRST_N pin */ 1414#define E1000_CTRL_FORCE_PHY_RESET 0x00008000 /* Reset both PHY ports, through PHYRST_N pin */
1502#define E1000_CTRL_EXT_LINK_EN 0x00010000 /* enable link status from external LINK_0 and LINK_1 pins */ 1415#define E1000_CTRL_EXT_LINK_EN 0x00010000 /* enable link status from external LINK_0 and LINK_1 pins */
1503#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */ 1416#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */
1504#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */ 1417#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */
1505#define E1000_CTRL_SWDPIN2 0x00100000 /* SWDPIN 2 value */ 1418#define E1000_CTRL_SWDPIN2 0x00100000 /* SWDPIN 2 value */
1506#define E1000_CTRL_SWDPIN3 0x00200000 /* SWDPIN 3 value */ 1419#define E1000_CTRL_SWDPIN3 0x00200000 /* SWDPIN 3 value */
1507#define E1000_CTRL_SWDPIO0 0x00400000 /* SWDPIN 0 Input or output */ 1420#define E1000_CTRL_SWDPIO0 0x00400000 /* SWDPIN 0 Input or output */
1508#define E1000_CTRL_SWDPIO1 0x00800000 /* SWDPIN 1 input or output */ 1421#define E1000_CTRL_SWDPIO1 0x00800000 /* SWDPIN 1 input or output */
1509#define E1000_CTRL_SWDPIO2 0x01000000 /* SWDPIN 2 input or output */ 1422#define E1000_CTRL_SWDPIO2 0x01000000 /* SWDPIN 2 input or output */
1510#define E1000_CTRL_SWDPIO3 0x02000000 /* SWDPIN 3 input or output */ 1423#define E1000_CTRL_SWDPIO3 0x02000000 /* SWDPIN 3 input or output */
1511#define E1000_CTRL_RST 0x04000000 /* Global reset */ 1424#define E1000_CTRL_RST 0x04000000 /* Global reset */
1512#define E1000_CTRL_RFCE 0x08000000 /* Receive Flow Control enable */ 1425#define E1000_CTRL_RFCE 0x08000000 /* Receive Flow Control enable */
1513#define E1000_CTRL_TFCE 0x10000000 /* Transmit flow control enable */ 1426#define E1000_CTRL_TFCE 0x10000000 /* Transmit flow control enable */
1514#define E1000_CTRL_RTE 0x20000000 /* Routing tag enable */ 1427#define E1000_CTRL_RTE 0x20000000 /* Routing tag enable */
1515#define E1000_CTRL_VME 0x40000000 /* IEEE VLAN mode enable */ 1428#define E1000_CTRL_VME 0x40000000 /* IEEE VLAN mode enable */
1516#define E1000_CTRL_PHY_RST 0x80000000 /* PHY Reset */ 1429#define E1000_CTRL_PHY_RST 0x80000000 /* PHY Reset */
1517#define E1000_CTRL_SW2FW_INT 0x02000000 /* Initiate an interrupt to manageability engine */ 1430#define E1000_CTRL_SW2FW_INT 0x02000000 /* Initiate an interrupt to manageability engine */
1518 1431
1519/* Device Status */ 1432/* Device Status */
1520#define E1000_STATUS_FD 0x00000001 /* Full duplex.0=half,1=full */ 1433#define E1000_STATUS_FD 0x00000001 /* Full duplex.0=half,1=full */
1521#define E1000_STATUS_LU 0x00000002 /* Link up.0=no,1=link */ 1434#define E1000_STATUS_LU 0x00000002 /* Link up.0=no,1=link */
1522#define E1000_STATUS_FUNC_MASK 0x0000000C /* PCI Function Mask */ 1435#define E1000_STATUS_FUNC_MASK 0x0000000C /* PCI Function Mask */
1523#define E1000_STATUS_FUNC_SHIFT 2 1436#define E1000_STATUS_FUNC_SHIFT 2
1524#define E1000_STATUS_FUNC_0 0x00000000 /* Function 0 */ 1437#define E1000_STATUS_FUNC_0 0x00000000 /* Function 0 */
1525#define E1000_STATUS_FUNC_1 0x00000004 /* Function 1 */ 1438#define E1000_STATUS_FUNC_1 0x00000004 /* Function 1 */
1526#define E1000_STATUS_TXOFF 0x00000010 /* transmission paused */ 1439#define E1000_STATUS_TXOFF 0x00000010 /* transmission paused */
1527#define E1000_STATUS_TBIMODE 0x00000020 /* TBI mode */ 1440#define E1000_STATUS_TBIMODE 0x00000020 /* TBI mode */
1528#define E1000_STATUS_SPEED_MASK 0x000000C0 1441#define E1000_STATUS_SPEED_MASK 0x000000C0
1529#define E1000_STATUS_SPEED_10 0x00000000 /* Speed 10Mb/s */ 1442#define E1000_STATUS_SPEED_10 0x00000000 /* Speed 10Mb/s */
1530#define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */ 1443#define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */
1531#define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */ 1444#define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */
1532#define E1000_STATUS_LAN_INIT_DONE 0x00000200 /* Lan Init Completion 1445#define E1000_STATUS_LAN_INIT_DONE 0x00000200 /* Lan Init Completion
1533 by EEPROM/Flash */ 1446 by EEPROM/Flash */
1534#define E1000_STATUS_ASDV 0x00000300 /* Auto speed detect value */ 1447#define E1000_STATUS_ASDV 0x00000300 /* Auto speed detect value */
1535#define E1000_STATUS_DOCK_CI 0x00000800 /* Change in Dock/Undock state. Clear on write '0'. */ 1448#define E1000_STATUS_DOCK_CI 0x00000800 /* Change in Dock/Undock state. Clear on write '0'. */
1536#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 /* Status of Master requests. */ 1449#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 /* Status of Master requests. */
1537#define E1000_STATUS_MTXCKOK 0x00000400 /* MTX clock running OK */ 1450#define E1000_STATUS_MTXCKOK 0x00000400 /* MTX clock running OK */
1538#define E1000_STATUS_PCI66 0x00000800 /* In 66Mhz slot */ 1451#define E1000_STATUS_PCI66 0x00000800 /* In 66Mhz slot */
1539#define E1000_STATUS_BUS64 0x00001000 /* In 64 bit slot */ 1452#define E1000_STATUS_BUS64 0x00001000 /* In 64 bit slot */
1540#define E1000_STATUS_PCIX_MODE 0x00002000 /* PCI-X mode */ 1453#define E1000_STATUS_PCIX_MODE 0x00002000 /* PCI-X mode */
1541#define E1000_STATUS_PCIX_SPEED 0x0000C000 /* PCI-X bus speed */ 1454#define E1000_STATUS_PCIX_SPEED 0x0000C000 /* PCI-X bus speed */
1542#define E1000_STATUS_BMC_SKU_0 0x00100000 /* BMC USB redirect disabled */ 1455#define E1000_STATUS_BMC_SKU_0 0x00100000 /* BMC USB redirect disabled */
1543#define E1000_STATUS_BMC_SKU_1 0x00200000 /* BMC SRAM disabled */ 1456#define E1000_STATUS_BMC_SKU_1 0x00200000 /* BMC SRAM disabled */
1544#define E1000_STATUS_BMC_SKU_2 0x00400000 /* BMC SDRAM disabled */ 1457#define E1000_STATUS_BMC_SKU_2 0x00400000 /* BMC SDRAM disabled */
1545#define E1000_STATUS_BMC_CRYPTO 0x00800000 /* BMC crypto disabled */ 1458#define E1000_STATUS_BMC_CRYPTO 0x00800000 /* BMC crypto disabled */
1546#define E1000_STATUS_BMC_LITE 0x01000000 /* BMC external code execution disabled */ 1459#define E1000_STATUS_BMC_LITE 0x01000000 /* BMC external code execution disabled */
1547#define E1000_STATUS_RGMII_ENABLE 0x02000000 /* RGMII disabled */ 1460#define E1000_STATUS_RGMII_ENABLE 0x02000000 /* RGMII disabled */
1548#define E1000_STATUS_FUSE_8 0x04000000 1461#define E1000_STATUS_FUSE_8 0x04000000
1549#define E1000_STATUS_FUSE_9 0x08000000 1462#define E1000_STATUS_FUSE_9 0x08000000
1550#define E1000_STATUS_SERDES0_DIS 0x10000000 /* SERDES disabled on port 0 */ 1463#define E1000_STATUS_SERDES0_DIS 0x10000000 /* SERDES disabled on port 0 */
1551#define E1000_STATUS_SERDES1_DIS 0x20000000 /* SERDES disabled on port 1 */ 1464#define E1000_STATUS_SERDES1_DIS 0x20000000 /* SERDES disabled on port 1 */
1552 1465
1553/* Constants used to intrepret the masked PCI-X bus speed. */ 1466/* Constants used to interpret the masked PCI-X bus speed. */
1554#define E1000_STATUS_PCIX_SPEED_66 0x00000000 /* PCI-X bus speed 50-66 MHz */ 1467#define E1000_STATUS_PCIX_SPEED_66 0x00000000 /* PCI-X bus speed 50-66 MHz */
1555#define E1000_STATUS_PCIX_SPEED_100 0x00004000 /* PCI-X bus speed 66-100 MHz */ 1468#define E1000_STATUS_PCIX_SPEED_100 0x00004000 /* PCI-X bus speed 66-100 MHz */
1556#define E1000_STATUS_PCIX_SPEED_133 0x00008000 /* PCI-X bus speed 100-133 MHz */ 1469#define E1000_STATUS_PCIX_SPEED_133 0x00008000 /* PCI-X bus speed 100-133 MHz */
1557 1470
1558/* EEPROM/Flash Control */ 1471/* EEPROM/Flash Control */
1559#define E1000_EECD_SK 0x00000001 /* EEPROM Clock */ 1472#define E1000_EECD_SK 0x00000001 /* EEPROM Clock */
1560#define E1000_EECD_CS 0x00000002 /* EEPROM Chip Select */ 1473#define E1000_EECD_CS 0x00000002 /* EEPROM Chip Select */
1561#define E1000_EECD_DI 0x00000004 /* EEPROM Data In */ 1474#define E1000_EECD_DI 0x00000004 /* EEPROM Data In */
1562#define E1000_EECD_DO 0x00000008 /* EEPROM Data Out */ 1475#define E1000_EECD_DO 0x00000008 /* EEPROM Data Out */
1563#define E1000_EECD_FWE_MASK 0x00000030 1476#define E1000_EECD_FWE_MASK 0x00000030
1564#define E1000_EECD_FWE_DIS 0x00000010 /* Disable FLASH writes */ 1477#define E1000_EECD_FWE_DIS 0x00000010 /* Disable FLASH writes */
1565#define E1000_EECD_FWE_EN 0x00000020 /* Enable FLASH writes */ 1478#define E1000_EECD_FWE_EN 0x00000020 /* Enable FLASH writes */
1566#define E1000_EECD_FWE_SHIFT 4 1479#define E1000_EECD_FWE_SHIFT 4
1567#define E1000_EECD_REQ 0x00000040 /* EEPROM Access Request */ 1480#define E1000_EECD_REQ 0x00000040 /* EEPROM Access Request */
1568#define E1000_EECD_GNT 0x00000080 /* EEPROM Access Grant */ 1481#define E1000_EECD_GNT 0x00000080 /* EEPROM Access Grant */
1569#define E1000_EECD_PRES 0x00000100 /* EEPROM Present */ 1482#define E1000_EECD_PRES 0x00000100 /* EEPROM Present */
1570#define E1000_EECD_SIZE 0x00000200 /* EEPROM Size (0=64 word 1=256 word) */ 1483#define E1000_EECD_SIZE 0x00000200 /* EEPROM Size (0=64 word 1=256 word) */
1571#define E1000_EECD_ADDR_BITS 0x00000400 /* EEPROM Addressing bits based on type 1484#define E1000_EECD_ADDR_BITS 0x00000400 /* EEPROM Addressing bits based on type
1572 * (0-small, 1-large) */ 1485 * (0-small, 1-large) */
1573#define E1000_EECD_TYPE 0x00002000 /* EEPROM Type (1-SPI, 0-Microwire) */ 1486#define E1000_EECD_TYPE 0x00002000 /* EEPROM Type (1-SPI, 0-Microwire) */
1574#ifndef E1000_EEPROM_GRANT_ATTEMPTS 1487#ifndef E1000_EEPROM_GRANT_ATTEMPTS
1575#define E1000_EEPROM_GRANT_ATTEMPTS 1000 /* EEPROM # attempts to gain grant */ 1488#define E1000_EEPROM_GRANT_ATTEMPTS 1000 /* EEPROM # attempts to gain grant */
1576#endif 1489#endif
1577#define E1000_EECD_AUTO_RD 0x00000200 /* EEPROM Auto Read done */ 1490#define E1000_EECD_AUTO_RD 0x00000200 /* EEPROM Auto Read done */
1578#define E1000_EECD_SIZE_EX_MASK 0x00007800 /* EEprom Size */ 1491#define E1000_EECD_SIZE_EX_MASK 0x00007800 /* EEprom Size */
1579#define E1000_EECD_SIZE_EX_SHIFT 11 1492#define E1000_EECD_SIZE_EX_SHIFT 11
1580#define E1000_EECD_NVADDS 0x00018000 /* NVM Address Size */ 1493#define E1000_EECD_NVADDS 0x00018000 /* NVM Address Size */
1581#define E1000_EECD_SELSHAD 0x00020000 /* Select Shadow RAM */ 1494#define E1000_EECD_SELSHAD 0x00020000 /* Select Shadow RAM */
1582#define E1000_EECD_INITSRAM 0x00040000 /* Initialize Shadow RAM */ 1495#define E1000_EECD_INITSRAM 0x00040000 /* Initialize Shadow RAM */
1583#define E1000_EECD_FLUPD 0x00080000 /* Update FLASH */ 1496#define E1000_EECD_FLUPD 0x00080000 /* Update FLASH */
1584#define E1000_EECD_AUPDEN 0x00100000 /* Enable Autonomous FLASH update */ 1497#define E1000_EECD_AUPDEN 0x00100000 /* Enable Autonomous FLASH update */
1585#define E1000_EECD_SHADV 0x00200000 /* Shadow RAM Data Valid */ 1498#define E1000_EECD_SHADV 0x00200000 /* Shadow RAM Data Valid */
1586#define E1000_EECD_SEC1VAL 0x00400000 /* Sector One Valid */ 1499#define E1000_EECD_SEC1VAL 0x00400000 /* Sector One Valid */
1587#define E1000_EECD_SECVAL_SHIFT 22 1500#define E1000_EECD_SECVAL_SHIFT 22
1588#define E1000_STM_OPCODE 0xDB00 1501#define E1000_STM_OPCODE 0xDB00
1589#define E1000_HICR_FW_RESET 0xC0 1502#define E1000_HICR_FW_RESET 0xC0
@@ -1593,12 +1506,12 @@ struct e1000_hw {
1593#define E1000_ICH_NVM_SIG_MASK 0xC0 1506#define E1000_ICH_NVM_SIG_MASK 0xC0
1594 1507
1595/* EEPROM Read */ 1508/* EEPROM Read */
1596#define E1000_EERD_START 0x00000001 /* Start Read */ 1509#define E1000_EERD_START 0x00000001 /* Start Read */
1597#define E1000_EERD_DONE 0x00000010 /* Read Done */ 1510#define E1000_EERD_DONE 0x00000010 /* Read Done */
1598#define E1000_EERD_ADDR_SHIFT 8 1511#define E1000_EERD_ADDR_SHIFT 8
1599#define E1000_EERD_ADDR_MASK 0x0000FF00 /* Read Address */ 1512#define E1000_EERD_ADDR_MASK 0x0000FF00 /* Read Address */
1600#define E1000_EERD_DATA_SHIFT 16 1513#define E1000_EERD_DATA_SHIFT 16
1601#define E1000_EERD_DATA_MASK 0xFFFF0000 /* Read Data */ 1514#define E1000_EERD_DATA_MASK 0xFFFF0000 /* Read Data */
1602 1515
1603/* SPI EEPROM Status Register */ 1516/* SPI EEPROM Status Register */
1604#define EEPROM_STATUS_RDY_SPI 0x01 1517#define EEPROM_STATUS_RDY_SPI 0x01
@@ -1608,25 +1521,25 @@ struct e1000_hw {
1608#define EEPROM_STATUS_WPEN_SPI 0x80 1521#define EEPROM_STATUS_WPEN_SPI 0x80
1609 1522
1610/* Extended Device Control */ 1523/* Extended Device Control */
1611#define E1000_CTRL_EXT_GPI0_EN 0x00000001 /* Maps SDP4 to GPI0 */ 1524#define E1000_CTRL_EXT_GPI0_EN 0x00000001 /* Maps SDP4 to GPI0 */
1612#define E1000_CTRL_EXT_GPI1_EN 0x00000002 /* Maps SDP5 to GPI1 */ 1525#define E1000_CTRL_EXT_GPI1_EN 0x00000002 /* Maps SDP5 to GPI1 */
1613#define E1000_CTRL_EXT_PHYINT_EN E1000_CTRL_EXT_GPI1_EN 1526#define E1000_CTRL_EXT_PHYINT_EN E1000_CTRL_EXT_GPI1_EN
1614#define E1000_CTRL_EXT_GPI2_EN 0x00000004 /* Maps SDP6 to GPI2 */ 1527#define E1000_CTRL_EXT_GPI2_EN 0x00000004 /* Maps SDP6 to GPI2 */
1615#define E1000_CTRL_EXT_GPI3_EN 0x00000008 /* Maps SDP7 to GPI3 */ 1528#define E1000_CTRL_EXT_GPI3_EN 0x00000008 /* Maps SDP7 to GPI3 */
1616#define E1000_CTRL_EXT_SDP4_DATA 0x00000010 /* Value of SW Defineable Pin 4 */ 1529#define E1000_CTRL_EXT_SDP4_DATA 0x00000010 /* Value of SW Defineable Pin 4 */
1617#define E1000_CTRL_EXT_SDP5_DATA 0x00000020 /* Value of SW Defineable Pin 5 */ 1530#define E1000_CTRL_EXT_SDP5_DATA 0x00000020 /* Value of SW Defineable Pin 5 */
1618#define E1000_CTRL_EXT_PHY_INT E1000_CTRL_EXT_SDP5_DATA 1531#define E1000_CTRL_EXT_PHY_INT E1000_CTRL_EXT_SDP5_DATA
1619#define E1000_CTRL_EXT_SDP6_DATA 0x00000040 /* Value of SW Defineable Pin 6 */ 1532#define E1000_CTRL_EXT_SDP6_DATA 0x00000040 /* Value of SW Defineable Pin 6 */
1620#define E1000_CTRL_EXT_SDP7_DATA 0x00000080 /* Value of SW Defineable Pin 7 */ 1533#define E1000_CTRL_EXT_SDP7_DATA 0x00000080 /* Value of SW Defineable Pin 7 */
1621#define E1000_CTRL_EXT_SDP4_DIR 0x00000100 /* Direction of SDP4 0=in 1=out */ 1534#define E1000_CTRL_EXT_SDP4_DIR 0x00000100 /* Direction of SDP4 0=in 1=out */
1622#define E1000_CTRL_EXT_SDP5_DIR 0x00000200 /* Direction of SDP5 0=in 1=out */ 1535#define E1000_CTRL_EXT_SDP5_DIR 0x00000200 /* Direction of SDP5 0=in 1=out */
1623#define E1000_CTRL_EXT_SDP6_DIR 0x00000400 /* Direction of SDP6 0=in 1=out */ 1536#define E1000_CTRL_EXT_SDP6_DIR 0x00000400 /* Direction of SDP6 0=in 1=out */
1624#define E1000_CTRL_EXT_SDP7_DIR 0x00000800 /* Direction of SDP7 0=in 1=out */ 1537#define E1000_CTRL_EXT_SDP7_DIR 0x00000800 /* Direction of SDP7 0=in 1=out */
1625#define E1000_CTRL_EXT_ASDCHK 0x00001000 /* Initiate an ASD sequence */ 1538#define E1000_CTRL_EXT_ASDCHK 0x00001000 /* Initiate an ASD sequence */
1626#define E1000_CTRL_EXT_EE_RST 0x00002000 /* Reinitialize from EEPROM */ 1539#define E1000_CTRL_EXT_EE_RST 0x00002000 /* Reinitialize from EEPROM */
1627#define E1000_CTRL_EXT_IPS 0x00004000 /* Invert Power State */ 1540#define E1000_CTRL_EXT_IPS 0x00004000 /* Invert Power State */
1628#define E1000_CTRL_EXT_SPD_BYPS 0x00008000 /* Speed Select Bypass */ 1541#define E1000_CTRL_EXT_SPD_BYPS 0x00008000 /* Speed Select Bypass */
1629#define E1000_CTRL_EXT_RO_DIS 0x00020000 /* Relaxed Ordering disable */ 1542#define E1000_CTRL_EXT_RO_DIS 0x00020000 /* Relaxed Ordering disable */
1630#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000 1543#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000
1631#define E1000_CTRL_EXT_LINK_MODE_GMII 0x00000000 1544#define E1000_CTRL_EXT_LINK_MODE_GMII 0x00000000
1632#define E1000_CTRL_EXT_LINK_MODE_TBI 0x00C00000 1545#define E1000_CTRL_EXT_LINK_MODE_TBI 0x00C00000
@@ -1638,11 +1551,11 @@ struct e1000_hw {
1638#define E1000_CTRL_EXT_WR_WMARK_320 0x01000000 1551#define E1000_CTRL_EXT_WR_WMARK_320 0x01000000
1639#define E1000_CTRL_EXT_WR_WMARK_384 0x02000000 1552#define E1000_CTRL_EXT_WR_WMARK_384 0x02000000
1640#define E1000_CTRL_EXT_WR_WMARK_448 0x03000000 1553#define E1000_CTRL_EXT_WR_WMARK_448 0x03000000
1641#define E1000_CTRL_EXT_DRV_LOAD 0x10000000 /* Driver loaded bit for FW */ 1554#define E1000_CTRL_EXT_DRV_LOAD 0x10000000 /* Driver loaded bit for FW */
1642#define E1000_CTRL_EXT_IAME 0x08000000 /* Interrupt acknowledge Auto-mask */ 1555#define E1000_CTRL_EXT_IAME 0x08000000 /* Interrupt acknowledge Auto-mask */
1643#define E1000_CTRL_EXT_INT_TIMER_CLR 0x20000000 /* Clear Interrupt timers after IMS clear */ 1556#define E1000_CTRL_EXT_INT_TIMER_CLR 0x20000000 /* Clear Interrupt timers after IMS clear */
1644#define E1000_CRTL_EXT_PB_PAREN 0x01000000 /* packet buffer parity error detection enabled */ 1557#define E1000_CRTL_EXT_PB_PAREN 0x01000000 /* packet buffer parity error detection enabled */
1645#define E1000_CTRL_EXT_DF_PAREN 0x02000000 /* descriptor FIFO parity error detection enable */ 1558#define E1000_CTRL_EXT_DF_PAREN 0x02000000 /* descriptor FIFO parity error detection enable */
1646#define E1000_CTRL_EXT_GHOST_PAREN 0x40000000 1559#define E1000_CTRL_EXT_GHOST_PAREN 0x40000000
1647 1560
1648/* MDI Control */ 1561/* MDI Control */
@@ -1742,167 +1655,167 @@ struct e1000_hw {
1742#define E1000_LEDCTL_MODE_LED_OFF 0xF 1655#define E1000_LEDCTL_MODE_LED_OFF 0xF
1743 1656
1744/* Receive Address */ 1657/* Receive Address */
1745#define E1000_RAH_AV 0x80000000 /* Receive descriptor valid */ 1658#define E1000_RAH_AV 0x80000000 /* Receive descriptor valid */
1746 1659
1747/* Interrupt Cause Read */ 1660/* Interrupt Cause Read */
1748#define E1000_ICR_TXDW 0x00000001 /* Transmit desc written back */ 1661#define E1000_ICR_TXDW 0x00000001 /* Transmit desc written back */
1749#define E1000_ICR_TXQE 0x00000002 /* Transmit Queue empty */ 1662#define E1000_ICR_TXQE 0x00000002 /* Transmit Queue empty */
1750#define E1000_ICR_LSC 0x00000004 /* Link Status Change */ 1663#define E1000_ICR_LSC 0x00000004 /* Link Status Change */
1751#define E1000_ICR_RXSEQ 0x00000008 /* rx sequence error */ 1664#define E1000_ICR_RXSEQ 0x00000008 /* rx sequence error */
1752#define E1000_ICR_RXDMT0 0x00000010 /* rx desc min. threshold (0) */ 1665#define E1000_ICR_RXDMT0 0x00000010 /* rx desc min. threshold (0) */
1753#define E1000_ICR_RXO 0x00000040 /* rx overrun */ 1666#define E1000_ICR_RXO 0x00000040 /* rx overrun */
1754#define E1000_ICR_RXT0 0x00000080 /* rx timer intr (ring 0) */ 1667#define E1000_ICR_RXT0 0x00000080 /* rx timer intr (ring 0) */
1755#define E1000_ICR_MDAC 0x00000200 /* MDIO access complete */ 1668#define E1000_ICR_MDAC 0x00000200 /* MDIO access complete */
1756#define E1000_ICR_RXCFG 0x00000400 /* RX /c/ ordered set */ 1669#define E1000_ICR_RXCFG 0x00000400 /* RX /c/ ordered set */
1757#define E1000_ICR_GPI_EN0 0x00000800 /* GP Int 0 */ 1670#define E1000_ICR_GPI_EN0 0x00000800 /* GP Int 0 */
1758#define E1000_ICR_GPI_EN1 0x00001000 /* GP Int 1 */ 1671#define E1000_ICR_GPI_EN1 0x00001000 /* GP Int 1 */
1759#define E1000_ICR_GPI_EN2 0x00002000 /* GP Int 2 */ 1672#define E1000_ICR_GPI_EN2 0x00002000 /* GP Int 2 */
1760#define E1000_ICR_GPI_EN3 0x00004000 /* GP Int 3 */ 1673#define E1000_ICR_GPI_EN3 0x00004000 /* GP Int 3 */
1761#define E1000_ICR_TXD_LOW 0x00008000 1674#define E1000_ICR_TXD_LOW 0x00008000
1762#define E1000_ICR_SRPD 0x00010000 1675#define E1000_ICR_SRPD 0x00010000
1763#define E1000_ICR_ACK 0x00020000 /* Receive Ack frame */ 1676#define E1000_ICR_ACK 0x00020000 /* Receive Ack frame */
1764#define E1000_ICR_MNG 0x00040000 /* Manageability event */ 1677#define E1000_ICR_MNG 0x00040000 /* Manageability event */
1765#define E1000_ICR_DOCK 0x00080000 /* Dock/Undock */ 1678#define E1000_ICR_DOCK 0x00080000 /* Dock/Undock */
1766#define E1000_ICR_INT_ASSERTED 0x80000000 /* If this bit asserted, the driver should claim the interrupt */ 1679#define E1000_ICR_INT_ASSERTED 0x80000000 /* If this bit asserted, the driver should claim the interrupt */
1767#define E1000_ICR_RXD_FIFO_PAR0 0x00100000 /* queue 0 Rx descriptor FIFO parity error */ 1680#define E1000_ICR_RXD_FIFO_PAR0 0x00100000 /* queue 0 Rx descriptor FIFO parity error */
1768#define E1000_ICR_TXD_FIFO_PAR0 0x00200000 /* queue 0 Tx descriptor FIFO parity error */ 1681#define E1000_ICR_TXD_FIFO_PAR0 0x00200000 /* queue 0 Tx descriptor FIFO parity error */
1769#define E1000_ICR_HOST_ARB_PAR 0x00400000 /* host arb read buffer parity error */ 1682#define E1000_ICR_HOST_ARB_PAR 0x00400000 /* host arb read buffer parity error */
1770#define E1000_ICR_PB_PAR 0x00800000 /* packet buffer parity error */ 1683#define E1000_ICR_PB_PAR 0x00800000 /* packet buffer parity error */
1771#define E1000_ICR_RXD_FIFO_PAR1 0x01000000 /* queue 1 Rx descriptor FIFO parity error */ 1684#define E1000_ICR_RXD_FIFO_PAR1 0x01000000 /* queue 1 Rx descriptor FIFO parity error */
1772#define E1000_ICR_TXD_FIFO_PAR1 0x02000000 /* queue 1 Tx descriptor FIFO parity error */ 1685#define E1000_ICR_TXD_FIFO_PAR1 0x02000000 /* queue 1 Tx descriptor FIFO parity error */
1773#define E1000_ICR_ALL_PARITY 0x03F00000 /* all parity error bits */ 1686#define E1000_ICR_ALL_PARITY 0x03F00000 /* all parity error bits */
1774#define E1000_ICR_DSW 0x00000020 /* FW changed the status of DISSW bit in the FWSM */ 1687#define E1000_ICR_DSW 0x00000020 /* FW changed the status of DISSW bit in the FWSM */
1775#define E1000_ICR_PHYINT 0x00001000 /* LAN connected device generates an interrupt */ 1688#define E1000_ICR_PHYINT 0x00001000 /* LAN connected device generates an interrupt */
1776#define E1000_ICR_EPRST 0x00100000 /* ME handware reset occurs */ 1689#define E1000_ICR_EPRST 0x00100000 /* ME hardware reset occurs */
1777 1690
1778/* Interrupt Cause Set */ 1691/* Interrupt Cause Set */
1779#define E1000_ICS_TXDW E1000_ICR_TXDW /* Transmit desc written back */ 1692#define E1000_ICS_TXDW E1000_ICR_TXDW /* Transmit desc written back */
1780#define E1000_ICS_TXQE E1000_ICR_TXQE /* Transmit Queue empty */ 1693#define E1000_ICS_TXQE E1000_ICR_TXQE /* Transmit Queue empty */
1781#define E1000_ICS_LSC E1000_ICR_LSC /* Link Status Change */ 1694#define E1000_ICS_LSC E1000_ICR_LSC /* Link Status Change */
1782#define E1000_ICS_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */ 1695#define E1000_ICS_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */
1783#define E1000_ICS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. threshold */ 1696#define E1000_ICS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. threshold */
1784#define E1000_ICS_RXO E1000_ICR_RXO /* rx overrun */ 1697#define E1000_ICS_RXO E1000_ICR_RXO /* rx overrun */
1785#define E1000_ICS_RXT0 E1000_ICR_RXT0 /* rx timer intr */ 1698#define E1000_ICS_RXT0 E1000_ICR_RXT0 /* rx timer intr */
1786#define E1000_ICS_MDAC E1000_ICR_MDAC /* MDIO access complete */ 1699#define E1000_ICS_MDAC E1000_ICR_MDAC /* MDIO access complete */
1787#define E1000_ICS_RXCFG E1000_ICR_RXCFG /* RX /c/ ordered set */ 1700#define E1000_ICS_RXCFG E1000_ICR_RXCFG /* RX /c/ ordered set */
1788#define E1000_ICS_GPI_EN0 E1000_ICR_GPI_EN0 /* GP Int 0 */ 1701#define E1000_ICS_GPI_EN0 E1000_ICR_GPI_EN0 /* GP Int 0 */
1789#define E1000_ICS_GPI_EN1 E1000_ICR_GPI_EN1 /* GP Int 1 */ 1702#define E1000_ICS_GPI_EN1 E1000_ICR_GPI_EN1 /* GP Int 1 */
1790#define E1000_ICS_GPI_EN2 E1000_ICR_GPI_EN2 /* GP Int 2 */ 1703#define E1000_ICS_GPI_EN2 E1000_ICR_GPI_EN2 /* GP Int 2 */
1791#define E1000_ICS_GPI_EN3 E1000_ICR_GPI_EN3 /* GP Int 3 */ 1704#define E1000_ICS_GPI_EN3 E1000_ICR_GPI_EN3 /* GP Int 3 */
1792#define E1000_ICS_TXD_LOW E1000_ICR_TXD_LOW 1705#define E1000_ICS_TXD_LOW E1000_ICR_TXD_LOW
1793#define E1000_ICS_SRPD E1000_ICR_SRPD 1706#define E1000_ICS_SRPD E1000_ICR_SRPD
1794#define E1000_ICS_ACK E1000_ICR_ACK /* Receive Ack frame */ 1707#define E1000_ICS_ACK E1000_ICR_ACK /* Receive Ack frame */
1795#define E1000_ICS_MNG E1000_ICR_MNG /* Manageability event */ 1708#define E1000_ICS_MNG E1000_ICR_MNG /* Manageability event */
1796#define E1000_ICS_DOCK E1000_ICR_DOCK /* Dock/Undock */ 1709#define E1000_ICS_DOCK E1000_ICR_DOCK /* Dock/Undock */
1797#define E1000_ICS_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0 /* queue 0 Rx descriptor FIFO parity error */ 1710#define E1000_ICS_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0 /* queue 0 Rx descriptor FIFO parity error */
1798#define E1000_ICS_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0 /* queue 0 Tx descriptor FIFO parity error */ 1711#define E1000_ICS_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0 /* queue 0 Tx descriptor FIFO parity error */
1799#define E1000_ICS_HOST_ARB_PAR E1000_ICR_HOST_ARB_PAR /* host arb read buffer parity error */ 1712#define E1000_ICS_HOST_ARB_PAR E1000_ICR_HOST_ARB_PAR /* host arb read buffer parity error */
1800#define E1000_ICS_PB_PAR E1000_ICR_PB_PAR /* packet buffer parity error */ 1713#define E1000_ICS_PB_PAR E1000_ICR_PB_PAR /* packet buffer parity error */
1801#define E1000_ICS_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */ 1714#define E1000_ICS_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */
1802#define E1000_ICS_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */ 1715#define E1000_ICS_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */
1803#define E1000_ICS_DSW E1000_ICR_DSW 1716#define E1000_ICS_DSW E1000_ICR_DSW
1804#define E1000_ICS_PHYINT E1000_ICR_PHYINT 1717#define E1000_ICS_PHYINT E1000_ICR_PHYINT
1805#define E1000_ICS_EPRST E1000_ICR_EPRST 1718#define E1000_ICS_EPRST E1000_ICR_EPRST
1806 1719
1807/* Interrupt Mask Set */ 1720/* Interrupt Mask Set */
1808#define E1000_IMS_TXDW E1000_ICR_TXDW /* Transmit desc written back */ 1721#define E1000_IMS_TXDW E1000_ICR_TXDW /* Transmit desc written back */
1809#define E1000_IMS_TXQE E1000_ICR_TXQE /* Transmit Queue empty */ 1722#define E1000_IMS_TXQE E1000_ICR_TXQE /* Transmit Queue empty */
1810#define E1000_IMS_LSC E1000_ICR_LSC /* Link Status Change */ 1723#define E1000_IMS_LSC E1000_ICR_LSC /* Link Status Change */
1811#define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */ 1724#define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */
1812#define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. threshold */ 1725#define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. threshold */
1813#define E1000_IMS_RXO E1000_ICR_RXO /* rx overrun */ 1726#define E1000_IMS_RXO E1000_ICR_RXO /* rx overrun */
1814#define E1000_IMS_RXT0 E1000_ICR_RXT0 /* rx timer intr */ 1727#define E1000_IMS_RXT0 E1000_ICR_RXT0 /* rx timer intr */
1815#define E1000_IMS_MDAC E1000_ICR_MDAC /* MDIO access complete */ 1728#define E1000_IMS_MDAC E1000_ICR_MDAC /* MDIO access complete */
1816#define E1000_IMS_RXCFG E1000_ICR_RXCFG /* RX /c/ ordered set */ 1729#define E1000_IMS_RXCFG E1000_ICR_RXCFG /* RX /c/ ordered set */
1817#define E1000_IMS_GPI_EN0 E1000_ICR_GPI_EN0 /* GP Int 0 */ 1730#define E1000_IMS_GPI_EN0 E1000_ICR_GPI_EN0 /* GP Int 0 */
1818#define E1000_IMS_GPI_EN1 E1000_ICR_GPI_EN1 /* GP Int 1 */ 1731#define E1000_IMS_GPI_EN1 E1000_ICR_GPI_EN1 /* GP Int 1 */
1819#define E1000_IMS_GPI_EN2 E1000_ICR_GPI_EN2 /* GP Int 2 */ 1732#define E1000_IMS_GPI_EN2 E1000_ICR_GPI_EN2 /* GP Int 2 */
1820#define E1000_IMS_GPI_EN3 E1000_ICR_GPI_EN3 /* GP Int 3 */ 1733#define E1000_IMS_GPI_EN3 E1000_ICR_GPI_EN3 /* GP Int 3 */
1821#define E1000_IMS_TXD_LOW E1000_ICR_TXD_LOW 1734#define E1000_IMS_TXD_LOW E1000_ICR_TXD_LOW
1822#define E1000_IMS_SRPD E1000_ICR_SRPD 1735#define E1000_IMS_SRPD E1000_ICR_SRPD
1823#define E1000_IMS_ACK E1000_ICR_ACK /* Receive Ack frame */ 1736#define E1000_IMS_ACK E1000_ICR_ACK /* Receive Ack frame */
1824#define E1000_IMS_MNG E1000_ICR_MNG /* Manageability event */ 1737#define E1000_IMS_MNG E1000_ICR_MNG /* Manageability event */
1825#define E1000_IMS_DOCK E1000_ICR_DOCK /* Dock/Undock */ 1738#define E1000_IMS_DOCK E1000_ICR_DOCK /* Dock/Undock */
1826#define E1000_IMS_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0 /* queue 0 Rx descriptor FIFO parity error */ 1739#define E1000_IMS_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0 /* queue 0 Rx descriptor FIFO parity error */
1827#define E1000_IMS_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0 /* queue 0 Tx descriptor FIFO parity error */ 1740#define E1000_IMS_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0 /* queue 0 Tx descriptor FIFO parity error */
1828#define E1000_IMS_HOST_ARB_PAR E1000_ICR_HOST_ARB_PAR /* host arb read buffer parity error */ 1741#define E1000_IMS_HOST_ARB_PAR E1000_ICR_HOST_ARB_PAR /* host arb read buffer parity error */
1829#define E1000_IMS_PB_PAR E1000_ICR_PB_PAR /* packet buffer parity error */ 1742#define E1000_IMS_PB_PAR E1000_ICR_PB_PAR /* packet buffer parity error */
1830#define E1000_IMS_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */ 1743#define E1000_IMS_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */
1831#define E1000_IMS_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */ 1744#define E1000_IMS_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */
1832#define E1000_IMS_DSW E1000_ICR_DSW 1745#define E1000_IMS_DSW E1000_ICR_DSW
1833#define E1000_IMS_PHYINT E1000_ICR_PHYINT 1746#define E1000_IMS_PHYINT E1000_ICR_PHYINT
1834#define E1000_IMS_EPRST E1000_ICR_EPRST 1747#define E1000_IMS_EPRST E1000_ICR_EPRST
1835 1748
1836/* Interrupt Mask Clear */ 1749/* Interrupt Mask Clear */
1837#define E1000_IMC_TXDW E1000_ICR_TXDW /* Transmit desc written back */ 1750#define E1000_IMC_TXDW E1000_ICR_TXDW /* Transmit desc written back */
1838#define E1000_IMC_TXQE E1000_ICR_TXQE /* Transmit Queue empty */ 1751#define E1000_IMC_TXQE E1000_ICR_TXQE /* Transmit Queue empty */
1839#define E1000_IMC_LSC E1000_ICR_LSC /* Link Status Change */ 1752#define E1000_IMC_LSC E1000_ICR_LSC /* Link Status Change */
1840#define E1000_IMC_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */ 1753#define E1000_IMC_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */
1841#define E1000_IMC_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. threshold */ 1754#define E1000_IMC_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. threshold */
1842#define E1000_IMC_RXO E1000_ICR_RXO /* rx overrun */ 1755#define E1000_IMC_RXO E1000_ICR_RXO /* rx overrun */
1843#define E1000_IMC_RXT0 E1000_ICR_RXT0 /* rx timer intr */ 1756#define E1000_IMC_RXT0 E1000_ICR_RXT0 /* rx timer intr */
1844#define E1000_IMC_MDAC E1000_ICR_MDAC /* MDIO access complete */ 1757#define E1000_IMC_MDAC E1000_ICR_MDAC /* MDIO access complete */
1845#define E1000_IMC_RXCFG E1000_ICR_RXCFG /* RX /c/ ordered set */ 1758#define E1000_IMC_RXCFG E1000_ICR_RXCFG /* RX /c/ ordered set */
1846#define E1000_IMC_GPI_EN0 E1000_ICR_GPI_EN0 /* GP Int 0 */ 1759#define E1000_IMC_GPI_EN0 E1000_ICR_GPI_EN0 /* GP Int 0 */
1847#define E1000_IMC_GPI_EN1 E1000_ICR_GPI_EN1 /* GP Int 1 */ 1760#define E1000_IMC_GPI_EN1 E1000_ICR_GPI_EN1 /* GP Int 1 */
1848#define E1000_IMC_GPI_EN2 E1000_ICR_GPI_EN2 /* GP Int 2 */ 1761#define E1000_IMC_GPI_EN2 E1000_ICR_GPI_EN2 /* GP Int 2 */
1849#define E1000_IMC_GPI_EN3 E1000_ICR_GPI_EN3 /* GP Int 3 */ 1762#define E1000_IMC_GPI_EN3 E1000_ICR_GPI_EN3 /* GP Int 3 */
1850#define E1000_IMC_TXD_LOW E1000_ICR_TXD_LOW 1763#define E1000_IMC_TXD_LOW E1000_ICR_TXD_LOW
1851#define E1000_IMC_SRPD E1000_ICR_SRPD 1764#define E1000_IMC_SRPD E1000_ICR_SRPD
1852#define E1000_IMC_ACK E1000_ICR_ACK /* Receive Ack frame */ 1765#define E1000_IMC_ACK E1000_ICR_ACK /* Receive Ack frame */
1853#define E1000_IMC_MNG E1000_ICR_MNG /* Manageability event */ 1766#define E1000_IMC_MNG E1000_ICR_MNG /* Manageability event */
1854#define E1000_IMC_DOCK E1000_ICR_DOCK /* Dock/Undock */ 1767#define E1000_IMC_DOCK E1000_ICR_DOCK /* Dock/Undock */
1855#define E1000_IMC_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0 /* queue 0 Rx descriptor FIFO parity error */ 1768#define E1000_IMC_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0 /* queue 0 Rx descriptor FIFO parity error */
1856#define E1000_IMC_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0 /* queue 0 Tx descriptor FIFO parity error */ 1769#define E1000_IMC_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0 /* queue 0 Tx descriptor FIFO parity error */
1857#define E1000_IMC_HOST_ARB_PAR E1000_ICR_HOST_ARB_PAR /* host arb read buffer parity error */ 1770#define E1000_IMC_HOST_ARB_PAR E1000_ICR_HOST_ARB_PAR /* host arb read buffer parity error */
1858#define E1000_IMC_PB_PAR E1000_ICR_PB_PAR /* packet buffer parity error */ 1771#define E1000_IMC_PB_PAR E1000_ICR_PB_PAR /* packet buffer parity error */
1859#define E1000_IMC_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */ 1772#define E1000_IMC_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */
1860#define E1000_IMC_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */ 1773#define E1000_IMC_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */
1861#define E1000_IMC_DSW E1000_ICR_DSW 1774#define E1000_IMC_DSW E1000_ICR_DSW
1862#define E1000_IMC_PHYINT E1000_ICR_PHYINT 1775#define E1000_IMC_PHYINT E1000_ICR_PHYINT
1863#define E1000_IMC_EPRST E1000_ICR_EPRST 1776#define E1000_IMC_EPRST E1000_ICR_EPRST
1864 1777
1865/* Receive Control */ 1778/* Receive Control */
1866#define E1000_RCTL_RST 0x00000001 /* Software reset */ 1779#define E1000_RCTL_RST 0x00000001 /* Software reset */
1867#define E1000_RCTL_EN 0x00000002 /* enable */ 1780#define E1000_RCTL_EN 0x00000002 /* enable */
1868#define E1000_RCTL_SBP 0x00000004 /* store bad packet */ 1781#define E1000_RCTL_SBP 0x00000004 /* store bad packet */
1869#define E1000_RCTL_UPE 0x00000008 /* unicast promiscuous enable */ 1782#define E1000_RCTL_UPE 0x00000008 /* unicast promiscuous enable */
1870#define E1000_RCTL_MPE 0x00000010 /* multicast promiscuous enab */ 1783#define E1000_RCTL_MPE 0x00000010 /* multicast promiscuous enab */
1871#define E1000_RCTL_LPE 0x00000020 /* long packet enable */ 1784#define E1000_RCTL_LPE 0x00000020 /* long packet enable */
1872#define E1000_RCTL_LBM_NO 0x00000000 /* no loopback mode */ 1785#define E1000_RCTL_LBM_NO 0x00000000 /* no loopback mode */
1873#define E1000_RCTL_LBM_MAC 0x00000040 /* MAC loopback mode */ 1786#define E1000_RCTL_LBM_MAC 0x00000040 /* MAC loopback mode */
1874#define E1000_RCTL_LBM_SLP 0x00000080 /* serial link loopback mode */ 1787#define E1000_RCTL_LBM_SLP 0x00000080 /* serial link loopback mode */
1875#define E1000_RCTL_LBM_TCVR 0x000000C0 /* tcvr loopback mode */ 1788#define E1000_RCTL_LBM_TCVR 0x000000C0 /* tcvr loopback mode */
1876#define E1000_RCTL_DTYP_MASK 0x00000C00 /* Descriptor type mask */ 1789#define E1000_RCTL_DTYP_MASK 0x00000C00 /* Descriptor type mask */
1877#define E1000_RCTL_DTYP_PS 0x00000400 /* Packet Split descriptor */ 1790#define E1000_RCTL_DTYP_PS 0x00000400 /* Packet Split descriptor */
1878#define E1000_RCTL_RDMTS_HALF 0x00000000 /* rx desc min threshold size */ 1791#define E1000_RCTL_RDMTS_HALF 0x00000000 /* rx desc min threshold size */
1879#define E1000_RCTL_RDMTS_QUAT 0x00000100 /* rx desc min threshold size */ 1792#define E1000_RCTL_RDMTS_QUAT 0x00000100 /* rx desc min threshold size */
1880#define E1000_RCTL_RDMTS_EIGTH 0x00000200 /* rx desc min threshold size */ 1793#define E1000_RCTL_RDMTS_EIGTH 0x00000200 /* rx desc min threshold size */
1881#define E1000_RCTL_MO_SHIFT 12 /* multicast offset shift */ 1794#define E1000_RCTL_MO_SHIFT 12 /* multicast offset shift */
1882#define E1000_RCTL_MO_0 0x00000000 /* multicast offset 11:0 */ 1795#define E1000_RCTL_MO_0 0x00000000 /* multicast offset 11:0 */
1883#define E1000_RCTL_MO_1 0x00001000 /* multicast offset 12:1 */ 1796#define E1000_RCTL_MO_1 0x00001000 /* multicast offset 12:1 */
1884#define E1000_RCTL_MO_2 0x00002000 /* multicast offset 13:2 */ 1797#define E1000_RCTL_MO_2 0x00002000 /* multicast offset 13:2 */
1885#define E1000_RCTL_MO_3 0x00003000 /* multicast offset 15:4 */ 1798#define E1000_RCTL_MO_3 0x00003000 /* multicast offset 15:4 */
1886#define E1000_RCTL_MDR 0x00004000 /* multicast desc ring 0 */ 1799#define E1000_RCTL_MDR 0x00004000 /* multicast desc ring 0 */
1887#define E1000_RCTL_BAM 0x00008000 /* broadcast enable */ 1800#define E1000_RCTL_BAM 0x00008000 /* broadcast enable */
1888/* these buffer sizes are valid if E1000_RCTL_BSEX is 0 */ 1801/* these buffer sizes are valid if E1000_RCTL_BSEX is 0 */
1889#define E1000_RCTL_SZ_2048 0x00000000 /* rx buffer size 2048 */ 1802#define E1000_RCTL_SZ_2048 0x00000000 /* rx buffer size 2048 */
1890#define E1000_RCTL_SZ_1024 0x00010000 /* rx buffer size 1024 */ 1803#define E1000_RCTL_SZ_1024 0x00010000 /* rx buffer size 1024 */
1891#define E1000_RCTL_SZ_512 0x00020000 /* rx buffer size 512 */ 1804#define E1000_RCTL_SZ_512 0x00020000 /* rx buffer size 512 */
1892#define E1000_RCTL_SZ_256 0x00030000 /* rx buffer size 256 */ 1805#define E1000_RCTL_SZ_256 0x00030000 /* rx buffer size 256 */
1893/* these buffer sizes are valid if E1000_RCTL_BSEX is 1 */ 1806/* these buffer sizes are valid if E1000_RCTL_BSEX is 1 */
1894#define E1000_RCTL_SZ_16384 0x00010000 /* rx buffer size 16384 */ 1807#define E1000_RCTL_SZ_16384 0x00010000 /* rx buffer size 16384 */
1895#define E1000_RCTL_SZ_8192 0x00020000 /* rx buffer size 8192 */ 1808#define E1000_RCTL_SZ_8192 0x00020000 /* rx buffer size 8192 */
1896#define E1000_RCTL_SZ_4096 0x00030000 /* rx buffer size 4096 */ 1809#define E1000_RCTL_SZ_4096 0x00030000 /* rx buffer size 4096 */
1897#define E1000_RCTL_VFE 0x00040000 /* vlan filter enable */ 1810#define E1000_RCTL_VFE 0x00040000 /* vlan filter enable */
1898#define E1000_RCTL_CFIEN 0x00080000 /* canonical form enable */ 1811#define E1000_RCTL_CFIEN 0x00080000 /* canonical form enable */
1899#define E1000_RCTL_CFI 0x00100000 /* canonical form indicator */ 1812#define E1000_RCTL_CFI 0x00100000 /* canonical form indicator */
1900#define E1000_RCTL_DPF 0x00400000 /* discard pause frames */ 1813#define E1000_RCTL_DPF 0x00400000 /* discard pause frames */
1901#define E1000_RCTL_PMCF 0x00800000 /* pass MAC control frames */ 1814#define E1000_RCTL_PMCF 0x00800000 /* pass MAC control frames */
1902#define E1000_RCTL_BSEX 0x02000000 /* Buffer size extension */ 1815#define E1000_RCTL_BSEX 0x02000000 /* Buffer size extension */
1903#define E1000_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */ 1816#define E1000_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */
1904#define E1000_RCTL_FLXBUF_MASK 0x78000000 /* Flexible buffer size */ 1817#define E1000_RCTL_FLXBUF_MASK 0x78000000 /* Flexible buffer size */
1905#define E1000_RCTL_FLXBUF_SHIFT 27 /* Flexible buffer shift */ 1818#define E1000_RCTL_FLXBUF_SHIFT 27 /* Flexible buffer shift */
1906 1819
1907/* Use byte values for the following shift parameters 1820/* Use byte values for the following shift parameters
1908 * Usage: 1821 * Usage:
@@ -1925,10 +1838,10 @@ struct e1000_hw {
1925#define E1000_PSRCTL_BSIZE2_MASK 0x003F0000 1838#define E1000_PSRCTL_BSIZE2_MASK 0x003F0000
1926#define E1000_PSRCTL_BSIZE3_MASK 0x3F000000 1839#define E1000_PSRCTL_BSIZE3_MASK 0x3F000000
1927 1840
1928#define E1000_PSRCTL_BSIZE0_SHIFT 7 /* Shift _right_ 7 */ 1841#define E1000_PSRCTL_BSIZE0_SHIFT 7 /* Shift _right_ 7 */
1929#define E1000_PSRCTL_BSIZE1_SHIFT 2 /* Shift _right_ 2 */ 1842#define E1000_PSRCTL_BSIZE1_SHIFT 2 /* Shift _right_ 2 */
1930#define E1000_PSRCTL_BSIZE2_SHIFT 6 /* Shift _left_ 6 */ 1843#define E1000_PSRCTL_BSIZE2_SHIFT 6 /* Shift _left_ 6 */
1931#define E1000_PSRCTL_BSIZE3_SHIFT 14 /* Shift _left_ 14 */ 1844#define E1000_PSRCTL_BSIZE3_SHIFT 14 /* Shift _left_ 14 */
1932 1845
1933/* SW_W_SYNC definitions */ 1846/* SW_W_SYNC definitions */
1934#define E1000_SWFW_EEP_SM 0x0001 1847#define E1000_SWFW_EEP_SM 0x0001
@@ -1937,17 +1850,17 @@ struct e1000_hw {
1937#define E1000_SWFW_MAC_CSR_SM 0x0008 1850#define E1000_SWFW_MAC_CSR_SM 0x0008
1938 1851
1939/* Receive Descriptor */ 1852/* Receive Descriptor */
1940#define E1000_RDT_DELAY 0x0000ffff /* Delay timer (1=1024us) */ 1853#define E1000_RDT_DELAY 0x0000ffff /* Delay timer (1=1024us) */
1941#define E1000_RDT_FPDB 0x80000000 /* Flush descriptor block */ 1854#define E1000_RDT_FPDB 0x80000000 /* Flush descriptor block */
1942#define E1000_RDLEN_LEN 0x0007ff80 /* descriptor length */ 1855#define E1000_RDLEN_LEN 0x0007ff80 /* descriptor length */
1943#define E1000_RDH_RDH 0x0000ffff /* receive descriptor head */ 1856#define E1000_RDH_RDH 0x0000ffff /* receive descriptor head */
1944#define E1000_RDT_RDT 0x0000ffff /* receive descriptor tail */ 1857#define E1000_RDT_RDT 0x0000ffff /* receive descriptor tail */
1945 1858
1946/* Flow Control */ 1859/* Flow Control */
1947#define E1000_FCRTH_RTH 0x0000FFF8 /* Mask Bits[15:3] for RTH */ 1860#define E1000_FCRTH_RTH 0x0000FFF8 /* Mask Bits[15:3] for RTH */
1948#define E1000_FCRTH_XFCE 0x80000000 /* External Flow Control Enable */ 1861#define E1000_FCRTH_XFCE 0x80000000 /* External Flow Control Enable */
1949#define E1000_FCRTL_RTL 0x0000FFF8 /* Mask Bits[15:3] for RTL */ 1862#define E1000_FCRTL_RTL 0x0000FFF8 /* Mask Bits[15:3] for RTL */
1950#define E1000_FCRTL_XONE 0x80000000 /* Enable XON frame transmission */ 1863#define E1000_FCRTL_XONE 0x80000000 /* Enable XON frame transmission */
1951 1864
1952/* Header split receive */ 1865/* Header split receive */
1953#define E1000_RFCTL_ISCSI_DIS 0x00000001 1866#define E1000_RFCTL_ISCSI_DIS 0x00000001
@@ -1967,66 +1880,64 @@ struct e1000_hw {
1967#define E1000_RFCTL_NEW_IPV6_EXT_DIS 0x00020000 1880#define E1000_RFCTL_NEW_IPV6_EXT_DIS 0x00020000
1968 1881
1969/* Receive Descriptor Control */ 1882/* Receive Descriptor Control */
1970#define E1000_RXDCTL_PTHRESH 0x0000003F /* RXDCTL Prefetch Threshold */ 1883#define E1000_RXDCTL_PTHRESH 0x0000003F /* RXDCTL Prefetch Threshold */
1971#define E1000_RXDCTL_HTHRESH 0x00003F00 /* RXDCTL Host Threshold */ 1884#define E1000_RXDCTL_HTHRESH 0x00003F00 /* RXDCTL Host Threshold */
1972#define E1000_RXDCTL_WTHRESH 0x003F0000 /* RXDCTL Writeback Threshold */ 1885#define E1000_RXDCTL_WTHRESH 0x003F0000 /* RXDCTL Writeback Threshold */
1973#define E1000_RXDCTL_GRAN 0x01000000 /* RXDCTL Granularity */ 1886#define E1000_RXDCTL_GRAN 0x01000000 /* RXDCTL Granularity */
1974 1887
1975/* Transmit Descriptor Control */ 1888/* Transmit Descriptor Control */
1976#define E1000_TXDCTL_PTHRESH 0x0000003F /* TXDCTL Prefetch Threshold */ 1889#define E1000_TXDCTL_PTHRESH 0x0000003F /* TXDCTL Prefetch Threshold */
1977#define E1000_TXDCTL_HTHRESH 0x00003F00 /* TXDCTL Host Threshold */ 1890#define E1000_TXDCTL_HTHRESH 0x00003F00 /* TXDCTL Host Threshold */
1978#define E1000_TXDCTL_WTHRESH 0x003F0000 /* TXDCTL Writeback Threshold */ 1891#define E1000_TXDCTL_WTHRESH 0x003F0000 /* TXDCTL Writeback Threshold */
1979#define E1000_TXDCTL_GRAN 0x01000000 /* TXDCTL Granularity */ 1892#define E1000_TXDCTL_GRAN 0x01000000 /* TXDCTL Granularity */
1980#define E1000_TXDCTL_LWTHRESH 0xFE000000 /* TXDCTL Low Threshold */ 1893#define E1000_TXDCTL_LWTHRESH 0xFE000000 /* TXDCTL Low Threshold */
1981#define E1000_TXDCTL_FULL_TX_DESC_WB 0x01010000 /* GRAN=1, WTHRESH=1 */ 1894#define E1000_TXDCTL_FULL_TX_DESC_WB 0x01010000 /* GRAN=1, WTHRESH=1 */
1982#define E1000_TXDCTL_COUNT_DESC 0x00400000 /* Enable the counting of desc. 1895#define E1000_TXDCTL_COUNT_DESC 0x00400000 /* Enable the counting of desc.
1983 still to be processed. */ 1896 still to be processed. */
1984/* Transmit Configuration Word */ 1897/* Transmit Configuration Word */
1985#define E1000_TXCW_FD 0x00000020 /* TXCW full duplex */ 1898#define E1000_TXCW_FD 0x00000020 /* TXCW full duplex */
1986#define E1000_TXCW_HD 0x00000040 /* TXCW half duplex */ 1899#define E1000_TXCW_HD 0x00000040 /* TXCW half duplex */
1987#define E1000_TXCW_PAUSE 0x00000080 /* TXCW sym pause request */ 1900#define E1000_TXCW_PAUSE 0x00000080 /* TXCW sym pause request */
1988#define E1000_TXCW_ASM_DIR 0x00000100 /* TXCW astm pause direction */ 1901#define E1000_TXCW_ASM_DIR 0x00000100 /* TXCW astm pause direction */
1989#define E1000_TXCW_PAUSE_MASK 0x00000180 /* TXCW pause request mask */ 1902#define E1000_TXCW_PAUSE_MASK 0x00000180 /* TXCW pause request mask */
1990#define E1000_TXCW_RF 0x00003000 /* TXCW remote fault */ 1903#define E1000_TXCW_RF 0x00003000 /* TXCW remote fault */
1991#define E1000_TXCW_NP 0x00008000 /* TXCW next page */ 1904#define E1000_TXCW_NP 0x00008000 /* TXCW next page */
1992#define E1000_TXCW_CW 0x0000ffff /* TxConfigWord mask */ 1905#define E1000_TXCW_CW 0x0000ffff /* TxConfigWord mask */
1993#define E1000_TXCW_TXC 0x40000000 /* Transmit Config control */ 1906#define E1000_TXCW_TXC 0x40000000 /* Transmit Config control */
1994#define E1000_TXCW_ANE 0x80000000 /* Auto-neg enable */ 1907#define E1000_TXCW_ANE 0x80000000 /* Auto-neg enable */
1995 1908
1996/* Receive Configuration Word */ 1909/* Receive Configuration Word */
1997#define E1000_RXCW_CW 0x0000ffff /* RxConfigWord mask */ 1910#define E1000_RXCW_CW 0x0000ffff /* RxConfigWord mask */
1998#define E1000_RXCW_NC 0x04000000 /* Receive config no carrier */ 1911#define E1000_RXCW_NC 0x04000000 /* Receive config no carrier */
1999#define E1000_RXCW_IV 0x08000000 /* Receive config invalid */ 1912#define E1000_RXCW_IV 0x08000000 /* Receive config invalid */
2000#define E1000_RXCW_CC 0x10000000 /* Receive config change */ 1913#define E1000_RXCW_CC 0x10000000 /* Receive config change */
2001#define E1000_RXCW_C 0x20000000 /* Receive config */ 1914#define E1000_RXCW_C 0x20000000 /* Receive config */
2002#define E1000_RXCW_SYNCH 0x40000000 /* Receive config synch */ 1915#define E1000_RXCW_SYNCH 0x40000000 /* Receive config synch */
2003#define E1000_RXCW_ANC 0x80000000 /* Auto-neg complete */ 1916#define E1000_RXCW_ANC 0x80000000 /* Auto-neg complete */
2004 1917
2005/* Transmit Control */ 1918/* Transmit Control */
2006#define E1000_TCTL_RST 0x00000001 /* software reset */ 1919#define E1000_TCTL_RST 0x00000001 /* software reset */
2007#define E1000_TCTL_EN 0x00000002 /* enable tx */ 1920#define E1000_TCTL_EN 0x00000002 /* enable tx */
2008#define E1000_TCTL_BCE 0x00000004 /* busy check enable */ 1921#define E1000_TCTL_BCE 0x00000004 /* busy check enable */
2009#define E1000_TCTL_PSP 0x00000008 /* pad short packets */ 1922#define E1000_TCTL_PSP 0x00000008 /* pad short packets */
2010#define E1000_TCTL_CT 0x00000ff0 /* collision threshold */ 1923#define E1000_TCTL_CT 0x00000ff0 /* collision threshold */
2011#define E1000_TCTL_COLD 0x003ff000 /* collision distance */ 1924#define E1000_TCTL_COLD 0x003ff000 /* collision distance */
2012#define E1000_TCTL_SWXOFF 0x00400000 /* SW Xoff transmission */ 1925#define E1000_TCTL_SWXOFF 0x00400000 /* SW Xoff transmission */
2013#define E1000_TCTL_PBE 0x00800000 /* Packet Burst Enable */ 1926#define E1000_TCTL_PBE 0x00800000 /* Packet Burst Enable */
2014#define E1000_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */ 1927#define E1000_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */
2015#define E1000_TCTL_NRTU 0x02000000 /* No Re-transmit on underrun */ 1928#define E1000_TCTL_NRTU 0x02000000 /* No Re-transmit on underrun */
2016#define E1000_TCTL_MULR 0x10000000 /* Multiple request support */ 1929#define E1000_TCTL_MULR 0x10000000 /* Multiple request support */
2017/* Extended Transmit Control */ 1930/* Extended Transmit Control */
2018#define E1000_TCTL_EXT_BST_MASK 0x000003FF /* Backoff Slot Time */ 1931#define E1000_TCTL_EXT_BST_MASK 0x000003FF /* Backoff Slot Time */
2019#define E1000_TCTL_EXT_GCEX_MASK 0x000FFC00 /* Gigabit Carry Extend Padding */ 1932#define E1000_TCTL_EXT_GCEX_MASK 0x000FFC00 /* Gigabit Carry Extend Padding */
2020
2021#define DEFAULT_80003ES2LAN_TCTL_EXT_GCEX 0x00010000
2022 1933
2023/* Receive Checksum Control */ 1934/* Receive Checksum Control */
2024#define E1000_RXCSUM_PCSS_MASK 0x000000FF /* Packet Checksum Start */ 1935#define E1000_RXCSUM_PCSS_MASK 0x000000FF /* Packet Checksum Start */
2025#define E1000_RXCSUM_IPOFL 0x00000100 /* IPv4 checksum offload */ 1936#define E1000_RXCSUM_IPOFL 0x00000100 /* IPv4 checksum offload */
2026#define E1000_RXCSUM_TUOFL 0x00000200 /* TCP / UDP checksum offload */ 1937#define E1000_RXCSUM_TUOFL 0x00000200 /* TCP / UDP checksum offload */
2027#define E1000_RXCSUM_IPV6OFL 0x00000400 /* IPv6 checksum offload */ 1938#define E1000_RXCSUM_IPV6OFL 0x00000400 /* IPv6 checksum offload */
2028#define E1000_RXCSUM_IPPCSE 0x00001000 /* IP payload checksum enable */ 1939#define E1000_RXCSUM_IPPCSE 0x00001000 /* IP payload checksum enable */
2029#define E1000_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */ 1940#define E1000_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */
2030 1941
2031/* Multiple Receive Queue Control */ 1942/* Multiple Receive Queue Control */
2032#define E1000_MRQC_ENABLE_MASK 0x00000003 1943#define E1000_MRQC_ENABLE_MASK 0x00000003
@@ -2042,141 +1953,141 @@ struct e1000_hw {
2042 1953
2043/* Definitions for power management and wakeup registers */ 1954/* Definitions for power management and wakeup registers */
2044/* Wake Up Control */ 1955/* Wake Up Control */
2045#define E1000_WUC_APME 0x00000001 /* APM Enable */ 1956#define E1000_WUC_APME 0x00000001 /* APM Enable */
2046#define E1000_WUC_PME_EN 0x00000002 /* PME Enable */ 1957#define E1000_WUC_PME_EN 0x00000002 /* PME Enable */
2047#define E1000_WUC_PME_STATUS 0x00000004 /* PME Status */ 1958#define E1000_WUC_PME_STATUS 0x00000004 /* PME Status */
2048#define E1000_WUC_APMPME 0x00000008 /* Assert PME on APM Wakeup */ 1959#define E1000_WUC_APMPME 0x00000008 /* Assert PME on APM Wakeup */
2049#define E1000_WUC_SPM 0x80000000 /* Enable SPM */ 1960#define E1000_WUC_SPM 0x80000000 /* Enable SPM */
2050 1961
2051/* Wake Up Filter Control */ 1962/* Wake Up Filter Control */
2052#define E1000_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */ 1963#define E1000_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */
2053#define E1000_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */ 1964#define E1000_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */
2054#define E1000_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */ 1965#define E1000_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */
2055#define E1000_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */ 1966#define E1000_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */
2056#define E1000_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */ 1967#define E1000_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */
2057#define E1000_WUFC_ARP 0x00000020 /* ARP Request Packet Wakeup Enable */ 1968#define E1000_WUFC_ARP 0x00000020 /* ARP Request Packet Wakeup Enable */
2058#define E1000_WUFC_IPV4 0x00000040 /* Directed IPv4 Packet Wakeup Enable */ 1969#define E1000_WUFC_IPV4 0x00000040 /* Directed IPv4 Packet Wakeup Enable */
2059#define E1000_WUFC_IPV6 0x00000080 /* Directed IPv6 Packet Wakeup Enable */ 1970#define E1000_WUFC_IPV6 0x00000080 /* Directed IPv6 Packet Wakeup Enable */
2060#define E1000_WUFC_IGNORE_TCO 0x00008000 /* Ignore WakeOn TCO packets */ 1971#define E1000_WUFC_IGNORE_TCO 0x00008000 /* Ignore WakeOn TCO packets */
2061#define E1000_WUFC_FLX0 0x00010000 /* Flexible Filter 0 Enable */ 1972#define E1000_WUFC_FLX0 0x00010000 /* Flexible Filter 0 Enable */
2062#define E1000_WUFC_FLX1 0x00020000 /* Flexible Filter 1 Enable */ 1973#define E1000_WUFC_FLX1 0x00020000 /* Flexible Filter 1 Enable */
2063#define E1000_WUFC_FLX2 0x00040000 /* Flexible Filter 2 Enable */ 1974#define E1000_WUFC_FLX2 0x00040000 /* Flexible Filter 2 Enable */
2064#define E1000_WUFC_FLX3 0x00080000 /* Flexible Filter 3 Enable */ 1975#define E1000_WUFC_FLX3 0x00080000 /* Flexible Filter 3 Enable */
2065#define E1000_WUFC_ALL_FILTERS 0x000F00FF /* Mask for all wakeup filters */ 1976#define E1000_WUFC_ALL_FILTERS 0x000F00FF /* Mask for all wakeup filters */
2066#define E1000_WUFC_FLX_OFFSET 16 /* Offset to the Flexible Filters bits */ 1977#define E1000_WUFC_FLX_OFFSET 16 /* Offset to the Flexible Filters bits */
2067#define E1000_WUFC_FLX_FILTERS 0x000F0000 /* Mask for the 4 flexible filters */ 1978#define E1000_WUFC_FLX_FILTERS 0x000F0000 /* Mask for the 4 flexible filters */
2068 1979
2069/* Wake Up Status */ 1980/* Wake Up Status */
2070#define E1000_WUS_LNKC 0x00000001 /* Link Status Changed */ 1981#define E1000_WUS_LNKC 0x00000001 /* Link Status Changed */
2071#define E1000_WUS_MAG 0x00000002 /* Magic Packet Received */ 1982#define E1000_WUS_MAG 0x00000002 /* Magic Packet Received */
2072#define E1000_WUS_EX 0x00000004 /* Directed Exact Received */ 1983#define E1000_WUS_EX 0x00000004 /* Directed Exact Received */
2073#define E1000_WUS_MC 0x00000008 /* Directed Multicast Received */ 1984#define E1000_WUS_MC 0x00000008 /* Directed Multicast Received */
2074#define E1000_WUS_BC 0x00000010 /* Broadcast Received */ 1985#define E1000_WUS_BC 0x00000010 /* Broadcast Received */
2075#define E1000_WUS_ARP 0x00000020 /* ARP Request Packet Received */ 1986#define E1000_WUS_ARP 0x00000020 /* ARP Request Packet Received */
2076#define E1000_WUS_IPV4 0x00000040 /* Directed IPv4 Packet Wakeup Received */ 1987#define E1000_WUS_IPV4 0x00000040 /* Directed IPv4 Packet Wakeup Received */
2077#define E1000_WUS_IPV6 0x00000080 /* Directed IPv6 Packet Wakeup Received */ 1988#define E1000_WUS_IPV6 0x00000080 /* Directed IPv6 Packet Wakeup Received */
2078#define E1000_WUS_FLX0 0x00010000 /* Flexible Filter 0 Match */ 1989#define E1000_WUS_FLX0 0x00010000 /* Flexible Filter 0 Match */
2079#define E1000_WUS_FLX1 0x00020000 /* Flexible Filter 1 Match */ 1990#define E1000_WUS_FLX1 0x00020000 /* Flexible Filter 1 Match */
2080#define E1000_WUS_FLX2 0x00040000 /* Flexible Filter 2 Match */ 1991#define E1000_WUS_FLX2 0x00040000 /* Flexible Filter 2 Match */
2081#define E1000_WUS_FLX3 0x00080000 /* Flexible Filter 3 Match */ 1992#define E1000_WUS_FLX3 0x00080000 /* Flexible Filter 3 Match */
2082#define E1000_WUS_FLX_FILTERS 0x000F0000 /* Mask for the 4 flexible filters */ 1993#define E1000_WUS_FLX_FILTERS 0x000F0000 /* Mask for the 4 flexible filters */
2083 1994
2084/* Management Control */ 1995/* Management Control */
2085#define E1000_MANC_SMBUS_EN 0x00000001 /* SMBus Enabled - RO */ 1996#define E1000_MANC_SMBUS_EN 0x00000001 /* SMBus Enabled - RO */
2086#define E1000_MANC_ASF_EN 0x00000002 /* ASF Enabled - RO */ 1997#define E1000_MANC_ASF_EN 0x00000002 /* ASF Enabled - RO */
2087#define E1000_MANC_R_ON_FORCE 0x00000004 /* Reset on Force TCO - RO */ 1998#define E1000_MANC_R_ON_FORCE 0x00000004 /* Reset on Force TCO - RO */
2088#define E1000_MANC_RMCP_EN 0x00000100 /* Enable RCMP 026Fh Filtering */ 1999#define E1000_MANC_RMCP_EN 0x00000100 /* Enable RCMP 026Fh Filtering */
2089#define E1000_MANC_0298_EN 0x00000200 /* Enable RCMP 0298h Filtering */ 2000#define E1000_MANC_0298_EN 0x00000200 /* Enable RCMP 0298h Filtering */
2090#define E1000_MANC_IPV4_EN 0x00000400 /* Enable IPv4 */ 2001#define E1000_MANC_IPV4_EN 0x00000400 /* Enable IPv4 */
2091#define E1000_MANC_IPV6_EN 0x00000800 /* Enable IPv6 */ 2002#define E1000_MANC_IPV6_EN 0x00000800 /* Enable IPv6 */
2092#define E1000_MANC_SNAP_EN 0x00001000 /* Accept LLC/SNAP */ 2003#define E1000_MANC_SNAP_EN 0x00001000 /* Accept LLC/SNAP */
2093#define E1000_MANC_ARP_EN 0x00002000 /* Enable ARP Request Filtering */ 2004#define E1000_MANC_ARP_EN 0x00002000 /* Enable ARP Request Filtering */
2094#define E1000_MANC_NEIGHBOR_EN 0x00004000 /* Enable Neighbor Discovery 2005#define E1000_MANC_NEIGHBOR_EN 0x00004000 /* Enable Neighbor Discovery
2095 * Filtering */ 2006 * Filtering */
2096#define E1000_MANC_ARP_RES_EN 0x00008000 /* Enable ARP response Filtering */ 2007#define E1000_MANC_ARP_RES_EN 0x00008000 /* Enable ARP response Filtering */
2097#define E1000_MANC_TCO_RESET 0x00010000 /* TCO Reset Occurred */ 2008#define E1000_MANC_TCO_RESET 0x00010000 /* TCO Reset Occurred */
2098#define E1000_MANC_RCV_TCO_EN 0x00020000 /* Receive TCO Packets Enabled */ 2009#define E1000_MANC_RCV_TCO_EN 0x00020000 /* Receive TCO Packets Enabled */
2099#define E1000_MANC_REPORT_STATUS 0x00040000 /* Status Reporting Enabled */ 2010#define E1000_MANC_REPORT_STATUS 0x00040000 /* Status Reporting Enabled */
2100#define E1000_MANC_RCV_ALL 0x00080000 /* Receive All Enabled */ 2011#define E1000_MANC_RCV_ALL 0x00080000 /* Receive All Enabled */
2101#define E1000_MANC_BLK_PHY_RST_ON_IDE 0x00040000 /* Block phy resets */ 2012#define E1000_MANC_BLK_PHY_RST_ON_IDE 0x00040000 /* Block phy resets */
2102#define E1000_MANC_EN_MAC_ADDR_FILTER 0x00100000 /* Enable MAC address 2013#define E1000_MANC_EN_MAC_ADDR_FILTER 0x00100000 /* Enable MAC address
2103 * filtering */ 2014 * filtering */
2104#define E1000_MANC_EN_MNG2HOST 0x00200000 /* Enable MNG packets to host 2015#define E1000_MANC_EN_MNG2HOST 0x00200000 /* Enable MNG packets to host
2105 * memory */ 2016 * memory */
2106#define E1000_MANC_EN_IP_ADDR_FILTER 0x00400000 /* Enable IP address 2017#define E1000_MANC_EN_IP_ADDR_FILTER 0x00400000 /* Enable IP address
2107 * filtering */ 2018 * filtering */
2108#define E1000_MANC_EN_XSUM_FILTER 0x00800000 /* Enable checksum filtering */ 2019#define E1000_MANC_EN_XSUM_FILTER 0x00800000 /* Enable checksum filtering */
2109#define E1000_MANC_BR_EN 0x01000000 /* Enable broadcast filtering */ 2020#define E1000_MANC_BR_EN 0x01000000 /* Enable broadcast filtering */
2110#define E1000_MANC_SMB_REQ 0x01000000 /* SMBus Request */ 2021#define E1000_MANC_SMB_REQ 0x01000000 /* SMBus Request */
2111#define E1000_MANC_SMB_GNT 0x02000000 /* SMBus Grant */ 2022#define E1000_MANC_SMB_GNT 0x02000000 /* SMBus Grant */
2112#define E1000_MANC_SMB_CLK_IN 0x04000000 /* SMBus Clock In */ 2023#define E1000_MANC_SMB_CLK_IN 0x04000000 /* SMBus Clock In */
2113#define E1000_MANC_SMB_DATA_IN 0x08000000 /* SMBus Data In */ 2024#define E1000_MANC_SMB_DATA_IN 0x08000000 /* SMBus Data In */
2114#define E1000_MANC_SMB_DATA_OUT 0x10000000 /* SMBus Data Out */ 2025#define E1000_MANC_SMB_DATA_OUT 0x10000000 /* SMBus Data Out */
2115#define E1000_MANC_SMB_CLK_OUT 0x20000000 /* SMBus Clock Out */ 2026#define E1000_MANC_SMB_CLK_OUT 0x20000000 /* SMBus Clock Out */
2116 2027
2117#define E1000_MANC_SMB_DATA_OUT_SHIFT 28 /* SMBus Data Out Shift */ 2028#define E1000_MANC_SMB_DATA_OUT_SHIFT 28 /* SMBus Data Out Shift */
2118#define E1000_MANC_SMB_CLK_OUT_SHIFT 29 /* SMBus Clock Out Shift */ 2029#define E1000_MANC_SMB_CLK_OUT_SHIFT 29 /* SMBus Clock Out Shift */
2119 2030
2120/* SW Semaphore Register */ 2031/* SW Semaphore Register */
2121#define E1000_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */ 2032#define E1000_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */
2122#define E1000_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */ 2033#define E1000_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */
2123#define E1000_SWSM_WMNG 0x00000004 /* Wake MNG Clock */ 2034#define E1000_SWSM_WMNG 0x00000004 /* Wake MNG Clock */
2124#define E1000_SWSM_DRV_LOAD 0x00000008 /* Driver Loaded Bit */ 2035#define E1000_SWSM_DRV_LOAD 0x00000008 /* Driver Loaded Bit */
2125 2036
2126/* FW Semaphore Register */ 2037/* FW Semaphore Register */
2127#define E1000_FWSM_MODE_MASK 0x0000000E /* FW mode */ 2038#define E1000_FWSM_MODE_MASK 0x0000000E /* FW mode */
2128#define E1000_FWSM_MODE_SHIFT 1 2039#define E1000_FWSM_MODE_SHIFT 1
2129#define E1000_FWSM_FW_VALID 0x00008000 /* FW established a valid mode */ 2040#define E1000_FWSM_FW_VALID 0x00008000 /* FW established a valid mode */
2130 2041
2131#define E1000_FWSM_RSPCIPHY 0x00000040 /* Reset PHY on PCI reset */ 2042#define E1000_FWSM_RSPCIPHY 0x00000040 /* Reset PHY on PCI reset */
2132#define E1000_FWSM_DISSW 0x10000000 /* FW disable SW Write Access */ 2043#define E1000_FWSM_DISSW 0x10000000 /* FW disable SW Write Access */
2133#define E1000_FWSM_SKUSEL_MASK 0x60000000 /* LAN SKU select */ 2044#define E1000_FWSM_SKUSEL_MASK 0x60000000 /* LAN SKU select */
2134#define E1000_FWSM_SKUEL_SHIFT 29 2045#define E1000_FWSM_SKUEL_SHIFT 29
2135#define E1000_FWSM_SKUSEL_EMB 0x0 /* Embedded SKU */ 2046#define E1000_FWSM_SKUSEL_EMB 0x0 /* Embedded SKU */
2136#define E1000_FWSM_SKUSEL_CONS 0x1 /* Consumer SKU */ 2047#define E1000_FWSM_SKUSEL_CONS 0x1 /* Consumer SKU */
2137#define E1000_FWSM_SKUSEL_PERF_100 0x2 /* Perf & Corp 10/100 SKU */ 2048#define E1000_FWSM_SKUSEL_PERF_100 0x2 /* Perf & Corp 10/100 SKU */
2138#define E1000_FWSM_SKUSEL_PERF_GBE 0x3 /* Perf & Copr GbE SKU */ 2049#define E1000_FWSM_SKUSEL_PERF_GBE 0x3 /* Perf & Copr GbE SKU */
2139 2050
2140/* FFLT Debug Register */ 2051/* FFLT Debug Register */
2141#define E1000_FFLT_DBG_INVC 0x00100000 /* Invalid /C/ code handling */ 2052#define E1000_FFLT_DBG_INVC 0x00100000 /* Invalid /C/ code handling */
2142 2053
2143typedef enum { 2054typedef enum {
2144 e1000_mng_mode_none = 0, 2055 e1000_mng_mode_none = 0,
2145 e1000_mng_mode_asf, 2056 e1000_mng_mode_asf,
2146 e1000_mng_mode_pt, 2057 e1000_mng_mode_pt,
2147 e1000_mng_mode_ipmi, 2058 e1000_mng_mode_ipmi,
2148 e1000_mng_mode_host_interface_only 2059 e1000_mng_mode_host_interface_only
2149} e1000_mng_mode; 2060} e1000_mng_mode;
2150 2061
2151/* Host Inteface Control Register */ 2062/* Host Interface Control Register */
2152#define E1000_HICR_EN 0x00000001 /* Enable Bit - RO */ 2063#define E1000_HICR_EN 0x00000001 /* Enable Bit - RO */
2153#define E1000_HICR_C 0x00000002 /* Driver sets this bit when done 2064#define E1000_HICR_C 0x00000002 /* Driver sets this bit when done
2154 * to put command in RAM */ 2065 * to put command in RAM */
2155#define E1000_HICR_SV 0x00000004 /* Status Validity */ 2066#define E1000_HICR_SV 0x00000004 /* Status Validity */
2156#define E1000_HICR_FWR 0x00000080 /* FW reset. Set by the Host */ 2067#define E1000_HICR_FWR 0x00000080 /* FW reset. Set by the Host */
2157 2068
2158/* Host Interface Command Interface - Address range 0x8800-0x8EFF */ 2069/* Host Interface Command Interface - Address range 0x8800-0x8EFF */
2159#define E1000_HI_MAX_DATA_LENGTH 252 /* Host Interface data length */ 2070#define E1000_HI_MAX_DATA_LENGTH 252 /* Host Interface data length */
2160#define E1000_HI_MAX_BLOCK_BYTE_LENGTH 1792 /* Number of bytes in range */ 2071#define E1000_HI_MAX_BLOCK_BYTE_LENGTH 1792 /* Number of bytes in range */
2161#define E1000_HI_MAX_BLOCK_DWORD_LENGTH 448 /* Number of dwords in range */ 2072#define E1000_HI_MAX_BLOCK_DWORD_LENGTH 448 /* Number of dwords in range */
2162#define E1000_HI_COMMAND_TIMEOUT 500 /* Time in ms to process HI command */ 2073#define E1000_HI_COMMAND_TIMEOUT 500 /* Time in ms to process HI command */
2163 2074
2164struct e1000_host_command_header { 2075struct e1000_host_command_header {
2165 u8 command_id; 2076 u8 command_id;
2166 u8 command_length; 2077 u8 command_length;
2167 u8 command_options; /* I/F bits for command, status for return */ 2078 u8 command_options; /* I/F bits for command, status for return */
2168 u8 checksum; 2079 u8 checksum;
2169}; 2080};
2170struct e1000_host_command_info { 2081struct e1000_host_command_info {
2171 struct e1000_host_command_header command_header; /* Command Head/Command Result Head has 4 bytes */ 2082 struct e1000_host_command_header command_header; /* Command Head/Command Result Head has 4 bytes */
2172 u8 command_data[E1000_HI_MAX_DATA_LENGTH]; /* Command data can length 0..252 */ 2083 u8 command_data[E1000_HI_MAX_DATA_LENGTH]; /* Command data can length 0..252 */
2173}; 2084};
2174 2085
2175/* Host SMB register #0 */ 2086/* Host SMB register #0 */
2176#define E1000_HSMC0R_CLKIN 0x00000001 /* SMB Clock in */ 2087#define E1000_HSMC0R_CLKIN 0x00000001 /* SMB Clock in */
2177#define E1000_HSMC0R_DATAIN 0x00000002 /* SMB Data in */ 2088#define E1000_HSMC0R_DATAIN 0x00000002 /* SMB Data in */
2178#define E1000_HSMC0R_DATAOUT 0x00000004 /* SMB Data out */ 2089#define E1000_HSMC0R_DATAOUT 0x00000004 /* SMB Data out */
2179#define E1000_HSMC0R_CLKOUT 0x00000008 /* SMB Clock out */ 2090#define E1000_HSMC0R_CLKOUT 0x00000008 /* SMB Clock out */
2180 2091
2181/* Host SMB register #1 */ 2092/* Host SMB register #1 */
2182#define E1000_HSMC1R_CLKIN E1000_HSMC0R_CLKIN 2093#define E1000_HSMC1R_CLKIN E1000_HSMC0R_CLKIN
@@ -2185,10 +2096,10 @@ struct e1000_host_command_info {
2185#define E1000_HSMC1R_CLKOUT E1000_HSMC0R_CLKOUT 2096#define E1000_HSMC1R_CLKOUT E1000_HSMC0R_CLKOUT
2186 2097
2187/* FW Status Register */ 2098/* FW Status Register */
2188#define E1000_FWSTS_FWS_MASK 0x000000FF /* FW Status */ 2099#define E1000_FWSTS_FWS_MASK 0x000000FF /* FW Status */
2189 2100
2190/* Wake Up Packet Length */ 2101/* Wake Up Packet Length */
2191#define E1000_WUPL_LENGTH_MASK 0x0FFF /* Only the lower 12 bits are valid */ 2102#define E1000_WUPL_LENGTH_MASK 0x0FFF /* Only the lower 12 bits are valid */
2192 2103
2193#define E1000_MDALIGN 4096 2104#define E1000_MDALIGN 4096
2194 2105
@@ -2242,24 +2153,24 @@ struct e1000_host_command_info {
2242#define PCI_EX_LINK_WIDTH_SHIFT 4 2153#define PCI_EX_LINK_WIDTH_SHIFT 4
2243 2154
2244/* EEPROM Commands - Microwire */ 2155/* EEPROM Commands - Microwire */
2245#define EEPROM_READ_OPCODE_MICROWIRE 0x6 /* EEPROM read opcode */ 2156#define EEPROM_READ_OPCODE_MICROWIRE 0x6 /* EEPROM read opcode */
2246#define EEPROM_WRITE_OPCODE_MICROWIRE 0x5 /* EEPROM write opcode */ 2157#define EEPROM_WRITE_OPCODE_MICROWIRE 0x5 /* EEPROM write opcode */
2247#define EEPROM_ERASE_OPCODE_MICROWIRE 0x7 /* EEPROM erase opcode */ 2158#define EEPROM_ERASE_OPCODE_MICROWIRE 0x7 /* EEPROM erase opcode */
2248#define EEPROM_EWEN_OPCODE_MICROWIRE 0x13 /* EEPROM erase/write enable */ 2159#define EEPROM_EWEN_OPCODE_MICROWIRE 0x13 /* EEPROM erase/write enable */
2249#define EEPROM_EWDS_OPCODE_MICROWIRE 0x10 /* EEPROM erast/write disable */ 2160#define EEPROM_EWDS_OPCODE_MICROWIRE 0x10 /* EEPROM erase/write disable */
2250 2161
2251/* EEPROM Commands - SPI */ 2162/* EEPROM Commands - SPI */
2252#define EEPROM_MAX_RETRY_SPI 5000 /* Max wait of 5ms, for RDY signal */ 2163#define EEPROM_MAX_RETRY_SPI 5000 /* Max wait of 5ms, for RDY signal */
2253#define EEPROM_READ_OPCODE_SPI 0x03 /* EEPROM read opcode */ 2164#define EEPROM_READ_OPCODE_SPI 0x03 /* EEPROM read opcode */
2254#define EEPROM_WRITE_OPCODE_SPI 0x02 /* EEPROM write opcode */ 2165#define EEPROM_WRITE_OPCODE_SPI 0x02 /* EEPROM write opcode */
2255#define EEPROM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = address bit-8 */ 2166#define EEPROM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = address bit-8 */
2256#define EEPROM_WREN_OPCODE_SPI 0x06 /* EEPROM set Write Enable latch */ 2167#define EEPROM_WREN_OPCODE_SPI 0x06 /* EEPROM set Write Enable latch */
2257#define EEPROM_WRDI_OPCODE_SPI 0x04 /* EEPROM reset Write Enable latch */ 2168#define EEPROM_WRDI_OPCODE_SPI 0x04 /* EEPROM reset Write Enable latch */
2258#define EEPROM_RDSR_OPCODE_SPI 0x05 /* EEPROM read Status register */ 2169#define EEPROM_RDSR_OPCODE_SPI 0x05 /* EEPROM read Status register */
2259#define EEPROM_WRSR_OPCODE_SPI 0x01 /* EEPROM write Status register */ 2170#define EEPROM_WRSR_OPCODE_SPI 0x01 /* EEPROM write Status register */
2260#define EEPROM_ERASE4K_OPCODE_SPI 0x20 /* EEPROM ERASE 4KB */ 2171#define EEPROM_ERASE4K_OPCODE_SPI 0x20 /* EEPROM ERASE 4KB */
2261#define EEPROM_ERASE64K_OPCODE_SPI 0xD8 /* EEPROM ERASE 64KB */ 2172#define EEPROM_ERASE64K_OPCODE_SPI 0xD8 /* EEPROM ERASE 64KB */
2262#define EEPROM_ERASE256_OPCODE_SPI 0xDB /* EEPROM ERASE 256B */ 2173#define EEPROM_ERASE256_OPCODE_SPI 0xDB /* EEPROM ERASE 256B */
2263 2174
2264/* EEPROM Size definitions */ 2175/* EEPROM Size definitions */
2265#define EEPROM_WORD_SIZE_SHIFT 6 2176#define EEPROM_WORD_SIZE_SHIFT 6
@@ -2270,7 +2181,7 @@ struct e1000_host_command_info {
2270#define EEPROM_COMPAT 0x0003 2181#define EEPROM_COMPAT 0x0003
2271#define EEPROM_ID_LED_SETTINGS 0x0004 2182#define EEPROM_ID_LED_SETTINGS 0x0004
2272#define EEPROM_VERSION 0x0005 2183#define EEPROM_VERSION 0x0005
2273#define EEPROM_SERDES_AMPLITUDE 0x0006 /* For SERDES output amplitude adjustment. */ 2184#define EEPROM_SERDES_AMPLITUDE 0x0006 /* For SERDES output amplitude adjustment. */
2274#define EEPROM_PHY_CLASS_WORD 0x0007 2185#define EEPROM_PHY_CLASS_WORD 0x0007
2275#define EEPROM_INIT_CONTROL1_REG 0x000A 2186#define EEPROM_INIT_CONTROL1_REG 0x000A
2276#define EEPROM_INIT_CONTROL2_REG 0x000F 2187#define EEPROM_INIT_CONTROL2_REG 0x000F
@@ -2283,22 +2194,16 @@ struct e1000_host_command_info {
2283#define EEPROM_FLASH_VERSION 0x0032 2194#define EEPROM_FLASH_VERSION 0x0032
2284#define EEPROM_CHECKSUM_REG 0x003F 2195#define EEPROM_CHECKSUM_REG 0x003F
2285 2196
2286#define E1000_EEPROM_CFG_DONE 0x00040000 /* MNG config cycle done */ 2197#define E1000_EEPROM_CFG_DONE 0x00040000 /* MNG config cycle done */
2287#define E1000_EEPROM_CFG_DONE_PORT_1 0x00080000 /* ...for second port */ 2198#define E1000_EEPROM_CFG_DONE_PORT_1 0x00080000 /* ...for second port */
2288 2199
2289/* Word definitions for ID LED Settings */ 2200/* Word definitions for ID LED Settings */
2290#define ID_LED_RESERVED_0000 0x0000 2201#define ID_LED_RESERVED_0000 0x0000
2291#define ID_LED_RESERVED_FFFF 0xFFFF 2202#define ID_LED_RESERVED_FFFF 0xFFFF
2292#define ID_LED_RESERVED_82573 0xF746
2293#define ID_LED_DEFAULT_82573 0x1811
2294#define ID_LED_DEFAULT ((ID_LED_OFF1_ON2 << 12) | \ 2203#define ID_LED_DEFAULT ((ID_LED_OFF1_ON2 << 12) | \
2295 (ID_LED_OFF1_OFF2 << 8) | \ 2204 (ID_LED_OFF1_OFF2 << 8) | \
2296 (ID_LED_DEF1_DEF2 << 4) | \ 2205 (ID_LED_DEF1_DEF2 << 4) | \
2297 (ID_LED_DEF1_DEF2)) 2206 (ID_LED_DEF1_DEF2))
2298#define ID_LED_DEFAULT_ICH8LAN ((ID_LED_DEF1_DEF2 << 12) | \
2299 (ID_LED_DEF1_OFF2 << 8) | \
2300 (ID_LED_DEF1_ON2 << 4) | \
2301 (ID_LED_DEF1_DEF2))
2302#define ID_LED_DEF1_DEF2 0x1 2207#define ID_LED_DEF1_DEF2 0x1
2303#define ID_LED_DEF1_ON2 0x2 2208#define ID_LED_DEF1_ON2 0x2
2304#define ID_LED_DEF1_OFF2 0x3 2209#define ID_LED_DEF1_OFF2 0x3
@@ -2313,7 +2218,6 @@ struct e1000_host_command_info {
2313#define IGP_ACTIVITY_LED_ENABLE 0x0300 2218#define IGP_ACTIVITY_LED_ENABLE 0x0300
2314#define IGP_LED3_MODE 0x07000000 2219#define IGP_LED3_MODE 0x07000000
2315 2220
2316
2317/* Mask bits for SERDES amplitude adjustment in Word 6 of the EEPROM */ 2221/* Mask bits for SERDES amplitude adjustment in Word 6 of the EEPROM */
2318#define EEPROM_SERDES_AMPLITUDE_MASK 0x000F 2222#define EEPROM_SERDES_AMPLITUDE_MASK 0x000F
2319 2223
@@ -2384,11 +2288,8 @@ struct e1000_host_command_info {
2384 2288
2385#define DEFAULT_82542_TIPG_IPGR2 10 2289#define DEFAULT_82542_TIPG_IPGR2 10
2386#define DEFAULT_82543_TIPG_IPGR2 6 2290#define DEFAULT_82543_TIPG_IPGR2 6
2387#define DEFAULT_80003ES2LAN_TIPG_IPGR2 7
2388#define E1000_TIPG_IPGR2_SHIFT 20 2291#define E1000_TIPG_IPGR2_SHIFT 20
2389 2292
2390#define DEFAULT_80003ES2LAN_TIPG_IPGT_10_100 0x00000009
2391#define DEFAULT_80003ES2LAN_TIPG_IPGT_1000 0x00000008
2392#define E1000_TXDMAC_DPP 0x00000001 2293#define E1000_TXDMAC_DPP 0x00000001
2393 2294
2394/* Adaptive IFS defines */ 2295/* Adaptive IFS defines */
@@ -2421,9 +2322,9 @@ struct e1000_host_command_info {
2421#define E1000_EXTCNF_CTRL_SWFLAG 0x00000020 2322#define E1000_EXTCNF_CTRL_SWFLAG 0x00000020
2422 2323
2423/* PBA constants */ 2324/* PBA constants */
2424#define E1000_PBA_8K 0x0008 /* 8KB, default Rx allocation */ 2325#define E1000_PBA_8K 0x0008 /* 8KB, default Rx allocation */
2425#define E1000_PBA_12K 0x000C /* 12KB, default Rx allocation */ 2326#define E1000_PBA_12K 0x000C /* 12KB, default Rx allocation */
2426#define E1000_PBA_16K 0x0010 /* 16KB, default TX allocation */ 2327#define E1000_PBA_16K 0x0010 /* 16KB, default TX allocation */
2427#define E1000_PBA_20K 0x0014 2328#define E1000_PBA_20K 0x0014
2428#define E1000_PBA_22K 0x0016 2329#define E1000_PBA_22K 0x0016
2429#define E1000_PBA_24K 0x0018 2330#define E1000_PBA_24K 0x0018
@@ -2432,7 +2333,7 @@ struct e1000_host_command_info {
2432#define E1000_PBA_34K 0x0022 2333#define E1000_PBA_34K 0x0022
2433#define E1000_PBA_38K 0x0026 2334#define E1000_PBA_38K 0x0026
2434#define E1000_PBA_40K 0x0028 2335#define E1000_PBA_40K 0x0028
2435#define E1000_PBA_48K 0x0030 /* 48KB, default RX allocation */ 2336#define E1000_PBA_48K 0x0030 /* 48KB, default RX allocation */
2436 2337
2437#define E1000_PBS_16K E1000_PBA_16K 2338#define E1000_PBS_16K E1000_PBA_16K
2438 2339
@@ -2442,9 +2343,9 @@ struct e1000_host_command_info {
2442#define FLOW_CONTROL_TYPE 0x8808 2343#define FLOW_CONTROL_TYPE 0x8808
2443 2344
2444/* The historical defaults for the flow control values are given below. */ 2345/* The historical defaults for the flow control values are given below. */
2445#define FC_DEFAULT_HI_THRESH (0x8000) /* 32KB */ 2346#define FC_DEFAULT_HI_THRESH (0x8000) /* 32KB */
2446#define FC_DEFAULT_LO_THRESH (0x4000) /* 16KB */ 2347#define FC_DEFAULT_LO_THRESH (0x4000) /* 16KB */
2447#define FC_DEFAULT_TX_TIMER (0x100) /* ~130 us */ 2348#define FC_DEFAULT_TX_TIMER (0x100) /* ~130 us */
2448 2349
2449/* PCIX Config space */ 2350/* PCIX Config space */
2450#define PCIX_COMMAND_REGISTER 0xE6 2351#define PCIX_COMMAND_REGISTER 0xE6
@@ -2458,7 +2359,6 @@ struct e1000_host_command_info {
2458#define PCIX_STATUS_HI_MMRBC_4K 0x3 2359#define PCIX_STATUS_HI_MMRBC_4K 0x3
2459#define PCIX_STATUS_HI_MMRBC_2K 0x2 2360#define PCIX_STATUS_HI_MMRBC_2K 0x2
2460 2361
2461
2462/* Number of bits required to shift right the "pause" bits from the 2362/* Number of bits required to shift right the "pause" bits from the
2463 * EEPROM (bits 13:12) to the "pause" (bits 8:7) field in the TXCW register. 2363 * EEPROM (bits 13:12) to the "pause" (bits 8:7) field in the TXCW register.
2464 */ 2364 */
@@ -2479,14 +2379,11 @@ struct e1000_host_command_info {
2479 */ 2379 */
2480#define ILOS_SHIFT 3 2380#define ILOS_SHIFT 3
2481 2381
2482
2483#define RECEIVE_BUFFER_ALIGN_SIZE (256) 2382#define RECEIVE_BUFFER_ALIGN_SIZE (256)
2484 2383
2485/* Number of milliseconds we wait for auto-negotiation to complete */ 2384/* Number of milliseconds we wait for auto-negotiation to complete */
2486#define LINK_UP_TIMEOUT 500 2385#define LINK_UP_TIMEOUT 500
2487 2386
2488/* Number of 100 microseconds we wait for PCI Express master disable */
2489#define MASTER_DISABLE_TIMEOUT 800
2490/* Number of milliseconds we wait for Eeprom auto read bit done after MAC reset */ 2387/* Number of milliseconds we wait for Eeprom auto read bit done after MAC reset */
2491#define AUTO_READ_DONE_TIMEOUT 10 2388#define AUTO_READ_DONE_TIMEOUT 10
2492/* Number of milliseconds we wait for PHY configuration done after MAC reset */ 2389/* Number of milliseconds we wait for PHY configuration done after MAC reset */
@@ -2534,7 +2431,6 @@ struct e1000_host_command_info {
2534 (((length) > (adapter)->min_frame_size) && \ 2431 (((length) > (adapter)->min_frame_size) && \
2535 ((length) <= ((adapter)->max_frame_size + VLAN_TAG_SIZE + 1))))) 2432 ((length) <= ((adapter)->max_frame_size + VLAN_TAG_SIZE + 1)))))
2536 2433
2537
2538/* Structures, enums, and macros for the PHY */ 2434/* Structures, enums, and macros for the PHY */
2539 2435
2540/* Bit definitions for the Management Data IO (MDIO) and Management Data 2436/* Bit definitions for the Management Data IO (MDIO) and Management Data
@@ -2551,49 +2447,49 @@ struct e1000_host_command_info {
2551 2447
2552/* PHY 1000 MII Register/Bit Definitions */ 2448/* PHY 1000 MII Register/Bit Definitions */
2553/* PHY Registers defined by IEEE */ 2449/* PHY Registers defined by IEEE */
2554#define PHY_CTRL 0x00 /* Control Register */ 2450#define PHY_CTRL 0x00 /* Control Register */
2555#define PHY_STATUS 0x01 /* Status Regiser */ 2451#define PHY_STATUS 0x01 /* Status Register */
2556#define PHY_ID1 0x02 /* Phy Id Reg (word 1) */ 2452#define PHY_ID1 0x02 /* Phy Id Reg (word 1) */
2557#define PHY_ID2 0x03 /* Phy Id Reg (word 2) */ 2453#define PHY_ID2 0x03 /* Phy Id Reg (word 2) */
2558#define PHY_AUTONEG_ADV 0x04 /* Autoneg Advertisement */ 2454#define PHY_AUTONEG_ADV 0x04 /* Autoneg Advertisement */
2559#define PHY_LP_ABILITY 0x05 /* Link Partner Ability (Base Page) */ 2455#define PHY_LP_ABILITY 0x05 /* Link Partner Ability (Base Page) */
2560#define PHY_AUTONEG_EXP 0x06 /* Autoneg Expansion Reg */ 2456#define PHY_AUTONEG_EXP 0x06 /* Autoneg Expansion Reg */
2561#define PHY_NEXT_PAGE_TX 0x07 /* Next Page TX */ 2457#define PHY_NEXT_PAGE_TX 0x07 /* Next Page TX */
2562#define PHY_LP_NEXT_PAGE 0x08 /* Link Partner Next Page */ 2458#define PHY_LP_NEXT_PAGE 0x08 /* Link Partner Next Page */
2563#define PHY_1000T_CTRL 0x09 /* 1000Base-T Control Reg */ 2459#define PHY_1000T_CTRL 0x09 /* 1000Base-T Control Reg */
2564#define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Reg */ 2460#define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Reg */
2565#define PHY_EXT_STATUS 0x0F /* Extended Status Reg */ 2461#define PHY_EXT_STATUS 0x0F /* Extended Status Reg */
2566 2462
2567#define MAX_PHY_REG_ADDRESS 0x1F /* 5 bit address bus (0-0x1F) */ 2463#define MAX_PHY_REG_ADDRESS 0x1F /* 5 bit address bus (0-0x1F) */
2568#define MAX_PHY_MULTI_PAGE_REG 0xF /* Registers equal on all pages */ 2464#define MAX_PHY_MULTI_PAGE_REG 0xF /* Registers equal on all pages */
2569 2465
2570/* M88E1000 Specific Registers */ 2466/* M88E1000 Specific Registers */
2571#define M88E1000_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Register */ 2467#define M88E1000_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Register */
2572#define M88E1000_PHY_SPEC_STATUS 0x11 /* PHY Specific Status Register */ 2468#define M88E1000_PHY_SPEC_STATUS 0x11 /* PHY Specific Status Register */
2573#define M88E1000_INT_ENABLE 0x12 /* Interrupt Enable Register */ 2469#define M88E1000_INT_ENABLE 0x12 /* Interrupt Enable Register */
2574#define M88E1000_INT_STATUS 0x13 /* Interrupt Status Register */ 2470#define M88E1000_INT_STATUS 0x13 /* Interrupt Status Register */
2575#define M88E1000_EXT_PHY_SPEC_CTRL 0x14 /* Extended PHY Specific Control */ 2471#define M88E1000_EXT_PHY_SPEC_CTRL 0x14 /* Extended PHY Specific Control */
2576#define M88E1000_RX_ERR_CNTR 0x15 /* Receive Error Counter */ 2472#define M88E1000_RX_ERR_CNTR 0x15 /* Receive Error Counter */
2577 2473
2578#define M88E1000_PHY_EXT_CTRL 0x1A /* PHY extend control register */ 2474#define M88E1000_PHY_EXT_CTRL 0x1A /* PHY extend control register */
2579#define M88E1000_PHY_PAGE_SELECT 0x1D /* Reg 29 for page number setting */ 2475#define M88E1000_PHY_PAGE_SELECT 0x1D /* Reg 29 for page number setting */
2580#define M88E1000_PHY_GEN_CONTROL 0x1E /* Its meaning depends on reg 29 */ 2476#define M88E1000_PHY_GEN_CONTROL 0x1E /* Its meaning depends on reg 29 */
2581#define M88E1000_PHY_VCO_REG_BIT8 0x100 /* Bits 8 & 11 are adjusted for */ 2477#define M88E1000_PHY_VCO_REG_BIT8 0x100 /* Bits 8 & 11 are adjusted for */
2582#define M88E1000_PHY_VCO_REG_BIT11 0x800 /* improved BER performance */ 2478#define M88E1000_PHY_VCO_REG_BIT11 0x800 /* improved BER performance */
2583 2479
2584#define IGP01E1000_IEEE_REGS_PAGE 0x0000 2480#define IGP01E1000_IEEE_REGS_PAGE 0x0000
2585#define IGP01E1000_IEEE_RESTART_AUTONEG 0x3300 2481#define IGP01E1000_IEEE_RESTART_AUTONEG 0x3300
2586#define IGP01E1000_IEEE_FORCE_GIGA 0x0140 2482#define IGP01E1000_IEEE_FORCE_GIGA 0x0140
2587 2483
2588/* IGP01E1000 Specific Registers */ 2484/* IGP01E1000 Specific Registers */
2589#define IGP01E1000_PHY_PORT_CONFIG 0x10 /* PHY Specific Port Config Register */ 2485#define IGP01E1000_PHY_PORT_CONFIG 0x10 /* PHY Specific Port Config Register */
2590#define IGP01E1000_PHY_PORT_STATUS 0x11 /* PHY Specific Status Register */ 2486#define IGP01E1000_PHY_PORT_STATUS 0x11 /* PHY Specific Status Register */
2591#define IGP01E1000_PHY_PORT_CTRL 0x12 /* PHY Specific Control Register */ 2487#define IGP01E1000_PHY_PORT_CTRL 0x12 /* PHY Specific Control Register */
2592#define IGP01E1000_PHY_LINK_HEALTH 0x13 /* PHY Link Health Register */ 2488#define IGP01E1000_PHY_LINK_HEALTH 0x13 /* PHY Link Health Register */
2593#define IGP01E1000_GMII_FIFO 0x14 /* GMII FIFO Register */ 2489#define IGP01E1000_GMII_FIFO 0x14 /* GMII FIFO Register */
2594#define IGP01E1000_PHY_CHANNEL_QUALITY 0x15 /* PHY Channel Quality Register */ 2490#define IGP01E1000_PHY_CHANNEL_QUALITY 0x15 /* PHY Channel Quality Register */
2595#define IGP02E1000_PHY_POWER_MGMT 0x19 2491#define IGP02E1000_PHY_POWER_MGMT 0x19
2596#define IGP01E1000_PHY_PAGE_SELECT 0x1F /* PHY Page Select Core Register */ 2492#define IGP01E1000_PHY_PAGE_SELECT 0x1F /* PHY Page Select Core Register */
2597 2493
2598/* IGP01E1000 AGC Registers - stores the cable length values*/ 2494/* IGP01E1000 AGC Registers - stores the cable length values*/
2599#define IGP01E1000_PHY_AGC_A 0x1172 2495#define IGP01E1000_PHY_AGC_A 0x1172
@@ -2636,192 +2532,119 @@ struct e1000_host_command_info {
2636 2532
2637#define IGP01E1000_ANALOG_REGS_PAGE 0x20C0 2533#define IGP01E1000_ANALOG_REGS_PAGE 0x20C0
2638 2534
2639/* Bits...
2640 * 15-5: page
2641 * 4-0: register offset
2642 */
2643#define GG82563_PAGE_SHIFT 5
2644#define GG82563_REG(page, reg) \
2645 (((page) << GG82563_PAGE_SHIFT) | ((reg) & MAX_PHY_REG_ADDRESS))
2646#define GG82563_MIN_ALT_REG 30
2647
2648/* GG82563 Specific Registers */
2649#define GG82563_PHY_SPEC_CTRL \
2650 GG82563_REG(0, 16) /* PHY Specific Control */
2651#define GG82563_PHY_SPEC_STATUS \
2652 GG82563_REG(0, 17) /* PHY Specific Status */
2653#define GG82563_PHY_INT_ENABLE \
2654 GG82563_REG(0, 18) /* Interrupt Enable */
2655#define GG82563_PHY_SPEC_STATUS_2 \
2656 GG82563_REG(0, 19) /* PHY Specific Status 2 */
2657#define GG82563_PHY_RX_ERR_CNTR \
2658 GG82563_REG(0, 21) /* Receive Error Counter */
2659#define GG82563_PHY_PAGE_SELECT \
2660 GG82563_REG(0, 22) /* Page Select */
2661#define GG82563_PHY_SPEC_CTRL_2 \
2662 GG82563_REG(0, 26) /* PHY Specific Control 2 */
2663#define GG82563_PHY_PAGE_SELECT_ALT \
2664 GG82563_REG(0, 29) /* Alternate Page Select */
2665#define GG82563_PHY_TEST_CLK_CTRL \
2666 GG82563_REG(0, 30) /* Test Clock Control (use reg. 29 to select) */
2667
2668#define GG82563_PHY_MAC_SPEC_CTRL \
2669 GG82563_REG(2, 21) /* MAC Specific Control Register */
2670#define GG82563_PHY_MAC_SPEC_CTRL_2 \
2671 GG82563_REG(2, 26) /* MAC Specific Control 2 */
2672
2673#define GG82563_PHY_DSP_DISTANCE \
2674 GG82563_REG(5, 26) /* DSP Distance */
2675
2676/* Page 193 - Port Control Registers */
2677#define GG82563_PHY_KMRN_MODE_CTRL \
2678 GG82563_REG(193, 16) /* Kumeran Mode Control */
2679#define GG82563_PHY_PORT_RESET \
2680 GG82563_REG(193, 17) /* Port Reset */
2681#define GG82563_PHY_REVISION_ID \
2682 GG82563_REG(193, 18) /* Revision ID */
2683#define GG82563_PHY_DEVICE_ID \
2684 GG82563_REG(193, 19) /* Device ID */
2685#define GG82563_PHY_PWR_MGMT_CTRL \
2686 GG82563_REG(193, 20) /* Power Management Control */
2687#define GG82563_PHY_RATE_ADAPT_CTRL \
2688 GG82563_REG(193, 25) /* Rate Adaptation Control */
2689
2690/* Page 194 - KMRN Registers */
2691#define GG82563_PHY_KMRN_FIFO_CTRL_STAT \
2692 GG82563_REG(194, 16) /* FIFO's Control/Status */
2693#define GG82563_PHY_KMRN_CTRL \
2694 GG82563_REG(194, 17) /* Control */
2695#define GG82563_PHY_INBAND_CTRL \
2696 GG82563_REG(194, 18) /* Inband Control */
2697#define GG82563_PHY_KMRN_DIAGNOSTIC \
2698 GG82563_REG(194, 19) /* Diagnostic */
2699#define GG82563_PHY_ACK_TIMEOUTS \
2700 GG82563_REG(194, 20) /* Acknowledge Timeouts */
2701#define GG82563_PHY_ADV_ABILITY \
2702 GG82563_REG(194, 21) /* Advertised Ability */
2703#define GG82563_PHY_LINK_PARTNER_ADV_ABILITY \
2704 GG82563_REG(194, 23) /* Link Partner Advertised Ability */
2705#define GG82563_PHY_ADV_NEXT_PAGE \
2706 GG82563_REG(194, 24) /* Advertised Next Page */
2707#define GG82563_PHY_LINK_PARTNER_ADV_NEXT_PAGE \
2708 GG82563_REG(194, 25) /* Link Partner Advertised Next page */
2709#define GG82563_PHY_KMRN_MISC \
2710 GG82563_REG(194, 26) /* Misc. */
2711
2712/* PHY Control Register */ 2535/* PHY Control Register */
2713#define MII_CR_SPEED_SELECT_MSB 0x0040 /* bits 6,13: 10=1000, 01=100, 00=10 */ 2536#define MII_CR_SPEED_SELECT_MSB 0x0040 /* bits 6,13: 10=1000, 01=100, 00=10 */
2714#define MII_CR_COLL_TEST_ENABLE 0x0080 /* Collision test enable */ 2537#define MII_CR_COLL_TEST_ENABLE 0x0080 /* Collision test enable */
2715#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */ 2538#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */
2716#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */ 2539#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */
2717#define MII_CR_ISOLATE 0x0400 /* Isolate PHY from MII */ 2540#define MII_CR_ISOLATE 0x0400 /* Isolate PHY from MII */
2718#define MII_CR_POWER_DOWN 0x0800 /* Power down */ 2541#define MII_CR_POWER_DOWN 0x0800 /* Power down */
2719#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */ 2542#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */
2720#define MII_CR_SPEED_SELECT_LSB 0x2000 /* bits 6,13: 10=1000, 01=100, 00=10 */ 2543#define MII_CR_SPEED_SELECT_LSB 0x2000 /* bits 6,13: 10=1000, 01=100, 00=10 */
2721#define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */ 2544#define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */
2722#define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */ 2545#define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */
2723 2546
2724/* PHY Status Register */ 2547/* PHY Status Register */
2725#define MII_SR_EXTENDED_CAPS 0x0001 /* Extended register capabilities */ 2548#define MII_SR_EXTENDED_CAPS 0x0001 /* Extended register capabilities */
2726#define MII_SR_JABBER_DETECT 0x0002 /* Jabber Detected */ 2549#define MII_SR_JABBER_DETECT 0x0002 /* Jabber Detected */
2727#define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */ 2550#define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */
2728#define MII_SR_AUTONEG_CAPS 0x0008 /* Auto Neg Capable */ 2551#define MII_SR_AUTONEG_CAPS 0x0008 /* Auto Neg Capable */
2729#define MII_SR_REMOTE_FAULT 0x0010 /* Remote Fault Detect */ 2552#define MII_SR_REMOTE_FAULT 0x0010 /* Remote Fault Detect */
2730#define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */ 2553#define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */
2731#define MII_SR_PREAMBLE_SUPPRESS 0x0040 /* Preamble may be suppressed */ 2554#define MII_SR_PREAMBLE_SUPPRESS 0x0040 /* Preamble may be suppressed */
2732#define MII_SR_EXTENDED_STATUS 0x0100 /* Ext. status info in Reg 0x0F */ 2555#define MII_SR_EXTENDED_STATUS 0x0100 /* Ext. status info in Reg 0x0F */
2733#define MII_SR_100T2_HD_CAPS 0x0200 /* 100T2 Half Duplex Capable */ 2556#define MII_SR_100T2_HD_CAPS 0x0200 /* 100T2 Half Duplex Capable */
2734#define MII_SR_100T2_FD_CAPS 0x0400 /* 100T2 Full Duplex Capable */ 2557#define MII_SR_100T2_FD_CAPS 0x0400 /* 100T2 Full Duplex Capable */
2735#define MII_SR_10T_HD_CAPS 0x0800 /* 10T Half Duplex Capable */ 2558#define MII_SR_10T_HD_CAPS 0x0800 /* 10T Half Duplex Capable */
2736#define MII_SR_10T_FD_CAPS 0x1000 /* 10T Full Duplex Capable */ 2559#define MII_SR_10T_FD_CAPS 0x1000 /* 10T Full Duplex Capable */
2737#define MII_SR_100X_HD_CAPS 0x2000 /* 100X Half Duplex Capable */ 2560#define MII_SR_100X_HD_CAPS 0x2000 /* 100X Half Duplex Capable */
2738#define MII_SR_100X_FD_CAPS 0x4000 /* 100X Full Duplex Capable */ 2561#define MII_SR_100X_FD_CAPS 0x4000 /* 100X Full Duplex Capable */
2739#define MII_SR_100T4_CAPS 0x8000 /* 100T4 Capable */ 2562#define MII_SR_100T4_CAPS 0x8000 /* 100T4 Capable */
2740 2563
2741/* Autoneg Advertisement Register */ 2564/* Autoneg Advertisement Register */
2742#define NWAY_AR_SELECTOR_FIELD 0x0001 /* indicates IEEE 802.3 CSMA/CD */ 2565#define NWAY_AR_SELECTOR_FIELD 0x0001 /* indicates IEEE 802.3 CSMA/CD */
2743#define NWAY_AR_10T_HD_CAPS 0x0020 /* 10T Half Duplex Capable */ 2566#define NWAY_AR_10T_HD_CAPS 0x0020 /* 10T Half Duplex Capable */
2744#define NWAY_AR_10T_FD_CAPS 0x0040 /* 10T Full Duplex Capable */ 2567#define NWAY_AR_10T_FD_CAPS 0x0040 /* 10T Full Duplex Capable */
2745#define NWAY_AR_100TX_HD_CAPS 0x0080 /* 100TX Half Duplex Capable */ 2568#define NWAY_AR_100TX_HD_CAPS 0x0080 /* 100TX Half Duplex Capable */
2746#define NWAY_AR_100TX_FD_CAPS 0x0100 /* 100TX Full Duplex Capable */ 2569#define NWAY_AR_100TX_FD_CAPS 0x0100 /* 100TX Full Duplex Capable */
2747#define NWAY_AR_100T4_CAPS 0x0200 /* 100T4 Capable */ 2570#define NWAY_AR_100T4_CAPS 0x0200 /* 100T4 Capable */
2748#define NWAY_AR_PAUSE 0x0400 /* Pause operation desired */ 2571#define NWAY_AR_PAUSE 0x0400 /* Pause operation desired */
2749#define NWAY_AR_ASM_DIR 0x0800 /* Asymmetric Pause Direction bit */ 2572#define NWAY_AR_ASM_DIR 0x0800 /* Asymmetric Pause Direction bit */
2750#define NWAY_AR_REMOTE_FAULT 0x2000 /* Remote Fault detected */ 2573#define NWAY_AR_REMOTE_FAULT 0x2000 /* Remote Fault detected */
2751#define NWAY_AR_NEXT_PAGE 0x8000 /* Next Page ability supported */ 2574#define NWAY_AR_NEXT_PAGE 0x8000 /* Next Page ability supported */
2752 2575
2753/* Link Partner Ability Register (Base Page) */ 2576/* Link Partner Ability Register (Base Page) */
2754#define NWAY_LPAR_SELECTOR_FIELD 0x0000 /* LP protocol selector field */ 2577#define NWAY_LPAR_SELECTOR_FIELD 0x0000 /* LP protocol selector field */
2755#define NWAY_LPAR_10T_HD_CAPS 0x0020 /* LP is 10T Half Duplex Capable */ 2578#define NWAY_LPAR_10T_HD_CAPS 0x0020 /* LP is 10T Half Duplex Capable */
2756#define NWAY_LPAR_10T_FD_CAPS 0x0040 /* LP is 10T Full Duplex Capable */ 2579#define NWAY_LPAR_10T_FD_CAPS 0x0040 /* LP is 10T Full Duplex Capable */
2757#define NWAY_LPAR_100TX_HD_CAPS 0x0080 /* LP is 100TX Half Duplex Capable */ 2580#define NWAY_LPAR_100TX_HD_CAPS 0x0080 /* LP is 100TX Half Duplex Capable */
2758#define NWAY_LPAR_100TX_FD_CAPS 0x0100 /* LP is 100TX Full Duplex Capable */ 2581#define NWAY_LPAR_100TX_FD_CAPS 0x0100 /* LP is 100TX Full Duplex Capable */
2759#define NWAY_LPAR_100T4_CAPS 0x0200 /* LP is 100T4 Capable */ 2582#define NWAY_LPAR_100T4_CAPS 0x0200 /* LP is 100T4 Capable */
2760#define NWAY_LPAR_PAUSE 0x0400 /* LP Pause operation desired */ 2583#define NWAY_LPAR_PAUSE 0x0400 /* LP Pause operation desired */
2761#define NWAY_LPAR_ASM_DIR 0x0800 /* LP Asymmetric Pause Direction bit */ 2584#define NWAY_LPAR_ASM_DIR 0x0800 /* LP Asymmetric Pause Direction bit */
2762#define NWAY_LPAR_REMOTE_FAULT 0x2000 /* LP has detected Remote Fault */ 2585#define NWAY_LPAR_REMOTE_FAULT 0x2000 /* LP has detected Remote Fault */
2763#define NWAY_LPAR_ACKNOWLEDGE 0x4000 /* LP has rx'd link code word */ 2586#define NWAY_LPAR_ACKNOWLEDGE 0x4000 /* LP has rx'd link code word */
2764#define NWAY_LPAR_NEXT_PAGE 0x8000 /* Next Page ability supported */ 2587#define NWAY_LPAR_NEXT_PAGE 0x8000 /* Next Page ability supported */
2765 2588
2766/* Autoneg Expansion Register */ 2589/* Autoneg Expansion Register */
2767#define NWAY_ER_LP_NWAY_CAPS 0x0001 /* LP has Auto Neg Capability */ 2590#define NWAY_ER_LP_NWAY_CAPS 0x0001 /* LP has Auto Neg Capability */
2768#define NWAY_ER_PAGE_RXD 0x0002 /* LP is 10T Half Duplex Capable */ 2591#define NWAY_ER_PAGE_RXD 0x0002 /* LP is 10T Half Duplex Capable */
2769#define NWAY_ER_NEXT_PAGE_CAPS 0x0004 /* LP is 10T Full Duplex Capable */ 2592#define NWAY_ER_NEXT_PAGE_CAPS 0x0004 /* LP is 10T Full Duplex Capable */
2770#define NWAY_ER_LP_NEXT_PAGE_CAPS 0x0008 /* LP is 100TX Half Duplex Capable */ 2593#define NWAY_ER_LP_NEXT_PAGE_CAPS 0x0008 /* LP is 100TX Half Duplex Capable */
2771#define NWAY_ER_PAR_DETECT_FAULT 0x0010 /* LP is 100TX Full Duplex Capable */ 2594#define NWAY_ER_PAR_DETECT_FAULT 0x0010 /* LP is 100TX Full Duplex Capable */
2772 2595
2773/* Next Page TX Register */ 2596/* Next Page TX Register */
2774#define NPTX_MSG_CODE_FIELD 0x0001 /* NP msg code or unformatted data */ 2597#define NPTX_MSG_CODE_FIELD 0x0001 /* NP msg code or unformatted data */
2775#define NPTX_TOGGLE 0x0800 /* Toggles between exchanges 2598#define NPTX_TOGGLE 0x0800 /* Toggles between exchanges
2776 * of different NP 2599 * of different NP
2777 */ 2600 */
2778#define NPTX_ACKNOWLDGE2 0x1000 /* 1 = will comply with msg 2601#define NPTX_ACKNOWLDGE2 0x1000 /* 1 = will comply with msg
2779 * 0 = cannot comply with msg 2602 * 0 = cannot comply with msg
2780 */ 2603 */
2781#define NPTX_MSG_PAGE 0x2000 /* formatted(1)/unformatted(0) pg */ 2604#define NPTX_MSG_PAGE 0x2000 /* formatted(1)/unformatted(0) pg */
2782#define NPTX_NEXT_PAGE 0x8000 /* 1 = addition NP will follow 2605#define NPTX_NEXT_PAGE 0x8000 /* 1 = addition NP will follow
2783 * 0 = sending last NP 2606 * 0 = sending last NP
2784 */ 2607 */
2785 2608
2786/* Link Partner Next Page Register */ 2609/* Link Partner Next Page Register */
2787#define LP_RNPR_MSG_CODE_FIELD 0x0001 /* NP msg code or unformatted data */ 2610#define LP_RNPR_MSG_CODE_FIELD 0x0001 /* NP msg code or unformatted data */
2788#define LP_RNPR_TOGGLE 0x0800 /* Toggles between exchanges 2611#define LP_RNPR_TOGGLE 0x0800 /* Toggles between exchanges
2789 * of different NP 2612 * of different NP
2790 */ 2613 */
2791#define LP_RNPR_ACKNOWLDGE2 0x1000 /* 1 = will comply with msg 2614#define LP_RNPR_ACKNOWLDGE2 0x1000 /* 1 = will comply with msg
2792 * 0 = cannot comply with msg 2615 * 0 = cannot comply with msg
2793 */ 2616 */
2794#define LP_RNPR_MSG_PAGE 0x2000 /* formatted(1)/unformatted(0) pg */ 2617#define LP_RNPR_MSG_PAGE 0x2000 /* formatted(1)/unformatted(0) pg */
2795#define LP_RNPR_ACKNOWLDGE 0x4000 /* 1 = ACK / 0 = NO ACK */ 2618#define LP_RNPR_ACKNOWLDGE 0x4000 /* 1 = ACK / 0 = NO ACK */
2796#define LP_RNPR_NEXT_PAGE 0x8000 /* 1 = addition NP will follow 2619#define LP_RNPR_NEXT_PAGE 0x8000 /* 1 = addition NP will follow
2797 * 0 = sending last NP 2620 * 0 = sending last NP
2798 */ 2621 */
2799 2622
2800/* 1000BASE-T Control Register */ 2623/* 1000BASE-T Control Register */
2801#define CR_1000T_ASYM_PAUSE 0x0080 /* Advertise asymmetric pause bit */ 2624#define CR_1000T_ASYM_PAUSE 0x0080 /* Advertise asymmetric pause bit */
2802#define CR_1000T_HD_CAPS 0x0100 /* Advertise 1000T HD capability */ 2625#define CR_1000T_HD_CAPS 0x0100 /* Advertise 1000T HD capability */
2803#define CR_1000T_FD_CAPS 0x0200 /* Advertise 1000T FD capability */ 2626#define CR_1000T_FD_CAPS 0x0200 /* Advertise 1000T FD capability */
2804#define CR_1000T_REPEATER_DTE 0x0400 /* 1=Repeater/switch device port */ 2627#define CR_1000T_REPEATER_DTE 0x0400 /* 1=Repeater/switch device port */
2805 /* 0=DTE device */ 2628 /* 0=DTE device */
2806#define CR_1000T_MS_VALUE 0x0800 /* 1=Configure PHY as Master */ 2629#define CR_1000T_MS_VALUE 0x0800 /* 1=Configure PHY as Master */
2807 /* 0=Configure PHY as Slave */ 2630 /* 0=Configure PHY as Slave */
2808#define CR_1000T_MS_ENABLE 0x1000 /* 1=Master/Slave manual config value */ 2631#define CR_1000T_MS_ENABLE 0x1000 /* 1=Master/Slave manual config value */
2809 /* 0=Automatic Master/Slave config */ 2632 /* 0=Automatic Master/Slave config */
2810#define CR_1000T_TEST_MODE_NORMAL 0x0000 /* Normal Operation */ 2633#define CR_1000T_TEST_MODE_NORMAL 0x0000 /* Normal Operation */
2811#define CR_1000T_TEST_MODE_1 0x2000 /* Transmit Waveform test */ 2634#define CR_1000T_TEST_MODE_1 0x2000 /* Transmit Waveform test */
2812#define CR_1000T_TEST_MODE_2 0x4000 /* Master Transmit Jitter test */ 2635#define CR_1000T_TEST_MODE_2 0x4000 /* Master Transmit Jitter test */
2813#define CR_1000T_TEST_MODE_3 0x6000 /* Slave Transmit Jitter test */ 2636#define CR_1000T_TEST_MODE_3 0x6000 /* Slave Transmit Jitter test */
2814#define CR_1000T_TEST_MODE_4 0x8000 /* Transmitter Distortion test */ 2637#define CR_1000T_TEST_MODE_4 0x8000 /* Transmitter Distortion test */
2815 2638
2816/* 1000BASE-T Status Register */ 2639/* 1000BASE-T Status Register */
2817#define SR_1000T_IDLE_ERROR_CNT 0x00FF /* Num idle errors since last read */ 2640#define SR_1000T_IDLE_ERROR_CNT 0x00FF /* Num idle errors since last read */
2818#define SR_1000T_ASYM_PAUSE_DIR 0x0100 /* LP asymmetric pause direction bit */ 2641#define SR_1000T_ASYM_PAUSE_DIR 0x0100 /* LP asymmetric pause direction bit */
2819#define SR_1000T_LP_HD_CAPS 0x0400 /* LP is 1000T HD capable */ 2642#define SR_1000T_LP_HD_CAPS 0x0400 /* LP is 1000T HD capable */
2820#define SR_1000T_LP_FD_CAPS 0x0800 /* LP is 1000T FD capable */ 2643#define SR_1000T_LP_FD_CAPS 0x0800 /* LP is 1000T FD capable */
2821#define SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */ 2644#define SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */
2822#define SR_1000T_LOCAL_RX_STATUS 0x2000 /* Local receiver OK */ 2645#define SR_1000T_LOCAL_RX_STATUS 0x2000 /* Local receiver OK */
2823#define SR_1000T_MS_CONFIG_RES 0x4000 /* 1=Local TX is Master, 0=Slave */ 2646#define SR_1000T_MS_CONFIG_RES 0x4000 /* 1=Local TX is Master, 0=Slave */
2824#define SR_1000T_MS_CONFIG_FAULT 0x8000 /* Master/Slave config fault */ 2647#define SR_1000T_MS_CONFIG_FAULT 0x8000 /* Master/Slave config fault */
2825#define SR_1000T_REMOTE_RX_STATUS_SHIFT 12 2648#define SR_1000T_REMOTE_RX_STATUS_SHIFT 12
2826#define SR_1000T_LOCAL_RX_STATUS_SHIFT 13 2649#define SR_1000T_LOCAL_RX_STATUS_SHIFT 13
2827#define SR_1000T_PHY_EXCESSIVE_IDLE_ERR_COUNT 5 2650#define SR_1000T_PHY_EXCESSIVE_IDLE_ERR_COUNT 5
@@ -2829,64 +2652,64 @@ struct e1000_host_command_info {
2829#define FFE_IDLE_ERR_COUNT_TIMEOUT_100 100 2652#define FFE_IDLE_ERR_COUNT_TIMEOUT_100 100
2830 2653
2831/* Extended Status Register */ 2654/* Extended Status Register */
2832#define IEEE_ESR_1000T_HD_CAPS 0x1000 /* 1000T HD capable */ 2655#define IEEE_ESR_1000T_HD_CAPS 0x1000 /* 1000T HD capable */
2833#define IEEE_ESR_1000T_FD_CAPS 0x2000 /* 1000T FD capable */ 2656#define IEEE_ESR_1000T_FD_CAPS 0x2000 /* 1000T FD capable */
2834#define IEEE_ESR_1000X_HD_CAPS 0x4000 /* 1000X HD capable */ 2657#define IEEE_ESR_1000X_HD_CAPS 0x4000 /* 1000X HD capable */
2835#define IEEE_ESR_1000X_FD_CAPS 0x8000 /* 1000X FD capable */ 2658#define IEEE_ESR_1000X_FD_CAPS 0x8000 /* 1000X FD capable */
2836 2659
2837#define PHY_TX_POLARITY_MASK 0x0100 /* register 10h bit 8 (polarity bit) */ 2660#define PHY_TX_POLARITY_MASK 0x0100 /* register 10h bit 8 (polarity bit) */
2838#define PHY_TX_NORMAL_POLARITY 0 /* register 10h bit 8 (normal polarity) */ 2661#define PHY_TX_NORMAL_POLARITY 0 /* register 10h bit 8 (normal polarity) */
2839 2662
2840#define AUTO_POLARITY_DISABLE 0x0010 /* register 11h bit 4 */ 2663#define AUTO_POLARITY_DISABLE 0x0010 /* register 11h bit 4 */
2841 /* (0=enable, 1=disable) */ 2664 /* (0=enable, 1=disable) */
2842 2665
2843/* M88E1000 PHY Specific Control Register */ 2666/* M88E1000 PHY Specific Control Register */
2844#define M88E1000_PSCR_JABBER_DISABLE 0x0001 /* 1=Jabber Function disabled */ 2667#define M88E1000_PSCR_JABBER_DISABLE 0x0001 /* 1=Jabber Function disabled */
2845#define M88E1000_PSCR_POLARITY_REVERSAL 0x0002 /* 1=Polarity Reversal enabled */ 2668#define M88E1000_PSCR_POLARITY_REVERSAL 0x0002 /* 1=Polarity Reversal enabled */
2846#define M88E1000_PSCR_SQE_TEST 0x0004 /* 1=SQE Test enabled */ 2669#define M88E1000_PSCR_SQE_TEST 0x0004 /* 1=SQE Test enabled */
2847#define M88E1000_PSCR_CLK125_DISABLE 0x0010 /* 1=CLK125 low, 2670#define M88E1000_PSCR_CLK125_DISABLE 0x0010 /* 1=CLK125 low,
2848 * 0=CLK125 toggling 2671 * 0=CLK125 toggling
2849 */ 2672 */
2850#define M88E1000_PSCR_MDI_MANUAL_MODE 0x0000 /* MDI Crossover Mode bits 6:5 */ 2673#define M88E1000_PSCR_MDI_MANUAL_MODE 0x0000 /* MDI Crossover Mode bits 6:5 */
2851 /* Manual MDI configuration */ 2674 /* Manual MDI configuration */
2852#define M88E1000_PSCR_MDIX_MANUAL_MODE 0x0020 /* Manual MDIX configuration */ 2675#define M88E1000_PSCR_MDIX_MANUAL_MODE 0x0020 /* Manual MDIX configuration */
2853#define M88E1000_PSCR_AUTO_X_1000T 0x0040 /* 1000BASE-T: Auto crossover, 2676#define M88E1000_PSCR_AUTO_X_1000T 0x0040 /* 1000BASE-T: Auto crossover,
2854 * 100BASE-TX/10BASE-T: 2677 * 100BASE-TX/10BASE-T:
2855 * MDI Mode 2678 * MDI Mode
2856 */ 2679 */
2857#define M88E1000_PSCR_AUTO_X_MODE 0x0060 /* Auto crossover enabled 2680#define M88E1000_PSCR_AUTO_X_MODE 0x0060 /* Auto crossover enabled
2858 * all speeds. 2681 * all speeds.
2859 */ 2682 */
2860#define M88E1000_PSCR_10BT_EXT_DIST_ENABLE 0x0080 2683#define M88E1000_PSCR_10BT_EXT_DIST_ENABLE 0x0080
2861 /* 1=Enable Extended 10BASE-T distance 2684 /* 1=Enable Extended 10BASE-T distance
2862 * (Lower 10BASE-T RX Threshold) 2685 * (Lower 10BASE-T RX Threshold)
2863 * 0=Normal 10BASE-T RX Threshold */ 2686 * 0=Normal 10BASE-T RX Threshold */
2864#define M88E1000_PSCR_MII_5BIT_ENABLE 0x0100 2687#define M88E1000_PSCR_MII_5BIT_ENABLE 0x0100
2865 /* 1=5-Bit interface in 100BASE-TX 2688 /* 1=5-Bit interface in 100BASE-TX
2866 * 0=MII interface in 100BASE-TX */ 2689 * 0=MII interface in 100BASE-TX */
2867#define M88E1000_PSCR_SCRAMBLER_DISABLE 0x0200 /* 1=Scrambler disable */ 2690#define M88E1000_PSCR_SCRAMBLER_DISABLE 0x0200 /* 1=Scrambler disable */
2868#define M88E1000_PSCR_FORCE_LINK_GOOD 0x0400 /* 1=Force link good */ 2691#define M88E1000_PSCR_FORCE_LINK_GOOD 0x0400 /* 1=Force link good */
2869#define M88E1000_PSCR_ASSERT_CRS_ON_TX 0x0800 /* 1=Assert CRS on Transmit */ 2692#define M88E1000_PSCR_ASSERT_CRS_ON_TX 0x0800 /* 1=Assert CRS on Transmit */
2870 2693
2871#define M88E1000_PSCR_POLARITY_REVERSAL_SHIFT 1 2694#define M88E1000_PSCR_POLARITY_REVERSAL_SHIFT 1
2872#define M88E1000_PSCR_AUTO_X_MODE_SHIFT 5 2695#define M88E1000_PSCR_AUTO_X_MODE_SHIFT 5
2873#define M88E1000_PSCR_10BT_EXT_DIST_ENABLE_SHIFT 7 2696#define M88E1000_PSCR_10BT_EXT_DIST_ENABLE_SHIFT 7
2874 2697
2875/* M88E1000 PHY Specific Status Register */ 2698/* M88E1000 PHY Specific Status Register */
2876#define M88E1000_PSSR_JABBER 0x0001 /* 1=Jabber */ 2699#define M88E1000_PSSR_JABBER 0x0001 /* 1=Jabber */
2877#define M88E1000_PSSR_REV_POLARITY 0x0002 /* 1=Polarity reversed */ 2700#define M88E1000_PSSR_REV_POLARITY 0x0002 /* 1=Polarity reversed */
2878#define M88E1000_PSSR_DOWNSHIFT 0x0020 /* 1=Downshifted */ 2701#define M88E1000_PSSR_DOWNSHIFT 0x0020 /* 1=Downshifted */
2879#define M88E1000_PSSR_MDIX 0x0040 /* 1=MDIX; 0=MDI */ 2702#define M88E1000_PSSR_MDIX 0x0040 /* 1=MDIX; 0=MDI */
2880#define M88E1000_PSSR_CABLE_LENGTH 0x0380 /* 0=<50M;1=50-80M;2=80-110M; 2703#define M88E1000_PSSR_CABLE_LENGTH 0x0380 /* 0=<50M;1=50-80M;2=80-110M;
2881 * 3=110-140M;4=>140M */ 2704 * 3=110-140M;4=>140M */
2882#define M88E1000_PSSR_LINK 0x0400 /* 1=Link up, 0=Link down */ 2705#define M88E1000_PSSR_LINK 0x0400 /* 1=Link up, 0=Link down */
2883#define M88E1000_PSSR_SPD_DPLX_RESOLVED 0x0800 /* 1=Speed & Duplex resolved */ 2706#define M88E1000_PSSR_SPD_DPLX_RESOLVED 0x0800 /* 1=Speed & Duplex resolved */
2884#define M88E1000_PSSR_PAGE_RCVD 0x1000 /* 1=Page received */ 2707#define M88E1000_PSSR_PAGE_RCVD 0x1000 /* 1=Page received */
2885#define M88E1000_PSSR_DPLX 0x2000 /* 1=Duplex 0=Half Duplex */ 2708#define M88E1000_PSSR_DPLX 0x2000 /* 1=Duplex 0=Half Duplex */
2886#define M88E1000_PSSR_SPEED 0xC000 /* Speed, bits 14:15 */ 2709#define M88E1000_PSSR_SPEED 0xC000 /* Speed, bits 14:15 */
2887#define M88E1000_PSSR_10MBS 0x0000 /* 00=10Mbs */ 2710#define M88E1000_PSSR_10MBS 0x0000 /* 00=10Mbs */
2888#define M88E1000_PSSR_100MBS 0x4000 /* 01=100Mbs */ 2711#define M88E1000_PSSR_100MBS 0x4000 /* 01=100Mbs */
2889#define M88E1000_PSSR_1000MBS 0x8000 /* 10=1000Mbs */ 2712#define M88E1000_PSSR_1000MBS 0x8000 /* 10=1000Mbs */
2890 2713
2891#define M88E1000_PSSR_REV_POLARITY_SHIFT 1 2714#define M88E1000_PSSR_REV_POLARITY_SHIFT 1
2892#define M88E1000_PSSR_DOWNSHIFT_SHIFT 5 2715#define M88E1000_PSSR_DOWNSHIFT_SHIFT 5
@@ -2894,12 +2717,12 @@ struct e1000_host_command_info {
2894#define M88E1000_PSSR_CABLE_LENGTH_SHIFT 7 2717#define M88E1000_PSSR_CABLE_LENGTH_SHIFT 7
2895 2718
2896/* M88E1000 Extended PHY Specific Control Register */ 2719/* M88E1000 Extended PHY Specific Control Register */
2897#define M88E1000_EPSCR_FIBER_LOOPBACK 0x4000 /* 1=Fiber loopback */ 2720#define M88E1000_EPSCR_FIBER_LOOPBACK 0x4000 /* 1=Fiber loopback */
2898#define M88E1000_EPSCR_DOWN_NO_IDLE 0x8000 /* 1=Lost lock detect enabled. 2721#define M88E1000_EPSCR_DOWN_NO_IDLE 0x8000 /* 1=Lost lock detect enabled.
2899 * Will assert lost lock and bring 2722 * Will assert lost lock and bring
2900 * link down if idle not seen 2723 * link down if idle not seen
2901 * within 1ms in 1000BASE-T 2724 * within 1ms in 1000BASE-T
2902 */ 2725 */
2903/* Number of times we will attempt to autonegotiate before downshifting if we 2726/* Number of times we will attempt to autonegotiate before downshifting if we
2904 * are the master */ 2727 * are the master */
2905#define M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK 0x0C00 2728#define M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK 0x0C00
@@ -2914,9 +2737,9 @@ struct e1000_host_command_info {
2914#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X 0x0100 2737#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X 0x0100
2915#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_2X 0x0200 2738#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_2X 0x0200
2916#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_3X 0x0300 2739#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_3X 0x0300
2917#define M88E1000_EPSCR_TX_CLK_2_5 0x0060 /* 2.5 MHz TX_CLK */ 2740#define M88E1000_EPSCR_TX_CLK_2_5 0x0060 /* 2.5 MHz TX_CLK */
2918#define M88E1000_EPSCR_TX_CLK_25 0x0070 /* 25 MHz TX_CLK */ 2741#define M88E1000_EPSCR_TX_CLK_25 0x0070 /* 25 MHz TX_CLK */
2919#define M88E1000_EPSCR_TX_CLK_0 0x0000 /* NO TX_CLK */ 2742#define M88E1000_EPSCR_TX_CLK_0 0x0000 /* NO TX_CLK */
2920 2743
2921/* M88EC018 Rev 2 specific DownShift settings */ 2744/* M88EC018 Rev 2 specific DownShift settings */
2922#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK 0x0E00 2745#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK 0x0E00
@@ -2938,18 +2761,18 @@ struct e1000_host_command_info {
2938#define IGP01E1000_PSCFR_DISABLE_TRANSMIT 0x2000 2761#define IGP01E1000_PSCFR_DISABLE_TRANSMIT 0x2000
2939 2762
2940/* IGP01E1000 Specific Port Status Register - R/O */ 2763/* IGP01E1000 Specific Port Status Register - R/O */
2941#define IGP01E1000_PSSR_AUTONEG_FAILED 0x0001 /* RO LH SC */ 2764#define IGP01E1000_PSSR_AUTONEG_FAILED 0x0001 /* RO LH SC */
2942#define IGP01E1000_PSSR_POLARITY_REVERSED 0x0002 2765#define IGP01E1000_PSSR_POLARITY_REVERSED 0x0002
2943#define IGP01E1000_PSSR_CABLE_LENGTH 0x007C 2766#define IGP01E1000_PSSR_CABLE_LENGTH 0x007C
2944#define IGP01E1000_PSSR_FULL_DUPLEX 0x0200 2767#define IGP01E1000_PSSR_FULL_DUPLEX 0x0200
2945#define IGP01E1000_PSSR_LINK_UP 0x0400 2768#define IGP01E1000_PSSR_LINK_UP 0x0400
2946#define IGP01E1000_PSSR_MDIX 0x0800 2769#define IGP01E1000_PSSR_MDIX 0x0800
2947#define IGP01E1000_PSSR_SPEED_MASK 0xC000 /* speed bits mask */ 2770#define IGP01E1000_PSSR_SPEED_MASK 0xC000 /* speed bits mask */
2948#define IGP01E1000_PSSR_SPEED_10MBPS 0x4000 2771#define IGP01E1000_PSSR_SPEED_10MBPS 0x4000
2949#define IGP01E1000_PSSR_SPEED_100MBPS 0x8000 2772#define IGP01E1000_PSSR_SPEED_100MBPS 0x8000
2950#define IGP01E1000_PSSR_SPEED_1000MBPS 0xC000 2773#define IGP01E1000_PSSR_SPEED_1000MBPS 0xC000
2951#define IGP01E1000_PSSR_CABLE_LENGTH_SHIFT 0x0002 /* shift right 2 */ 2774#define IGP01E1000_PSSR_CABLE_LENGTH_SHIFT 0x0002 /* shift right 2 */
2952#define IGP01E1000_PSSR_MDIX_SHIFT 0x000B /* shift right 11 */ 2775#define IGP01E1000_PSSR_MDIX_SHIFT 0x000B /* shift right 11 */
2953 2776
2954/* IGP01E1000 Specific Port Control Register - R/W */ 2777/* IGP01E1000 Specific Port Control Register - R/W */
2955#define IGP01E1000_PSCR_TP_LOOPBACK 0x0010 2778#define IGP01E1000_PSCR_TP_LOOPBACK 0x0010
@@ -2957,16 +2780,16 @@ struct e1000_host_command_info {
2957#define IGP01E1000_PSCR_TEN_CRS_SELECT 0x0400 2780#define IGP01E1000_PSCR_TEN_CRS_SELECT 0x0400
2958#define IGP01E1000_PSCR_FLIP_CHIP 0x0800 2781#define IGP01E1000_PSCR_FLIP_CHIP 0x0800
2959#define IGP01E1000_PSCR_AUTO_MDIX 0x1000 2782#define IGP01E1000_PSCR_AUTO_MDIX 0x1000
2960#define IGP01E1000_PSCR_FORCE_MDI_MDIX 0x2000 /* 0-MDI, 1-MDIX */ 2783#define IGP01E1000_PSCR_FORCE_MDI_MDIX 0x2000 /* 0-MDI, 1-MDIX */
2961 2784
2962/* IGP01E1000 Specific Port Link Health Register */ 2785/* IGP01E1000 Specific Port Link Health Register */
2963#define IGP01E1000_PLHR_SS_DOWNGRADE 0x8000 2786#define IGP01E1000_PLHR_SS_DOWNGRADE 0x8000
2964#define IGP01E1000_PLHR_GIG_SCRAMBLER_ERROR 0x4000 2787#define IGP01E1000_PLHR_GIG_SCRAMBLER_ERROR 0x4000
2965#define IGP01E1000_PLHR_MASTER_FAULT 0x2000 2788#define IGP01E1000_PLHR_MASTER_FAULT 0x2000
2966#define IGP01E1000_PLHR_MASTER_RESOLUTION 0x1000 2789#define IGP01E1000_PLHR_MASTER_RESOLUTION 0x1000
2967#define IGP01E1000_PLHR_GIG_REM_RCVR_NOK 0x0800 /* LH */ 2790#define IGP01E1000_PLHR_GIG_REM_RCVR_NOK 0x0800 /* LH */
2968#define IGP01E1000_PLHR_IDLE_ERROR_CNT_OFLOW 0x0400 /* LH */ 2791#define IGP01E1000_PLHR_IDLE_ERROR_CNT_OFLOW 0x0400 /* LH */
2969#define IGP01E1000_PLHR_DATA_ERR_1 0x0200 /* LH */ 2792#define IGP01E1000_PLHR_DATA_ERR_1 0x0200 /* LH */
2970#define IGP01E1000_PLHR_DATA_ERR_0 0x0100 2793#define IGP01E1000_PLHR_DATA_ERR_0 0x0100
2971#define IGP01E1000_PLHR_AUTONEG_FAULT 0x0040 2794#define IGP01E1000_PLHR_AUTONEG_FAULT 0x0040
2972#define IGP01E1000_PLHR_AUTONEG_ACTIVE 0x0010 2795#define IGP01E1000_PLHR_AUTONEG_ACTIVE 0x0010
@@ -2981,9 +2804,9 @@ struct e1000_host_command_info {
2981#define IGP01E1000_MSE_CHANNEL_B 0x0F00 2804#define IGP01E1000_MSE_CHANNEL_B 0x0F00
2982#define IGP01E1000_MSE_CHANNEL_A 0xF000 2805#define IGP01E1000_MSE_CHANNEL_A 0xF000
2983 2806
2984#define IGP02E1000_PM_SPD 0x0001 /* Smart Power Down */ 2807#define IGP02E1000_PM_SPD 0x0001 /* Smart Power Down */
2985#define IGP02E1000_PM_D3_LPLU 0x0004 /* Enable LPLU in non-D0a modes */ 2808#define IGP02E1000_PM_D3_LPLU 0x0004 /* Enable LPLU in non-D0a modes */
2986#define IGP02E1000_PM_D0_LPLU 0x0002 /* Enable LPLU in D0a mode */ 2809#define IGP02E1000_PM_D0_LPLU 0x0002 /* Enable LPLU in D0a mode */
2987 2810
2988/* IGP01E1000 DSP reset macros */ 2811/* IGP01E1000 DSP reset macros */
2989#define DSP_RESET_ENABLE 0x0 2812#define DSP_RESET_ENABLE 0x0
@@ -2992,8 +2815,8 @@ struct e1000_host_command_info {
2992 2815
2993/* IGP01E1000 & IGP02E1000 AGC Registers */ 2816/* IGP01E1000 & IGP02E1000 AGC Registers */
2994 2817
2995#define IGP01E1000_AGC_LENGTH_SHIFT 7 /* Coarse - 13:11, Fine - 10:7 */ 2818#define IGP01E1000_AGC_LENGTH_SHIFT 7 /* Coarse - 13:11, Fine - 10:7 */
2996#define IGP02E1000_AGC_LENGTH_SHIFT 9 /* Coarse - 15:13, Fine - 12:9 */ 2819#define IGP02E1000_AGC_LENGTH_SHIFT 9 /* Coarse - 15:13, Fine - 12:9 */
2997 2820
2998/* IGP02E1000 AGC Register Length 9-bit mask */ 2821/* IGP02E1000 AGC Register Length 9-bit mask */
2999#define IGP02E1000_AGC_LENGTH_MASK 0x7F 2822#define IGP02E1000_AGC_LENGTH_MASK 0x7F
@@ -3011,9 +2834,9 @@ struct e1000_host_command_info {
3011#define IGP01E1000_PHY_POLARITY_MASK 0x0078 2834#define IGP01E1000_PHY_POLARITY_MASK 0x0078
3012 2835
3013/* IGP01E1000 GMII FIFO Register */ 2836/* IGP01E1000 GMII FIFO Register */
3014#define IGP01E1000_GMII_FLEX_SPD 0x10 /* Enable flexible speed 2837#define IGP01E1000_GMII_FLEX_SPD 0x10 /* Enable flexible speed
3015 * on Link-Up */ 2838 * on Link-Up */
3016#define IGP01E1000_GMII_SPD 0x20 /* Enable SPD */ 2839#define IGP01E1000_GMII_SPD 0x20 /* Enable SPD */
3017 2840
3018/* IGP01E1000 Analog Register */ 2841/* IGP01E1000 Analog Register */
3019#define IGP01E1000_ANALOG_SPARE_FUSE_STATUS 0x20D1 2842#define IGP01E1000_ANALOG_SPARE_FUSE_STATUS 0x20D1
@@ -3032,114 +2855,6 @@ struct e1000_host_command_info {
3032#define IGP01E1000_ANALOG_FUSE_FINE_1 0x0080 2855#define IGP01E1000_ANALOG_FUSE_FINE_1 0x0080
3033#define IGP01E1000_ANALOG_FUSE_FINE_10 0x0500 2856#define IGP01E1000_ANALOG_FUSE_FINE_10 0x0500
3034 2857
3035/* GG82563 PHY Specific Status Register (Page 0, Register 16 */
3036#define GG82563_PSCR_DISABLE_JABBER 0x0001 /* 1=Disable Jabber */
3037#define GG82563_PSCR_POLARITY_REVERSAL_DISABLE 0x0002 /* 1=Polarity Reversal Disabled */
3038#define GG82563_PSCR_POWER_DOWN 0x0004 /* 1=Power Down */
3039#define GG82563_PSCR_COPPER_TRANSMITER_DISABLE 0x0008 /* 1=Transmitter Disabled */
3040#define GG82563_PSCR_CROSSOVER_MODE_MASK 0x0060
3041#define GG82563_PSCR_CROSSOVER_MODE_MDI 0x0000 /* 00=Manual MDI configuration */
3042#define GG82563_PSCR_CROSSOVER_MODE_MDIX 0x0020 /* 01=Manual MDIX configuration */
3043#define GG82563_PSCR_CROSSOVER_MODE_AUTO 0x0060 /* 11=Automatic crossover */
3044#define GG82563_PSCR_ENALBE_EXTENDED_DISTANCE 0x0080 /* 1=Enable Extended Distance */
3045#define GG82563_PSCR_ENERGY_DETECT_MASK 0x0300
3046#define GG82563_PSCR_ENERGY_DETECT_OFF 0x0000 /* 00,01=Off */
3047#define GG82563_PSCR_ENERGY_DETECT_RX 0x0200 /* 10=Sense on Rx only (Energy Detect) */
3048#define GG82563_PSCR_ENERGY_DETECT_RX_TM 0x0300 /* 11=Sense and Tx NLP */
3049#define GG82563_PSCR_FORCE_LINK_GOOD 0x0400 /* 1=Force Link Good */
3050#define GG82563_PSCR_DOWNSHIFT_ENABLE 0x0800 /* 1=Enable Downshift */
3051#define GG82563_PSCR_DOWNSHIFT_COUNTER_MASK 0x7000
3052#define GG82563_PSCR_DOWNSHIFT_COUNTER_SHIFT 12
3053
3054/* PHY Specific Status Register (Page 0, Register 17) */
3055#define GG82563_PSSR_JABBER 0x0001 /* 1=Jabber */
3056#define GG82563_PSSR_POLARITY 0x0002 /* 1=Polarity Reversed */
3057#define GG82563_PSSR_LINK 0x0008 /* 1=Link is Up */
3058#define GG82563_PSSR_ENERGY_DETECT 0x0010 /* 1=Sleep, 0=Active */
3059#define GG82563_PSSR_DOWNSHIFT 0x0020 /* 1=Downshift */
3060#define GG82563_PSSR_CROSSOVER_STATUS 0x0040 /* 1=MDIX, 0=MDI */
3061#define GG82563_PSSR_RX_PAUSE_ENABLED 0x0100 /* 1=Receive Pause Enabled */
3062#define GG82563_PSSR_TX_PAUSE_ENABLED 0x0200 /* 1=Transmit Pause Enabled */
3063#define GG82563_PSSR_LINK_UP 0x0400 /* 1=Link Up */
3064#define GG82563_PSSR_SPEED_DUPLEX_RESOLVED 0x0800 /* 1=Resolved */
3065#define GG82563_PSSR_PAGE_RECEIVED 0x1000 /* 1=Page Received */
3066#define GG82563_PSSR_DUPLEX 0x2000 /* 1-Full-Duplex */
3067#define GG82563_PSSR_SPEED_MASK 0xC000
3068#define GG82563_PSSR_SPEED_10MBPS 0x0000 /* 00=10Mbps */
3069#define GG82563_PSSR_SPEED_100MBPS 0x4000 /* 01=100Mbps */
3070#define GG82563_PSSR_SPEED_1000MBPS 0x8000 /* 10=1000Mbps */
3071
3072/* PHY Specific Status Register 2 (Page 0, Register 19) */
3073#define GG82563_PSSR2_JABBER 0x0001 /* 1=Jabber */
3074#define GG82563_PSSR2_POLARITY_CHANGED 0x0002 /* 1=Polarity Changed */
3075#define GG82563_PSSR2_ENERGY_DETECT_CHANGED 0x0010 /* 1=Energy Detect Changed */
3076#define GG82563_PSSR2_DOWNSHIFT_INTERRUPT 0x0020 /* 1=Downshift Detected */
3077#define GG82563_PSSR2_MDI_CROSSOVER_CHANGE 0x0040 /* 1=Crossover Changed */
3078#define GG82563_PSSR2_FALSE_CARRIER 0x0100 /* 1=False Carrier */
3079#define GG82563_PSSR2_SYMBOL_ERROR 0x0200 /* 1=Symbol Error */
3080#define GG82563_PSSR2_LINK_STATUS_CHANGED 0x0400 /* 1=Link Status Changed */
3081#define GG82563_PSSR2_AUTO_NEG_COMPLETED 0x0800 /* 1=Auto-Neg Completed */
3082#define GG82563_PSSR2_PAGE_RECEIVED 0x1000 /* 1=Page Received */
3083#define GG82563_PSSR2_DUPLEX_CHANGED 0x2000 /* 1=Duplex Changed */
3084#define GG82563_PSSR2_SPEED_CHANGED 0x4000 /* 1=Speed Changed */
3085#define GG82563_PSSR2_AUTO_NEG_ERROR 0x8000 /* 1=Auto-Neg Error */
3086
3087/* PHY Specific Control Register 2 (Page 0, Register 26) */
3088#define GG82563_PSCR2_10BT_POLARITY_FORCE 0x0002 /* 1=Force Negative Polarity */
3089#define GG82563_PSCR2_1000MB_TEST_SELECT_MASK 0x000C
3090#define GG82563_PSCR2_1000MB_TEST_SELECT_NORMAL 0x0000 /* 00,01=Normal Operation */
3091#define GG82563_PSCR2_1000MB_TEST_SELECT_112NS 0x0008 /* 10=Select 112ns Sequence */
3092#define GG82563_PSCR2_1000MB_TEST_SELECT_16NS 0x000C /* 11=Select 16ns Sequence */
3093#define GG82563_PSCR2_REVERSE_AUTO_NEG 0x2000 /* 1=Reverse Auto-Negotiation */
3094#define GG82563_PSCR2_1000BT_DISABLE 0x4000 /* 1=Disable 1000BASE-T */
3095#define GG82563_PSCR2_TRANSMITER_TYPE_MASK 0x8000
3096#define GG82563_PSCR2_TRANSMITTER_TYPE_CLASS_B 0x0000 /* 0=Class B */
3097#define GG82563_PSCR2_TRANSMITTER_TYPE_CLASS_A 0x8000 /* 1=Class A */
3098
3099/* MAC Specific Control Register (Page 2, Register 21) */
3100/* Tx clock speed for Link Down and 1000BASE-T for the following speeds */
3101#define GG82563_MSCR_TX_CLK_MASK 0x0007
3102#define GG82563_MSCR_TX_CLK_10MBPS_2_5MHZ 0x0004
3103#define GG82563_MSCR_TX_CLK_100MBPS_25MHZ 0x0005
3104#define GG82563_MSCR_TX_CLK_1000MBPS_2_5MHZ 0x0006
3105#define GG82563_MSCR_TX_CLK_1000MBPS_25MHZ 0x0007
3106
3107#define GG82563_MSCR_ASSERT_CRS_ON_TX 0x0010 /* 1=Assert */
3108
3109/* DSP Distance Register (Page 5, Register 26) */
3110#define GG82563_DSPD_CABLE_LENGTH 0x0007 /* 0 = <50M;
3111 1 = 50-80M;
3112 2 = 80-110M;
3113 3 = 110-140M;
3114 4 = >140M */
3115
3116/* Kumeran Mode Control Register (Page 193, Register 16) */
3117#define GG82563_KMCR_PHY_LEDS_EN 0x0020 /* 1=PHY LEDs, 0=Kumeran Inband LEDs */
3118#define GG82563_KMCR_FORCE_LINK_UP 0x0040 /* 1=Force Link Up */
3119#define GG82563_KMCR_SUPPRESS_SGMII_EPD_EXT 0x0080
3120#define GG82563_KMCR_MDIO_BUS_SPEED_SELECT_MASK 0x0400
3121#define GG82563_KMCR_MDIO_BUS_SPEED_SELECT 0x0400 /* 1=6.25MHz, 0=0.8MHz */
3122#define GG82563_KMCR_PASS_FALSE_CARRIER 0x0800
3123
3124/* Power Management Control Register (Page 193, Register 20) */
3125#define GG82563_PMCR_ENABLE_ELECTRICAL_IDLE 0x0001 /* 1=Enalbe SERDES Electrical Idle */
3126#define GG82563_PMCR_DISABLE_PORT 0x0002 /* 1=Disable Port */
3127#define GG82563_PMCR_DISABLE_SERDES 0x0004 /* 1=Disable SERDES */
3128#define GG82563_PMCR_REVERSE_AUTO_NEG 0x0008 /* 1=Enable Reverse Auto-Negotiation */
3129#define GG82563_PMCR_DISABLE_1000_NON_D0 0x0010 /* 1=Disable 1000Mbps Auto-Neg in non D0 */
3130#define GG82563_PMCR_DISABLE_1000 0x0020 /* 1=Disable 1000Mbps Auto-Neg Always */
3131#define GG82563_PMCR_REVERSE_AUTO_NEG_D0A 0x0040 /* 1=Enable D0a Reverse Auto-Negotiation */
3132#define GG82563_PMCR_FORCE_POWER_STATE 0x0080 /* 1=Force Power State */
3133#define GG82563_PMCR_PROGRAMMED_POWER_STATE_MASK 0x0300
3134#define GG82563_PMCR_PROGRAMMED_POWER_STATE_DR 0x0000 /* 00=Dr */
3135#define GG82563_PMCR_PROGRAMMED_POWER_STATE_D0U 0x0100 /* 01=D0u */
3136#define GG82563_PMCR_PROGRAMMED_POWER_STATE_D0A 0x0200 /* 10=D0a */
3137#define GG82563_PMCR_PROGRAMMED_POWER_STATE_D3 0x0300 /* 11=D3 */
3138
3139/* In-Band Control Register (Page 194, Register 18) */
3140#define GG82563_ICR_DIS_PADDING 0x0010 /* Disable Padding Use */
3141
3142
3143/* Bit definitions for valid PHY IDs. */ 2858/* Bit definitions for valid PHY IDs. */
3144/* I = Integrated 2859/* I = Integrated
3145 * E = External 2860 * E = External
@@ -3154,8 +2869,6 @@ struct e1000_host_command_info {
3154#define M88E1011_I_REV_4 0x04 2869#define M88E1011_I_REV_4 0x04
3155#define M88E1111_I_PHY_ID 0x01410CC0 2870#define M88E1111_I_PHY_ID 0x01410CC0
3156#define L1LXT971A_PHY_ID 0x001378E0 2871#define L1LXT971A_PHY_ID 0x001378E0
3157#define GG82563_E_PHY_ID 0x01410CA0
3158
3159 2872
3160/* Bits... 2873/* Bits...
3161 * 15-5: page 2874 * 15-5: page
@@ -3166,41 +2879,41 @@ struct e1000_host_command_info {
3166 (((page) << PHY_PAGE_SHIFT) | ((reg) & MAX_PHY_REG_ADDRESS)) 2879 (((page) << PHY_PAGE_SHIFT) | ((reg) & MAX_PHY_REG_ADDRESS))
3167 2880
3168#define IGP3_PHY_PORT_CTRL \ 2881#define IGP3_PHY_PORT_CTRL \
3169 PHY_REG(769, 17) /* Port General Configuration */ 2882 PHY_REG(769, 17) /* Port General Configuration */
3170#define IGP3_PHY_RATE_ADAPT_CTRL \ 2883#define IGP3_PHY_RATE_ADAPT_CTRL \
3171 PHY_REG(769, 25) /* Rate Adapter Control Register */ 2884 PHY_REG(769, 25) /* Rate Adapter Control Register */
3172 2885
3173#define IGP3_KMRN_FIFO_CTRL_STATS \ 2886#define IGP3_KMRN_FIFO_CTRL_STATS \
3174 PHY_REG(770, 16) /* KMRN FIFO's control/status register */ 2887 PHY_REG(770, 16) /* KMRN FIFO's control/status register */
3175#define IGP3_KMRN_POWER_MNG_CTRL \ 2888#define IGP3_KMRN_POWER_MNG_CTRL \
3176 PHY_REG(770, 17) /* KMRN Power Management Control Register */ 2889 PHY_REG(770, 17) /* KMRN Power Management Control Register */
3177#define IGP3_KMRN_INBAND_CTRL \ 2890#define IGP3_KMRN_INBAND_CTRL \
3178 PHY_REG(770, 18) /* KMRN Inband Control Register */ 2891 PHY_REG(770, 18) /* KMRN Inband Control Register */
3179#define IGP3_KMRN_DIAG \ 2892#define IGP3_KMRN_DIAG \
3180 PHY_REG(770, 19) /* KMRN Diagnostic register */ 2893 PHY_REG(770, 19) /* KMRN Diagnostic register */
3181#define IGP3_KMRN_DIAG_PCS_LOCK_LOSS 0x0002 /* RX PCS is not synced */ 2894#define IGP3_KMRN_DIAG_PCS_LOCK_LOSS 0x0002 /* RX PCS is not synced */
3182#define IGP3_KMRN_ACK_TIMEOUT \ 2895#define IGP3_KMRN_ACK_TIMEOUT \
3183 PHY_REG(770, 20) /* KMRN Acknowledge Timeouts register */ 2896 PHY_REG(770, 20) /* KMRN Acknowledge Timeouts register */
3184 2897
3185#define IGP3_VR_CTRL \ 2898#define IGP3_VR_CTRL \
3186 PHY_REG(776, 18) /* Voltage regulator control register */ 2899 PHY_REG(776, 18) /* Voltage regulator control register */
3187#define IGP3_VR_CTRL_MODE_SHUT 0x0200 /* Enter powerdown, shutdown VRs */ 2900#define IGP3_VR_CTRL_MODE_SHUT 0x0200 /* Enter powerdown, shutdown VRs */
3188#define IGP3_VR_CTRL_MODE_MASK 0x0300 /* Shutdown VR Mask */ 2901#define IGP3_VR_CTRL_MODE_MASK 0x0300 /* Shutdown VR Mask */
3189 2902
3190#define IGP3_CAPABILITY \ 2903#define IGP3_CAPABILITY \
3191 PHY_REG(776, 19) /* IGP3 Capability Register */ 2904 PHY_REG(776, 19) /* IGP3 Capability Register */
3192 2905
3193/* Capabilities for SKU Control */ 2906/* Capabilities for SKU Control */
3194#define IGP3_CAP_INITIATE_TEAM 0x0001 /* Able to initiate a team */ 2907#define IGP3_CAP_INITIATE_TEAM 0x0001 /* Able to initiate a team */
3195#define IGP3_CAP_WFM 0x0002 /* Support WoL and PXE */ 2908#define IGP3_CAP_WFM 0x0002 /* Support WoL and PXE */
3196#define IGP3_CAP_ASF 0x0004 /* Support ASF */ 2909#define IGP3_CAP_ASF 0x0004 /* Support ASF */
3197#define IGP3_CAP_LPLU 0x0008 /* Support Low Power Link Up */ 2910#define IGP3_CAP_LPLU 0x0008 /* Support Low Power Link Up */
3198#define IGP3_CAP_DC_AUTO_SPEED 0x0010 /* Support AC/DC Auto Link Speed */ 2911#define IGP3_CAP_DC_AUTO_SPEED 0x0010 /* Support AC/DC Auto Link Speed */
3199#define IGP3_CAP_SPD 0x0020 /* Support Smart Power Down */ 2912#define IGP3_CAP_SPD 0x0020 /* Support Smart Power Down */
3200#define IGP3_CAP_MULT_QUEUE 0x0040 /* Support 2 tx & 2 rx queues */ 2913#define IGP3_CAP_MULT_QUEUE 0x0040 /* Support 2 tx & 2 rx queues */
3201#define IGP3_CAP_RSS 0x0080 /* Support RSS */ 2914#define IGP3_CAP_RSS 0x0080 /* Support RSS */
3202#define IGP3_CAP_8021PQ 0x0100 /* Support 802.1Q & 802.1p */ 2915#define IGP3_CAP_8021PQ 0x0100 /* Support 802.1Q & 802.1p */
3203#define IGP3_CAP_AMT_CB 0x0200 /* Support active manageability and circuit breaker */ 2916#define IGP3_CAP_AMT_CB 0x0200 /* Support active manageability and circuit breaker */
3204 2917
3205#define IGP3_PPC_JORDAN_EN 0x0001 2918#define IGP3_PPC_JORDAN_EN 0x0001
3206#define IGP3_PPC_JORDAN_GIGA_SPEED 0x0002 2919#define IGP3_PPC_JORDAN_GIGA_SPEED 0x0002
@@ -3210,69 +2923,69 @@ struct e1000_host_command_info {
3210#define IGP3_KMRN_PMC_K0S_MODE1_EN_GIGA 0x0020 2923#define IGP3_KMRN_PMC_K0S_MODE1_EN_GIGA 0x0020
3211#define IGP3_KMRN_PMC_K0S_MODE1_EN_100 0x0040 2924#define IGP3_KMRN_PMC_K0S_MODE1_EN_100 0x0040
3212 2925
3213#define IGP3E1000_PHY_MISC_CTRL 0x1B /* Misc. Ctrl register */ 2926#define IGP3E1000_PHY_MISC_CTRL 0x1B /* Misc. Ctrl register */
3214#define IGP3_PHY_MISC_DUPLEX_MANUAL_SET 0x1000 /* Duplex Manual Set */ 2927#define IGP3_PHY_MISC_DUPLEX_MANUAL_SET 0x1000 /* Duplex Manual Set */
3215 2928
3216#define IGP3_KMRN_EXT_CTRL PHY_REG(770, 18) 2929#define IGP3_KMRN_EXT_CTRL PHY_REG(770, 18)
3217#define IGP3_KMRN_EC_DIS_INBAND 0x0080 2930#define IGP3_KMRN_EC_DIS_INBAND 0x0080
3218 2931
3219#define IGP03E1000_E_PHY_ID 0x02A80390 2932#define IGP03E1000_E_PHY_ID 0x02A80390
3220#define IFE_E_PHY_ID 0x02A80330 /* 10/100 PHY */ 2933#define IFE_E_PHY_ID 0x02A80330 /* 10/100 PHY */
3221#define IFE_PLUS_E_PHY_ID 0x02A80320 2934#define IFE_PLUS_E_PHY_ID 0x02A80320
3222#define IFE_C_E_PHY_ID 0x02A80310 2935#define IFE_C_E_PHY_ID 0x02A80310
3223 2936
3224#define IFE_PHY_EXTENDED_STATUS_CONTROL 0x10 /* 100BaseTx Extended Status, Control and Address */ 2937#define IFE_PHY_EXTENDED_STATUS_CONTROL 0x10 /* 100BaseTx Extended Status, Control and Address */
3225#define IFE_PHY_SPECIAL_CONTROL 0x11 /* 100BaseTx PHY special control register */ 2938#define IFE_PHY_SPECIAL_CONTROL 0x11 /* 100BaseTx PHY special control register */
3226#define IFE_PHY_RCV_FALSE_CARRIER 0x13 /* 100BaseTx Receive False Carrier Counter */ 2939#define IFE_PHY_RCV_FALSE_CARRIER 0x13 /* 100BaseTx Receive False Carrier Counter */
3227#define IFE_PHY_RCV_DISCONNECT 0x14 /* 100BaseTx Receive Disconnet Counter */ 2940#define IFE_PHY_RCV_DISCONNECT 0x14 /* 100BaseTx Receive Disconnect Counter */
3228#define IFE_PHY_RCV_ERROT_FRAME 0x15 /* 100BaseTx Receive Error Frame Counter */ 2941#define IFE_PHY_RCV_ERROT_FRAME 0x15 /* 100BaseTx Receive Error Frame Counter */
3229#define IFE_PHY_RCV_SYMBOL_ERR 0x16 /* Receive Symbol Error Counter */ 2942#define IFE_PHY_RCV_SYMBOL_ERR 0x16 /* Receive Symbol Error Counter */
3230#define IFE_PHY_PREM_EOF_ERR 0x17 /* 100BaseTx Receive Premature End Of Frame Error Counter */ 2943#define IFE_PHY_PREM_EOF_ERR 0x17 /* 100BaseTx Receive Premature End Of Frame Error Counter */
3231#define IFE_PHY_RCV_EOF_ERR 0x18 /* 10BaseT Receive End Of Frame Error Counter */ 2944#define IFE_PHY_RCV_EOF_ERR 0x18 /* 10BaseT Receive End Of Frame Error Counter */
3232#define IFE_PHY_TX_JABBER_DETECT 0x19 /* 10BaseT Transmit Jabber Detect Counter */ 2945#define IFE_PHY_TX_JABBER_DETECT 0x19 /* 10BaseT Transmit Jabber Detect Counter */
3233#define IFE_PHY_EQUALIZER 0x1A /* PHY Equalizer Control and Status */ 2946#define IFE_PHY_EQUALIZER 0x1A /* PHY Equalizer Control and Status */
3234#define IFE_PHY_SPECIAL_CONTROL_LED 0x1B /* PHY special control and LED configuration */ 2947#define IFE_PHY_SPECIAL_CONTROL_LED 0x1B /* PHY special control and LED configuration */
3235#define IFE_PHY_MDIX_CONTROL 0x1C /* MDI/MDI-X Control register */ 2948#define IFE_PHY_MDIX_CONTROL 0x1C /* MDI/MDI-X Control register */
3236#define IFE_PHY_HWI_CONTROL 0x1D /* Hardware Integrity Control (HWI) */ 2949#define IFE_PHY_HWI_CONTROL 0x1D /* Hardware Integrity Control (HWI) */
3237 2950
3238#define IFE_PESC_REDUCED_POWER_DOWN_DISABLE 0x2000 /* Defaut 1 = Disable auto reduced power down */ 2951#define IFE_PESC_REDUCED_POWER_DOWN_DISABLE 0x2000 /* Default 1 = Disable auto reduced power down */
3239#define IFE_PESC_100BTX_POWER_DOWN 0x0400 /* Indicates the power state of 100BASE-TX */ 2952#define IFE_PESC_100BTX_POWER_DOWN 0x0400 /* Indicates the power state of 100BASE-TX */
3240#define IFE_PESC_10BTX_POWER_DOWN 0x0200 /* Indicates the power state of 10BASE-T */ 2953#define IFE_PESC_10BTX_POWER_DOWN 0x0200 /* Indicates the power state of 10BASE-T */
3241#define IFE_PESC_POLARITY_REVERSED 0x0100 /* Indicates 10BASE-T polarity */ 2954#define IFE_PESC_POLARITY_REVERSED 0x0100 /* Indicates 10BASE-T polarity */
3242#define IFE_PESC_PHY_ADDR_MASK 0x007C /* Bit 6:2 for sampled PHY address */ 2955#define IFE_PESC_PHY_ADDR_MASK 0x007C /* Bit 6:2 for sampled PHY address */
3243#define IFE_PESC_SPEED 0x0002 /* Auto-negotiation speed result 1=100Mbs, 0=10Mbs */ 2956#define IFE_PESC_SPEED 0x0002 /* Auto-negotiation speed result 1=100Mbs, 0=10Mbs */
3244#define IFE_PESC_DUPLEX 0x0001 /* Auto-negotiation duplex result 1=Full, 0=Half */ 2957#define IFE_PESC_DUPLEX 0x0001 /* Auto-negotiation duplex result 1=Full, 0=Half */
3245#define IFE_PESC_POLARITY_REVERSED_SHIFT 8 2958#define IFE_PESC_POLARITY_REVERSED_SHIFT 8
3246 2959
3247#define IFE_PSC_DISABLE_DYNAMIC_POWER_DOWN 0x0100 /* 1 = Dyanmic Power Down disabled */ 2960#define IFE_PSC_DISABLE_DYNAMIC_POWER_DOWN 0x0100 /* 1 = Dynamic Power Down disabled */
3248#define IFE_PSC_FORCE_POLARITY 0x0020 /* 1=Reversed Polarity, 0=Normal */ 2961#define IFE_PSC_FORCE_POLARITY 0x0020 /* 1=Reversed Polarity, 0=Normal */
3249#define IFE_PSC_AUTO_POLARITY_DISABLE 0x0010 /* 1=Auto Polarity Disabled, 0=Enabled */ 2962#define IFE_PSC_AUTO_POLARITY_DISABLE 0x0010 /* 1=Auto Polarity Disabled, 0=Enabled */
3250#define IFE_PSC_JABBER_FUNC_DISABLE 0x0001 /* 1=Jabber Disabled, 0=Normal Jabber Operation */ 2963#define IFE_PSC_JABBER_FUNC_DISABLE 0x0001 /* 1=Jabber Disabled, 0=Normal Jabber Operation */
3251#define IFE_PSC_FORCE_POLARITY_SHIFT 5 2964#define IFE_PSC_FORCE_POLARITY_SHIFT 5
3252#define IFE_PSC_AUTO_POLARITY_DISABLE_SHIFT 4 2965#define IFE_PSC_AUTO_POLARITY_DISABLE_SHIFT 4
3253 2966
3254#define IFE_PMC_AUTO_MDIX 0x0080 /* 1=enable MDI/MDI-X feature, default 0=disabled */ 2967#define IFE_PMC_AUTO_MDIX 0x0080 /* 1=enable MDI/MDI-X feature, default 0=disabled */
3255#define IFE_PMC_FORCE_MDIX 0x0040 /* 1=force MDIX-X, 0=force MDI */ 2968#define IFE_PMC_FORCE_MDIX 0x0040 /* 1=force MDIX-X, 0=force MDI */
3256#define IFE_PMC_MDIX_STATUS 0x0020 /* 1=MDI-X, 0=MDI */ 2969#define IFE_PMC_MDIX_STATUS 0x0020 /* 1=MDI-X, 0=MDI */
3257#define IFE_PMC_AUTO_MDIX_COMPLETE 0x0010 /* Resolution algorithm is completed */ 2970#define IFE_PMC_AUTO_MDIX_COMPLETE 0x0010 /* Resolution algorithm is completed */
3258#define IFE_PMC_MDIX_MODE_SHIFT 6 2971#define IFE_PMC_MDIX_MODE_SHIFT 6
3259#define IFE_PHC_MDIX_RESET_ALL_MASK 0x0000 /* Disable auto MDI-X */ 2972#define IFE_PHC_MDIX_RESET_ALL_MASK 0x0000 /* Disable auto MDI-X */
3260 2973
3261#define IFE_PHC_HWI_ENABLE 0x8000 /* Enable the HWI feature */ 2974#define IFE_PHC_HWI_ENABLE 0x8000 /* Enable the HWI feature */
3262#define IFE_PHC_ABILITY_CHECK 0x4000 /* 1= Test Passed, 0=failed */ 2975#define IFE_PHC_ABILITY_CHECK 0x4000 /* 1= Test Passed, 0=failed */
3263#define IFE_PHC_TEST_EXEC 0x2000 /* PHY launch test pulses on the wire */ 2976#define IFE_PHC_TEST_EXEC 0x2000 /* PHY launch test pulses on the wire */
3264#define IFE_PHC_HIGHZ 0x0200 /* 1 = Open Circuit */ 2977#define IFE_PHC_HIGHZ 0x0200 /* 1 = Open Circuit */
3265#define IFE_PHC_LOWZ 0x0400 /* 1 = Short Circuit */ 2978#define IFE_PHC_LOWZ 0x0400 /* 1 = Short Circuit */
3266#define IFE_PHC_LOW_HIGH_Z_MASK 0x0600 /* Mask for indication type of problem on the line */ 2979#define IFE_PHC_LOW_HIGH_Z_MASK 0x0600 /* Mask for indication type of problem on the line */
3267#define IFE_PHC_DISTANCE_MASK 0x01FF /* Mask for distance to the cable problem, in 80cm granularity */ 2980#define IFE_PHC_DISTANCE_MASK 0x01FF /* Mask for distance to the cable problem, in 80cm granularity */
3268#define IFE_PHC_RESET_ALL_MASK 0x0000 /* Disable HWI */ 2981#define IFE_PHC_RESET_ALL_MASK 0x0000 /* Disable HWI */
3269#define IFE_PSCL_PROBE_MODE 0x0020 /* LED Probe mode */ 2982#define IFE_PSCL_PROBE_MODE 0x0020 /* LED Probe mode */
3270#define IFE_PSCL_PROBE_LEDS_OFF 0x0006 /* Force LEDs 0 and 2 off */ 2983#define IFE_PSCL_PROBE_LEDS_OFF 0x0006 /* Force LEDs 0 and 2 off */
3271#define IFE_PSCL_PROBE_LEDS_ON 0x0007 /* Force LEDs 0 and 2 on */ 2984#define IFE_PSCL_PROBE_LEDS_ON 0x0007 /* Force LEDs 0 and 2 on */
3272 2985
3273#define ICH_FLASH_COMMAND_TIMEOUT 5000 /* 5000 uSecs - adjusted */ 2986#define ICH_FLASH_COMMAND_TIMEOUT 5000 /* 5000 uSecs - adjusted */
3274#define ICH_FLASH_ERASE_TIMEOUT 3000000 /* Up to 3 seconds - worst case */ 2987#define ICH_FLASH_ERASE_TIMEOUT 3000000 /* Up to 3 seconds - worst case */
3275#define ICH_FLASH_CYCLE_REPEAT_COUNT 10 /* 10 cycles */ 2988#define ICH_FLASH_CYCLE_REPEAT_COUNT 10 /* 10 cycles */
3276#define ICH_FLASH_SEG_SIZE_256 256 2989#define ICH_FLASH_SEG_SIZE_256 256
3277#define ICH_FLASH_SEG_SIZE_4K 4096 2990#define ICH_FLASH_SEG_SIZE_4K 4096
3278#define ICH_FLASH_SEG_SIZE_64K 65536 2991#define ICH_FLASH_SEG_SIZE_64K 65536
@@ -3305,74 +3018,6 @@ struct e1000_host_command_info {
3305#define ICH_GFPREG_BASE_MASK 0x1FFF 3018#define ICH_GFPREG_BASE_MASK 0x1FFF
3306#define ICH_FLASH_LINEAR_ADDR_MASK 0x00FFFFFF 3019#define ICH_FLASH_LINEAR_ADDR_MASK 0x00FFFFFF
3307 3020
3308/* ICH8 GbE Flash Hardware Sequencing Flash Status Register bit breakdown */
3309/* Offset 04h HSFSTS */
3310union ich8_hws_flash_status {
3311 struct ich8_hsfsts {
3312#ifdef __BIG_ENDIAN
3313 u16 reserved2 :6;
3314 u16 fldesvalid :1;
3315 u16 flockdn :1;
3316 u16 flcdone :1;
3317 u16 flcerr :1;
3318 u16 dael :1;
3319 u16 berasesz :2;
3320 u16 flcinprog :1;
3321 u16 reserved1 :2;
3322#else
3323 u16 flcdone :1; /* bit 0 Flash Cycle Done */
3324 u16 flcerr :1; /* bit 1 Flash Cycle Error */
3325 u16 dael :1; /* bit 2 Direct Access error Log */
3326 u16 berasesz :2; /* bit 4:3 Block/Sector Erase Size */
3327 u16 flcinprog :1; /* bit 5 flash SPI cycle in Progress */
3328 u16 reserved1 :2; /* bit 13:6 Reserved */
3329 u16 reserved2 :6; /* bit 13:6 Reserved */
3330 u16 fldesvalid :1; /* bit 14 Flash Descriptor Valid */
3331 u16 flockdn :1; /* bit 15 Flash Configuration Lock-Down */
3332#endif
3333 } hsf_status;
3334 u16 regval;
3335};
3336
3337/* ICH8 GbE Flash Hardware Sequencing Flash control Register bit breakdown */
3338/* Offset 06h FLCTL */
3339union ich8_hws_flash_ctrl {
3340 struct ich8_hsflctl {
3341#ifdef __BIG_ENDIAN
3342 u16 fldbcount :2;
3343 u16 flockdn :6;
3344 u16 flcgo :1;
3345 u16 flcycle :2;
3346 u16 reserved :5;
3347#else
3348 u16 flcgo :1; /* 0 Flash Cycle Go */
3349 u16 flcycle :2; /* 2:1 Flash Cycle */
3350 u16 reserved :5; /* 7:3 Reserved */
3351 u16 fldbcount :2; /* 9:8 Flash Data Byte Count */
3352 u16 flockdn :6; /* 15:10 Reserved */
3353#endif
3354 } hsf_ctrl;
3355 u16 regval;
3356};
3357
3358/* ICH8 Flash Region Access Permissions */
3359union ich8_hws_flash_regacc {
3360 struct ich8_flracc {
3361#ifdef __BIG_ENDIAN
3362 u32 gmwag :8;
3363 u32 gmrag :8;
3364 u32 grwa :8;
3365 u32 grra :8;
3366#else
3367 u32 grra :8; /* 0:7 GbE region Read Access */
3368 u32 grwa :8; /* 8:15 GbE region Write Access */
3369 u32 gmrag :8; /* 23:16 GbE Master Read Access Grant */
3370 u32 gmwag :8; /* 31:24 GbE Master Write Access Grant */
3371#endif
3372 } hsf_flregacc;
3373 u16 regval;
3374};
3375
3376/* Miscellaneous PHY bit definitions. */ 3021/* Miscellaneous PHY bit definitions. */
3377#define PHY_PREAMBLE 0xFFFFFFFF 3022#define PHY_PREAMBLE 0xFFFFFFFF
3378#define PHY_SOF 0x01 3023#define PHY_SOF 0x01
@@ -3384,10 +3029,10 @@ union ich8_hws_flash_regacc {
3384#define MII_CR_SPEED_100 0x2000 3029#define MII_CR_SPEED_100 0x2000
3385#define MII_CR_SPEED_10 0x0000 3030#define MII_CR_SPEED_10 0x0000
3386#define E1000_PHY_ADDRESS 0x01 3031#define E1000_PHY_ADDRESS 0x01
3387#define PHY_AUTO_NEG_TIME 45 /* 4.5 Seconds */ 3032#define PHY_AUTO_NEG_TIME 45 /* 4.5 Seconds */
3388#define PHY_FORCE_TIME 20 /* 2.0 Seconds */ 3033#define PHY_FORCE_TIME 20 /* 2.0 Seconds */
3389#define PHY_REVISION_MASK 0xFFFFFFF0 3034#define PHY_REVISION_MASK 0xFFFFFFF0
3390#define DEVICE_SPEED_MASK 0x00000300 /* Device Ctrl Reg Speed Mask */ 3035#define DEVICE_SPEED_MASK 0x00000300 /* Device Ctrl Reg Speed Mask */
3391#define REG4_SPEED_MASK 0x01E0 3036#define REG4_SPEED_MASK 0x01E0
3392#define REG9_SPEED_MASK 0x0300 3037#define REG9_SPEED_MASK 0x0300
3393#define ADVERTISE_10_HALF 0x0001 3038#define ADVERTISE_10_HALF 0x0001
@@ -3396,8 +3041,8 @@ union ich8_hws_flash_regacc {
3396#define ADVERTISE_100_FULL 0x0008 3041#define ADVERTISE_100_FULL 0x0008
3397#define ADVERTISE_1000_HALF 0x0010 3042#define ADVERTISE_1000_HALF 0x0010
3398#define ADVERTISE_1000_FULL 0x0020 3043#define ADVERTISE_1000_FULL 0x0020
3399#define AUTONEG_ADVERTISE_SPEED_DEFAULT 0x002F /* Everything but 1000-Half */ 3044#define AUTONEG_ADVERTISE_SPEED_DEFAULT 0x002F /* Everything but 1000-Half */
3400#define AUTONEG_ADVERTISE_10_100_ALL 0x000F /* All 10/100 speeds*/ 3045#define AUTONEG_ADVERTISE_10_100_ALL 0x000F /* All 10/100 speeds */
3401#define AUTONEG_ADVERTISE_10_ALL 0x0003 /* 10Mbps Full & Half speeds*/ 3046#define AUTONEG_ADVERTISE_10_ALL 0x0003 /* 10Mbps Full & Half speeds */
3402 3047
3403#endif /* _E1000_HW_H_ */ 3048#endif /* _E1000_HW_H_ */
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index c66dd4f9437c..bcd192ca47b0 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -31,7 +31,7 @@
31 31
32char e1000_driver_name[] = "e1000"; 32char e1000_driver_name[] = "e1000";
33static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver"; 33static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
34#define DRV_VERSION "7.3.21-k3-NAPI" 34#define DRV_VERSION "7.3.21-k5-NAPI"
35const char e1000_driver_version[] = DRV_VERSION; 35const char e1000_driver_version[] = DRV_VERSION;
36static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation."; 36static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
37 37
@@ -131,7 +131,6 @@ static struct net_device_stats * e1000_get_stats(struct net_device *netdev);
131static int e1000_change_mtu(struct net_device *netdev, int new_mtu); 131static int e1000_change_mtu(struct net_device *netdev, int new_mtu);
132static int e1000_set_mac(struct net_device *netdev, void *p); 132static int e1000_set_mac(struct net_device *netdev, void *p);
133static irqreturn_t e1000_intr(int irq, void *data); 133static irqreturn_t e1000_intr(int irq, void *data);
134static irqreturn_t e1000_intr_msi(int irq, void *data);
135static bool e1000_clean_tx_irq(struct e1000_adapter *adapter, 134static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
136 struct e1000_tx_ring *tx_ring); 135 struct e1000_tx_ring *tx_ring);
137static int e1000_clean(struct napi_struct *napi, int budget); 136static int e1000_clean(struct napi_struct *napi, int budget);
@@ -258,25 +257,14 @@ module_exit(e1000_exit_module);
258 257
259static int e1000_request_irq(struct e1000_adapter *adapter) 258static int e1000_request_irq(struct e1000_adapter *adapter)
260{ 259{
261 struct e1000_hw *hw = &adapter->hw;
262 struct net_device *netdev = adapter->netdev; 260 struct net_device *netdev = adapter->netdev;
263 irq_handler_t handler = e1000_intr; 261 irq_handler_t handler = e1000_intr;
264 int irq_flags = IRQF_SHARED; 262 int irq_flags = IRQF_SHARED;
265 int err; 263 int err;
266 264
267 if (hw->mac_type >= e1000_82571) {
268 adapter->have_msi = !pci_enable_msi(adapter->pdev);
269 if (adapter->have_msi) {
270 handler = e1000_intr_msi;
271 irq_flags = 0;
272 }
273 }
274
275 err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name, 265 err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name,
276 netdev); 266 netdev);
277 if (err) { 267 if (err) {
278 if (adapter->have_msi)
279 pci_disable_msi(adapter->pdev);
280 DPRINTK(PROBE, ERR, 268 DPRINTK(PROBE, ERR,
281 "Unable to allocate interrupt Error: %d\n", err); 269 "Unable to allocate interrupt Error: %d\n", err);
282 } 270 }
@@ -289,9 +277,6 @@ static void e1000_free_irq(struct e1000_adapter *adapter)
289 struct net_device *netdev = adapter->netdev; 277 struct net_device *netdev = adapter->netdev;
290 278
291 free_irq(adapter->pdev->irq, netdev); 279 free_irq(adapter->pdev->irq, netdev);
292
293 if (adapter->have_msi)
294 pci_disable_msi(adapter->pdev);
295} 280}
296 281
297/** 282/**
@@ -345,76 +330,6 @@ static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
345 } 330 }
346} 331}
347 332
348/**
349 * e1000_release_hw_control - release control of the h/w to f/w
350 * @adapter: address of board private structure
351 *
352 * e1000_release_hw_control resets {CTRL_EXT|FWSM}:DRV_LOAD bit.
353 * For ASF and Pass Through versions of f/w this means that the
354 * driver is no longer loaded. For AMT version (only with 82573) i
355 * of the f/w this means that the network i/f is closed.
356 *
357 **/
358
359static void e1000_release_hw_control(struct e1000_adapter *adapter)
360{
361 u32 ctrl_ext;
362 u32 swsm;
363 struct e1000_hw *hw = &adapter->hw;
364
365 /* Let firmware taken over control of h/w */
366 switch (hw->mac_type) {
367 case e1000_82573:
368 swsm = er32(SWSM);
369 ew32(SWSM, swsm & ~E1000_SWSM_DRV_LOAD);
370 break;
371 case e1000_82571:
372 case e1000_82572:
373 case e1000_80003es2lan:
374 case e1000_ich8lan:
375 ctrl_ext = er32(CTRL_EXT);
376 ew32(CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
377 break;
378 default:
379 break;
380 }
381}
382
383/**
384 * e1000_get_hw_control - get control of the h/w from f/w
385 * @adapter: address of board private structure
386 *
387 * e1000_get_hw_control sets {CTRL_EXT|FWSM}:DRV_LOAD bit.
388 * For ASF and Pass Through versions of f/w this means that
389 * the driver is loaded. For AMT version (only with 82573)
390 * of the f/w this means that the network i/f is open.
391 *
392 **/
393
394static void e1000_get_hw_control(struct e1000_adapter *adapter)
395{
396 u32 ctrl_ext;
397 u32 swsm;
398 struct e1000_hw *hw = &adapter->hw;
399
400 /* Let firmware know the driver has taken over */
401 switch (hw->mac_type) {
402 case e1000_82573:
403 swsm = er32(SWSM);
404 ew32(SWSM, swsm | E1000_SWSM_DRV_LOAD);
405 break;
406 case e1000_82571:
407 case e1000_82572:
408 case e1000_80003es2lan:
409 case e1000_ich8lan:
410 ctrl_ext = er32(CTRL_EXT);
411 ew32(CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
412 break;
413 default:
414 break;
415 }
416}
417
418static void e1000_init_manageability(struct e1000_adapter *adapter) 333static void e1000_init_manageability(struct e1000_adapter *adapter)
419{ 334{
420 struct e1000_hw *hw = &adapter->hw; 335 struct e1000_hw *hw = &adapter->hw;
@@ -425,20 +340,6 @@ static void e1000_init_manageability(struct e1000_adapter *adapter)
425 /* disable hardware interception of ARP */ 340 /* disable hardware interception of ARP */
426 manc &= ~(E1000_MANC_ARP_EN); 341 manc &= ~(E1000_MANC_ARP_EN);
427 342
428 /* enable receiving management packets to the host */
429 /* this will probably generate destination unreachable messages
430 * from the host OS, but the packets will be handled on SMBUS */
431 if (hw->has_manc2h) {
432 u32 manc2h = er32(MANC2H);
433
434 manc |= E1000_MANC_EN_MNG2HOST;
435#define E1000_MNG2HOST_PORT_623 (1 << 5)
436#define E1000_MNG2HOST_PORT_664 (1 << 6)
437 manc2h |= E1000_MNG2HOST_PORT_623;
438 manc2h |= E1000_MNG2HOST_PORT_664;
439 ew32(MANC2H, manc2h);
440 }
441
442 ew32(MANC, manc); 343 ew32(MANC, manc);
443 } 344 }
444} 345}
@@ -453,12 +354,6 @@ static void e1000_release_manageability(struct e1000_adapter *adapter)
453 /* re-enable hardware interception of ARP */ 354 /* re-enable hardware interception of ARP */
454 manc |= E1000_MANC_ARP_EN; 355 manc |= E1000_MANC_ARP_EN;
455 356
456 if (hw->has_manc2h)
457 manc &= ~E1000_MANC_EN_MNG2HOST;
458
459 /* don't explicitly have to mess with MANC2H since
460 * MANC has an enable disable that gates MANC2H */
461
462 ew32(MANC, manc); 357 ew32(MANC, manc);
463 } 358 }
464} 359}
@@ -563,15 +458,6 @@ static void e1000_power_down_phy(struct e1000_adapter *adapter)
563 if (er32(MANC) & E1000_MANC_SMBUS_EN) 458 if (er32(MANC) & E1000_MANC_SMBUS_EN)
564 goto out; 459 goto out;
565 break; 460 break;
566 case e1000_82571:
567 case e1000_82572:
568 case e1000_82573:
569 case e1000_80003es2lan:
570 case e1000_ich8lan:
571 if (e1000_check_mng_mode(hw) ||
572 e1000_check_phy_reset_block(hw))
573 goto out;
574 break;
575 default: 461 default:
576 goto out; 462 goto out;
577 } 463 }
@@ -599,8 +485,7 @@ void e1000_down(struct e1000_adapter *adapter)
599 ew32(RCTL, rctl & ~E1000_RCTL_EN); 485 ew32(RCTL, rctl & ~E1000_RCTL_EN);
600 /* flush and sleep below */ 486 /* flush and sleep below */
601 487
602 /* can be netif_tx_disable when NETIF_F_LLTX is removed */ 488 netif_tx_disable(netdev);
603 netif_stop_queue(netdev);
604 489
605 /* disable transmits in the hardware */ 490 /* disable transmits in the hardware */
606 tctl = er32(TCTL); 491 tctl = er32(TCTL);
@@ -671,16 +556,6 @@ void e1000_reset(struct e1000_adapter *adapter)
671 legacy_pba_adjust = true; 556 legacy_pba_adjust = true;
672 pba = E1000_PBA_30K; 557 pba = E1000_PBA_30K;
673 break; 558 break;
674 case e1000_82571:
675 case e1000_82572:
676 case e1000_80003es2lan:
677 pba = E1000_PBA_38K;
678 break;
679 case e1000_82573:
680 pba = E1000_PBA_20K;
681 break;
682 case e1000_ich8lan:
683 pba = E1000_PBA_8K;
684 case e1000_undefined: 559 case e1000_undefined:
685 case e1000_num_macs: 560 case e1000_num_macs:
686 break; 561 break;
@@ -744,16 +619,8 @@ void e1000_reset(struct e1000_adapter *adapter)
744 619
745 /* if short on rx space, rx wins and must trump tx 620 /* if short on rx space, rx wins and must trump tx
746 * adjustment or use Early Receive if available */ 621 * adjustment or use Early Receive if available */
747 if (pba < min_rx_space) { 622 if (pba < min_rx_space)
748 switch (hw->mac_type) { 623 pba = min_rx_space;
749 case e1000_82573:
750 /* ERT enabled in e1000_configure_rx */
751 break;
752 default:
753 pba = min_rx_space;
754 break;
755 }
756 }
757 } 624 }
758 } 625 }
759 626
@@ -789,7 +656,6 @@ void e1000_reset(struct e1000_adapter *adapter)
789 656
790 /* if (adapter->hwflags & HWFLAGS_PHY_PWR_BIT) { */ 657 /* if (adapter->hwflags & HWFLAGS_PHY_PWR_BIT) { */
791 if (hw->mac_type >= e1000_82544 && 658 if (hw->mac_type >= e1000_82544 &&
792 hw->mac_type <= e1000_82547_rev_2 &&
793 hw->autoneg == 1 && 659 hw->autoneg == 1 &&
794 hw->autoneg_advertised == ADVERTISE_1000_FULL) { 660 hw->autoneg_advertised == ADVERTISE_1000_FULL) {
795 u32 ctrl = er32(CTRL); 661 u32 ctrl = er32(CTRL);
@@ -806,20 +672,6 @@ void e1000_reset(struct e1000_adapter *adapter)
806 e1000_reset_adaptive(hw); 672 e1000_reset_adaptive(hw);
807 e1000_phy_get_info(hw, &adapter->phy_info); 673 e1000_phy_get_info(hw, &adapter->phy_info);
808 674
809 if (!adapter->smart_power_down &&
810 (hw->mac_type == e1000_82571 ||
811 hw->mac_type == e1000_82572)) {
812 u16 phy_data = 0;
813 /* speed up time to link by disabling smart power down, ignore
814 * the return value of this function because there is nothing
815 * different we would do if it failed */
816 e1000_read_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT,
817 &phy_data);
818 phy_data &= ~IGP02E1000_PM_SPD;
819 e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT,
820 phy_data);
821 }
822
823 e1000_release_manageability(adapter); 675 e1000_release_manageability(adapter);
824} 676}
825 677
@@ -1046,17 +898,6 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
1046 goto err_sw_init; 898 goto err_sw_init;
1047 899
1048 err = -EIO; 900 err = -EIO;
1049 /* Flash BAR mapping must happen after e1000_sw_init
1050 * because it depends on mac_type */
1051 if ((hw->mac_type == e1000_ich8lan) &&
1052 (pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
1053 hw->flash_address = pci_ioremap_bar(pdev, 1);
1054 if (!hw->flash_address)
1055 goto err_flashmap;
1056 }
1057
1058 if (e1000_check_phy_reset_block(hw))
1059 DPRINTK(PROBE, INFO, "PHY reset is blocked due to SOL/IDER session.\n");
1060 901
1061 if (hw->mac_type >= e1000_82543) { 902 if (hw->mac_type >= e1000_82543) {
1062 netdev->features = NETIF_F_SG | 903 netdev->features = NETIF_F_SG |
@@ -1064,21 +905,16 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
1064 NETIF_F_HW_VLAN_TX | 905 NETIF_F_HW_VLAN_TX |
1065 NETIF_F_HW_VLAN_RX | 906 NETIF_F_HW_VLAN_RX |
1066 NETIF_F_HW_VLAN_FILTER; 907 NETIF_F_HW_VLAN_FILTER;
1067 if (hw->mac_type == e1000_ich8lan)
1068 netdev->features &= ~NETIF_F_HW_VLAN_FILTER;
1069 } 908 }
1070 909
1071 if ((hw->mac_type >= e1000_82544) && 910 if ((hw->mac_type >= e1000_82544) &&
1072 (hw->mac_type != e1000_82547)) 911 (hw->mac_type != e1000_82547))
1073 netdev->features |= NETIF_F_TSO; 912 netdev->features |= NETIF_F_TSO;
1074 913
1075 if (hw->mac_type > e1000_82547_rev_2)
1076 netdev->features |= NETIF_F_TSO6;
1077 if (pci_using_dac) 914 if (pci_using_dac)
1078 netdev->features |= NETIF_F_HIGHDMA; 915 netdev->features |= NETIF_F_HIGHDMA;
1079 916
1080 netdev->vlan_features |= NETIF_F_TSO; 917 netdev->vlan_features |= NETIF_F_TSO;
1081 netdev->vlan_features |= NETIF_F_TSO6;
1082 netdev->vlan_features |= NETIF_F_HW_CSUM; 918 netdev->vlan_features |= NETIF_F_HW_CSUM;
1083 netdev->vlan_features |= NETIF_F_SG; 919 netdev->vlan_features |= NETIF_F_SG;
1084 920
@@ -1153,15 +989,8 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
1153 EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data); 989 EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data);
1154 eeprom_apme_mask = E1000_EEPROM_82544_APM; 990 eeprom_apme_mask = E1000_EEPROM_82544_APM;
1155 break; 991 break;
1156 case e1000_ich8lan:
1157 e1000_read_eeprom(hw,
1158 EEPROM_INIT_CONTROL1_REG, 1, &eeprom_data);
1159 eeprom_apme_mask = E1000_EEPROM_ICH8_APME;
1160 break;
1161 case e1000_82546: 992 case e1000_82546:
1162 case e1000_82546_rev_3: 993 case e1000_82546_rev_3:
1163 case e1000_82571:
1164 case e1000_80003es2lan:
1165 if (er32(STATUS) & E1000_STATUS_FUNC_1){ 994 if (er32(STATUS) & E1000_STATUS_FUNC_1){
1166 e1000_read_eeprom(hw, 995 e1000_read_eeprom(hw,
1167 EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data); 996 EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
@@ -1185,17 +1014,12 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
1185 break; 1014 break;
1186 case E1000_DEV_ID_82546EB_FIBER: 1015 case E1000_DEV_ID_82546EB_FIBER:
1187 case E1000_DEV_ID_82546GB_FIBER: 1016 case E1000_DEV_ID_82546GB_FIBER:
1188 case E1000_DEV_ID_82571EB_FIBER:
1189 /* Wake events only supported on port A for dual fiber 1017 /* Wake events only supported on port A for dual fiber
1190 * regardless of eeprom setting */ 1018 * regardless of eeprom setting */
1191 if (er32(STATUS) & E1000_STATUS_FUNC_1) 1019 if (er32(STATUS) & E1000_STATUS_FUNC_1)
1192 adapter->eeprom_wol = 0; 1020 adapter->eeprom_wol = 0;
1193 break; 1021 break;
1194 case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3: 1022 case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
1195 case E1000_DEV_ID_82571EB_QUAD_COPPER:
1196 case E1000_DEV_ID_82571EB_QUAD_FIBER:
1197 case E1000_DEV_ID_82571EB_QUAD_COPPER_LOWPROFILE:
1198 case E1000_DEV_ID_82571PT_QUAD_COPPER:
1199 /* if quad port adapter, disable WoL on all but port A */ 1023 /* if quad port adapter, disable WoL on all but port A */
1200 if (global_quad_port_a != 0) 1024 if (global_quad_port_a != 0)
1201 adapter->eeprom_wol = 0; 1025 adapter->eeprom_wol = 0;
@@ -1213,39 +1037,18 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
1213 1037
1214 /* print bus type/speed/width info */ 1038 /* print bus type/speed/width info */
1215 DPRINTK(PROBE, INFO, "(PCI%s:%s:%s) ", 1039 DPRINTK(PROBE, INFO, "(PCI%s:%s:%s) ",
1216 ((hw->bus_type == e1000_bus_type_pcix) ? "-X" : 1040 ((hw->bus_type == e1000_bus_type_pcix) ? "-X" : ""),
1217 (hw->bus_type == e1000_bus_type_pci_express ? " Express":"")), 1041 ((hw->bus_speed == e1000_bus_speed_133) ? "133MHz" :
1218 ((hw->bus_speed == e1000_bus_speed_2500) ? "2.5Gb/s" :
1219 (hw->bus_speed == e1000_bus_speed_133) ? "133MHz" :
1220 (hw->bus_speed == e1000_bus_speed_120) ? "120MHz" : 1042 (hw->bus_speed == e1000_bus_speed_120) ? "120MHz" :
1221 (hw->bus_speed == e1000_bus_speed_100) ? "100MHz" : 1043 (hw->bus_speed == e1000_bus_speed_100) ? "100MHz" :
1222 (hw->bus_speed == e1000_bus_speed_66) ? "66MHz" : "33MHz"), 1044 (hw->bus_speed == e1000_bus_speed_66) ? "66MHz" : "33MHz"),
1223 ((hw->bus_width == e1000_bus_width_64) ? "64-bit" : 1045 ((hw->bus_width == e1000_bus_width_64) ? "64-bit" : "32-bit"));
1224 (hw->bus_width == e1000_bus_width_pciex_4) ? "Width x4" :
1225 (hw->bus_width == e1000_bus_width_pciex_1) ? "Width x1" :
1226 "32-bit"));
1227 1046
1228 printk("%pM\n", netdev->dev_addr); 1047 printk("%pM\n", netdev->dev_addr);
1229 1048
1230 if (hw->bus_type == e1000_bus_type_pci_express) {
1231 DPRINTK(PROBE, WARNING, "This device (id %04x:%04x) will no "
1232 "longer be supported by this driver in the future.\n",
1233 pdev->vendor, pdev->device);
1234 DPRINTK(PROBE, WARNING, "please use the \"e1000e\" "
1235 "driver instead.\n");
1236 }
1237
1238 /* reset the hardware with the new settings */ 1049 /* reset the hardware with the new settings */
1239 e1000_reset(adapter); 1050 e1000_reset(adapter);
1240 1051
1241 /* If the controller is 82573 and f/w is AMT, do not set
1242 * DRV_LOAD until the interface is up. For all other cases,
1243 * let the f/w know that the h/w is now under the control
1244 * of the driver. */
1245 if (hw->mac_type != e1000_82573 ||
1246 !e1000_check_mng_mode(hw))
1247 e1000_get_hw_control(adapter);
1248
1249 strcpy(netdev->name, "eth%d"); 1052 strcpy(netdev->name, "eth%d");
1250 err = register_netdev(netdev); 1053 err = register_netdev(netdev);
1251 if (err) 1054 if (err)
@@ -1260,14 +1063,11 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
1260 return 0; 1063 return 0;
1261 1064
1262err_register: 1065err_register:
1263 e1000_release_hw_control(adapter);
1264err_eeprom: 1066err_eeprom:
1265 if (!e1000_check_phy_reset_block(hw)) 1067 e1000_phy_hw_reset(hw);
1266 e1000_phy_hw_reset(hw);
1267 1068
1268 if (hw->flash_address) 1069 if (hw->flash_address)
1269 iounmap(hw->flash_address); 1070 iounmap(hw->flash_address);
1270err_flashmap:
1271 kfree(adapter->tx_ring); 1071 kfree(adapter->tx_ring);
1272 kfree(adapter->rx_ring); 1072 kfree(adapter->rx_ring);
1273err_sw_init: 1073err_sw_init:
@@ -1298,18 +1098,18 @@ static void __devexit e1000_remove(struct pci_dev *pdev)
1298 struct e1000_adapter *adapter = netdev_priv(netdev); 1098 struct e1000_adapter *adapter = netdev_priv(netdev);
1299 struct e1000_hw *hw = &adapter->hw; 1099 struct e1000_hw *hw = &adapter->hw;
1300 1100
1101 set_bit(__E1000_DOWN, &adapter->flags);
1102 del_timer_sync(&adapter->tx_fifo_stall_timer);
1103 del_timer_sync(&adapter->watchdog_timer);
1104 del_timer_sync(&adapter->phy_info_timer);
1105
1301 cancel_work_sync(&adapter->reset_task); 1106 cancel_work_sync(&adapter->reset_task);
1302 1107
1303 e1000_release_manageability(adapter); 1108 e1000_release_manageability(adapter);
1304 1109
1305 /* Release control of h/w to f/w. If f/w is AMT enabled, this
1306 * would have already happened in close and is redundant. */
1307 e1000_release_hw_control(adapter);
1308
1309 unregister_netdev(netdev); 1110 unregister_netdev(netdev);
1310 1111
1311 if (!e1000_check_phy_reset_block(hw)) 1112 e1000_phy_hw_reset(hw);
1312 e1000_phy_hw_reset(hw);
1313 1113
1314 kfree(adapter->tx_ring); 1114 kfree(adapter->tx_ring);
1315 kfree(adapter->rx_ring); 1115 kfree(adapter->rx_ring);
@@ -1472,12 +1272,6 @@ static int e1000_open(struct net_device *netdev)
1472 e1000_update_mng_vlan(adapter); 1272 e1000_update_mng_vlan(adapter);
1473 } 1273 }
1474 1274
1475 /* If AMT is enabled, let the firmware know that the network
1476 * interface is now open */
1477 if (hw->mac_type == e1000_82573 &&
1478 e1000_check_mng_mode(hw))
1479 e1000_get_hw_control(adapter);
1480
1481 /* before we allocate an interrupt, we must be ready to handle it. 1275 /* before we allocate an interrupt, we must be ready to handle it.
1482 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt 1276 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
1483 * as soon as we call pci_request_irq, so we have to setup our 1277 * as soon as we call pci_request_irq, so we have to setup our
@@ -1503,7 +1297,6 @@ static int e1000_open(struct net_device *netdev)
1503 return E1000_SUCCESS; 1297 return E1000_SUCCESS;
1504 1298
1505err_req_irq: 1299err_req_irq:
1506 e1000_release_hw_control(adapter);
1507 e1000_power_down_phy(adapter); 1300 e1000_power_down_phy(adapter);
1508 e1000_free_all_rx_resources(adapter); 1301 e1000_free_all_rx_resources(adapter);
1509err_setup_rx: 1302err_setup_rx:
@@ -1548,12 +1341,6 @@ static int e1000_close(struct net_device *netdev)
1548 e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id); 1341 e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
1549 } 1342 }
1550 1343
1551 /* If AMT is enabled, let the firmware know that the network
1552 * interface is now closed */
1553 if (hw->mac_type == e1000_82573 &&
1554 e1000_check_mng_mode(hw))
1555 e1000_release_hw_control(adapter);
1556
1557 return 0; 1344 return 0;
1558} 1345}
1559 1346
@@ -1692,7 +1479,7 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
1692{ 1479{
1693 u64 tdba; 1480 u64 tdba;
1694 struct e1000_hw *hw = &adapter->hw; 1481 struct e1000_hw *hw = &adapter->hw;
1695 u32 tdlen, tctl, tipg, tarc; 1482 u32 tdlen, tctl, tipg;
1696 u32 ipgr1, ipgr2; 1483 u32 ipgr1, ipgr2;
1697 1484
1698 /* Setup the HW Tx Head and Tail descriptor pointers */ 1485 /* Setup the HW Tx Head and Tail descriptor pointers */
@@ -1714,8 +1501,7 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
1714 } 1501 }
1715 1502
1716 /* Set the default values for the Tx Inter Packet Gap timer */ 1503 /* Set the default values for the Tx Inter Packet Gap timer */
1717 if (hw->mac_type <= e1000_82547_rev_2 && 1504 if ((hw->media_type == e1000_media_type_fiber ||
1718 (hw->media_type == e1000_media_type_fiber ||
1719 hw->media_type == e1000_media_type_internal_serdes)) 1505 hw->media_type == e1000_media_type_internal_serdes))
1720 tipg = DEFAULT_82543_TIPG_IPGT_FIBER; 1506 tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
1721 else 1507 else
@@ -1728,10 +1514,6 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
1728 ipgr1 = DEFAULT_82542_TIPG_IPGR1; 1514 ipgr1 = DEFAULT_82542_TIPG_IPGR1;
1729 ipgr2 = DEFAULT_82542_TIPG_IPGR2; 1515 ipgr2 = DEFAULT_82542_TIPG_IPGR2;
1730 break; 1516 break;
1731 case e1000_80003es2lan:
1732 ipgr1 = DEFAULT_82543_TIPG_IPGR1;
1733 ipgr2 = DEFAULT_80003ES2LAN_TIPG_IPGR2;
1734 break;
1735 default: 1517 default:
1736 ipgr1 = DEFAULT_82543_TIPG_IPGR1; 1518 ipgr1 = DEFAULT_82543_TIPG_IPGR1;
1737 ipgr2 = DEFAULT_82543_TIPG_IPGR2; 1519 ipgr2 = DEFAULT_82543_TIPG_IPGR2;
@@ -1754,21 +1536,6 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
1754 tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC | 1536 tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
1755 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT); 1537 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
1756 1538
1757 if (hw->mac_type == e1000_82571 || hw->mac_type == e1000_82572) {
1758 tarc = er32(TARC0);
1759 /* set the speed mode bit, we'll clear it if we're not at
1760 * gigabit link later */
1761 tarc |= (1 << 21);
1762 ew32(TARC0, tarc);
1763 } else if (hw->mac_type == e1000_80003es2lan) {
1764 tarc = er32(TARC0);
1765 tarc |= 1;
1766 ew32(TARC0, tarc);
1767 tarc = er32(TARC1);
1768 tarc |= 1;
1769 ew32(TARC1, tarc);
1770 }
1771
1772 e1000_config_collision_dist(hw); 1539 e1000_config_collision_dist(hw);
1773 1540
1774 /* Setup Transmit Descriptor Settings for eop descriptor */ 1541 /* Setup Transmit Descriptor Settings for eop descriptor */
@@ -1804,7 +1571,6 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
1804static int e1000_setup_rx_resources(struct e1000_adapter *adapter, 1571static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
1805 struct e1000_rx_ring *rxdr) 1572 struct e1000_rx_ring *rxdr)
1806{ 1573{
1807 struct e1000_hw *hw = &adapter->hw;
1808 struct pci_dev *pdev = adapter->pdev; 1574 struct pci_dev *pdev = adapter->pdev;
1809 int size, desc_len; 1575 int size, desc_len;
1810 1576
@@ -1817,10 +1583,7 @@ static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
1817 } 1583 }
1818 memset(rxdr->buffer_info, 0, size); 1584 memset(rxdr->buffer_info, 0, size);
1819 1585
1820 if (hw->mac_type <= e1000_82547_rev_2) 1586 desc_len = sizeof(struct e1000_rx_desc);
1821 desc_len = sizeof(struct e1000_rx_desc);
1822 else
1823 desc_len = sizeof(union e1000_rx_desc_packet_split);
1824 1587
1825 /* Round up to nearest 4K */ 1588 /* Round up to nearest 4K */
1826 1589
@@ -1977,7 +1740,7 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
1977{ 1740{
1978 u64 rdba; 1741 u64 rdba;
1979 struct e1000_hw *hw = &adapter->hw; 1742 struct e1000_hw *hw = &adapter->hw;
1980 u32 rdlen, rctl, rxcsum, ctrl_ext; 1743 u32 rdlen, rctl, rxcsum;
1981 1744
1982 if (adapter->netdev->mtu > ETH_DATA_LEN) { 1745 if (adapter->netdev->mtu > ETH_DATA_LEN) {
1983 rdlen = adapter->rx_ring[0].count * 1746 rdlen = adapter->rx_ring[0].count *
@@ -2004,17 +1767,6 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
2004 ew32(ITR, 1000000000 / (adapter->itr * 256)); 1767 ew32(ITR, 1000000000 / (adapter->itr * 256));
2005 } 1768 }
2006 1769
2007 if (hw->mac_type >= e1000_82571) {
2008 ctrl_ext = er32(CTRL_EXT);
2009 /* Reset delay timers after every interrupt */
2010 ctrl_ext |= E1000_CTRL_EXT_INT_TIMER_CLR;
2011 /* Auto-Mask interrupts upon ICR access */
2012 ctrl_ext |= E1000_CTRL_EXT_IAME;
2013 ew32(IAM, 0xffffffff);
2014 ew32(CTRL_EXT, ctrl_ext);
2015 E1000_WRITE_FLUSH();
2016 }
2017
2018 /* Setup the HW Rx Head and Tail Descriptor Pointers and 1770 /* Setup the HW Rx Head and Tail Descriptor Pointers and
2019 * the Base and Length of the Rx Descriptor Ring */ 1771 * the Base and Length of the Rx Descriptor Ring */
2020 switch (adapter->num_rx_queues) { 1772 switch (adapter->num_rx_queues) {
@@ -2329,22 +2081,6 @@ static int e1000_set_mac(struct net_device *netdev, void *p)
2329 2081
2330 e1000_rar_set(hw, hw->mac_addr, 0); 2082 e1000_rar_set(hw, hw->mac_addr, 0);
2331 2083
2332 /* With 82571 controllers, LAA may be overwritten (with the default)
2333 * due to controller reset from the other port. */
2334 if (hw->mac_type == e1000_82571) {
2335 /* activate the work around */
2336 hw->laa_is_present = 1;
2337
2338 /* Hold a copy of the LAA in RAR[14] This is done so that
2339 * between the time RAR[0] gets clobbered and the time it
2340 * gets fixed (in e1000_watchdog), the actual LAA is in one
2341 * of the RARs and no incoming packets directed to this port
2342 * are dropped. Eventaully the LAA will be in RAR[0] and
2343 * RAR[14] */
2344 e1000_rar_set(hw, hw->mac_addr,
2345 E1000_RAR_ENTRIES - 1);
2346 }
2347
2348 if (hw->mac_type == e1000_82542_rev2_0) 2084 if (hw->mac_type == e1000_82542_rev2_0)
2349 e1000_leave_82542_rst(adapter); 2085 e1000_leave_82542_rst(adapter);
2350 2086
@@ -2371,9 +2107,7 @@ static void e1000_set_rx_mode(struct net_device *netdev)
2371 u32 rctl; 2107 u32 rctl;
2372 u32 hash_value; 2108 u32 hash_value;
2373 int i, rar_entries = E1000_RAR_ENTRIES; 2109 int i, rar_entries = E1000_RAR_ENTRIES;
2374 int mta_reg_count = (hw->mac_type == e1000_ich8lan) ? 2110 int mta_reg_count = E1000_NUM_MTA_REGISTERS;
2375 E1000_NUM_MTA_REGISTERS_ICH8LAN :
2376 E1000_NUM_MTA_REGISTERS;
2377 u32 *mcarray = kcalloc(mta_reg_count, sizeof(u32), GFP_ATOMIC); 2111 u32 *mcarray = kcalloc(mta_reg_count, sizeof(u32), GFP_ATOMIC);
2378 2112
2379 if (!mcarray) { 2113 if (!mcarray) {
@@ -2381,13 +2115,6 @@ static void e1000_set_rx_mode(struct net_device *netdev)
2381 return; 2115 return;
2382 } 2116 }
2383 2117
2384 if (hw->mac_type == e1000_ich8lan)
2385 rar_entries = E1000_RAR_ENTRIES_ICH8LAN;
2386
2387 /* reserve RAR[14] for LAA over-write work-around */
2388 if (hw->mac_type == e1000_82571)
2389 rar_entries--;
2390
2391 /* Check for Promiscuous and All Multicast modes */ 2118 /* Check for Promiscuous and All Multicast modes */
2392 2119
2393 rctl = er32(RCTL); 2120 rctl = er32(RCTL);
@@ -2396,15 +2123,13 @@ static void e1000_set_rx_mode(struct net_device *netdev)
2396 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE); 2123 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
2397 rctl &= ~E1000_RCTL_VFE; 2124 rctl &= ~E1000_RCTL_VFE;
2398 } else { 2125 } else {
2399 if (netdev->flags & IFF_ALLMULTI) { 2126 if (netdev->flags & IFF_ALLMULTI)
2400 rctl |= E1000_RCTL_MPE; 2127 rctl |= E1000_RCTL_MPE;
2401 } else { 2128 else
2402 rctl &= ~E1000_RCTL_MPE; 2129 rctl &= ~E1000_RCTL_MPE;
2403 } 2130 /* Enable VLAN filter if there is a VLAN */
2404 if (adapter->hw.mac_type != e1000_ich8lan) 2131 if (adapter->vlgrp)
2405 /* Enable VLAN filter if there is a VLAN */ 2132 rctl |= E1000_RCTL_VFE;
2406 if (adapter->vlgrp)
2407 rctl |= E1000_RCTL_VFE;
2408 } 2133 }
2409 2134
2410 if (netdev->uc.count > rar_entries - 1) { 2135 if (netdev->uc.count > rar_entries - 1) {
@@ -2427,7 +2152,6 @@ static void e1000_set_rx_mode(struct net_device *netdev)
2427 * 2152 *
2428 * RAR 0 is used for the station MAC adddress 2153 * RAR 0 is used for the station MAC adddress
2429 * if there are not 14 addresses, go ahead and clear the filters 2154 * if there are not 14 addresses, go ahead and clear the filters
2430 * -- with 82571 controllers only 0-13 entries are filled here
2431 */ 2155 */
2432 i = 1; 2156 i = 1;
2433 if (use_uc) 2157 if (use_uc)
@@ -2521,12 +2245,46 @@ static void e1000_82547_tx_fifo_stall(unsigned long data)
2521 adapter->tx_fifo_head = 0; 2245 adapter->tx_fifo_head = 0;
2522 atomic_set(&adapter->tx_fifo_stall, 0); 2246 atomic_set(&adapter->tx_fifo_stall, 0);
2523 netif_wake_queue(netdev); 2247 netif_wake_queue(netdev);
2524 } else { 2248 } else if (!test_bit(__E1000_DOWN, &adapter->flags)) {
2525 mod_timer(&adapter->tx_fifo_stall_timer, jiffies + 1); 2249 mod_timer(&adapter->tx_fifo_stall_timer, jiffies + 1);
2526 } 2250 }
2527 } 2251 }
2528} 2252}
2529 2253
2254static bool e1000_has_link(struct e1000_adapter *adapter)
2255{
2256 struct e1000_hw *hw = &adapter->hw;
2257 bool link_active = false;
2258
2259 /* get_link_status is set on LSC (link status) interrupt or
2260 * rx sequence error interrupt. get_link_status will stay
2261 * false until the e1000_check_for_link establishes link
2262 * for copper adapters ONLY
2263 */
2264 switch (hw->media_type) {
2265 case e1000_media_type_copper:
2266 if (hw->get_link_status) {
2267 e1000_check_for_link(hw);
2268 link_active = !hw->get_link_status;
2269 } else {
2270 link_active = true;
2271 }
2272 break;
2273 case e1000_media_type_fiber:
2274 e1000_check_for_link(hw);
2275 link_active = !!(er32(STATUS) & E1000_STATUS_LU);
2276 break;
2277 case e1000_media_type_internal_serdes:
2278 e1000_check_for_link(hw);
2279 link_active = hw->serdes_has_link;
2280 break;
2281 default:
2282 break;
2283 }
2284
2285 return link_active;
2286}
2287
2530/** 2288/**
2531 * e1000_watchdog - Timer Call-back 2289 * e1000_watchdog - Timer Call-back
2532 * @data: pointer to adapter cast into an unsigned long 2290 * @data: pointer to adapter cast into an unsigned long
@@ -2538,33 +2296,16 @@ static void e1000_watchdog(unsigned long data)
2538 struct net_device *netdev = adapter->netdev; 2296 struct net_device *netdev = adapter->netdev;
2539 struct e1000_tx_ring *txdr = adapter->tx_ring; 2297 struct e1000_tx_ring *txdr = adapter->tx_ring;
2540 u32 link, tctl; 2298 u32 link, tctl;
2541 s32 ret_val;
2542
2543 ret_val = e1000_check_for_link(hw);
2544 if ((ret_val == E1000_ERR_PHY) &&
2545 (hw->phy_type == e1000_phy_igp_3) &&
2546 (er32(CTRL) & E1000_PHY_CTRL_GBE_DISABLE)) {
2547 /* See e1000_kumeran_lock_loss_workaround() */
2548 DPRINTK(LINK, INFO,
2549 "Gigabit has been disabled, downgrading speed\n");
2550 }
2551 2299
2552 if (hw->mac_type == e1000_82573) { 2300 link = e1000_has_link(adapter);
2553 e1000_enable_tx_pkt_filtering(hw); 2301 if ((netif_carrier_ok(netdev)) && link)
2554 if (adapter->mng_vlan_id != hw->mng_cookie.vlan_id) 2302 goto link_up;
2555 e1000_update_mng_vlan(adapter);
2556 }
2557
2558 if ((hw->media_type == e1000_media_type_internal_serdes) &&
2559 !(er32(TXCW) & E1000_TXCW_ANE))
2560 link = !hw->serdes_link_down;
2561 else
2562 link = er32(STATUS) & E1000_STATUS_LU;
2563 2303
2564 if (link) { 2304 if (link) {
2565 if (!netif_carrier_ok(netdev)) { 2305 if (!netif_carrier_ok(netdev)) {
2566 u32 ctrl; 2306 u32 ctrl;
2567 bool txb2b = true; 2307 bool txb2b = true;
2308 /* update snapshot of PHY registers on LSC */
2568 e1000_get_speed_and_duplex(hw, 2309 e1000_get_speed_and_duplex(hw,
2569 &adapter->link_speed, 2310 &adapter->link_speed,
2570 &adapter->link_duplex); 2311 &adapter->link_duplex);
@@ -2589,7 +2330,7 @@ static void e1000_watchdog(unsigned long data)
2589 case SPEED_10: 2330 case SPEED_10:
2590 txb2b = false; 2331 txb2b = false;
2591 netdev->tx_queue_len = 10; 2332 netdev->tx_queue_len = 10;
2592 adapter->tx_timeout_factor = 8; 2333 adapter->tx_timeout_factor = 16;
2593 break; 2334 break;
2594 case SPEED_100: 2335 case SPEED_100:
2595 txb2b = false; 2336 txb2b = false;
@@ -2598,52 +2339,16 @@ static void e1000_watchdog(unsigned long data)
2598 break; 2339 break;
2599 } 2340 }
2600 2341
2601 if ((hw->mac_type == e1000_82571 || 2342 /* enable transmits in the hardware */
2602 hw->mac_type == e1000_82572) &&
2603 !txb2b) {
2604 u32 tarc0;
2605 tarc0 = er32(TARC0);
2606 tarc0 &= ~(1 << 21);
2607 ew32(TARC0, tarc0);
2608 }
2609
2610 /* disable TSO for pcie and 10/100 speeds, to avoid
2611 * some hardware issues */
2612 if (!adapter->tso_force &&
2613 hw->bus_type == e1000_bus_type_pci_express){
2614 switch (adapter->link_speed) {
2615 case SPEED_10:
2616 case SPEED_100:
2617 DPRINTK(PROBE,INFO,
2618 "10/100 speed: disabling TSO\n");
2619 netdev->features &= ~NETIF_F_TSO;
2620 netdev->features &= ~NETIF_F_TSO6;
2621 break;
2622 case SPEED_1000:
2623 netdev->features |= NETIF_F_TSO;
2624 netdev->features |= NETIF_F_TSO6;
2625 break;
2626 default:
2627 /* oops */
2628 break;
2629 }
2630 }
2631
2632 /* enable transmits in the hardware, need to do this
2633 * after setting TARC0 */
2634 tctl = er32(TCTL); 2343 tctl = er32(TCTL);
2635 tctl |= E1000_TCTL_EN; 2344 tctl |= E1000_TCTL_EN;
2636 ew32(TCTL, tctl); 2345 ew32(TCTL, tctl);
2637 2346
2638 netif_carrier_on(netdev); 2347 netif_carrier_on(netdev);
2639 mod_timer(&adapter->phy_info_timer, round_jiffies(jiffies + 2 * HZ)); 2348 if (!test_bit(__E1000_DOWN, &adapter->flags))
2349 mod_timer(&adapter->phy_info_timer,
2350 round_jiffies(jiffies + 2 * HZ));
2640 adapter->smartspeed = 0; 2351 adapter->smartspeed = 0;
2641 } else {
2642 /* make sure the receive unit is started */
2643 if (hw->rx_needs_kicking) {
2644 u32 rctl = er32(RCTL);
2645 ew32(RCTL, rctl | E1000_RCTL_EN);
2646 }
2647 } 2352 }
2648 } else { 2353 } else {
2649 if (netif_carrier_ok(netdev)) { 2354 if (netif_carrier_ok(netdev)) {
@@ -2652,21 +2357,16 @@ static void e1000_watchdog(unsigned long data)
2652 printk(KERN_INFO "e1000: %s NIC Link is Down\n", 2357 printk(KERN_INFO "e1000: %s NIC Link is Down\n",
2653 netdev->name); 2358 netdev->name);
2654 netif_carrier_off(netdev); 2359 netif_carrier_off(netdev);
2655 mod_timer(&adapter->phy_info_timer, round_jiffies(jiffies + 2 * HZ)); 2360
2656 2361 if (!test_bit(__E1000_DOWN, &adapter->flags))
2657 /* 80003ES2LAN workaround-- 2362 mod_timer(&adapter->phy_info_timer,
2658 * For packet buffer work-around on link down event; 2363 round_jiffies(jiffies + 2 * HZ));
2659 * disable receives in the ISR and
2660 * reset device here in the watchdog
2661 */
2662 if (hw->mac_type == e1000_80003es2lan)
2663 /* reset device */
2664 schedule_work(&adapter->reset_task);
2665 } 2364 }
2666 2365
2667 e1000_smartspeed(adapter); 2366 e1000_smartspeed(adapter);
2668 } 2367 }
2669 2368
2369link_up:
2670 e1000_update_stats(adapter); 2370 e1000_update_stats(adapter);
2671 2371
2672 hw->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old; 2372 hw->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
@@ -2700,13 +2400,10 @@ static void e1000_watchdog(unsigned long data)
2700 /* Force detection of hung controller every watchdog period */ 2400 /* Force detection of hung controller every watchdog period */
2701 adapter->detect_tx_hung = true; 2401 adapter->detect_tx_hung = true;
2702 2402
2703 /* With 82571 controllers, LAA may be overwritten due to controller
2704 * reset from the other port. Set the appropriate LAA in RAR[0] */
2705 if (hw->mac_type == e1000_82571 && hw->laa_is_present)
2706 e1000_rar_set(hw, hw->mac_addr, 0);
2707
2708 /* Reset the timer */ 2403 /* Reset the timer */
2709 mod_timer(&adapter->watchdog_timer, round_jiffies(jiffies + 2 * HZ)); 2404 if (!test_bit(__E1000_DOWN, &adapter->flags))
2405 mod_timer(&adapter->watchdog_timer,
2406 round_jiffies(jiffies + 2 * HZ));
2710} 2407}
2711 2408
2712enum latency_range { 2409enum latency_range {
@@ -2718,6 +2415,11 @@ enum latency_range {
2718 2415
2719/** 2416/**
2720 * e1000_update_itr - update the dynamic ITR value based on statistics 2417 * e1000_update_itr - update the dynamic ITR value based on statistics
2418 * @adapter: pointer to adapter
2419 * @itr_setting: current adapter->itr
2420 * @packets: the number of packets during this measurement interval
2421 * @bytes: the number of bytes during this measurement interval
2422 *
2721 * Stores a new ITR value based on packets and byte 2423 * Stores a new ITR value based on packets and byte
2722 * counts during the last interrupt. The advantage of per interrupt 2424 * counts during the last interrupt. The advantage of per interrupt
2723 * computation is faster updates and more accurate ITR for the current 2425 * computation is faster updates and more accurate ITR for the current
@@ -2727,10 +2429,6 @@ enum latency_range {
2727 * while increasing bulk throughput. 2429 * while increasing bulk throughput.
2728 * this functionality is controlled by the InterruptThrottleRate module 2430 * this functionality is controlled by the InterruptThrottleRate module
2729 * parameter (see e1000_param.c) 2431 * parameter (see e1000_param.c)
2730 * @adapter: pointer to adapter
2731 * @itr_setting: current adapter->itr
2732 * @packets: the number of packets during this measurement interval
2733 * @bytes: the number of bytes during this measurement interval
2734 **/ 2432 **/
2735static unsigned int e1000_update_itr(struct e1000_adapter *adapter, 2433static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
2736 u16 itr_setting, int packets, int bytes) 2434 u16 itr_setting, int packets, int bytes)
@@ -3035,8 +2733,9 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
3035 size -= 4; 2733 size -= 4;
3036 2734
3037 buffer_info->length = size; 2735 buffer_info->length = size;
3038 buffer_info->dma = skb_shinfo(skb)->dma_head + offset; 2736 /* set time_stamp *before* dma to help avoid a possible race */
3039 buffer_info->time_stamp = jiffies; 2737 buffer_info->time_stamp = jiffies;
2738 buffer_info->dma = skb_shinfo(skb)->dma_head + offset;
3040 buffer_info->next_to_watch = i; 2739 buffer_info->next_to_watch = i;
3041 2740
3042 len -= size; 2741 len -= size;
@@ -3071,13 +2770,14 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
3071 * Avoid terminating buffers within evenly-aligned 2770 * Avoid terminating buffers within evenly-aligned
3072 * dwords. */ 2771 * dwords. */
3073 if (unlikely(adapter->pcix_82544 && 2772 if (unlikely(adapter->pcix_82544 &&
3074 !((unsigned long)(frag->page+offset+size-1) & 4) && 2773 !((unsigned long)(page_to_phys(frag->page) + offset
3075 size > 4)) 2774 + size - 1) & 4) &&
2775 size > 4))
3076 size -= 4; 2776 size -= 4;
3077 2777
3078 buffer_info->length = size; 2778 buffer_info->length = size;
3079 buffer_info->dma = map[f] + offset;
3080 buffer_info->time_stamp = jiffies; 2779 buffer_info->time_stamp = jiffies;
2780 buffer_info->dma = map[f] + offset;
3081 buffer_info->next_to_watch = i; 2781 buffer_info->next_to_watch = i;
3082 2782
3083 len -= size; 2783 len -= size;
@@ -3186,41 +2886,6 @@ no_fifo_stall_required:
3186 return 0; 2886 return 0;
3187} 2887}
3188 2888
3189#define MINIMUM_DHCP_PACKET_SIZE 282
3190static int e1000_transfer_dhcp_info(struct e1000_adapter *adapter,
3191 struct sk_buff *skb)
3192{
3193 struct e1000_hw *hw = &adapter->hw;
3194 u16 length, offset;
3195 if (vlan_tx_tag_present(skb)) {
3196 if (!((vlan_tx_tag_get(skb) == hw->mng_cookie.vlan_id) &&
3197 ( hw->mng_cookie.status &
3198 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) )
3199 return 0;
3200 }
3201 if (skb->len > MINIMUM_DHCP_PACKET_SIZE) {
3202 struct ethhdr *eth = (struct ethhdr *)skb->data;
3203 if ((htons(ETH_P_IP) == eth->h_proto)) {
3204 const struct iphdr *ip =
3205 (struct iphdr *)((u8 *)skb->data+14);
3206 if (IPPROTO_UDP == ip->protocol) {
3207 struct udphdr *udp =
3208 (struct udphdr *)((u8 *)ip +
3209 (ip->ihl << 2));
3210 if (ntohs(udp->dest) == 67) {
3211 offset = (u8 *)udp + 8 - skb->data;
3212 length = skb->len - offset;
3213
3214 return e1000_mng_write_dhcp_info(hw,
3215 (u8 *)udp + 8,
3216 length);
3217 }
3218 }
3219 }
3220 }
3221 return 0;
3222}
3223
3224static int __e1000_maybe_stop_tx(struct net_device *netdev, int size) 2889static int __e1000_maybe_stop_tx(struct net_device *netdev, int size)
3225{ 2890{
3226 struct e1000_adapter *adapter = netdev_priv(netdev); 2891 struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -3279,11 +2944,6 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
3279 return NETDEV_TX_OK; 2944 return NETDEV_TX_OK;
3280 } 2945 }
3281 2946
3282 /* 82571 and newer doesn't need the workaround that limited descriptor
3283 * length to 4kB */
3284 if (hw->mac_type >= e1000_82571)
3285 max_per_txd = 8192;
3286
3287 mss = skb_shinfo(skb)->gso_size; 2947 mss = skb_shinfo(skb)->gso_size;
3288 /* The controller does a simple calculation to 2948 /* The controller does a simple calculation to
3289 * make sure there is enough room in the FIFO before 2949 * make sure there is enough room in the FIFO before
@@ -3296,9 +2956,6 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
3296 max_per_txd = min(mss << 2, max_per_txd); 2956 max_per_txd = min(mss << 2, max_per_txd);
3297 max_txd_pwr = fls(max_per_txd) - 1; 2957 max_txd_pwr = fls(max_per_txd) - 1;
3298 2958
3299 /* TSO Workaround for 82571/2/3 Controllers -- if skb->data
3300 * points to just header, pull a few bytes of payload from
3301 * frags into skb->data */
3302 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); 2959 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
3303 if (skb->data_len && hdr_len == len) { 2960 if (skb->data_len && hdr_len == len) {
3304 switch (hw->mac_type) { 2961 switch (hw->mac_type) {
@@ -3313,10 +2970,6 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
3313 if ((unsigned long)(skb_tail_pointer(skb) - 1) & 4) 2970 if ((unsigned long)(skb_tail_pointer(skb) - 1) & 4)
3314 break; 2971 break;
3315 /* fall through */ 2972 /* fall through */
3316 case e1000_82571:
3317 case e1000_82572:
3318 case e1000_82573:
3319 case e1000_ich8lan:
3320 pull_size = min((unsigned int)4, skb->data_len); 2973 pull_size = min((unsigned int)4, skb->data_len);
3321 if (!__pskb_pull_tail(skb, pull_size)) { 2974 if (!__pskb_pull_tail(skb, pull_size)) {
3322 DPRINTK(DRV, ERR, 2975 DPRINTK(DRV, ERR,
@@ -3361,11 +3014,6 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
3361 if (adapter->pcix_82544) 3014 if (adapter->pcix_82544)
3362 count += nr_frags; 3015 count += nr_frags;
3363 3016
3364
3365 if (hw->tx_pkt_filtering &&
3366 (hw->mac_type == e1000_82573))
3367 e1000_transfer_dhcp_info(adapter, skb);
3368
3369 /* need: count + 2 desc gap to keep tail from touching 3017 /* need: count + 2 desc gap to keep tail from touching
3370 * head, otherwise try next time */ 3018 * head, otherwise try next time */
3371 if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2))) 3019 if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2)))
@@ -3374,7 +3022,9 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
3374 if (unlikely(hw->mac_type == e1000_82547)) { 3022 if (unlikely(hw->mac_type == e1000_82547)) {
3375 if (unlikely(e1000_82547_fifo_workaround(adapter, skb))) { 3023 if (unlikely(e1000_82547_fifo_workaround(adapter, skb))) {
3376 netif_stop_queue(netdev); 3024 netif_stop_queue(netdev);
3377 mod_timer(&adapter->tx_fifo_stall_timer, jiffies + 1); 3025 if (!test_bit(__E1000_DOWN, &adapter->flags))
3026 mod_timer(&adapter->tx_fifo_stall_timer,
3027 jiffies + 1);
3378 return NETDEV_TX_BUSY; 3028 return NETDEV_TX_BUSY;
3379 } 3029 }
3380 } 3030 }
@@ -3393,14 +3043,12 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
3393 } 3043 }
3394 3044
3395 if (likely(tso)) { 3045 if (likely(tso)) {
3396 tx_ring->last_tx_tso = 1; 3046 if (likely(hw->mac_type != e1000_82544))
3047 tx_ring->last_tx_tso = 1;
3397 tx_flags |= E1000_TX_FLAGS_TSO; 3048 tx_flags |= E1000_TX_FLAGS_TSO;
3398 } else if (likely(e1000_tx_csum(adapter, tx_ring, skb))) 3049 } else if (likely(e1000_tx_csum(adapter, tx_ring, skb)))
3399 tx_flags |= E1000_TX_FLAGS_CSUM; 3050 tx_flags |= E1000_TX_FLAGS_CSUM;
3400 3051
3401 /* Old method was to assume IPv4 packet by default if TSO was enabled.
3402 * 82571 hardware supports TSO capabilities for IPv6 as well...
3403 * no longer assume, we must. */
3404 if (likely(skb->protocol == htons(ETH_P_IP))) 3052 if (likely(skb->protocol == htons(ETH_P_IP)))
3405 tx_flags |= E1000_TX_FLAGS_IPV4; 3053 tx_flags |= E1000_TX_FLAGS_IPV4;
3406 3054
@@ -3472,7 +3120,6 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
3472 struct e1000_adapter *adapter = netdev_priv(netdev); 3120 struct e1000_adapter *adapter = netdev_priv(netdev);
3473 struct e1000_hw *hw = &adapter->hw; 3121 struct e1000_hw *hw = &adapter->hw;
3474 int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE; 3122 int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
3475 u16 eeprom_data = 0;
3476 3123
3477 if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) || 3124 if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) ||
3478 (max_frame > MAX_JUMBO_FRAME_SIZE)) { 3125 (max_frame > MAX_JUMBO_FRAME_SIZE)) {
@@ -3483,44 +3130,23 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
3483 /* Adapter-specific max frame size limits. */ 3130 /* Adapter-specific max frame size limits. */
3484 switch (hw->mac_type) { 3131 switch (hw->mac_type) {
3485 case e1000_undefined ... e1000_82542_rev2_1: 3132 case e1000_undefined ... e1000_82542_rev2_1:
3486 case e1000_ich8lan:
3487 if (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)) { 3133 if (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)) {
3488 DPRINTK(PROBE, ERR, "Jumbo Frames not supported.\n"); 3134 DPRINTK(PROBE, ERR, "Jumbo Frames not supported.\n");
3489 return -EINVAL; 3135 return -EINVAL;
3490 } 3136 }
3491 break; 3137 break;
3492 case e1000_82573:
3493 /* Jumbo Frames not supported if:
3494 * - this is not an 82573L device
3495 * - ASPM is enabled in any way (0x1A bits 3:2) */
3496 e1000_read_eeprom(hw, EEPROM_INIT_3GIO_3, 1,
3497 &eeprom_data);
3498 if ((hw->device_id != E1000_DEV_ID_82573L) ||
3499 (eeprom_data & EEPROM_WORD1A_ASPM_MASK)) {
3500 if (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)) {
3501 DPRINTK(PROBE, ERR,
3502 "Jumbo Frames not supported.\n");
3503 return -EINVAL;
3504 }
3505 break;
3506 }
3507 /* ERT will be enabled later to enable wire speed receives */
3508
3509 /* fall through to get support */
3510 case e1000_82571:
3511 case e1000_82572:
3512 case e1000_80003es2lan:
3513#define MAX_STD_JUMBO_FRAME_SIZE 9234
3514 if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
3515 DPRINTK(PROBE, ERR, "MTU > 9216 not supported.\n");
3516 return -EINVAL;
3517 }
3518 break;
3519 default: 3138 default:
3520 /* Capable of supporting up to MAX_JUMBO_FRAME_SIZE limit. */ 3139 /* Capable of supporting up to MAX_JUMBO_FRAME_SIZE limit. */
3521 break; 3140 break;
3522 } 3141 }
3523 3142
3143 while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
3144 msleep(1);
3145 /* e1000_down has a dependency on max_frame_size */
3146 hw->max_frame_size = max_frame;
3147 if (netif_running(netdev))
3148 e1000_down(adapter);
3149
3524 /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN 3150 /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
3525 * means we reserve 2 more, this pushes us to allocate from the next 3151 * means we reserve 2 more, this pushes us to allocate from the next
3526 * larger slab size. 3152 * larger slab size.
@@ -3549,11 +3175,16 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
3549 (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE))) 3175 (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE)))
3550 adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE; 3176 adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
3551 3177
3178 printk(KERN_INFO "e1000: %s changing MTU from %d to %d\n",
3179 netdev->name, netdev->mtu, new_mtu);
3552 netdev->mtu = new_mtu; 3180 netdev->mtu = new_mtu;
3553 hw->max_frame_size = max_frame;
3554 3181
3555 if (netif_running(netdev)) 3182 if (netif_running(netdev))
3556 e1000_reinit_locked(adapter); 3183 e1000_up(adapter);
3184 else
3185 e1000_reset(adapter);
3186
3187 clear_bit(__E1000_RESETTING, &adapter->flags);
3557 3188
3558 return 0; 3189 return 0;
3559} 3190}
@@ -3596,14 +3227,12 @@ void e1000_update_stats(struct e1000_adapter *adapter)
3596 adapter->stats.mprc += er32(MPRC); 3227 adapter->stats.mprc += er32(MPRC);
3597 adapter->stats.roc += er32(ROC); 3228 adapter->stats.roc += er32(ROC);
3598 3229
3599 if (hw->mac_type != e1000_ich8lan) { 3230 adapter->stats.prc64 += er32(PRC64);
3600 adapter->stats.prc64 += er32(PRC64); 3231 adapter->stats.prc127 += er32(PRC127);
3601 adapter->stats.prc127 += er32(PRC127); 3232 adapter->stats.prc255 += er32(PRC255);
3602 adapter->stats.prc255 += er32(PRC255); 3233 adapter->stats.prc511 += er32(PRC511);
3603 adapter->stats.prc511 += er32(PRC511); 3234 adapter->stats.prc1023 += er32(PRC1023);
3604 adapter->stats.prc1023 += er32(PRC1023); 3235 adapter->stats.prc1522 += er32(PRC1522);
3605 adapter->stats.prc1522 += er32(PRC1522);
3606 }
3607 3236
3608 adapter->stats.symerrs += er32(SYMERRS); 3237 adapter->stats.symerrs += er32(SYMERRS);
3609 adapter->stats.mpc += er32(MPC); 3238 adapter->stats.mpc += er32(MPC);
@@ -3632,14 +3261,12 @@ void e1000_update_stats(struct e1000_adapter *adapter)
3632 adapter->stats.toth += er32(TOTH); 3261 adapter->stats.toth += er32(TOTH);
3633 adapter->stats.tpr += er32(TPR); 3262 adapter->stats.tpr += er32(TPR);
3634 3263
3635 if (hw->mac_type != e1000_ich8lan) { 3264 adapter->stats.ptc64 += er32(PTC64);
3636 adapter->stats.ptc64 += er32(PTC64); 3265 adapter->stats.ptc127 += er32(PTC127);
3637 adapter->stats.ptc127 += er32(PTC127); 3266 adapter->stats.ptc255 += er32(PTC255);
3638 adapter->stats.ptc255 += er32(PTC255); 3267 adapter->stats.ptc511 += er32(PTC511);
3639 adapter->stats.ptc511 += er32(PTC511); 3268 adapter->stats.ptc1023 += er32(PTC1023);
3640 adapter->stats.ptc1023 += er32(PTC1023); 3269 adapter->stats.ptc1522 += er32(PTC1522);
3641 adapter->stats.ptc1522 += er32(PTC1522);
3642 }
3643 3270
3644 adapter->stats.mptc += er32(MPTC); 3271 adapter->stats.mptc += er32(MPTC);
3645 adapter->stats.bptc += er32(BPTC); 3272 adapter->stats.bptc += er32(BPTC);
@@ -3659,20 +3286,6 @@ void e1000_update_stats(struct e1000_adapter *adapter)
3659 adapter->stats.tsctc += er32(TSCTC); 3286 adapter->stats.tsctc += er32(TSCTC);
3660 adapter->stats.tsctfc += er32(TSCTFC); 3287 adapter->stats.tsctfc += er32(TSCTFC);
3661 } 3288 }
3662 if (hw->mac_type > e1000_82547_rev_2) {
3663 adapter->stats.iac += er32(IAC);
3664 adapter->stats.icrxoc += er32(ICRXOC);
3665
3666 if (hw->mac_type != e1000_ich8lan) {
3667 adapter->stats.icrxptc += er32(ICRXPTC);
3668 adapter->stats.icrxatc += er32(ICRXATC);
3669 adapter->stats.ictxptc += er32(ICTXPTC);
3670 adapter->stats.ictxatc += er32(ICTXATC);
3671 adapter->stats.ictxqec += er32(ICTXQEC);
3672 adapter->stats.ictxqmtc += er32(ICTXQMTC);
3673 adapter->stats.icrxdmtc += er32(ICRXDMTC);
3674 }
3675 }
3676 3289
3677 /* Fill out the OS statistics structure */ 3290 /* Fill out the OS statistics structure */
3678 adapter->net_stats.multicast = adapter->stats.mprc; 3291 adapter->net_stats.multicast = adapter->stats.mprc;
@@ -3731,49 +3344,6 @@ void e1000_update_stats(struct e1000_adapter *adapter)
3731} 3344}
3732 3345
3733/** 3346/**
3734 * e1000_intr_msi - Interrupt Handler
3735 * @irq: interrupt number
3736 * @data: pointer to a network interface device structure
3737 **/
3738
3739static irqreturn_t e1000_intr_msi(int irq, void *data)
3740{
3741 struct net_device *netdev = data;
3742 struct e1000_adapter *adapter = netdev_priv(netdev);
3743 struct e1000_hw *hw = &adapter->hw;
3744 u32 icr = er32(ICR);
3745
3746 /* in NAPI mode read ICR disables interrupts using IAM */
3747
3748 if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
3749 hw->get_link_status = 1;
3750 /* 80003ES2LAN workaround-- For packet buffer work-around on
3751 * link down event; disable receives here in the ISR and reset
3752 * adapter in watchdog */
3753 if (netif_carrier_ok(netdev) &&
3754 (hw->mac_type == e1000_80003es2lan)) {
3755 /* disable receives */
3756 u32 rctl = er32(RCTL);
3757 ew32(RCTL, rctl & ~E1000_RCTL_EN);
3758 }
3759 /* guard against interrupt when we're going down */
3760 if (!test_bit(__E1000_DOWN, &adapter->flags))
3761 mod_timer(&adapter->watchdog_timer, jiffies + 1);
3762 }
3763
3764 if (likely(napi_schedule_prep(&adapter->napi))) {
3765 adapter->total_tx_bytes = 0;
3766 adapter->total_tx_packets = 0;
3767 adapter->total_rx_bytes = 0;
3768 adapter->total_rx_packets = 0;
3769 __napi_schedule(&adapter->napi);
3770 } else
3771 e1000_irq_enable(adapter);
3772
3773 return IRQ_HANDLED;
3774}
3775
3776/**
3777 * e1000_intr - Interrupt Handler 3347 * e1000_intr - Interrupt Handler
3778 * @irq: interrupt number 3348 * @irq: interrupt number
3779 * @data: pointer to a network interface device structure 3349 * @data: pointer to a network interface device structure
@@ -3784,43 +3354,22 @@ static irqreturn_t e1000_intr(int irq, void *data)
3784 struct net_device *netdev = data; 3354 struct net_device *netdev = data;
3785 struct e1000_adapter *adapter = netdev_priv(netdev); 3355 struct e1000_adapter *adapter = netdev_priv(netdev);
3786 struct e1000_hw *hw = &adapter->hw; 3356 struct e1000_hw *hw = &adapter->hw;
3787 u32 rctl, icr = er32(ICR); 3357 u32 icr = er32(ICR);
3788 3358
3789 if (unlikely((!icr) || test_bit(__E1000_DOWN, &adapter->flags))) 3359 if (unlikely((!icr) || test_bit(__E1000_DOWN, &adapter->flags)))
3790 return IRQ_NONE; /* Not our interrupt */ 3360 return IRQ_NONE; /* Not our interrupt */
3791 3361
3792 /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
3793 * not set, then the adapter didn't send an interrupt */
3794 if (unlikely(hw->mac_type >= e1000_82571 &&
3795 !(icr & E1000_ICR_INT_ASSERTED)))
3796 return IRQ_NONE;
3797
3798 /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No
3799 * need for the IMC write */
3800
3801 if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) { 3362 if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) {
3802 hw->get_link_status = 1; 3363 hw->get_link_status = 1;
3803 /* 80003ES2LAN workaround--
3804 * For packet buffer work-around on link down event;
3805 * disable receives here in the ISR and
3806 * reset adapter in watchdog
3807 */
3808 if (netif_carrier_ok(netdev) &&
3809 (hw->mac_type == e1000_80003es2lan)) {
3810 /* disable receives */
3811 rctl = er32(RCTL);
3812 ew32(RCTL, rctl & ~E1000_RCTL_EN);
3813 }
3814 /* guard against interrupt when we're going down */ 3364 /* guard against interrupt when we're going down */
3815 if (!test_bit(__E1000_DOWN, &adapter->flags)) 3365 if (!test_bit(__E1000_DOWN, &adapter->flags))
3816 mod_timer(&adapter->watchdog_timer, jiffies + 1); 3366 mod_timer(&adapter->watchdog_timer, jiffies + 1);
3817 } 3367 }
3818 3368
3819 if (unlikely(hw->mac_type < e1000_82571)) { 3369 /* disable interrupts, without the synchronize_irq bit */
3820 /* disable interrupts, without the synchronize_irq bit */ 3370 ew32(IMC, ~0);
3821 ew32(IMC, ~0); 3371 E1000_WRITE_FLUSH();
3822 E1000_WRITE_FLUSH(); 3372
3823 }
3824 if (likely(napi_schedule_prep(&adapter->napi))) { 3373 if (likely(napi_schedule_prep(&adapter->napi))) {
3825 adapter->total_tx_bytes = 0; 3374 adapter->total_tx_bytes = 0;
3826 adapter->total_tx_packets = 0; 3375 adapter->total_tx_packets = 0;
@@ -3844,17 +3393,13 @@ static irqreturn_t e1000_intr(int irq, void *data)
3844static int e1000_clean(struct napi_struct *napi, int budget) 3393static int e1000_clean(struct napi_struct *napi, int budget)
3845{ 3394{
3846 struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, napi); 3395 struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, napi);
3847 struct net_device *poll_dev = adapter->netdev; 3396 int tx_clean_complete = 0, work_done = 0;
3848 int tx_cleaned = 0, work_done = 0;
3849
3850 adapter = netdev_priv(poll_dev);
3851 3397
3852 tx_cleaned = e1000_clean_tx_irq(adapter, &adapter->tx_ring[0]); 3398 tx_clean_complete = e1000_clean_tx_irq(adapter, &adapter->tx_ring[0]);
3853 3399
3854 adapter->clean_rx(adapter, &adapter->rx_ring[0], 3400 adapter->clean_rx(adapter, &adapter->rx_ring[0], &work_done, budget);
3855 &work_done, budget);
3856 3401
3857 if (!tx_cleaned) 3402 if (!tx_clean_complete)
3858 work_done = budget; 3403 work_done = budget;
3859 3404
3860 /* If budget not fully consumed, exit the polling mode */ 3405 /* If budget not fully consumed, exit the polling mode */
@@ -3925,7 +3470,9 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
3925 * sees the new next_to_clean. 3470 * sees the new next_to_clean.
3926 */ 3471 */
3927 smp_mb(); 3472 smp_mb();
3928 if (netif_queue_stopped(netdev)) { 3473
3474 if (netif_queue_stopped(netdev) &&
3475 !(test_bit(__E1000_DOWN, &adapter->flags))) {
3929 netif_wake_queue(netdev); 3476 netif_wake_queue(netdev);
3930 ++adapter->restart_queue; 3477 ++adapter->restart_queue;
3931 } 3478 }
@@ -3935,8 +3482,8 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
3935 /* Detect a transmit hang in hardware, this serializes the 3482 /* Detect a transmit hang in hardware, this serializes the
3936 * check with the clearing of time_stamp and movement of i */ 3483 * check with the clearing of time_stamp and movement of i */
3937 adapter->detect_tx_hung = false; 3484 adapter->detect_tx_hung = false;
3938 if (tx_ring->buffer_info[i].time_stamp && 3485 if (tx_ring->buffer_info[eop].time_stamp &&
3939 time_after(jiffies, tx_ring->buffer_info[i].time_stamp + 3486 time_after(jiffies, tx_ring->buffer_info[eop].time_stamp +
3940 (adapter->tx_timeout_factor * HZ)) 3487 (adapter->tx_timeout_factor * HZ))
3941 && !(er32(STATUS) & E1000_STATUS_TXOFF)) { 3488 && !(er32(STATUS) & E1000_STATUS_TXOFF)) {
3942 3489
@@ -3958,7 +3505,7 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
3958 readl(hw->hw_addr + tx_ring->tdt), 3505 readl(hw->hw_addr + tx_ring->tdt),
3959 tx_ring->next_to_use, 3506 tx_ring->next_to_use,
3960 tx_ring->next_to_clean, 3507 tx_ring->next_to_clean,
3961 tx_ring->buffer_info[i].time_stamp, 3508 tx_ring->buffer_info[eop].time_stamp,
3962 eop, 3509 eop,
3963 jiffies, 3510 jiffies,
3964 eop_desc->upper.fields.status); 3511 eop_desc->upper.fields.status);
@@ -3999,25 +3546,13 @@ static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
3999 return; 3546 return;
4000 } 3547 }
4001 /* TCP/UDP Checksum has not been calculated */ 3548 /* TCP/UDP Checksum has not been calculated */
4002 if (hw->mac_type <= e1000_82547_rev_2) { 3549 if (!(status & E1000_RXD_STAT_TCPCS))
4003 if (!(status & E1000_RXD_STAT_TCPCS)) 3550 return;
4004 return; 3551
4005 } else {
4006 if (!(status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)))
4007 return;
4008 }
4009 /* It must be a TCP or UDP packet with a valid checksum */ 3552 /* It must be a TCP or UDP packet with a valid checksum */
4010 if (likely(status & E1000_RXD_STAT_TCPCS)) { 3553 if (likely(status & E1000_RXD_STAT_TCPCS)) {
4011 /* TCP checksum is good */ 3554 /* TCP checksum is good */
4012 skb->ip_summed = CHECKSUM_UNNECESSARY; 3555 skb->ip_summed = CHECKSUM_UNNECESSARY;
4013 } else if (hw->mac_type > e1000_82547_rev_2) {
4014 /* IP fragment with UDP payload */
4015 /* Hardware complements the payload checksum, so we undo it
4016 * and then put the value in host order for further stack use.
4017 */
4018 __sum16 sum = (__force __sum16)htons(csum);
4019 skb->csum = csum_unfold(~sum);
4020 skb->ip_summed = CHECKSUM_COMPLETE;
4021 } 3556 }
4022 adapter->hw_csum_good++; 3557 adapter->hw_csum_good++;
4023} 3558}
@@ -4814,20 +4349,6 @@ void e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc)
4814 pcix_set_mmrbc(adapter->pdev, mmrbc); 4349 pcix_set_mmrbc(adapter->pdev, mmrbc);
4815} 4350}
4816 4351
4817s32 e1000_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
4818{
4819 struct e1000_adapter *adapter = hw->back;
4820 u16 cap_offset;
4821
4822 cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
4823 if (!cap_offset)
4824 return -E1000_ERR_CONFIG;
4825
4826 pci_read_config_word(adapter->pdev, cap_offset + reg, value);
4827
4828 return E1000_SUCCESS;
4829}
4830
4831void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value) 4352void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value)
4832{ 4353{
4833 outl(value, port); 4354 outl(value, port);
@@ -4850,33 +4371,27 @@ static void e1000_vlan_rx_register(struct net_device *netdev,
4850 ctrl |= E1000_CTRL_VME; 4371 ctrl |= E1000_CTRL_VME;
4851 ew32(CTRL, ctrl); 4372 ew32(CTRL, ctrl);
4852 4373
4853 if (adapter->hw.mac_type != e1000_ich8lan) { 4374 /* enable VLAN receive filtering */
4854 /* enable VLAN receive filtering */ 4375 rctl = er32(RCTL);
4855 rctl = er32(RCTL); 4376 rctl &= ~E1000_RCTL_CFIEN;
4856 rctl &= ~E1000_RCTL_CFIEN; 4377 if (!(netdev->flags & IFF_PROMISC))
4857 if (!(netdev->flags & IFF_PROMISC)) 4378 rctl |= E1000_RCTL_VFE;
4858 rctl |= E1000_RCTL_VFE; 4379 ew32(RCTL, rctl);
4859 ew32(RCTL, rctl); 4380 e1000_update_mng_vlan(adapter);
4860 e1000_update_mng_vlan(adapter);
4861 }
4862 } else { 4381 } else {
4863 /* disable VLAN tag insert/strip */ 4382 /* disable VLAN tag insert/strip */
4864 ctrl = er32(CTRL); 4383 ctrl = er32(CTRL);
4865 ctrl &= ~E1000_CTRL_VME; 4384 ctrl &= ~E1000_CTRL_VME;
4866 ew32(CTRL, ctrl); 4385 ew32(CTRL, ctrl);
4867 4386
4868 if (adapter->hw.mac_type != e1000_ich8lan) { 4387 /* disable VLAN receive filtering */
4869 /* disable VLAN receive filtering */ 4388 rctl = er32(RCTL);
4870 rctl = er32(RCTL); 4389 rctl &= ~E1000_RCTL_VFE;
4871 rctl &= ~E1000_RCTL_VFE; 4390 ew32(RCTL, rctl);
4872 ew32(RCTL, rctl);
4873 4391
4874 if (adapter->mng_vlan_id != 4392 if (adapter->mng_vlan_id != (u16)E1000_MNG_VLAN_NONE) {
4875 (u16)E1000_MNG_VLAN_NONE) { 4393 e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
4876 e1000_vlan_rx_kill_vid(netdev, 4394 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
4877 adapter->mng_vlan_id);
4878 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
4879 }
4880 } 4395 }
4881 } 4396 }
4882 4397
@@ -4913,14 +4428,6 @@ static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
4913 if (!test_bit(__E1000_DOWN, &adapter->flags)) 4428 if (!test_bit(__E1000_DOWN, &adapter->flags))
4914 e1000_irq_enable(adapter); 4429 e1000_irq_enable(adapter);
4915 4430
4916 if ((hw->mng_cookie.status &
4917 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
4918 (vid == adapter->mng_vlan_id)) {
4919 /* release control to f/w */
4920 e1000_release_hw_control(adapter);
4921 return;
4922 }
4923
4924 /* remove VID from filter table */ 4431 /* remove VID from filter table */
4925 index = (vid >> 5) & 0x7F; 4432 index = (vid >> 5) & 0x7F;
4926 vfta = E1000_READ_REG_ARRAY(hw, VFTA, index); 4433 vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
@@ -5031,16 +4538,13 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
5031 } 4538 }
5032 4539
5033 if (hw->media_type == e1000_media_type_fiber || 4540 if (hw->media_type == e1000_media_type_fiber ||
5034 hw->media_type == e1000_media_type_internal_serdes) { 4541 hw->media_type == e1000_media_type_internal_serdes) {
5035 /* keep the laser running in D3 */ 4542 /* keep the laser running in D3 */
5036 ctrl_ext = er32(CTRL_EXT); 4543 ctrl_ext = er32(CTRL_EXT);
5037 ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA; 4544 ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA;
5038 ew32(CTRL_EXT, ctrl_ext); 4545 ew32(CTRL_EXT, ctrl_ext);
5039 } 4546 }
5040 4547
5041 /* Allow time for pending master requests to run */
5042 e1000_disable_pciex_master(hw);
5043
5044 ew32(WUC, E1000_WUC_PME_EN); 4548 ew32(WUC, E1000_WUC_PME_EN);
5045 ew32(WUFC, wufc); 4549 ew32(WUFC, wufc);
5046 } else { 4550 } else {
@@ -5056,16 +4560,9 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
5056 if (adapter->en_mng_pt) 4560 if (adapter->en_mng_pt)
5057 *enable_wake = true; 4561 *enable_wake = true;
5058 4562
5059 if (hw->phy_type == e1000_phy_igp_3)
5060 e1000_phy_powerdown_workaround(hw);
5061
5062 if (netif_running(netdev)) 4563 if (netif_running(netdev))
5063 e1000_free_irq(adapter); 4564 e1000_free_irq(adapter);
5064 4565
5065 /* Release control of h/w to f/w. If f/w is AMT enabled, this
5066 * would have already happened in close and is redundant. */
5067 e1000_release_hw_control(adapter);
5068
5069 pci_disable_device(pdev); 4566 pci_disable_device(pdev);
5070 4567
5071 return 0; 4568 return 0;
@@ -5131,14 +4628,6 @@ static int e1000_resume(struct pci_dev *pdev)
5131 4628
5132 netif_device_attach(netdev); 4629 netif_device_attach(netdev);
5133 4630
5134 /* If the controller is 82573 and f/w is AMT, do not set
5135 * DRV_LOAD until the interface is up. For all other cases,
5136 * let the f/w know that the h/w is now under the control
5137 * of the driver. */
5138 if (hw->mac_type != e1000_82573 ||
5139 !e1000_check_mng_mode(hw))
5140 e1000_get_hw_control(adapter);
5141
5142 return 0; 4631 return 0;
5143} 4632}
5144#endif 4633#endif
@@ -5174,7 +4663,7 @@ static void e1000_netpoll(struct net_device *netdev)
5174/** 4663/**
5175 * e1000_io_error_detected - called when PCI error is detected 4664 * e1000_io_error_detected - called when PCI error is detected
5176 * @pdev: Pointer to PCI device 4665 * @pdev: Pointer to PCI device
5177 * @state: The current pci conneection state 4666 * @state: The current pci connection state
5178 * 4667 *
5179 * This function is called after a PCI bus error affecting 4668 * This function is called after a PCI bus error affecting
5180 * this device has been detected. 4669 * this device has been detected.
@@ -5243,7 +4732,6 @@ static void e1000_io_resume(struct pci_dev *pdev)
5243{ 4732{
5244 struct net_device *netdev = pci_get_drvdata(pdev); 4733 struct net_device *netdev = pci_get_drvdata(pdev);
5245 struct e1000_adapter *adapter = netdev_priv(netdev); 4734 struct e1000_adapter *adapter = netdev_priv(netdev);
5246 struct e1000_hw *hw = &adapter->hw;
5247 4735
5248 e1000_init_manageability(adapter); 4736 e1000_init_manageability(adapter);
5249 4737
@@ -5255,15 +4743,6 @@ static void e1000_io_resume(struct pci_dev *pdev)
5255 } 4743 }
5256 4744
5257 netif_device_attach(netdev); 4745 netif_device_attach(netdev);
5258
5259 /* If the controller is 82573 and f/w is AMT, do not set
5260 * DRV_LOAD until the interface is up. For all other cases,
5261 * let the f/w know that the h/w is now under the control
5262 * of the driver. */
5263 if (hw->mac_type != e1000_82573 ||
5264 !e1000_check_mng_mode(hw))
5265 e1000_get_hw_control(adapter);
5266
5267} 4746}
5268 4747
5269/* e1000_main.c */ 4748/* e1000_main.c */
diff --git a/drivers/net/e1000/e1000_param.c b/drivers/net/e1000/e1000_param.c
index 213437d13154..38d2741ccae9 100644
--- a/drivers/net/e1000/e1000_param.c
+++ b/drivers/net/e1000/e1000_param.c
@@ -518,22 +518,6 @@ void __devinit e1000_check_options(struct e1000_adapter *adapter)
518 adapter->smart_power_down = opt.def; 518 adapter->smart_power_down = opt.def;
519 } 519 }
520 } 520 }
521 { /* Kumeran Lock Loss Workaround */
522 opt = (struct e1000_option) {
523 .type = enable_option,
524 .name = "Kumeran Lock Loss Workaround",
525 .err = "defaulting to Enabled",
526 .def = OPTION_ENABLED
527 };
528
529 if (num_KumeranLockLoss > bd) {
530 unsigned int kmrn_lock_loss = KumeranLockLoss[bd];
531 e1000_validate_option(&kmrn_lock_loss, &opt, adapter);
532 adapter->hw.kmrn_lock_loss_workaround_disabled = !kmrn_lock_loss;
533 } else {
534 adapter->hw.kmrn_lock_loss_workaround_disabled = !opt.def;
535 }
536 }
537 521
538 switch (adapter->hw.media_type) { 522 switch (adapter->hw.media_type) {
539 case e1000_media_type_fiber: 523 case e1000_media_type_fiber:
@@ -626,12 +610,6 @@ static void __devinit e1000_check_copper_options(struct e1000_adapter *adapter)
626 .p = dplx_list }} 610 .p = dplx_list }}
627 }; 611 };
628 612
629 if (e1000_check_phy_reset_block(&adapter->hw)) {
630 DPRINTK(PROBE, INFO,
631 "Link active due to SoL/IDER Session. "
632 "Speed/Duplex/AutoNeg parameter ignored.\n");
633 return;
634 }
635 if (num_Duplex > bd) { 613 if (num_Duplex > bd) {
636 dplx = Duplex[bd]; 614 dplx = Duplex[bd];
637 e1000_validate_option(&dplx, &opt, adapter); 615 e1000_validate_option(&dplx, &opt, adapter);
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index 16c193a6c95c..0687c6aa4e46 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -4982,12 +4982,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
4982 goto err_pci_reg; 4982 goto err_pci_reg;
4983 4983
4984 /* AER (Advanced Error Reporting) hooks */ 4984 /* AER (Advanced Error Reporting) hooks */
4985 err = pci_enable_pcie_error_reporting(pdev); 4985 pci_enable_pcie_error_reporting(pdev);
4986 if (err) {
4987 dev_err(&pdev->dev, "pci_enable_pcie_error_reporting failed "
4988 "0x%x\n", err);
4989 /* non-fatal, continue */
4990 }
4991 4986
4992 pci_set_master(pdev); 4987 pci_set_master(pdev);
4993 /* PCI config space info */ 4988 /* PCI config space info */
@@ -5263,7 +5258,6 @@ static void __devexit e1000_remove(struct pci_dev *pdev)
5263{ 5258{
5264 struct net_device *netdev = pci_get_drvdata(pdev); 5259 struct net_device *netdev = pci_get_drvdata(pdev);
5265 struct e1000_adapter *adapter = netdev_priv(netdev); 5260 struct e1000_adapter *adapter = netdev_priv(netdev);
5266 int err;
5267 5261
5268 /* 5262 /*
5269 * flush_scheduled work may reschedule our watchdog task, so 5263 * flush_scheduled work may reschedule our watchdog task, so
@@ -5299,10 +5293,7 @@ static void __devexit e1000_remove(struct pci_dev *pdev)
5299 free_netdev(netdev); 5293 free_netdev(netdev);
5300 5294
5301 /* AER disable */ 5295 /* AER disable */
5302 err = pci_disable_pcie_error_reporting(pdev); 5296 pci_disable_pcie_error_reporting(pdev);
5303 if (err)
5304 dev_err(&pdev->dev,
5305 "pci_disable_pcie_error_reporting failed 0x%x\n", err);
5306 5297
5307 pci_disable_device(pdev); 5298 pci_disable_device(pdev);
5308} 5299}
diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c
index 33b55f729742..db4b7f1603f6 100644
--- a/drivers/net/hamradio/mkiss.c
+++ b/drivers/net/hamradio/mkiss.c
@@ -258,7 +258,7 @@ static void ax_bump(struct mkiss *ax)
258 } 258 }
259 if (ax->crcmode != CRC_MODE_SMACK && ax->crcauto) { 259 if (ax->crcmode != CRC_MODE_SMACK && ax->crcauto) {
260 printk(KERN_INFO 260 printk(KERN_INFO
261 "mkiss: %s: Switchting to crc-smack\n", 261 "mkiss: %s: Switching to crc-smack\n",
262 ax->dev->name); 262 ax->dev->name);
263 ax->crcmode = CRC_MODE_SMACK; 263 ax->crcmode = CRC_MODE_SMACK;
264 } 264 }
@@ -272,7 +272,7 @@ static void ax_bump(struct mkiss *ax)
272 } 272 }
273 if (ax->crcmode != CRC_MODE_FLEX && ax->crcauto) { 273 if (ax->crcmode != CRC_MODE_FLEX && ax->crcauto) {
274 printk(KERN_INFO 274 printk(KERN_INFO
275 "mkiss: %s: Switchting to crc-flexnet\n", 275 "mkiss: %s: Switching to crc-flexnet\n",
276 ax->dev->name); 276 ax->dev->name);
277 ax->crcmode = CRC_MODE_FLEX; 277 ax->crcmode = CRC_MODE_FLEX;
278 } 278 }
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index 5d6c1530a8c0..714c3a4a44ef 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -1246,12 +1246,7 @@ static int __devinit igb_probe(struct pci_dev *pdev,
1246 if (err) 1246 if (err)
1247 goto err_pci_reg; 1247 goto err_pci_reg;
1248 1248
1249 err = pci_enable_pcie_error_reporting(pdev); 1249 pci_enable_pcie_error_reporting(pdev);
1250 if (err) {
1251 dev_err(&pdev->dev, "pci_enable_pcie_error_reporting failed "
1252 "0x%x\n", err);
1253 /* non-fatal, continue */
1254 }
1255 1250
1256 pci_set_master(pdev); 1251 pci_set_master(pdev);
1257 pci_save_state(pdev); 1252 pci_save_state(pdev);
@@ -1628,7 +1623,6 @@ static void __devexit igb_remove(struct pci_dev *pdev)
1628 struct net_device *netdev = pci_get_drvdata(pdev); 1623 struct net_device *netdev = pci_get_drvdata(pdev);
1629 struct igb_adapter *adapter = netdev_priv(netdev); 1624 struct igb_adapter *adapter = netdev_priv(netdev);
1630 struct e1000_hw *hw = &adapter->hw; 1625 struct e1000_hw *hw = &adapter->hw;
1631 int err;
1632 1626
1633 /* flush_scheduled work may reschedule our watchdog task, so 1627 /* flush_scheduled work may reschedule our watchdog task, so
1634 * explicitly disable watchdog tasks from being rescheduled */ 1628 * explicitly disable watchdog tasks from being rescheduled */
@@ -1682,10 +1676,7 @@ static void __devexit igb_remove(struct pci_dev *pdev)
1682 1676
1683 free_netdev(netdev); 1677 free_netdev(netdev);
1684 1678
1685 err = pci_disable_pcie_error_reporting(pdev); 1679 pci_disable_pcie_error_reporting(pdev);
1686 if (err)
1687 dev_err(&pdev->dev,
1688 "pci_disable_pcie_error_reporting failed 0x%x\n", err);
1689 1680
1690 pci_disable_device(pdev); 1681 pci_disable_device(pdev);
1691} 1682}
diff --git a/drivers/net/iseries_veth.c b/drivers/net/iseries_veth.c
index e36e951cbc65..aa7286bc4364 100644
--- a/drivers/net/iseries_veth.c
+++ b/drivers/net/iseries_veth.c
@@ -495,7 +495,7 @@ static void veth_take_cap_ack(struct veth_lpar_connection *cnx,
495 cnx->remote_lp); 495 cnx->remote_lp);
496 } else { 496 } else {
497 memcpy(&cnx->cap_ack_event, event, 497 memcpy(&cnx->cap_ack_event, event,
498 sizeof(&cnx->cap_ack_event)); 498 sizeof(cnx->cap_ack_event));
499 cnx->state |= VETH_STATE_GOTCAPACK; 499 cnx->state |= VETH_STATE_GOTCAPACK;
500 veth_kick_statemachine(cnx); 500 veth_kick_statemachine(cnx);
501 } 501 }
diff --git a/drivers/net/ixgbe/ixgbe_82598.c b/drivers/net/ixgbe/ixgbe_82598.c
index 56b12f3192f1..e2d5343f1275 100644
--- a/drivers/net/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ixgbe/ixgbe_82598.c
@@ -425,7 +425,7 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
425#endif /* CONFIG_DCB */ 425#endif /* CONFIG_DCB */
426 default: 426 default:
427 hw_dbg(hw, "Flow control param set incorrectly\n"); 427 hw_dbg(hw, "Flow control param set incorrectly\n");
428 ret_val = -IXGBE_ERR_CONFIG; 428 ret_val = IXGBE_ERR_CONFIG;
429 goto out; 429 goto out;
430 break; 430 break;
431 } 431 }
diff --git a/drivers/net/ixgbe/ixgbe_common.c b/drivers/net/ixgbe/ixgbe_common.c
index 6621e172df3d..40ff120a9ad4 100644
--- a/drivers/net/ixgbe/ixgbe_common.c
+++ b/drivers/net/ixgbe/ixgbe_common.c
@@ -1355,9 +1355,7 @@ static void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
1355/** 1355/**
1356 * ixgbe_update_uc_addr_list_generic - Updates MAC list of secondary addresses 1356 * ixgbe_update_uc_addr_list_generic - Updates MAC list of secondary addresses
1357 * @hw: pointer to hardware structure 1357 * @hw: pointer to hardware structure
1358 * @addr_list: the list of new addresses 1358 * @uc_list: the list of new addresses
1359 * @addr_count: number of addresses
1360 * @next: iterator function to walk the address list
1361 * 1359 *
1362 * The given list replaces any existing list. Clears the secondary addrs from 1360 * The given list replaces any existing list. Clears the secondary addrs from
1363 * receive address registers. Uses unused receive address registers for the 1361 * receive address registers. Uses unused receive address registers for the
@@ -1663,7 +1661,7 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packetbuf_num)
1663#endif /* CONFIG_DCB */ 1661#endif /* CONFIG_DCB */
1664 default: 1662 default:
1665 hw_dbg(hw, "Flow control param set incorrectly\n"); 1663 hw_dbg(hw, "Flow control param set incorrectly\n");
1666 ret_val = -IXGBE_ERR_CONFIG; 1664 ret_val = IXGBE_ERR_CONFIG;
1667 goto out; 1665 goto out;
1668 break; 1666 break;
1669 } 1667 }
@@ -1734,75 +1732,140 @@ s32 ixgbe_fc_autoneg(struct ixgbe_hw *hw)
1734 s32 ret_val = 0; 1732 s32 ret_val = 0;
1735 ixgbe_link_speed speed; 1733 ixgbe_link_speed speed;
1736 u32 pcs_anadv_reg, pcs_lpab_reg, linkstat; 1734 u32 pcs_anadv_reg, pcs_lpab_reg, linkstat;
1735 u32 links2, anlp1_reg, autoc_reg, links;
1737 bool link_up; 1736 bool link_up;
1738 1737
1739 /* 1738 /*
1740 * AN should have completed when the cable was plugged in. 1739 * AN should have completed when the cable was plugged in.
1741 * Look for reasons to bail out. Bail out if: 1740 * Look for reasons to bail out. Bail out if:
1742 * - FC autoneg is disabled, or if 1741 * - FC autoneg is disabled, or if
1743 * - we don't have multispeed fiber, or if 1742 * - link is not up.
1744 * - we're not running at 1G, or if
1745 * - link is not up, or if
1746 * - link is up but AN did not complete, or if
1747 * - link is up and AN completed but timed out
1748 * 1743 *
1749 * Since we're being called from an LSC, link is already know to be up. 1744 * Since we're being called from an LSC, link is already known to be up.
1750 * So use link_up_wait_to_complete=false. 1745 * So use link_up_wait_to_complete=false.
1751 */ 1746 */
1752 hw->mac.ops.check_link(hw, &speed, &link_up, false); 1747 hw->mac.ops.check_link(hw, &speed, &link_up, false);
1753 linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA); 1748
1754 1749 if (hw->fc.disable_fc_autoneg || (!link_up)) {
1755 if (hw->fc.disable_fc_autoneg ||
1756 !hw->phy.multispeed_fiber ||
1757 (speed != IXGBE_LINK_SPEED_1GB_FULL) ||
1758 !link_up ||
1759 ((linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) ||
1760 ((linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1)) {
1761 hw->fc.fc_was_autonegged = false; 1750 hw->fc.fc_was_autonegged = false;
1762 hw->fc.current_mode = hw->fc.requested_mode; 1751 hw->fc.current_mode = hw->fc.requested_mode;
1763 hw_dbg(hw, "Autoneg FC was skipped.\n");
1764 goto out; 1752 goto out;
1765 } 1753 }
1766 1754
1767 /* 1755 /*
1756 * On backplane, bail out if
1757 * - backplane autoneg was not completed, or if
1758 * - link partner is not AN enabled
1759 */
1760 if (hw->phy.media_type == ixgbe_media_type_backplane) {
1761 links = IXGBE_READ_REG(hw, IXGBE_LINKS);
1762 links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2);
1763 if (((links & IXGBE_LINKS_KX_AN_COMP) == 0) ||
1764 ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0)) {
1765 hw->fc.fc_was_autonegged = false;
1766 hw->fc.current_mode = hw->fc.requested_mode;
1767 goto out;
1768 }
1769 }
1770
1771 /*
1772 * On multispeed fiber at 1g, bail out if
1773 * - link is up but AN did not complete, or if
1774 * - link is up and AN completed but timed out
1775 */
1776 if (hw->phy.multispeed_fiber && (speed == IXGBE_LINK_SPEED_1GB_FULL)) {
1777 linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
1778 if (((linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) ||
1779 ((linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1)) {
1780 hw->fc.fc_was_autonegged = false;
1781 hw->fc.current_mode = hw->fc.requested_mode;
1782 goto out;
1783 }
1784 }
1785
1786 /*
1768 * Read the AN advertisement and LP ability registers and resolve 1787 * Read the AN advertisement and LP ability registers and resolve
1769 * local flow control settings accordingly 1788 * local flow control settings accordingly
1770 */ 1789 */
1771 pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA); 1790 if ((speed == IXGBE_LINK_SPEED_1GB_FULL) &&
1772 pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP); 1791 (hw->phy.media_type != ixgbe_media_type_backplane)) {
1773 if ((pcs_anadv_reg & IXGBE_PCS1GANA_SYM_PAUSE) && 1792 pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
1774 (pcs_lpab_reg & IXGBE_PCS1GANA_SYM_PAUSE)) { 1793 pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
1794 if ((pcs_anadv_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
1795 (pcs_lpab_reg & IXGBE_PCS1GANA_SYM_PAUSE)) {
1796 /*
1797 * Now we need to check if the user selected Rx ONLY
1798 * of pause frames. In this case, we had to advertise
1799 * FULL flow control because we could not advertise RX
1800 * ONLY. Hence, we must now check to see if we need to
1801 * turn OFF the TRANSMISSION of PAUSE frames.
1802 */
1803 if (hw->fc.requested_mode == ixgbe_fc_full) {
1804 hw->fc.current_mode = ixgbe_fc_full;
1805 hw_dbg(hw, "Flow Control = FULL.\n");
1806 } else {
1807 hw->fc.current_mode = ixgbe_fc_rx_pause;
1808 hw_dbg(hw, "Flow Control=RX PAUSE only\n");
1809 }
1810 } else if (!(pcs_anadv_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
1811 (pcs_anadv_reg & IXGBE_PCS1GANA_ASM_PAUSE) &&
1812 (pcs_lpab_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
1813 (pcs_lpab_reg & IXGBE_PCS1GANA_ASM_PAUSE)) {
1814 hw->fc.current_mode = ixgbe_fc_tx_pause;
1815 hw_dbg(hw, "Flow Control = TX PAUSE frames only.\n");
1816 } else if ((pcs_anadv_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
1817 (pcs_anadv_reg & IXGBE_PCS1GANA_ASM_PAUSE) &&
1818 !(pcs_lpab_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
1819 (pcs_lpab_reg & IXGBE_PCS1GANA_ASM_PAUSE)) {
1820 hw->fc.current_mode = ixgbe_fc_rx_pause;
1821 hw_dbg(hw, "Flow Control = RX PAUSE frames only.\n");
1822 } else {
1823 hw->fc.current_mode = ixgbe_fc_none;
1824 hw_dbg(hw, "Flow Control = NONE.\n");
1825 }
1826 }
1827
1828 if (hw->phy.media_type == ixgbe_media_type_backplane) {
1775 /* 1829 /*
1776 * Now we need to check if the user selected Rx ONLY 1830 * Read the 10g AN autoc and LP ability registers and resolve
1777 * of pause frames. In this case, we had to advertise 1831 * local flow control settings accordingly
1778 * FULL flow control because we could not advertise RX
1779 * ONLY. Hence, we must now check to see if we need to
1780 * turn OFF the TRANSMISSION of PAUSE frames.
1781 */ 1832 */
1782 if (hw->fc.requested_mode == ixgbe_fc_full) { 1833 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
1783 hw->fc.current_mode = ixgbe_fc_full; 1834 anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);
1784 hw_dbg(hw, "Flow Control = FULL.\n"); 1835
1785 } else { 1836 if ((autoc_reg & IXGBE_AUTOC_SYM_PAUSE) &&
1837 (anlp1_reg & IXGBE_ANLP1_SYM_PAUSE)) {
1838 /*
1839 * Now we need to check if the user selected Rx ONLY
1840 * of pause frames. In this case, we had to advertise
1841 * FULL flow control because we could not advertise RX
1842 * ONLY. Hence, we must now check to see if we need to
1843 * turn OFF the TRANSMISSION of PAUSE frames.
1844 */
1845 if (hw->fc.requested_mode == ixgbe_fc_full) {
1846 hw->fc.current_mode = ixgbe_fc_full;
1847 hw_dbg(hw, "Flow Control = FULL.\n");
1848 } else {
1849 hw->fc.current_mode = ixgbe_fc_rx_pause;
1850 hw_dbg(hw, "Flow Control=RX PAUSE only\n");
1851 }
1852 } else if (!(autoc_reg & IXGBE_AUTOC_SYM_PAUSE) &&
1853 (autoc_reg & IXGBE_AUTOC_ASM_PAUSE) &&
1854 (anlp1_reg & IXGBE_ANLP1_SYM_PAUSE) &&
1855 (anlp1_reg & IXGBE_ANLP1_ASM_PAUSE)) {
1856 hw->fc.current_mode = ixgbe_fc_tx_pause;
1857 hw_dbg(hw, "Flow Control = TX PAUSE frames only.\n");
1858 } else if ((autoc_reg & IXGBE_AUTOC_SYM_PAUSE) &&
1859 (autoc_reg & IXGBE_AUTOC_ASM_PAUSE) &&
1860 !(anlp1_reg & IXGBE_ANLP1_SYM_PAUSE) &&
1861 (anlp1_reg & IXGBE_ANLP1_ASM_PAUSE)) {
1786 hw->fc.current_mode = ixgbe_fc_rx_pause; 1862 hw->fc.current_mode = ixgbe_fc_rx_pause;
1787 hw_dbg(hw, "Flow Control = RX PAUSE frames only.\n"); 1863 hw_dbg(hw, "Flow Control = RX PAUSE frames only.\n");
1864 } else {
1865 hw->fc.current_mode = ixgbe_fc_none;
1866 hw_dbg(hw, "Flow Control = NONE.\n");
1788 } 1867 }
1789 } else if (!(pcs_anadv_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
1790 (pcs_anadv_reg & IXGBE_PCS1GANA_ASM_PAUSE) &&
1791 (pcs_lpab_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
1792 (pcs_lpab_reg & IXGBE_PCS1GANA_ASM_PAUSE)) {
1793 hw->fc.current_mode = ixgbe_fc_tx_pause;
1794 hw_dbg(hw, "Flow Control = TX PAUSE frames only.\n");
1795 } else if ((pcs_anadv_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
1796 (pcs_anadv_reg & IXGBE_PCS1GANA_ASM_PAUSE) &&
1797 !(pcs_lpab_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
1798 (pcs_lpab_reg & IXGBE_PCS1GANA_ASM_PAUSE)) {
1799 hw->fc.current_mode = ixgbe_fc_rx_pause;
1800 hw_dbg(hw, "Flow Control = RX PAUSE frames only.\n");
1801 } else {
1802 hw->fc.current_mode = ixgbe_fc_none;
1803 hw_dbg(hw, "Flow Control = NONE.\n");
1804 } 1868 }
1805
1806 /* Record that current_mode is the result of a successful autoneg */ 1869 /* Record that current_mode is the result of a successful autoneg */
1807 hw->fc.fc_was_autonegged = true; 1870 hw->fc.fc_was_autonegged = true;
1808 1871
@@ -1919,7 +1982,7 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num)
1919#endif /* CONFIG_DCB */ 1982#endif /* CONFIG_DCB */
1920 default: 1983 default:
1921 hw_dbg(hw, "Flow control param set incorrectly\n"); 1984 hw_dbg(hw, "Flow control param set incorrectly\n");
1922 ret_val = -IXGBE_ERR_CONFIG; 1985 ret_val = IXGBE_ERR_CONFIG;
1923 goto out; 1986 goto out;
1924 break; 1987 break;
1925 } 1988 }
@@ -1927,9 +1990,6 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num)
1927 IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg); 1990 IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg);
1928 reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL); 1991 reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
1929 1992
1930 /* Enable and restart autoneg to inform the link partner */
1931 reg |= IXGBE_PCS1GLCTL_AN_ENABLE | IXGBE_PCS1GLCTL_AN_RESTART;
1932
1933 /* Disable AN timeout */ 1993 /* Disable AN timeout */
1934 if (hw->fc.strict_ieee) 1994 if (hw->fc.strict_ieee)
1935 reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN; 1995 reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN;
@@ -1937,6 +1997,70 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num)
1937 IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg); 1997 IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg);
1938 hw_dbg(hw, "Set up FC; PCS1GLCTL = 0x%08X\n", reg); 1998 hw_dbg(hw, "Set up FC; PCS1GLCTL = 0x%08X\n", reg);
1939 1999
2000 /*
2001 * Set up the 10G flow control advertisement registers so the HW
2002 * can do fc autoneg once the cable is plugged in. If we end up
2003 * using 1g instead, this is harmless.
2004 */
2005 reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
2006
2007 /*
2008 * The possible values of fc.requested_mode are:
2009 * 0: Flow control is completely disabled
2010 * 1: Rx flow control is enabled (we can receive pause frames,
2011 * but not send pause frames).
2012 * 2: Tx flow control is enabled (we can send pause frames but
2013 * we do not support receiving pause frames).
2014 * 3: Both Rx and Tx flow control (symmetric) are enabled.
2015 * other: Invalid.
2016 */
2017 switch (hw->fc.requested_mode) {
2018 case ixgbe_fc_none:
2019 /* Flow control completely disabled by software override. */
2020 reg &= ~(IXGBE_AUTOC_SYM_PAUSE | IXGBE_AUTOC_ASM_PAUSE);
2021 break;
2022 case ixgbe_fc_rx_pause:
2023 /*
2024 * Rx Flow control is enabled and Tx Flow control is
2025 * disabled by software override. Since there really
2026 * isn't a way to advertise that we are capable of RX
2027 * Pause ONLY, we will advertise that we support both
2028 * symmetric and asymmetric Rx PAUSE. Later, we will
2029 * disable the adapter's ability to send PAUSE frames.
2030 */
2031 reg |= (IXGBE_AUTOC_SYM_PAUSE | IXGBE_AUTOC_ASM_PAUSE);
2032 break;
2033 case ixgbe_fc_tx_pause:
2034 /*
2035 * Tx Flow control is enabled, and Rx Flow control is
2036 * disabled by software override.
2037 */
2038 reg |= (IXGBE_AUTOC_ASM_PAUSE);
2039 reg &= ~(IXGBE_AUTOC_SYM_PAUSE);
2040 break;
2041 case ixgbe_fc_full:
2042 /* Flow control (both Rx and Tx) is enabled by SW override. */
2043 reg |= (IXGBE_AUTOC_SYM_PAUSE | IXGBE_AUTOC_ASM_PAUSE);
2044 break;
2045#ifdef CONFIG_DCB
2046 case ixgbe_fc_pfc:
2047 goto out;
2048 break;
2049#endif /* CONFIG_DCB */
2050 default:
2051 hw_dbg(hw, "Flow control param set incorrectly\n");
2052 ret_val = IXGBE_ERR_CONFIG;
2053 goto out;
2054 break;
2055 }
2056 /*
2057 * AUTOC restart handles negotiation of 1G and 10G. There is
2058 * no need to set the PCS1GCTL register.
2059 */
2060 reg |= IXGBE_AUTOC_AN_RESTART;
2061 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg);
2062 hw_dbg(hw, "Set up FC; IXGBE_AUTOC = 0x%08X\n", reg);
2063
1940out: 2064out:
1941 return ret_val; 2065 return ret_val;
1942} 2066}
@@ -2000,7 +2124,7 @@ s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask)
2000 2124
2001 while (timeout) { 2125 while (timeout) {
2002 if (ixgbe_get_eeprom_semaphore(hw)) 2126 if (ixgbe_get_eeprom_semaphore(hw))
2003 return -IXGBE_ERR_SWFW_SYNC; 2127 return IXGBE_ERR_SWFW_SYNC;
2004 2128
2005 gssr = IXGBE_READ_REG(hw, IXGBE_GSSR); 2129 gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
2006 if (!(gssr & (fwmask | swmask))) 2130 if (!(gssr & (fwmask | swmask)))
@@ -2017,7 +2141,7 @@ s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask)
2017 2141
2018 if (!timeout) { 2142 if (!timeout) {
2019 hw_dbg(hw, "Driver can't access resource, GSSR timeout.\n"); 2143 hw_dbg(hw, "Driver can't access resource, GSSR timeout.\n");
2020 return -IXGBE_ERR_SWFW_SYNC; 2144 return IXGBE_ERR_SWFW_SYNC;
2021 } 2145 }
2022 2146
2023 gssr |= swmask; 2147 gssr |= swmask;
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index 53b0a6680254..fa314cb005a4 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -53,6 +53,10 @@ static struct ixgbe_stats ixgbe_gstrings_stats[] = {
53 {"tx_packets", IXGBE_STAT(net_stats.tx_packets)}, 53 {"tx_packets", IXGBE_STAT(net_stats.tx_packets)},
54 {"rx_bytes", IXGBE_STAT(net_stats.rx_bytes)}, 54 {"rx_bytes", IXGBE_STAT(net_stats.rx_bytes)},
55 {"tx_bytes", IXGBE_STAT(net_stats.tx_bytes)}, 55 {"tx_bytes", IXGBE_STAT(net_stats.tx_bytes)},
56 {"rx_pkts_nic", IXGBE_STAT(stats.gprc)},
57 {"tx_pkts_nic", IXGBE_STAT(stats.gptc)},
58 {"rx_bytes_nic", IXGBE_STAT(stats.gorc)},
59 {"tx_bytes_nic", IXGBE_STAT(stats.gotc)},
56 {"lsc_int", IXGBE_STAT(lsc_int)}, 60 {"lsc_int", IXGBE_STAT(lsc_int)},
57 {"tx_busy", IXGBE_STAT(tx_busy)}, 61 {"tx_busy", IXGBE_STAT(tx_busy)},
58 {"non_eop_descs", IXGBE_STAT(non_eop_descs)}, 62 {"non_eop_descs", IXGBE_STAT(non_eop_descs)},
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index c407bd9de0dd..28fbb9d281f9 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -49,7 +49,7 @@ char ixgbe_driver_name[] = "ixgbe";
49static const char ixgbe_driver_string[] = 49static const char ixgbe_driver_string[] =
50 "Intel(R) 10 Gigabit PCI Express Network Driver"; 50 "Intel(R) 10 Gigabit PCI Express Network Driver";
51 51
52#define DRV_VERSION "2.0.37-k2" 52#define DRV_VERSION "2.0.44-k2"
53const char ixgbe_driver_version[] = DRV_VERSION; 53const char ixgbe_driver_version[] = DRV_VERSION;
54static char ixgbe_copyright[] = "Copyright (c) 1999-2009 Intel Corporation."; 54static char ixgbe_copyright[] = "Copyright (c) 1999-2009 Intel Corporation.";
55 55
@@ -1885,12 +1885,29 @@ static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
1885 IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0); 1885 IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
1886 adapter->tx_ring[i].head = IXGBE_TDH(j); 1886 adapter->tx_ring[i].head = IXGBE_TDH(j);
1887 adapter->tx_ring[i].tail = IXGBE_TDT(j); 1887 adapter->tx_ring[i].tail = IXGBE_TDT(j);
1888 /* Disable Tx Head Writeback RO bit, since this hoses 1888 /*
1889 * Disable Tx Head Writeback RO bit, since this hoses
1889 * bookkeeping if things aren't delivered in order. 1890 * bookkeeping if things aren't delivered in order.
1890 */ 1891 */
1891 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j)); 1892 switch (hw->mac.type) {
1893 case ixgbe_mac_82598EB:
1894 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
1895 break;
1896 case ixgbe_mac_82599EB:
1897 default:
1898 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
1899 break;
1900 }
1892 txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN; 1901 txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
1893 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl); 1902 switch (hw->mac.type) {
1903 case ixgbe_mac_82598EB:
1904 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
1905 break;
1906 case ixgbe_mac_82599EB:
1907 default:
1908 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
1909 break;
1910 }
1894 } 1911 }
1895 if (hw->mac.type == ixgbe_mac_82599EB) { 1912 if (hw->mac.type == ixgbe_mac_82599EB) {
1896 /* We enable 8 traffic classes, DCB only */ 1913 /* We enable 8 traffic classes, DCB only */
@@ -4432,10 +4449,13 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
4432 4449
4433 /* 82598 hardware only has a 32 bit counter in the high register */ 4450 /* 82598 hardware only has a 32 bit counter in the high register */
4434 if (hw->mac.type == ixgbe_mac_82599EB) { 4451 if (hw->mac.type == ixgbe_mac_82599EB) {
4452 u64 tmp;
4435 adapter->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCL); 4453 adapter->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCL);
4436 IXGBE_READ_REG(hw, IXGBE_GORCH); /* to clear */ 4454 tmp = IXGBE_READ_REG(hw, IXGBE_GORCH) & 0xF; /* 4 high bits of GORC */
4455 adapter->stats.gorc += (tmp << 32);
4437 adapter->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL); 4456 adapter->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL);
4438 IXGBE_READ_REG(hw, IXGBE_GOTCH); /* to clear */ 4457 tmp = IXGBE_READ_REG(hw, IXGBE_GOTCH) & 0xF; /* 4 high bits of GOTC */
4458 adapter->stats.gotc += (tmp << 32);
4439 adapter->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORL); 4459 adapter->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORL);
4440 IXGBE_READ_REG(hw, IXGBE_TORH); /* to clear */ 4460 IXGBE_READ_REG(hw, IXGBE_TORH); /* to clear */
4441 adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT); 4461 adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
@@ -5071,7 +5091,6 @@ static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb,
5071 /* Right now, we support IPv4 only */ 5091 /* Right now, we support IPv4 only */
5072 struct ixgbe_atr_input atr_input; 5092 struct ixgbe_atr_input atr_input;
5073 struct tcphdr *th; 5093 struct tcphdr *th;
5074 struct udphdr *uh;
5075 struct iphdr *iph = ip_hdr(skb); 5094 struct iphdr *iph = ip_hdr(skb);
5076 struct ethhdr *eth = (struct ethhdr *)skb->data; 5095 struct ethhdr *eth = (struct ethhdr *)skb->data;
5077 u16 vlan_id, src_port, dst_port, flex_bytes; 5096 u16 vlan_id, src_port, dst_port, flex_bytes;
@@ -5085,12 +5104,6 @@ static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb,
5085 dst_port = th->dest; 5104 dst_port = th->dest;
5086 l4type |= IXGBE_ATR_L4TYPE_TCP; 5105 l4type |= IXGBE_ATR_L4TYPE_TCP;
5087 /* l4type IPv4 type is 0, no need to assign */ 5106 /* l4type IPv4 type is 0, no need to assign */
5088 } else if(iph->protocol == IPPROTO_UDP) {
5089 uh = udp_hdr(skb);
5090 src_port = uh->source;
5091 dst_port = uh->dest;
5092 l4type |= IXGBE_ATR_L4TYPE_UDP;
5093 /* l4type IPv4 type is 0, no need to assign */
5094 } else { 5107 } else {
5095 /* Unsupported L4 header, just bail here */ 5108 /* Unsupported L4 header, just bail here */
5096 return; 5109 return;
@@ -5494,12 +5507,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
5494 goto err_pci_reg; 5507 goto err_pci_reg;
5495 } 5508 }
5496 5509
5497 err = pci_enable_pcie_error_reporting(pdev); 5510 pci_enable_pcie_error_reporting(pdev);
5498 if (err) {
5499 dev_err(&pdev->dev, "pci_enable_pcie_error_reporting failed "
5500 "0x%x\n", err);
5501 /* non-fatal, continue */
5502 }
5503 5511
5504 pci_set_master(pdev); 5512 pci_set_master(pdev);
5505 pci_save_state(pdev); 5513 pci_save_state(pdev);
@@ -5808,7 +5816,6 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
5808{ 5816{
5809 struct net_device *netdev = pci_get_drvdata(pdev); 5817 struct net_device *netdev = pci_get_drvdata(pdev);
5810 struct ixgbe_adapter *adapter = netdev_priv(netdev); 5818 struct ixgbe_adapter *adapter = netdev_priv(netdev);
5811 int err;
5812 5819
5813 set_bit(__IXGBE_DOWN, &adapter->state); 5820 set_bit(__IXGBE_DOWN, &adapter->state);
5814 /* clear the module not found bit to make sure the worker won't 5821 /* clear the module not found bit to make sure the worker won't
@@ -5859,10 +5866,7 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
5859 5866
5860 free_netdev(netdev); 5867 free_netdev(netdev);
5861 5868
5862 err = pci_disable_pcie_error_reporting(pdev); 5869 pci_disable_pcie_error_reporting(pdev);
5863 if (err)
5864 dev_err(&pdev->dev,
5865 "pci_disable_pcie_error_reporting failed 0x%x\n", err);
5866 5870
5867 pci_disable_device(pdev); 5871 pci_disable_device(pdev);
5868} 5872}
diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h
index 8761d7899f7d..7c93e923bf2e 100644
--- a/drivers/net/ixgbe/ixgbe_type.h
+++ b/drivers/net/ixgbe/ixgbe_type.h
@@ -1336,6 +1336,8 @@
1336#define IXGBE_AUTOC_KX4_SUPP 0x80000000 1336#define IXGBE_AUTOC_KX4_SUPP 0x80000000
1337#define IXGBE_AUTOC_KX_SUPP 0x40000000 1337#define IXGBE_AUTOC_KX_SUPP 0x40000000
1338#define IXGBE_AUTOC_PAUSE 0x30000000 1338#define IXGBE_AUTOC_PAUSE 0x30000000
1339#define IXGBE_AUTOC_ASM_PAUSE 0x20000000
1340#define IXGBE_AUTOC_SYM_PAUSE 0x10000000
1339#define IXGBE_AUTOC_RF 0x08000000 1341#define IXGBE_AUTOC_RF 0x08000000
1340#define IXGBE_AUTOC_PD_TMR 0x06000000 1342#define IXGBE_AUTOC_PD_TMR 0x06000000
1341#define IXGBE_AUTOC_AN_RX_LOOSE 0x01000000 1343#define IXGBE_AUTOC_AN_RX_LOOSE 0x01000000
@@ -1404,6 +1406,8 @@
1404#define IXGBE_LINK_UP_TIME 90 /* 9.0 Seconds */ 1406#define IXGBE_LINK_UP_TIME 90 /* 9.0 Seconds */
1405#define IXGBE_AUTO_NEG_TIME 45 /* 4.5 Seconds */ 1407#define IXGBE_AUTO_NEG_TIME 45 /* 4.5 Seconds */
1406 1408
1409#define IXGBE_LINKS2_AN_SUPPORTED 0x00000040
1410
1407/* PCS1GLSTA Bit Masks */ 1411/* PCS1GLSTA Bit Masks */
1408#define IXGBE_PCS1GLSTA_LINK_OK 1 1412#define IXGBE_PCS1GLSTA_LINK_OK 1
1409#define IXGBE_PCS1GLSTA_SYNK_OK 0x10 1413#define IXGBE_PCS1GLSTA_SYNK_OK 0x10
@@ -1424,6 +1428,11 @@
1424#define IXGBE_PCS1GLCTL_AN_ENABLE 0x10000 1428#define IXGBE_PCS1GLCTL_AN_ENABLE 0x10000
1425#define IXGBE_PCS1GLCTL_AN_RESTART 0x20000 1429#define IXGBE_PCS1GLCTL_AN_RESTART 0x20000
1426 1430
1431/* ANLP1 Bit Masks */
1432#define IXGBE_ANLP1_PAUSE 0x0C00
1433#define IXGBE_ANLP1_SYM_PAUSE 0x0400
1434#define IXGBE_ANLP1_ASM_PAUSE 0x0800
1435
1427/* SW Semaphore Register bitmasks */ 1436/* SW Semaphore Register bitmasks */
1428#define IXGBE_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */ 1437#define IXGBE_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */
1429#define IXGBE_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */ 1438#define IXGBE_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */
diff --git a/drivers/net/ks8851_mll.c b/drivers/net/ks8851_mll.c
new file mode 100644
index 000000000000..0be14d702beb
--- /dev/null
+++ b/drivers/net/ks8851_mll.c
@@ -0,0 +1,1697 @@
1/**
2 * drivers/net/ks8851_mll.c
3 * Copyright (c) 2009 Micrel Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
17 */
18
19/**
20 * Supports:
21 * KS8851 16bit MLL chip from Micrel Inc.
22 */
23
24#include <linux/module.h>
25#include <linux/kernel.h>
26#include <linux/netdevice.h>
27#include <linux/etherdevice.h>
28#include <linux/ethtool.h>
29#include <linux/cache.h>
30#include <linux/crc32.h>
31#include <linux/mii.h>
32#include <linux/platform_device.h>
33#include <linux/delay.h>
34
35#define DRV_NAME "ks8851_mll"
36
37static u8 KS_DEFAULT_MAC_ADDRESS[] = { 0x00, 0x10, 0xA1, 0x86, 0x95, 0x11 };
38#define MAX_RECV_FRAMES 32
39#define MAX_BUF_SIZE 2048
40#define TX_BUF_SIZE 2000
41#define RX_BUF_SIZE 2000
42
43#define KS_CCR 0x08
44#define CCR_EEPROM (1 << 9)
45#define CCR_SPI (1 << 8)
46#define CCR_8BIT (1 << 7)
47#define CCR_16BIT (1 << 6)
48#define CCR_32BIT (1 << 5)
49#define CCR_SHARED (1 << 4)
50#define CCR_32PIN (1 << 0)
51
52/* MAC address registers */
53#define KS_MARL 0x10
54#define KS_MARM 0x12
55#define KS_MARH 0x14
56
57#define KS_OBCR 0x20
58#define OBCR_ODS_16MA (1 << 6)
59
60#define KS_EEPCR 0x22
61#define EEPCR_EESA (1 << 4)
62#define EEPCR_EESB (1 << 3)
63#define EEPCR_EEDO (1 << 2)
64#define EEPCR_EESCK (1 << 1)
65#define EEPCR_EECS (1 << 0)
66
67#define KS_MBIR 0x24
68#define MBIR_TXMBF (1 << 12)
69#define MBIR_TXMBFA (1 << 11)
70#define MBIR_RXMBF (1 << 4)
71#define MBIR_RXMBFA (1 << 3)
72
73#define KS_GRR 0x26
74#define GRR_QMU (1 << 1)
75#define GRR_GSR (1 << 0)
76
77#define KS_WFCR 0x2A
78#define WFCR_MPRXE (1 << 7)
79#define WFCR_WF3E (1 << 3)
80#define WFCR_WF2E (1 << 2)
81#define WFCR_WF1E (1 << 1)
82#define WFCR_WF0E (1 << 0)
83
84#define KS_WF0CRC0 0x30
85#define KS_WF0CRC1 0x32
86#define KS_WF0BM0 0x34
87#define KS_WF0BM1 0x36
88#define KS_WF0BM2 0x38
89#define KS_WF0BM3 0x3A
90
91#define KS_WF1CRC0 0x40
92#define KS_WF1CRC1 0x42
93#define KS_WF1BM0 0x44
94#define KS_WF1BM1 0x46
95#define KS_WF1BM2 0x48
96#define KS_WF1BM3 0x4A
97
98#define KS_WF2CRC0 0x50
99#define KS_WF2CRC1 0x52
100#define KS_WF2BM0 0x54
101#define KS_WF2BM1 0x56
102#define KS_WF2BM2 0x58
103#define KS_WF2BM3 0x5A
104
105#define KS_WF3CRC0 0x60
106#define KS_WF3CRC1 0x62
107#define KS_WF3BM0 0x64
108#define KS_WF3BM1 0x66
109#define KS_WF3BM2 0x68
110#define KS_WF3BM3 0x6A
111
112#define KS_TXCR 0x70
113#define TXCR_TCGICMP (1 << 8)
114#define TXCR_TCGUDP (1 << 7)
115#define TXCR_TCGTCP (1 << 6)
116#define TXCR_TCGIP (1 << 5)
117#define TXCR_FTXQ (1 << 4)
118#define TXCR_TXFCE (1 << 3)
119#define TXCR_TXPE (1 << 2)
120#define TXCR_TXCRC (1 << 1)
121#define TXCR_TXE (1 << 0)
122
123#define KS_TXSR 0x72
124#define TXSR_TXLC (1 << 13)
125#define TXSR_TXMC (1 << 12)
126#define TXSR_TXFID_MASK (0x3f << 0)
127#define TXSR_TXFID_SHIFT (0)
128#define TXSR_TXFID_GET(_v) (((_v) >> 0) & 0x3f)
129
130
131#define KS_RXCR1 0x74
132#define RXCR1_FRXQ (1 << 15)
133#define RXCR1_RXUDPFCC (1 << 14)
134#define RXCR1_RXTCPFCC (1 << 13)
135#define RXCR1_RXIPFCC (1 << 12)
136#define RXCR1_RXPAFMA (1 << 11)
137#define RXCR1_RXFCE (1 << 10)
138#define RXCR1_RXEFE (1 << 9)
139#define RXCR1_RXMAFMA (1 << 8)
140#define RXCR1_RXBE (1 << 7)
141#define RXCR1_RXME (1 << 6)
142#define RXCR1_RXUE (1 << 5)
143#define RXCR1_RXAE (1 << 4)
144#define RXCR1_RXINVF (1 << 1)
145#define RXCR1_RXE (1 << 0)
146#define RXCR1_FILTER_MASK (RXCR1_RXINVF | RXCR1_RXAE | \
147 RXCR1_RXMAFMA | RXCR1_RXPAFMA)
148
149#define KS_RXCR2 0x76
150#define RXCR2_SRDBL_MASK (0x7 << 5)
151#define RXCR2_SRDBL_SHIFT (5)
152#define RXCR2_SRDBL_4B (0x0 << 5)
153#define RXCR2_SRDBL_8B (0x1 << 5)
154#define RXCR2_SRDBL_16B (0x2 << 5)
155#define RXCR2_SRDBL_32B (0x3 << 5)
156/* #define RXCR2_SRDBL_FRAME (0x4 << 5) */
157#define RXCR2_IUFFP (1 << 4)
#define RXCR2_RXIUFCEZ		(1 << 3)
#define RXCR2_UDPLFE		(1 << 2)
#define RXCR2_RXICMPFCC		(1 << 1)
#define RXCR2_RXSAF		(1 << 0)

/* TX memory information: free TX FIFO space */
#define KS_TXMIR		0x78

/* RX frame header status (per received frame) */
#define KS_RXFHSR		0x7C
#define RXFSHR_RXFV		(1 << 15)
#define RXFSHR_RXICMPFCS	(1 << 13)
#define RXFSHR_RXIPFCS		(1 << 12)
#define RXFSHR_RXTCPFCS		(1 << 11)
#define RXFSHR_RXUDPFCS		(1 << 10)
#define RXFSHR_RXBF		(1 << 7)
#define RXFSHR_RXMF		(1 << 6)
#define RXFSHR_RXUF		(1 << 5)
#define RXFSHR_RXMR		(1 << 4)
#define RXFSHR_RXFT		(1 << 3)
#define RXFSHR_RXFTL		(1 << 2)
#define RXFSHR_RXRF		(1 << 1)
#define RXFSHR_RXCE		(1 << 0)
/* any of these status bits marks the frame as undeliverable */
#define	RXFSHR_ERR		(RXFSHR_RXCE | RXFSHR_RXRF |\
				RXFSHR_RXFTL | RXFSHR_RXMR |\
				RXFSHR_RXICMPFCS | RXFSHR_RXIPFCS |\
				RXFSHR_RXTCPFCS)
/* RX frame header byte count (length of the frame at the head of the QMU) */
#define KS_RXFHBCR		0x7E
#define RXFHBCR_CNT_MASK	0x0FFF

/* TX queue command register */
#define KS_TXQCR		0x80
#define TXQCR_AETFE		(1 << 2)
#define TXQCR_TXQMAM		(1 << 1)
#define TXQCR_METFE		(1 << 0)

/* RX queue command register */
#define KS_RXQCR		0x82
#define RXQCR_RXDTTS		(1 << 12)
#define RXQCR_RXDBCTS		(1 << 11)
#define RXQCR_RXFCTS		(1 << 10)
#define RXQCR_RXIPHTOE		(1 << 9)
#define RXQCR_RXDTTE		(1 << 7)
#define RXQCR_RXDBCTE		(1 << 6)
#define RXQCR_RXFCTE		(1 << 5)
#define RXQCR_ADRFE		(1 << 4)
#define RXQCR_SDA		(1 << 3)
#define RXQCR_RRXEF		(1 << 0)
#define RXQCR_CMD_CNTL		(RXQCR_RXFCTE|RXQCR_ADRFE)

/* TX frame data pointer */
#define KS_TXFDPR		0x84
#define TXFDPR_TXFPAI		(1 << 14)
#define TXFDPR_TXFP_MASK	(0x7ff << 0)
#define TXFDPR_TXFP_SHIFT	(0)

/* RX frame data pointer */
#define KS_RXFDPR		0x86
#define RXFDPR_RXFPAI		(1 << 14)

/* RX duration and byte-count timer thresholds */
#define KS_RXDTTR		0x8C
#define KS_RXDBCTR		0x8E

/* interrupt enable / status registers (shared bit layout) */
#define KS_IER			0x90
#define KS_ISR			0x92
#define IRQ_LCI			(1 << 15)
#define IRQ_TXI			(1 << 14)
#define IRQ_RXI			(1 << 13)
#define IRQ_RXOI		(1 << 11)
#define IRQ_TXPSI		(1 << 9)
#define IRQ_RXPSI		(1 << 8)
#define IRQ_TXSAI		(1 << 6)
#define IRQ_RXWFDI		(1 << 5)
#define IRQ_RXMPDI		(1 << 4)
#define IRQ_LDI			(1 << 3)
#define IRQ_EDI			(1 << 2)
#define IRQ_SPIBEI		(1 << 1)
#define IRQ_DEDI		(1 << 0)

/* RX frame count & threshold register */
#define KS_RXFCTR		0x9C
#define RXFCTR_THRESHOLD_MASK	0x00FF

/* NOTE(review): KS_RXFC addresses the high byte of RXFCTR; the
 * RXFCTR_RXFC_* masks below are applied to the full 16-bit RXFCTR
 * read (frame count in bits 15:8) -- confirm against the datasheet.
 */
#define KS_RXFC			0x9D
#define RXFCTR_RXFC_MASK	(0xff << 8)
#define RXFCTR_RXFC_SHIFT	(8)
#define RXFCTR_RXFC_GET(_v)	(((_v) >> 8) & 0xff)
#define RXFCTR_RXFCT_MASK	(0xff << 0)
#define RXFCTR_RXFCT_SHIFT	(0)

#define KS_TXNTFSR		0x9E

/* multicast hash table registers: four 16-bit words = 64 hash bits */
#define KS_MAHTR0		0xA0
#define KS_MAHTR1		0xA2
#define KS_MAHTR2		0xA4
#define KS_MAHTR3		0xA6

/* flow-control low/high/overrun watermark registers */
#define KS_FCLWR		0xB0
#define KS_FCHWR		0xB2
#define KS_FCOWR		0xB4

/* chip ID and revision */
#define KS_CIDER		0xC0
#define CIDER_ID		0x8870
#define CIDER_REV_MASK		(0x7 << 1)
#define CIDER_REV_SHIFT		(1)
#define CIDER_REV_GET(_v)	(((_v) >> 1) & 0x7)

#define KS_CGCR			0xC6

/* indirect access control (MIB counter access) */
#define KS_IACR			0xC8
#define IACR_RDEN		(1 << 12)
#define IACR_TSEL_MASK		(0x3 << 10)
#define IACR_TSEL_SHIFT		(10)
#define IACR_TSEL_MIB		(0x3 << 10)
#define IACR_ADDR_MASK		(0x1f << 0)
#define IACR_ADDR_SHIFT		(0)

/* indirect access data (low/high words) */
#define KS_IADLR		0xD0
#define KS_IAHDR		0xD2

/* power management event control */
#define KS_PMECR		0xD4
#define PMECR_PME_DELAY		(1 << 14)
#define PMECR_PME_POL		(1 << 12)
#define PMECR_WOL_WAKEUP	(1 << 11)
#define PMECR_WOL_MAGICPKT	(1 << 10)
#define PMECR_WOL_LINKUP	(1 << 9)
#define PMECR_WOL_ENERGY	(1 << 8)
#define PMECR_AUTO_WAKE_EN	(1 << 7)
#define PMECR_WAKEUP_NORMAL	(1 << 6)
#define PMECR_WKEVT_MASK	(0xf << 2)
#define PMECR_WKEVT_SHIFT	(2)
#define PMECR_WKEVT_GET(_v)	(((_v) >> 2) & 0xf)
#define PMECR_WKEVT_ENERGY	(0x1 << 2)
#define PMECR_WKEVT_LINK	(0x2 << 2)
#define PMECR_WKEVT_MAGICPKT	(0x4 << 2)
#define PMECR_WKEVT_FRAME	(0x8 << 2)
#define PMECR_PM_MASK		(0x3 << 0)
#define PMECR_PM_SHIFT		(0)
#define PMECR_PM_NORMAL		(0x0 << 0)
#define PMECR_PM_ENERGY		(0x1 << 0)
#define PMECR_PM_SOFTDOWN	(0x2 << 0)
#define PMECR_PM_POWERSAVE	(0x3 << 0)

/* Standard MII PHY data */
#define KS_P1MBCR		0xE4
#define P1MBCR_FORCE_FDX	(1 << 8)

#define KS_P1MBSR		0xE6
#define P1MBSR_AN_COMPLETE	(1 << 5)
#define P1MBSR_AN_CAPABLE	(1 << 3)
#define P1MBSR_LINK_UP		(1 << 2)

#define KS_PHY1ILR		0xE8
#define KS_PHY1IHR		0xEA
#define KS_P1ANAR		0xEC
#define KS_P1ANLPR		0xEE

/* port 1 special control / autonegotiation config */
#define KS_P1SCLMD		0xF4
#define P1SCLMD_LEDOFF		(1 << 15)
#define P1SCLMD_TXIDS		(1 << 14)
#define P1SCLMD_RESTARTAN	(1 << 13)
#define P1SCLMD_DISAUTOMDIX	(1 << 10)
#define P1SCLMD_FORCEMDIX	(1 << 9)
#define P1SCLMD_AUTONEGEN	(1 << 7)
#define P1SCLMD_FORCE100	(1 << 6)
#define P1SCLMD_FORCEFDX	(1 << 5)
#define P1SCLMD_ADV_FLOW	(1 << 4)
#define P1SCLMD_ADV_100BT_FDX	(1 << 3)
#define P1SCLMD_ADV_100BT_HDX	(1 << 2)
#define P1SCLMD_ADV_10BT_FDX	(1 << 1)
#define P1SCLMD_ADV_10BT_HDX	(1 << 0)

/* port 1 control */
#define KS_P1CR			0xF6
#define P1CR_HP_MDIX		(1 << 15)
#define P1CR_REV_POL		(1 << 13)
#define P1CR_OP_100M		(1 << 10)
#define P1CR_OP_FDX		(1 << 9)
#define P1CR_OP_MDI		(1 << 7)
#define P1CR_AN_DONE		(1 << 6)
#define P1CR_LINK_GOOD		(1 << 5)
#define P1CR_PNTR_FLOW		(1 << 4)
#define P1CR_PNTR_100BT_FDX	(1 << 3)
#define P1CR_PNTR_100BT_HDX	(1 << 2)
#define P1CR_PNTR_10BT_FDX	(1 << 1)
#define P1CR_PNTR_10BT_HDX	(1 << 0)

/* TX Frame control (first word written ahead of each TX frame) */

#define TXFR_TXIC		(1 << 15)
#define TXFR_TXFID_MASK		(0x3f << 0)
#define TXFR_TXFID_SHIFT	(0)

/* port 1 status */
#define KS_P1SR			0xF8
#define P1SR_HP_MDIX		(1 << 15)
#define P1SR_REV_POL		(1 << 13)
#define P1SR_OP_100M		(1 << 10)
#define P1SR_OP_FDX		(1 << 9)
#define P1SR_OP_MDI		(1 << 7)
#define P1SR_AN_DONE		(1 << 6)
#define P1SR_LINK_GOOD		(1 << 5)
#define P1SR_PNTR_FLOW		(1 << 4)
#define P1SR_PNTR_100BT_FDX	(1 << 3)
#define P1SR_PNTR_100BT_HDX	(1 << 2)
#define P1SR_PNTR_10BT_FDX	(1 << 1)
#define P1SR_PNTR_10BT_HDX	(1 << 0)

/* detected host bus width (see ks_read_config) */
#define	ENUM_BUS_NONE		0
#define	ENUM_BUS_8BIT		1
#define	ENUM_BUS_16BIT		2
#define	ENUM_BUS_32BIT		3

#define MAX_MCAST_LST		32	/* software multicast list capacity */
#define HW_MCAST_SIZE		8	/* hash table size in bytes (64 bits) */
#define MAC_ADDR_LEN		6
364
/**
 * union ks_tx_hdr - tx header data
 * @txb: The header as bytes
 * @txw: The header as 16bit, little-endian words
 *
 * A dual representation of the tx header data to allow
 * access to individual bytes, and to allow 16bit accesses
 * with 16bit alignment.
 *
 * txw[0] carries the TX frame control word (TXFR_*), txw[1] the length.
 */
union ks_tx_hdr {
	u8	txb[4];
	__le16	txw[2];
};
378
/**
 * struct ks_net - KS8851 driver private data
 * @netdev	: The network device we're bound to
 * @hw_addr	: start address of data register.
 * @hw_addr_cmd	: start address of command register.
 * @txh		: temporary buffer to save status/length.
 * @lock	: Lock to ensure that the device is not accessed when busy.
 * @pdev	: Pointer to platform device.
 * @mii		: The MII state information for the mii calls.
 * @frame_head_info	: frame header information for multi-pkt rx.
 * @statelock	: Lock on this structure for tx list.
 * @msg_enable	: The message flags controlling driver output (see ethtool).
 * @frame_cnt	: number of frames received.
 * @bus_width	: i/o bus width.
 * @irq		: irq number assigned to this device.
 * @rc_rxqcr	: Cached copy of KS_RXQCR.
 * @rc_txcr	: Cached copy of KS_TXCR.
 * @rc_ier	: Cached copy of KS_IER.
 * @sharedbus	: Multiplex (addr and data bus) mode indicator.
 * @cmd_reg_cache	: command register cached.
 * @cmd_reg_cache_int	: command register cached. Used in the irq handler.
 * @promiscuous	: promiscuous mode indicator.
 * @all_mcast	: multicast indicator.
 * @mcast_lst_size	: size of multicast list.
 * @mcast_lst	: multicast list.
 * @mcast_bits	: multicast enabled (64-bit hash table shadow).
 * @mac_addr	: MAC address assigned to this device.
 * @fid		: frame id.
 * @extra_byte	: number of extra bytes prepended to each rx pkt.
 * @enabled	: indicator this device works.
 *
 * The @lock ensures that the chip is protected when certain operations are
 * in progress. When the read or write packet transfer is in progress, most
 * of the chip registers are not accessible until the transfer is finished and
 * the DMA has been de-asserted.
 *
 * The @statelock is used to protect information in the structure which may
 * need to be accessed via several sources, such as the network driver layer
 * or one of the work queues.
 *
 */

/* Receive multiplex framer header info */
struct type_frame_head {
	u16	sts;         /* Frame status */
	u16	len;         /* Byte count */
};

struct ks_net {
	struct net_device	*netdev;
	void __iomem    	*hw_addr;
	void __iomem    	*hw_addr_cmd;
	union ks_tx_hdr		txh ____cacheline_aligned;
	struct mutex      	lock; /* spinlock to be interrupt safe */
	struct platform_device *pdev;
	struct mii_if_info	mii;
	struct type_frame_head	*frame_head_info;
	spinlock_t		statelock;
	u32			msg_enable;
	u32			frame_cnt;
	int			bus_width;
	int             	irq;

	u16			rc_rxqcr;
	u16			rc_txcr;
	u16			rc_ier;
	u16			sharedbus;
	u16			cmd_reg_cache;
	u16			cmd_reg_cache_int;
	u16			promiscuous;
	u16			all_mcast;
	u16			mcast_lst_size;
	u8			mcast_lst[MAX_MCAST_LST][MAC_ADDR_LEN];
	u8			mcast_bits[HW_MCAST_SIZE];
	u8			mac_addr[6];
	u8			fid;
	u8			extra_byte;
	u8			enabled;
};
458
/* driver-wide default message level (see ethtool msglvl) */
static int msg_enable;

/* logging helpers routed through the underlying platform device */
#define ks_info(_ks, _msg...) dev_info(&(_ks)->pdev->dev, _msg)
#define ks_warn(_ks, _msg...) dev_warn(&(_ks)->pdev->dev, _msg)
#define ks_dbg(_ks, _msg...) dev_dbg(&(_ks)->pdev->dev, _msg)
#define ks_err(_ks, _msg...) dev_err(&(_ks)->pdev->dev, _msg)

/* byte-enable bits ORed into the command-register value to select
 * which byte lane(s) of a 16/32-bit access are valid
 */
#define BE3             0x8000      /* Byte Enable 3 */
#define BE2             0x4000      /* Byte Enable 2 */
#define BE1             0x2000      /* Byte Enable 1 */
#define BE0             0x1000      /* Byte Enable 0 */
470
/**
 * register read/write calls.
 *
 * All these calls issue transactions to access the chip's registers. They
 * all require that the necessary lock is held to prevent accesses when the
 * chip is busy transferring packet data (RX/TX FIFO accesses).
 */

/**
 * ks_rdreg8 - read 8 bit register from device
 * @ks	  : The chip information
 * @offset: The register address
 *
 * Read a 8bit register from the chip, returning the result
 */
static u8 ks_rdreg8(struct ks_net *ks, int offset)
{
	u16 data;
	/* select the byte lane the register occupies within the 32-bit
	 * register window; odd addresses come back in the high byte of
	 * the 16-bit read, hence the 8-bit data shift below
	 */
	u8 shift_bit = offset & 0x03;
	u8 shift_data = (offset & 1) << 3;
	/* cache the command word: the chip cannot read it back, so the
	 * IRQ handler saves/restores this cache (see ks_save_cmd_reg)
	 */
	ks->cmd_reg_cache = (u16) offset | (u16)(BE0 << shift_bit);
	iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
	data = ioread16(ks->hw_addr);
	return (u8)(data >> shift_data);
}
496
/**
 * ks_rdreg16 - read 16 bit register from device
 * @ks	  : The chip information
 * @offset: The register address
 *
 * Read a 16bit register from the chip, returning the result.
 * The two byte-enables are shifted by bit 1 of the offset so that
 * both halves of a 16-bit register on either word boundary are
 * selected; the command word is cached for the IRQ save/restore.
 */

static u16 ks_rdreg16(struct ks_net *ks, int offset)
{
	ks->cmd_reg_cache = (u16)offset | ((BE1 | BE0) << (offset & 0x02));
	iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
	return ioread16(ks->hw_addr);
}
511
/**
 * ks_wrreg8 - write 8bit register value to chip
 * @ks: The chip information
 * @offset: The register address
 * @value: The value to write
 *
 * The value is replicated into the byte lane selected by the low
 * address bits; the matching byte enable tells the chip which lane
 * is valid.
 */
static void ks_wrreg8(struct ks_net *ks, int offset, u8 value)
{
	u8  shift_bit = (offset & 0x03);
	u16 value_write = (u16)(value << ((offset & 1) << 3));
	ks->cmd_reg_cache = (u16)offset | (BE0 << shift_bit);
	iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
	iowrite16(value_write, ks->hw_addr);
}
527
/**
 * ks_wrreg16 - write 16bit register value to chip
 * @ks: The chip information
 * @offset: The register address
 * @value: The value to write
 *
 * Mirrors ks_rdreg16: both byte enables selected for the addressed
 * 16-bit word, command word cached for the IRQ save/restore path.
 */

static void ks_wrreg16(struct ks_net *ks, int offset, u16 value)
{
	ks->cmd_reg_cache = (u16)offset | ((BE1 | BE0) << (offset & 0x02));
	iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
	iowrite16(value, ks->hw_addr);
}
542
543/**
544 * ks_inblk - read a block of data from QMU. This is called after sudo DMA mode enabled.
545 * @ks: The chip state
546 * @wptr: buffer address to save data
547 * @len: length in byte to read
548 *
549 */
550static inline void ks_inblk(struct ks_net *ks, u16 *wptr, u32 len)
551{
552 len >>= 1;
553 while (len--)
554 *wptr++ = (u16)ioread16(ks->hw_addr);
555}
556
557/**
558 * ks_outblk - write data to QMU. This is called after sudo DMA mode enabled.
559 * @ks: The chip information
560 * @wptr: buffer address
561 * @len: length in byte to write
562 *
563 */
564static inline void ks_outblk(struct ks_net *ks, u16 *wptr, u32 len)
565{
566 len >>= 1;
567 while (len--)
568 iowrite16(*wptr++, ks->hw_addr);
569}
570
571/**
572 * ks_tx_fifo_space - return the available hardware buffer size.
573 * @ks: The chip information
574 *
575 */
576static inline u16 ks_tx_fifo_space(struct ks_net *ks)
577{
578 return ks_rdreg16(ks, KS_TXMIR) & 0x1fff;
579}
580
/**
 * ks_save_cmd_reg - save the command register from the cache.
 * @ks: The chip information
 *
 * Must be the first action in the IRQ handler so the interrupted
 * register access can be resumed afterwards (see ks_restore_cmd_reg).
 */
static inline void ks_save_cmd_reg(struct ks_net *ks)
{
	/*ks8851 MLL has a bug to read back the command register.
	* So rely on software to save the content of command register.
	*/
	ks->cmd_reg_cache_int = ks->cmd_reg_cache;
}
593
/**
 * ks_restore_cmd_reg - restore the command register from the cache and
 * 	write to hardware register.
 * @ks: The chip information
 *
 * Counterpart of ks_save_cmd_reg(); must be the last action in the IRQ
 * handler so the interrupted register access sees its command word again.
 */
static inline void ks_restore_cmd_reg(struct ks_net *ks)
{
	ks->cmd_reg_cache = ks->cmd_reg_cache_int;
	iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
}
605
/**
 * ks_set_powermode - set power mode of the device
 * @ks: The chip information
 * @pwrmode: The power mode value to write to KS_PMECR.
 *
 * Change the power mode of the chip.
 */
static void ks_set_powermode(struct ks_net *ks, unsigned pwrmode)
{
	unsigned pmecr;

	if (netif_msg_hw(ks))
		ks_dbg(ks, "setting power mode %d\n", pwrmode);

	/* NOTE(review): dummy read of GRR, result discarded -- presumably a
	 * chip access-sequencing workaround; confirm against the datasheet.
	 */
	ks_rdreg16(ks, KS_GRR);
	pmecr = ks_rdreg16(ks, KS_PMECR);
	pmecr &= ~PMECR_PM_MASK;	/* keep wake-event bits, replace mode */
	pmecr |= pwrmode;

	ks_wrreg16(ks, KS_PMECR, pmecr);
}
627
628/**
629 * ks_read_config - read chip configuration of bus width.
630 * @ks: The chip information
631 *
632 */
633static void ks_read_config(struct ks_net *ks)
634{
635 u16 reg_data = 0;
636
637 /* Regardless of bus width, 8 bit read should always work.*/
638 reg_data = ks_rdreg8(ks, KS_CCR) & 0x00FF;
639 reg_data |= ks_rdreg8(ks, KS_CCR+1) << 8;
640
641 /* addr/data bus are multiplexed */
642 ks->sharedbus = (reg_data & CCR_SHARED) == CCR_SHARED;
643
644 /* There are garbage data when reading data from QMU,
645 depending on bus-width.
646 */
647
648 if (reg_data & CCR_8BIT) {
649 ks->bus_width = ENUM_BUS_8BIT;
650 ks->extra_byte = 1;
651 } else if (reg_data & CCR_16BIT) {
652 ks->bus_width = ENUM_BUS_16BIT;
653 ks->extra_byte = 2;
654 } else {
655 ks->bus_width = ENUM_BUS_32BIT;
656 ks->extra_byte = 4;
657 }
658}
659
/**
 * ks_soft_reset - issue one of the soft reset to the device
 * @ks: The device state.
 * @op: The bit(s) to set in the GRR
 *
 * Issue the relevant soft-reset command to the device's GRR register
 * specified by @op.
 *
 * Note, the delays are in there as a caution to ensure that the reset
 * has time to take effect and then complete. Since the datasheet does
 * not currently specify the exact sequence, we have chosen something
 * that seems to work with our device.
 */
static void ks_soft_reset(struct ks_net *ks, unsigned op)
{
	/* Disable interrupt first */
	ks_wrreg16(ks, KS_IER, 0x0000);
	ks_wrreg16(ks, KS_GRR, op);
	mdelay(10);	/* wait a short time to effect reset */
	ks_wrreg16(ks, KS_GRR, 0);
	mdelay(1);	/* wait for condition to clear */
}
682
683
/**
 * ks_read_qmu - read 1 pkt data from the QMU.
 * @ks: The chip information
 * @buf: buffer address to save 1 pkt
 * @len: Pkt length
 * Here is the sequence to read 1 pkt:
 * 1. set pseudo-DMA mode
 * 2. read prepend data
 * 3. read pkt data
 * 4. reset pseudo-DMA mode
 */
static inline void ks_read_qmu(struct ks_net *ks, u16 *buf, u32 len)
{
	/* split extra_byte into an odd leading byte (8-bit bus only)
	 * and an even remainder readable as 16-bit words
	 */
	u32 r =  ks->extra_byte & 0x1 ;
	u32 w = ks->extra_byte - r;

	/* 1. set pseudo-DMA mode: auto-increment pointer, SDA on */
	ks_wrreg16(ks, KS_RXFDPR, RXFDPR_RXFPAI);
	ks_wrreg8(ks, KS_RXQCR, (ks->rc_rxqcr | RXQCR_SDA) & 0xff);

	/* 2. read prepend data */
	/**
	 * read 4 + extra bytes and discard them.
	 * extra bytes for dummy, 2 for status, 2 for len
	 */

	/* use likely(r) for 8 bit access for performance */
	if (unlikely(r))
		ioread8(ks->hw_addr);
	/* discarded header lands in @buf and is overwritten below */
	ks_inblk(ks, buf, w + 2 + 2);

	/* 3. read pkt data */
	ks_inblk(ks, buf, ALIGN(len, 4));

	/* 4. reset pseudo-DMA Mode */
	ks_wrreg8(ks, KS_RXQCR, ks->rc_rxqcr);
}
721
722/**
723 * ks_rcv - read multiple pkts data from the QMU.
724 * @ks: The chip information
725 * @netdev: The network device being opened.
726 *
727 * Read all of header information before reading pkt content.
728 * It is not allowed only port of pkts in QMU after issuing
729 * interrupt ack.
730 */
731static void ks_rcv(struct ks_net *ks, struct net_device *netdev)
732{
733 u32 i;
734 struct type_frame_head *frame_hdr = ks->frame_head_info;
735 struct sk_buff *skb;
736
737 ks->frame_cnt = ks_rdreg16(ks, KS_RXFCTR) >> 8;
738
739 /* read all header information */
740 for (i = 0; i < ks->frame_cnt; i++) {
741 /* Checking Received packet status */
742 frame_hdr->sts = ks_rdreg16(ks, KS_RXFHSR);
743 /* Get packet len from hardware */
744 frame_hdr->len = ks_rdreg16(ks, KS_RXFHBCR);
745 frame_hdr++;
746 }
747
748 frame_hdr = ks->frame_head_info;
749 while (ks->frame_cnt--) {
750 skb = dev_alloc_skb(frame_hdr->len + 16);
751 if (likely(skb && (frame_hdr->sts & RXFSHR_RXFV) &&
752 (frame_hdr->len < RX_BUF_SIZE) && frame_hdr->len)) {
753 skb_reserve(skb, 2);
754 /* read data block including CRC 4 bytes */
755 ks_read_qmu(ks, (u16 *)skb->data, frame_hdr->len + 4);
756 skb_put(skb, frame_hdr->len);
757 skb->dev = netdev;
758 skb->protocol = eth_type_trans(skb, netdev);
759 netif_rx(skb);
760 } else {
761 printk(KERN_ERR "%s: err:skb alloc\n", __func__);
762 ks_wrreg16(ks, KS_RXQCR, (ks->rc_rxqcr | RXQCR_RRXEF));
763 if (skb)
764 dev_kfree_skb_irq(skb);
765 }
766 frame_hdr++;
767 }
768}
769
770/**
771 * ks_update_link_status - link status update.
772 * @netdev: The network device being opened.
773 * @ks: The chip information
774 *
775 */
776
777static void ks_update_link_status(struct net_device *netdev, struct ks_net *ks)
778{
779 /* check the status of the link */
780 u32 link_up_status;
781 if (ks_rdreg16(ks, KS_P1SR) & P1SR_LINK_GOOD) {
782 netif_carrier_on(netdev);
783 link_up_status = true;
784 } else {
785 netif_carrier_off(netdev);
786 link_up_status = false;
787 }
788 if (netif_msg_link(ks))
789 ks_dbg(ks, "%s: %s\n",
790 __func__, link_up_status ? "UP" : "DOWN");
791}
792
/**
 * ks_irq - device interrupt handler
 * @irq: Interrupt number passed from the IRQ handler.
 * @pw: The private word passed to register_irq(), our struct ks_net.
 *
 * This is the handler invoked to find out what happened
 *
 * Read the interrupt status, work out what needs to be done and then clear
 * any of the interrupts that are not needed.
 */

static irqreturn_t ks_irq(int irq, void *pw)
{
	struct ks_net *ks = pw;
	struct net_device *netdev = ks->netdev;
	u16	status;

	/*this should be the first in IRQ handler */
	ks_save_cmd_reg(ks);

	status = ks_rdreg16(ks, KS_ISR);
	if (unlikely(!status)) {
		/* spurious interrupt: restore the interrupted command word */
		ks_restore_cmd_reg(ks);
		return IRQ_NONE;
	}

	/* acknowledge everything we are about to service */
	ks_wrreg16(ks, KS_ISR, status);

	if (likely(status & IRQ_RXI))
		ks_rcv(ks, netdev);

	if (unlikely(status & IRQ_LCI))
		ks_update_link_status(netdev, ks);

	if (unlikely(status & IRQ_TXI))
		netif_wake_queue(netdev);

	if (unlikely(status & IRQ_LDI)) {

		/* clear the linkup-detect wake event */
		u16 pmecr = ks_rdreg16(ks, KS_PMECR);
		pmecr &= ~PMECR_WKEVT_MASK;
		ks_wrreg16(ks, KS_PMECR, pmecr | PMECR_WKEVT_LINK);
	}

	/* this should be the last in IRQ handler*/
	ks_restore_cmd_reg(ks);
	return IRQ_HANDLED;
}
841
842
/**
 * ks_net_open - open network device
 * @netdev: The network device being opened.
 *
 * Called when the network device is marked active, such as a user executing
 * 'ifconfig up' on the device.
 *
 * Returns 0 on success or the negative errno from request_irq().
 */
static int ks_net_open(struct net_device *netdev)
{
	struct ks_net *ks = netdev_priv(netdev);
	int err;

#define	KS_INT_FLAGS	(IRQF_DISABLED|IRQF_TRIGGER_LOW)
	/* lock the card, even if we may not actually do anything
	 * else at the moment.
	 */

	if (netif_msg_ifup(ks))
		ks_dbg(ks, "%s - entry\n", __func__);

	/* NOTE(review): only the IRQ is requested here -- no hardware
	 * reset or RX/TX enable is visible in this function; presumably
	 * the chip was initialised at probe time. Confirm the open path.
	 * The dev_id cookie is @ks; free_irq() must use the same one.
	 */
	err = request_irq(ks->irq, ks_irq, KS_INT_FLAGS, DRV_NAME, ks);

	if (err) {
		printk(KERN_ERR "Failed to request IRQ: %d: %d\n",
			ks->irq, err);
		return err;
	}

	if (netif_msg_ifup(ks))
		ks_dbg(ks, "network device %s up\n", netdev->name);

	return 0;
}
877
878/**
879 * ks_net_stop - close network device
880 * @netdev: The device being closed.
881 *
882 * Called to close down a network device which has been active. Cancell any
883 * work, shutdown the RX and TX process and then place the chip into a low
884 * power state whilst it is not being used.
885 */
886static int ks_net_stop(struct net_device *netdev)
887{
888 struct ks_net *ks = netdev_priv(netdev);
889
890 if (netif_msg_ifdown(ks))
891 ks_info(ks, "%s: shutting down\n", netdev->name);
892
893 netif_stop_queue(netdev);
894
895 kfree(ks->frame_head_info);
896
897 mutex_lock(&ks->lock);
898
899 /* turn off the IRQs and ack any outstanding */
900 ks_wrreg16(ks, KS_IER, 0x0000);
901 ks_wrreg16(ks, KS_ISR, 0xffff);
902
903 /* shutdown RX process */
904 ks_wrreg16(ks, KS_RXCR1, 0x0000);
905
906 /* shutdown TX process */
907 ks_wrreg16(ks, KS_TXCR, 0x0000);
908
909 /* set powermode to soft power down to save power */
910 ks_set_powermode(ks, PMECR_PM_SOFTDOWN);
911 free_irq(ks->irq, netdev);
912 mutex_unlock(&ks->lock);
913 return 0;
914}
915
916
917/**
918 * ks_write_qmu - write 1 pkt data to the QMU.
919 * @ks: The chip information
920 * @pdata: buffer address to save 1 pkt
921 * @len: Pkt length in byte
922 * Here is the sequence to write 1 pkt:
923 * 1. set sudo DMA mode
924 * 2. write status/length
925 * 3. write pkt data
926 * 4. reset sudo DMA Mode
927 * 5. reset sudo DMA mode
928 * 6. Wait until pkt is out
929 */
930static void ks_write_qmu(struct ks_net *ks, u8 *pdata, u16 len)
931{
932 unsigned fid = ks->fid;
933
934 fid = ks->fid;
935 ks->fid = (ks->fid + 1) & TXFR_TXFID_MASK;
936
937 /* reduce the tx interrupt occurrances. */
938 if (!fid)
939 fid |= TXFR_TXIC; /* irq on completion */
940
941 /* start header at txb[0] to align txw entries */
942 ks->txh.txw[0] = cpu_to_le16(fid);
943 ks->txh.txw[1] = cpu_to_le16(len);
944
945 /* 1. set sudo-DMA mode */
946 ks_wrreg8(ks, KS_RXQCR, (ks->rc_rxqcr | RXQCR_SDA) & 0xff);
947 /* 2. write status/lenth info */
948 ks_outblk(ks, ks->txh.txw, 4);
949 /* 3. write pkt data */
950 ks_outblk(ks, (u16 *)pdata, ALIGN(len, 4));
951 /* 4. reset sudo-DMA mode */
952 ks_wrreg8(ks, KS_RXQCR, ks->rc_rxqcr);
953 /* 5. Enqueue Tx(move the pkt from TX buffer into TXQ) */
954 ks_wrreg16(ks, KS_TXQCR, TXQCR_METFE);
955 /* 6. wait until TXQCR_METFE is auto-cleared */
956 while (ks_rdreg16(ks, KS_TXQCR) & TXQCR_METFE)
957 ;
958}
959
/* mask all chip interrupt sources */
static void ks_disable_int(struct ks_net *ks)
{
	ks_wrreg16(ks, KS_IER, 0x0000);
}  /* ks_disable_int */

/* re-enable the cached interrupt mask (rc_ier) */
static void ks_enable_int(struct ks_net *ks)
{
	ks_wrreg16(ks, KS_IER, ks->rc_ier);
}  /* ks_enable_int */
969
970/**
971 * ks_start_xmit - transmit packet
972 * @skb : The buffer to transmit
973 * @netdev : The device used to transmit the packet.
974 *
975 * Called by the network layer to transmit the @skb.
976 * spin_lock_irqsave is required because tx and rx should be mutual exclusive.
977 * So while tx is in-progress, prevent IRQ interrupt from happenning.
978 */
979static int ks_start_xmit(struct sk_buff *skb, struct net_device *netdev)
980{
981 int retv = NETDEV_TX_OK;
982 struct ks_net *ks = netdev_priv(netdev);
983
984 disable_irq(netdev->irq);
985 ks_disable_int(ks);
986 spin_lock(&ks->statelock);
987
988 /* Extra space are required:
989 * 4 byte for alignment, 4 for status/length, 4 for CRC
990 */
991
992 if (likely(ks_tx_fifo_space(ks) >= skb->len + 12)) {
993 ks_write_qmu(ks, skb->data, skb->len);
994 dev_kfree_skb(skb);
995 } else
996 retv = NETDEV_TX_BUSY;
997 spin_unlock(&ks->statelock);
998 ks_enable_int(ks);
999 enable_irq(netdev->irq);
1000 return retv;
1001}
1002
1003/**
1004 * ks_start_rx - ready to serve pkts
1005 * @ks : The chip information
1006 *
1007 */
1008static void ks_start_rx(struct ks_net *ks)
1009{
1010 u16 cntl;
1011
1012 /* Enables QMU Receive (RXCR1). */
1013 cntl = ks_rdreg16(ks, KS_RXCR1);
1014 cntl |= RXCR1_RXE ;
1015 ks_wrreg16(ks, KS_RXCR1, cntl);
1016} /* ks_start_rx */
1017
1018/**
1019 * ks_stop_rx - stop to serve pkts
1020 * @ks : The chip information
1021 *
1022 */
1023static void ks_stop_rx(struct ks_net *ks)
1024{
1025 u16 cntl;
1026
1027 /* Disables QMU Receive (RXCR1). */
1028 cntl = ks_rdreg16(ks, KS_RXCR1);
1029 cntl &= ~RXCR1_RXE ;
1030 ks_wrreg16(ks, KS_RXCR1, cntl);
1031
1032} /* ks_stop_rx */
1033
1034static unsigned long const ethernet_polynomial = 0x04c11db7U;
1035
1036static unsigned long ether_gen_crc(int length, u8 *data)
1037{
1038 long crc = -1;
1039 while (--length >= 0) {
1040 u8 current_octet = *data++;
1041 int bit;
1042
1043 for (bit = 0; bit < 8; bit++, current_octet >>= 1) {
1044 crc = (crc << 1) ^
1045 ((crc < 0) ^ (current_octet & 1) ?
1046 ethernet_polynomial : 0);
1047 }
1048 }
1049 return (unsigned long)crc;
1050} /* ether_gen_crc */
1051
/**
* ks_set_grpaddr - set multicast information
* @ks : The chip information
*
* Builds the 64-bit multicast hash bitmap from the shadow address list
* and writes it out to the MAHTR registers, two bytes per 16-bit write.
*/

static void ks_set_grpaddr(struct ks_net *ks)
{
	u8	i;
	u32	index, position, value;

	/* start from an empty hash bitmap */
	memset(ks->mcast_bits, 0, sizeof(u8) * HW_MCAST_SIZE);

	for (i = 0; i < ks->mcast_lst_size; i++) {
		/* top 6 bits of the Ethernet CRC pick one of 64 hash bits */
		position = (ether_gen_crc(6, ks->mcast_lst[i]) >> 26) & 0x3f;
		index = position >> 3;		/* byte within the bitmap */
		value = 1 << (position & 7);	/* bit within that byte */
		ks->mcast_bits[index] |= (u8)value;
	}

	/* on each odd byte, write that byte and its even predecessor
	 * as one 16-bit register (address rounded down to even)
	 */
	for (i  = 0; i < HW_MCAST_SIZE; i++) {
		if (i & 1) {
			ks_wrreg16(ks, (u16)((KS_MAHTR0 + i) & ~1),
				(ks->mcast_bits[i] << 8) |
				ks->mcast_bits[i - 1]);
		}
	}
}  /* ks_set_grpaddr */
1079
1080/*
1081* ks_clear_mcast - clear multicast information
1082*
1083* @ks : The chip information
1084* This routine removes all mcast addresses set in the hardware.
1085*/
1086
1087static void ks_clear_mcast(struct ks_net *ks)
1088{
1089 u16 i, mcast_size;
1090 for (i = 0; i < HW_MCAST_SIZE; i++)
1091 ks->mcast_bits[i] = 0;
1092
1093 mcast_size = HW_MCAST_SIZE >> 2;
1094 for (i = 0; i < mcast_size; i++)
1095 ks_wrreg16(ks, KS_MAHTR0 + (2*i), 0);
1096}
1097
1098static void ks_set_promis(struct ks_net *ks, u16 promiscuous_mode)
1099{
1100 u16 cntl;
1101 ks->promiscuous = promiscuous_mode;
1102 ks_stop_rx(ks); /* Stop receiving for reconfiguration */
1103 cntl = ks_rdreg16(ks, KS_RXCR1);
1104
1105 cntl &= ~RXCR1_FILTER_MASK;
1106 if (promiscuous_mode)
1107 /* Enable Promiscuous mode */
1108 cntl |= RXCR1_RXAE | RXCR1_RXINVF;
1109 else
1110 /* Disable Promiscuous mode (default normal mode) */
1111 cntl |= RXCR1_RXPAFMA;
1112
1113 ks_wrreg16(ks, KS_RXCR1, cntl);
1114
1115 if (ks->enabled)
1116 ks_start_rx(ks);
1117
1118} /* ks_set_promis */
1119
1120static void ks_set_mcast(struct ks_net *ks, u16 mcast)
1121{
1122 u16 cntl;
1123
1124 ks->all_mcast = mcast;
1125 ks_stop_rx(ks); /* Stop receiving for reconfiguration */
1126 cntl = ks_rdreg16(ks, KS_RXCR1);
1127 cntl &= ~RXCR1_FILTER_MASK;
1128 if (mcast)
1129 /* Enable "Perfect with Multicast address passed mode" */
1130 cntl |= (RXCR1_RXAE | RXCR1_RXMAFMA | RXCR1_RXPAFMA);
1131 else
1132 /**
1133 * Disable "Perfect with Multicast address passed
1134 * mode" (normal mode).
1135 */
1136 cntl |= RXCR1_RXPAFMA;
1137
1138 ks_wrreg16(ks, KS_RXCR1, cntl);
1139
1140 if (ks->enabled)
1141 ks_start_rx(ks);
1142} /* ks_set_mcast */
1143
1144static void ks_set_rx_mode(struct net_device *netdev)
1145{
1146 struct ks_net *ks = netdev_priv(netdev);
1147 struct dev_mc_list *ptr;
1148
1149 /* Turn on/off promiscuous mode. */
1150 if ((netdev->flags & IFF_PROMISC) == IFF_PROMISC)
1151 ks_set_promis(ks,
1152 (u16)((netdev->flags & IFF_PROMISC) == IFF_PROMISC));
1153 /* Turn on/off all mcast mode. */
1154 else if ((netdev->flags & IFF_ALLMULTI) == IFF_ALLMULTI)
1155 ks_set_mcast(ks,
1156 (u16)((netdev->flags & IFF_ALLMULTI) == IFF_ALLMULTI));
1157 else
1158 ks_set_promis(ks, false);
1159
1160 if ((netdev->flags & IFF_MULTICAST) && netdev->mc_count) {
1161 if (netdev->mc_count <= MAX_MCAST_LST) {
1162 int i = 0;
1163 for (ptr = netdev->mc_list; ptr; ptr = ptr->next) {
1164 if (!(*ptr->dmi_addr & 1))
1165 continue;
1166 if (i >= MAX_MCAST_LST)
1167 break;
1168 memcpy(ks->mcast_lst[i++], ptr->dmi_addr,
1169 MAC_ADDR_LEN);
1170 }
1171 ks->mcast_lst_size = (u8)i;
1172 ks_set_grpaddr(ks);
1173 } else {
1174 /**
1175 * List too big to support so
1176 * turn on all mcast mode.
1177 */
1178 ks->mcast_lst_size = MAX_MCAST_LST;
1179 ks_set_mcast(ks, true);
1180 }
1181 } else {
1182 ks->mcast_lst_size = 0;
1183 ks_clear_mcast(ks);
1184 }
1185} /* ks_set_rx_mode */
1186
/**
 * ks_set_mac - program the station MAC address into the chip
 * @ks: The chip information
 * @data: the 6-byte MAC address, in network byte order
 *
 * Each 16-bit half is byte-swapped before writing because the MAR
 * registers hold the address in the opposite byte order from the
 * in-memory representation.
 */
static void ks_set_mac(struct ks_net *ks, u8 *data)
{
	u16 *pw = (u16 *)data;
	u16 w, u;

	ks_stop_rx(ks);  /* Stop receiving for reconfiguration */

	u = *pw++;
	w = ((u & 0xFF) << 8) | ((u >> 8) & 0xFF);	/* swap bytes */
	ks_wrreg16(ks, KS_MARH, w);

	u = *pw++;
	w = ((u & 0xFF) << 8) | ((u >> 8) & 0xFF);
	ks_wrreg16(ks, KS_MARM, w);

	u = *pw;
	w = ((u & 0xFF) << 8) | ((u >> 8) & 0xFF);
	ks_wrreg16(ks, KS_MARL, w);

	/* keep a shadow copy of the programmed address */
	memcpy(ks->mac_addr, data, 6);

	if (ks->enabled)
		ks_start_rx(ks);
}
1211
1212static int ks_set_mac_address(struct net_device *netdev, void *paddr)
1213{
1214 struct ks_net *ks = netdev_priv(netdev);
1215 struct sockaddr *addr = paddr;
1216 u8 *da;
1217
1218 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
1219
1220 da = (u8 *)netdev->dev_addr;
1221
1222 ks_set_mac(ks, da);
1223 return 0;
1224}
1225
1226static int ks_net_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
1227{
1228 struct ks_net *ks = netdev_priv(netdev);
1229
1230 if (!netif_running(netdev))
1231 return -EINVAL;
1232
1233 return generic_mii_ioctl(&ks->mii, if_mii(req), cmd, NULL);
1234}
1235
/* netdev callbacks wired into the network stack */
static const struct net_device_ops ks_netdev_ops = {
	.ndo_open		= ks_net_open,
	.ndo_stop		= ks_net_stop,
	.ndo_do_ioctl		= ks_net_ioctl,
	.ndo_start_xmit		= ks_start_xmit,
	.ndo_set_mac_address	= ks_set_mac_address,
	.ndo_set_rx_mode	= ks_set_rx_mode,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
};
1246
/* ethtool support */

/* report driver name/version and the parent bus device name */
static void ks_get_drvinfo(struct net_device *netdev,
			       struct ethtool_drvinfo *di)
{
	strlcpy(di->driver, DRV_NAME, sizeof(di->driver));
	strlcpy(di->version, "1.00", sizeof(di->version));
	strlcpy(di->bus_info, dev_name(netdev->dev.parent),
		sizeof(di->bus_info));
}
1257
/* ethtool: report the driver message level */
static u32 ks_get_msglevel(struct net_device *netdev)
{
	struct ks_net *ks = netdev_priv(netdev);
	return ks->msg_enable;
}

/* ethtool: set the driver message level */
static void ks_set_msglevel(struct net_device *netdev, u32 to)
{
	struct ks_net *ks = netdev_priv(netdev);
	ks->msg_enable = to;
}

/* ethtool: read link settings via the generic MII layer */
static int ks_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
{
	struct ks_net *ks = netdev_priv(netdev);
	return mii_ethtool_gset(&ks->mii, cmd);
}

/* ethtool: change link settings via the generic MII layer */
static int ks_set_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
{
	struct ks_net *ks = netdev_priv(netdev);
	return mii_ethtool_sset(&ks->mii, cmd);
}

/* ethtool: report link state via the generic MII layer */
static u32 ks_get_link(struct net_device *netdev)
{
	struct ks_net *ks = netdev_priv(netdev);
	return mii_link_ok(&ks->mii);
}

/* ethtool: restart autonegotiation via the generic MII layer */
static int ks_nway_reset(struct net_device *netdev)
{
	struct ks_net *ks = netdev_priv(netdev);
	return mii_nway_restart(&ks->mii);
}

/* ethtool callbacks registered for this driver */
static const struct ethtool_ops ks_ethtool_ops = {
	.get_drvinfo	= ks_get_drvinfo,
	.get_msglevel	= ks_get_msglevel,
	.set_msglevel	= ks_set_msglevel,
	.get_settings	= ks_get_settings,
	.set_settings	= ks_set_settings,
	.get_link	= ks_get_link,
	.nway_reset	= ks_nway_reset,
};
1303
1304/* MII interface controls */
1305
1306/**
1307 * ks_phy_reg - convert MII register into a KS8851 register
1308 * @reg: MII register number.
1309 *
1310 * Return the KS8851 register number for the corresponding MII PHY register
1311 * if possible. Return zero if the MII register has no direct mapping to the
1312 * KS8851 register set.
1313 */
1314static int ks_phy_reg(int reg)
1315{
1316 switch (reg) {
1317 case MII_BMCR:
1318 return KS_P1MBCR;
1319 case MII_BMSR:
1320 return KS_P1MBSR;
1321 case MII_PHYSID1:
1322 return KS_PHY1ILR;
1323 case MII_PHYSID2:
1324 return KS_PHY1IHR;
1325 case MII_ADVERTISE:
1326 return KS_P1ANAR;
1327 case MII_LPA:
1328 return KS_P1ANLPR;
1329 }
1330
1331 return 0x0;
1332}
1333
/**
 * ks_phy_read - MII interface PHY register read.
 * @netdev: The network device the PHY is on.
 * @phy_addr: Address of PHY (ignored as we only have one)
 * @reg: The register to read.
 *
 * This call reads data from the PHY register specified in @reg. Since the
 * device does not support all the MII registers, the non-existent values
 * are always returned as zero.
 *
 * We return zero for unsupported registers as the MII code does not check
 * the value returned for any error status, and simply returns it to the
 * caller. The mii-tool that the driver was tested with takes any -ve error
 * as real PHY capabilities, thus displaying incorrect data to the user.
 */
static int ks_phy_read(struct net_device *netdev, int phy_addr, int reg)
{
	struct ks_net *ks = netdev_priv(netdev);
	int ksreg;
	int result;

	ksreg = ks_phy_reg(reg);
	if (!ksreg)
		return 0x0;	/* no error return allowed, so use zero */

	/* serialise against packet-transfer register accesses */
	mutex_lock(&ks->lock);
	result = ks_rdreg16(ks, ksreg);
	mutex_unlock(&ks->lock);

	return result;
}
1365
1366static void ks_phy_write(struct net_device *netdev,
1367 int phy, int reg, int value)
1368{
1369 struct ks_net *ks = netdev_priv(netdev);
1370 int ksreg;
1371
1372 ksreg = ks_phy_reg(reg);
1373 if (ksreg) {
1374 mutex_lock(&ks->lock);
1375 ks_wrreg16(ks, ksreg, value);
1376 mutex_unlock(&ks->lock);
1377 }
1378}
1379
1380/**
1381 * ks_read_selftest - read the selftest memory info.
1382 * @ks: The device state
1383 *
1384 * Read and check the TX/RX memory selftest information.
1385 */
1386static int ks_read_selftest(struct ks_net *ks)
1387{
1388 unsigned both_done = MBIR_TXMBF | MBIR_RXMBF;
1389 int ret = 0;
1390 unsigned rd;
1391
1392 rd = ks_rdreg16(ks, KS_MBIR);
1393
1394 if ((rd & both_done) != both_done) {
1395 ks_warn(ks, "Memory selftest not finished\n");
1396 return 0;
1397 }
1398
1399 if (rd & MBIR_TXMBFA) {
1400 ks_err(ks, "TX memory selftest fails\n");
1401 ret |= 1;
1402 }
1403
1404 if (rd & MBIR_RXMBFA) {
1405 ks_err(ks, "RX memory selftest fails\n");
1406 ret |= 2;
1407 }
1408
1409 ks_info(ks, "the selftest passes\n");
1410 return ret;
1411}
1412
1413static void ks_disable(struct ks_net *ks)
1414{
1415 u16 w;
1416
1417 w = ks_rdreg16(ks, KS_TXCR);
1418
1419 /* Disables QMU Transmit (TXCR). */
1420 w &= ~TXCR_TXE;
1421 ks_wrreg16(ks, KS_TXCR, w);
1422
1423 /* Disables QMU Receive (RXCR1). */
1424 w = ks_rdreg16(ks, KS_RXCR1);
1425 w &= ~RXCR1_RXE ;
1426 ks_wrreg16(ks, KS_RXCR1, w);
1427
1428 ks->enabled = false;
1429
1430} /* ks_disable */
1431
/*
 * ks_setup - configure the QMU transmit/receive paths and RX filtering
 * @ks: The device state.
 *
 * Programs the QMU frame-data pointer auto-increment, the receive frame
 * threshold and RXQ command control, forces the PHY to half duplex as a
 * safe default, then writes TXCR/RXCR1 according to the current
 * promiscuous/all-multicast state. The register writes below are
 * order-sensitive; do not reorder.
 */
static void ks_setup(struct ks_net *ks)
{
	u16	w;

	/* Configure QMU Transmit */

	/* Setup Transmit Frame Data Pointer Auto-Increment (TXFDPR) */
	ks_wrreg16(ks, KS_TXFDPR, TXFDPR_TXFPAI);

	/* Setup Receive Frame Data Pointer Auto-Increment */
	ks_wrreg16(ks, KS_RXFDPR, RXFDPR_RXFPAI);

	/* Setup Receive Frame Threshold - 1 frame (RXFCTFC) */
	ks_wrreg16(ks, KS_RXFCTR, 1 & RXFCTR_THRESHOLD_MASK);

	/* Setup RxQ Command Control (RXQCR); cached so the ISR can restore it */
	ks->rc_rxqcr = RXQCR_CMD_CNTL;
	ks_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr);

	/*
	 * Force half duplex (default is full duplex): if auto-negotiation
	 * fails, most switches fall back to half duplex, so match that.
	 */
	w = ks_rdreg16(ks, KS_P1MBCR);
	w &= ~P1MBCR_FORCE_FDX;
	ks_wrreg16(ks, KS_P1MBCR, w);

	/*
	 * TX control: flow control, padding, CRC generation; TCGIP
	 * presumably enables IP checksum generation — confirm against the
	 * KS8851 datasheet.
	 */
	w = TXCR_TXFCE | TXCR_TXPE | TXCR_TXCRC | TXCR_TCGIP;
	ks_wrreg16(ks, KS_TXCR, w);

	/* RX control: flow control, accept broadcast and unicast. */
	w = RXCR1_RXFCE | RXCR1_RXBE | RXCR1_RXUE;

	if (ks->promiscuous)		/* bPromiscuous */
		w |= (RXCR1_RXAE | RXCR1_RXINVF);
	else if (ks->all_mcast)		/* Multicast address passed mode */
		w |= (RXCR1_RXAE | RXCR1_RXMAFMA | RXCR1_RXPAFMA);
	else				/* Normal mode */
		w |= RXCR1_RXPAFMA;

	ks_wrreg16(ks, KS_RXCR1, w);
} /*ks_setup */
1477
1478
/*
 * ks_setup_int - prepare the interrupt state
 * @ks: The device state.
 *
 * Acknowledges all pending interrupt status bits, then records the set of
 * interrupt sources (link change, TX done, RX) in rc_ier for a later
 * ks_enable_int() to unmask. Does not itself enable the interrupts.
 */
static void ks_setup_int(struct ks_net *ks)
{
	/* NOTE(review): this initial clear is unconditionally overwritten
	 * below; kept as-is in case ks_wrreg16() paths consult rc_ier. */
	ks->rc_ier = 0x00;
	/* Clear the interrupts status of the hardware. */
	ks_wrreg16(ks, KS_ISR, 0xffff);

	/* Enables the interrupts of the hardware. */
	ks->rc_ier = (IRQ_LCI | IRQ_TXI | IRQ_RXI);
} /* ks_setup_int */
1488
1489void ks_enable(struct ks_net *ks)
1490{
1491 u16 w;
1492
1493 w = ks_rdreg16(ks, KS_TXCR);
1494 /* Enables QMU Transmit (TXCR). */
1495 ks_wrreg16(ks, KS_TXCR, w | TXCR_TXE);
1496
1497 /*
1498 * RX Frame Count Threshold Enable and Auto-Dequeue RXQ Frame
1499 * Enable
1500 */
1501
1502 w = ks_rdreg16(ks, KS_RXQCR);
1503 ks_wrreg16(ks, KS_RXQCR, w | RXQCR_RXFCTE);
1504
1505 /* Enables QMU Receive (RXCR1). */
1506 w = ks_rdreg16(ks, KS_RXCR1);
1507 ks_wrreg16(ks, KS_RXCR1, w | RXCR1_RXE);
1508 ks->enabled = true;
1509} /* ks_enable */
1510
1511static int ks_hw_init(struct ks_net *ks)
1512{
1513#define MHEADER_SIZE (sizeof(struct type_frame_head) * MAX_RECV_FRAMES)
1514 ks->promiscuous = 0;
1515 ks->all_mcast = 0;
1516 ks->mcast_lst_size = 0;
1517
1518 ks->frame_head_info = (struct type_frame_head *) \
1519 kmalloc(MHEADER_SIZE, GFP_KERNEL);
1520 if (!ks->frame_head_info) {
1521 printk(KERN_ERR "Error: Fail to allocate frame memory\n");
1522 return false;
1523 }
1524
1525 ks_set_mac(ks, KS_DEFAULT_MAC_ADDRESS);
1526 return true;
1527}
1528
1529
1530static int __devinit ks8851_probe(struct platform_device *pdev)
1531{
1532 int err = -ENOMEM;
1533 struct resource *io_d, *io_c;
1534 struct net_device *netdev;
1535 struct ks_net *ks;
1536 u16 id, data;
1537
1538 io_d = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1539 io_c = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1540
1541 if (!request_mem_region(io_d->start, resource_size(io_d), DRV_NAME))
1542 goto err_mem_region;
1543
1544 if (!request_mem_region(io_c->start, resource_size(io_c), DRV_NAME))
1545 goto err_mem_region1;
1546
1547 netdev = alloc_etherdev(sizeof(struct ks_net));
1548 if (!netdev)
1549 goto err_alloc_etherdev;
1550
1551 SET_NETDEV_DEV(netdev, &pdev->dev);
1552
1553 ks = netdev_priv(netdev);
1554 ks->netdev = netdev;
1555 ks->hw_addr = ioremap(io_d->start, resource_size(io_d));
1556
1557 if (!ks->hw_addr)
1558 goto err_ioremap;
1559
1560 ks->hw_addr_cmd = ioremap(io_c->start, resource_size(io_c));
1561 if (!ks->hw_addr_cmd)
1562 goto err_ioremap1;
1563
1564 ks->irq = platform_get_irq(pdev, 0);
1565
1566 if (ks->irq < 0) {
1567 err = ks->irq;
1568 goto err_get_irq;
1569 }
1570
1571 ks->pdev = pdev;
1572
1573 mutex_init(&ks->lock);
1574 spin_lock_init(&ks->statelock);
1575
1576 netdev->netdev_ops = &ks_netdev_ops;
1577 netdev->ethtool_ops = &ks_ethtool_ops;
1578
1579 /* setup mii state */
1580 ks->mii.dev = netdev;
1581 ks->mii.phy_id = 1,
1582 ks->mii.phy_id_mask = 1;
1583 ks->mii.reg_num_mask = 0xf;
1584 ks->mii.mdio_read = ks_phy_read;
1585 ks->mii.mdio_write = ks_phy_write;
1586
1587 ks_info(ks, "message enable is %d\n", msg_enable);
1588 /* set the default message enable */
1589 ks->msg_enable = netif_msg_init(msg_enable, (NETIF_MSG_DRV |
1590 NETIF_MSG_PROBE |
1591 NETIF_MSG_LINK));
1592 ks_read_config(ks);
1593
1594 /* simple check for a valid chip being connected to the bus */
1595 if ((ks_rdreg16(ks, KS_CIDER) & ~CIDER_REV_MASK) != CIDER_ID) {
1596 ks_err(ks, "failed to read device ID\n");
1597 err = -ENODEV;
1598 goto err_register;
1599 }
1600
1601 if (ks_read_selftest(ks)) {
1602 ks_err(ks, "failed to read device ID\n");
1603 err = -ENODEV;
1604 goto err_register;
1605 }
1606
1607 err = register_netdev(netdev);
1608 if (err)
1609 goto err_register;
1610
1611 platform_set_drvdata(pdev, netdev);
1612
1613 ks_soft_reset(ks, GRR_GSR);
1614 ks_hw_init(ks);
1615 ks_disable(ks);
1616 ks_setup(ks);
1617 ks_setup_int(ks);
1618 ks_enable_int(ks);
1619 ks_enable(ks);
1620 memcpy(netdev->dev_addr, ks->mac_addr, 6);
1621
1622 data = ks_rdreg16(ks, KS_OBCR);
1623 ks_wrreg16(ks, KS_OBCR, data | OBCR_ODS_16MA);
1624
1625 /**
1626 * If you want to use the default MAC addr,
1627 * comment out the 2 functions below.
1628 */
1629
1630 random_ether_addr(netdev->dev_addr);
1631 ks_set_mac(ks, netdev->dev_addr);
1632
1633 id = ks_rdreg16(ks, KS_CIDER);
1634
1635 printk(KERN_INFO DRV_NAME
1636 " Found chip, family: 0x%x, id: 0x%x, rev: 0x%x\n",
1637 (id >> 8) & 0xff, (id >> 4) & 0xf, (id >> 1) & 0x7);
1638 return 0;
1639
1640err_register:
1641err_get_irq:
1642 iounmap(ks->hw_addr_cmd);
1643err_ioremap1:
1644 iounmap(ks->hw_addr);
1645err_ioremap:
1646 free_netdev(netdev);
1647err_alloc_etherdev:
1648 release_mem_region(io_c->start, resource_size(io_c));
1649err_mem_region1:
1650 release_mem_region(io_d->start, resource_size(io_d));
1651err_mem_region:
1652 return err;
1653}
1654
1655static int __devexit ks8851_remove(struct platform_device *pdev)
1656{
1657 struct net_device *netdev = platform_get_drvdata(pdev);
1658 struct ks_net *ks = netdev_priv(netdev);
1659 struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1660
1661 unregister_netdev(netdev);
1662 iounmap(ks->hw_addr);
1663 free_netdev(netdev);
1664 release_mem_region(iomem->start, resource_size(iomem));
1665 platform_set_drvdata(pdev, NULL);
1666 return 0;
1667
1668}
1669
/* Platform driver glue: matched against devices named DRV_NAME. */
static struct platform_driver ks8851_platform_driver = {
	.driver = {
		.name = DRV_NAME,
		.owner = THIS_MODULE,
	},
	.probe = ks8851_probe,
	.remove = __devexit_p(ks8851_remove),
};
1678
/* Module entry point: register the platform driver. */
static int __init ks8851_init(void)
{
	return platform_driver_register(&ks8851_platform_driver);
}
1683
/* Module exit point: unregister the platform driver. */
static void __exit ks8851_exit(void)
{
	platform_driver_unregister(&ks8851_platform_driver);
}
1688
1689module_init(ks8851_init);
1690module_exit(ks8851_exit);
1691
1692MODULE_DESCRIPTION("KS8851 MLL Network driver");
1693MODULE_AUTHOR("David Choi <david.choi@micrel.com>");
1694MODULE_LICENSE("GPL");
1695module_param_named(message, msg_enable, int, 0);
1696MODULE_PARM_DESC(message, "Message verbosity level (0=none, 31=all)");
1697
diff --git a/drivers/net/meth.c b/drivers/net/meth.c
index 92ceb689b4d4..2af81735386b 100644
--- a/drivers/net/meth.c
+++ b/drivers/net/meth.c
@@ -828,7 +828,7 @@ static int __exit meth_remove(struct platform_device *pdev)
828 828
829static struct platform_driver meth_driver = { 829static struct platform_driver meth_driver = {
830 .probe = meth_probe, 830 .probe = meth_probe,
831 .remove = __devexit_p(meth_remove), 831 .remove = __exit_p(meth_remove),
832 .driver = { 832 .driver = {
833 .name = "meth", 833 .name = "meth",
834 .owner = THIS_MODULE, 834 .owner = THIS_MODULE,
diff --git a/drivers/net/pppol2tp.c b/drivers/net/pppol2tp.c
index cc394d073755..5910df60c93e 100644
--- a/drivers/net/pppol2tp.c
+++ b/drivers/net/pppol2tp.c
@@ -2179,7 +2179,7 @@ static int pppol2tp_session_setsockopt(struct sock *sk,
2179 * session or the special tunnel type. 2179 * session or the special tunnel type.
2180 */ 2180 */
2181static int pppol2tp_setsockopt(struct socket *sock, int level, int optname, 2181static int pppol2tp_setsockopt(struct socket *sock, int level, int optname,
2182 char __user *optval, int optlen) 2182 char __user *optval, unsigned int optlen)
2183{ 2183{
2184 struct sock *sk = sock->sk; 2184 struct sock *sk = sock->sk;
2185 struct pppol2tp_session *session = sk->sk_user_data; 2185 struct pppol2tp_session *session = sk->sk_user_data;
diff --git a/drivers/net/qlge/qlge.h b/drivers/net/qlge/qlge.h
index a9845a2f243f..30d5585beeee 100644
--- a/drivers/net/qlge/qlge.h
+++ b/drivers/net/qlge/qlge.h
@@ -1381,15 +1381,15 @@ struct intr_context {
1381 1381
1382/* adapter flags definitions. */ 1382/* adapter flags definitions. */
1383enum { 1383enum {
1384 QL_ADAPTER_UP = (1 << 0), /* Adapter has been brought up. */ 1384 QL_ADAPTER_UP = 0, /* Adapter has been brought up. */
1385 QL_LEGACY_ENABLED = (1 << 3), 1385 QL_LEGACY_ENABLED = 1,
1386 QL_MSI_ENABLED = (1 << 3), 1386 QL_MSI_ENABLED = 2,
1387 QL_MSIX_ENABLED = (1 << 4), 1387 QL_MSIX_ENABLED = 3,
1388 QL_DMA64 = (1 << 5), 1388 QL_DMA64 = 4,
1389 QL_PROMISCUOUS = (1 << 6), 1389 QL_PROMISCUOUS = 5,
1390 QL_ALLMULTI = (1 << 7), 1390 QL_ALLMULTI = 6,
1391 QL_PORT_CFG = (1 << 8), 1391 QL_PORT_CFG = 7,
1392 QL_CAM_RT_SET = (1 << 9), 1392 QL_CAM_RT_SET = 8,
1393}; 1393};
1394 1394
1395/* link_status bit definitions */ 1395/* link_status bit definitions */
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c
index 7783c5db81dc..3d0efea32111 100644
--- a/drivers/net/qlge/qlge_main.c
+++ b/drivers/net/qlge/qlge_main.c
@@ -3142,14 +3142,14 @@ static int ql_route_initialize(struct ql_adapter *qdev)
3142{ 3142{
3143 int status = 0; 3143 int status = 0;
3144 3144
3145 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK); 3145 /* Clear all the entries in the routing table. */
3146 status = ql_clear_routing_entries(qdev);
3146 if (status) 3147 if (status)
3147 return status; 3148 return status;
3148 3149
3149 /* Clear all the entries in the routing table. */ 3150 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3150 status = ql_clear_routing_entries(qdev);
3151 if (status) 3151 if (status)
3152 goto exit; 3152 return status;
3153 3153
3154 status = ql_set_routing_reg(qdev, RT_IDX_ALL_ERR_SLOT, RT_IDX_ERR, 1); 3154 status = ql_set_routing_reg(qdev, RT_IDX_ALL_ERR_SLOT, RT_IDX_ERR, 1);
3155 if (status) { 3155 if (status) {
@@ -3380,12 +3380,10 @@ static int ql_adapter_down(struct ql_adapter *qdev)
3380 3380
3381 ql_free_rx_buffers(qdev); 3381 ql_free_rx_buffers(qdev);
3382 3382
3383 spin_lock(&qdev->hw_lock);
3384 status = ql_adapter_reset(qdev); 3383 status = ql_adapter_reset(qdev);
3385 if (status) 3384 if (status)
3386 QPRINTK(qdev, IFDOWN, ERR, "reset(func #%d) FAILED!\n", 3385 QPRINTK(qdev, IFDOWN, ERR, "reset(func #%d) FAILED!\n",
3387 qdev->func); 3386 qdev->func);
3388 spin_unlock(&qdev->hw_lock);
3389 return status; 3387 return status;
3390} 3388}
3391 3389
@@ -3705,7 +3703,7 @@ static void ql_asic_reset_work(struct work_struct *work)
3705 struct ql_adapter *qdev = 3703 struct ql_adapter *qdev =
3706 container_of(work, struct ql_adapter, asic_reset_work.work); 3704 container_of(work, struct ql_adapter, asic_reset_work.work);
3707 int status; 3705 int status;
3708 3706 rtnl_lock();
3709 status = ql_adapter_down(qdev); 3707 status = ql_adapter_down(qdev);
3710 if (status) 3708 if (status)
3711 goto error; 3709 goto error;
@@ -3713,12 +3711,12 @@ static void ql_asic_reset_work(struct work_struct *work)
3713 status = ql_adapter_up(qdev); 3711 status = ql_adapter_up(qdev);
3714 if (status) 3712 if (status)
3715 goto error; 3713 goto error;
3716 3714 rtnl_unlock();
3717 return; 3715 return;
3718error: 3716error:
3719 QPRINTK(qdev, IFUP, ALERT, 3717 QPRINTK(qdev, IFUP, ALERT,
3720 "Driver up/down cycle failed, closing device\n"); 3718 "Driver up/down cycle failed, closing device\n");
3721 rtnl_lock(); 3719
3722 set_bit(QL_ADAPTER_UP, &qdev->flags); 3720 set_bit(QL_ADAPTER_UP, &qdev->flags);
3723 dev_close(qdev->ndev); 3721 dev_close(qdev->ndev);
3724 rtnl_unlock(); 3722 rtnl_unlock();
@@ -3834,11 +3832,14 @@ static int __devinit ql_init_device(struct pci_dev *pdev,
3834 return err; 3832 return err;
3835 } 3833 }
3836 3834
3835 qdev->ndev = ndev;
3836 qdev->pdev = pdev;
3837 pci_set_drvdata(pdev, ndev);
3837 pos = pci_find_capability(pdev, PCI_CAP_ID_EXP); 3838 pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
3838 if (pos <= 0) { 3839 if (pos <= 0) {
3839 dev_err(&pdev->dev, PFX "Cannot find PCI Express capability, " 3840 dev_err(&pdev->dev, PFX "Cannot find PCI Express capability, "
3840 "aborting.\n"); 3841 "aborting.\n");
3841 goto err_out; 3842 return pos;
3842 } else { 3843 } else {
3843 pci_read_config_word(pdev, pos + PCI_EXP_DEVCTL, &val16); 3844 pci_read_config_word(pdev, pos + PCI_EXP_DEVCTL, &val16);
3844 val16 &= ~PCI_EXP_DEVCTL_NOSNOOP_EN; 3845 val16 &= ~PCI_EXP_DEVCTL_NOSNOOP_EN;
@@ -3851,7 +3852,7 @@ static int __devinit ql_init_device(struct pci_dev *pdev,
3851 err = pci_request_regions(pdev, DRV_NAME); 3852 err = pci_request_regions(pdev, DRV_NAME);
3852 if (err) { 3853 if (err) {
3853 dev_err(&pdev->dev, "PCI region request failed.\n"); 3854 dev_err(&pdev->dev, "PCI region request failed.\n");
3854 goto err_out; 3855 return err;
3855 } 3856 }
3856 3857
3857 pci_set_master(pdev); 3858 pci_set_master(pdev);
@@ -3869,7 +3870,6 @@ static int __devinit ql_init_device(struct pci_dev *pdev,
3869 goto err_out; 3870 goto err_out;
3870 } 3871 }
3871 3872
3872 pci_set_drvdata(pdev, ndev);
3873 qdev->reg_base = 3873 qdev->reg_base =
3874 ioremap_nocache(pci_resource_start(pdev, 1), 3874 ioremap_nocache(pci_resource_start(pdev, 1),
3875 pci_resource_len(pdev, 1)); 3875 pci_resource_len(pdev, 1));
@@ -3889,8 +3889,6 @@ static int __devinit ql_init_device(struct pci_dev *pdev,
3889 goto err_out; 3889 goto err_out;
3890 } 3890 }
3891 3891
3892 qdev->ndev = ndev;
3893 qdev->pdev = pdev;
3894 err = ql_get_board_info(qdev); 3892 err = ql_get_board_info(qdev);
3895 if (err) { 3893 if (err) {
3896 dev_err(&pdev->dev, "Register access failed.\n"); 3894 dev_err(&pdev->dev, "Register access failed.\n");
diff --git a/drivers/net/sgiseeq.c b/drivers/net/sgiseeq.c
index ecf3279fbef5..f4dfd1f679a9 100644
--- a/drivers/net/sgiseeq.c
+++ b/drivers/net/sgiseeq.c
@@ -826,7 +826,7 @@ static int __exit sgiseeq_remove(struct platform_device *pdev)
826 826
827static struct platform_driver sgiseeq_driver = { 827static struct platform_driver sgiseeq_driver = {
828 .probe = sgiseeq_probe, 828 .probe = sgiseeq_probe,
829 .remove = __devexit_p(sgiseeq_remove), 829 .remove = __exit_p(sgiseeq_remove),
830 .driver = { 830 .driver = {
831 .name = "sgiseeq", 831 .name = "sgiseeq",
832 .owner = THIS_MODULE, 832 .owner = THIS_MODULE,
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index 55bad4081966..01f6811f1324 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -3935,11 +3935,14 @@ static int __devinit skge_probe(struct pci_dev *pdev,
3935#endif 3935#endif
3936 3936
3937 err = -ENOMEM; 3937 err = -ENOMEM;
3938 hw = kzalloc(sizeof(*hw), GFP_KERNEL); 3938 /* space for skge@pci:0000:04:00.0 */
3939 hw = kzalloc(sizeof(*hw) + strlen(DRV_NAME "@pci:" )
3940 + strlen(pci_name(pdev)) + 1, GFP_KERNEL);
3939 if (!hw) { 3941 if (!hw) {
3940 dev_err(&pdev->dev, "cannot allocate hardware struct\n"); 3942 dev_err(&pdev->dev, "cannot allocate hardware struct\n");
3941 goto err_out_free_regions; 3943 goto err_out_free_regions;
3942 } 3944 }
3945 sprintf(hw->irq_name, DRV_NAME "@pci:%s", pci_name(pdev));
3943 3946
3944 hw->pdev = pdev; 3947 hw->pdev = pdev;
3945 spin_lock_init(&hw->hw_lock); 3948 spin_lock_init(&hw->hw_lock);
@@ -3974,7 +3977,7 @@ static int __devinit skge_probe(struct pci_dev *pdev,
3974 goto err_out_free_netdev; 3977 goto err_out_free_netdev;
3975 } 3978 }
3976 3979
3977 err = request_irq(pdev->irq, skge_intr, IRQF_SHARED, dev->name, hw); 3980 err = request_irq(pdev->irq, skge_intr, IRQF_SHARED, hw->irq_name, hw);
3978 if (err) { 3981 if (err) {
3979 dev_err(&pdev->dev, "%s: cannot assign irq %d\n", 3982 dev_err(&pdev->dev, "%s: cannot assign irq %d\n",
3980 dev->name, pdev->irq); 3983 dev->name, pdev->irq);
@@ -3982,14 +3985,17 @@ static int __devinit skge_probe(struct pci_dev *pdev,
3982 } 3985 }
3983 skge_show_addr(dev); 3986 skge_show_addr(dev);
3984 3987
3985 if (hw->ports > 1 && (dev1 = skge_devinit(hw, 1, using_dac))) { 3988 if (hw->ports > 1) {
3986 if (register_netdev(dev1) == 0) 3989 dev1 = skge_devinit(hw, 1, using_dac);
3990 if (dev1 && register_netdev(dev1) == 0)
3987 skge_show_addr(dev1); 3991 skge_show_addr(dev1);
3988 else { 3992 else {
3989 /* Failure to register second port need not be fatal */ 3993 /* Failure to register second port need not be fatal */
3990 dev_warn(&pdev->dev, "register of second port failed\n"); 3994 dev_warn(&pdev->dev, "register of second port failed\n");
3991 hw->dev[1] = NULL; 3995 hw->dev[1] = NULL;
3992 free_netdev(dev1); 3996 hw->ports = 1;
3997 if (dev1)
3998 free_netdev(dev1);
3993 } 3999 }
3994 } 4000 }
3995 pci_set_drvdata(pdev, hw); 4001 pci_set_drvdata(pdev, hw);
diff --git a/drivers/net/skge.h b/drivers/net/skge.h
index 17caccbb7685..831de1b6e96e 100644
--- a/drivers/net/skge.h
+++ b/drivers/net/skge.h
@@ -2423,6 +2423,8 @@ struct skge_hw {
2423 u16 phy_addr; 2423 u16 phy_addr;
2424 spinlock_t phy_lock; 2424 spinlock_t phy_lock;
2425 struct tasklet_struct phy_task; 2425 struct tasklet_struct phy_task;
2426
2427 char irq_name[0]; /* skge@pci:000:04:00.0 */
2426}; 2428};
2427 2429
2428enum pause_control { 2430enum pause_control {
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index ef1165718dd7..2ab5c39f33ca 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -4487,13 +4487,16 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
4487 wol_default = device_may_wakeup(&pdev->dev) ? WAKE_MAGIC : 0; 4487 wol_default = device_may_wakeup(&pdev->dev) ? WAKE_MAGIC : 0;
4488 4488
4489 err = -ENOMEM; 4489 err = -ENOMEM;
4490 hw = kzalloc(sizeof(*hw), GFP_KERNEL); 4490
4491 hw = kzalloc(sizeof(*hw) + strlen(DRV_NAME "@pci:")
4492 + strlen(pci_name(pdev)) + 1, GFP_KERNEL);
4491 if (!hw) { 4493 if (!hw) {
4492 dev_err(&pdev->dev, "cannot allocate hardware struct\n"); 4494 dev_err(&pdev->dev, "cannot allocate hardware struct\n");
4493 goto err_out_free_regions; 4495 goto err_out_free_regions;
4494 } 4496 }
4495 4497
4496 hw->pdev = pdev; 4498 hw->pdev = pdev;
4499 sprintf(hw->irq_name, DRV_NAME "@pci:%s", pci_name(pdev));
4497 4500
4498 hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000); 4501 hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000);
4499 if (!hw->regs) { 4502 if (!hw->regs) {
@@ -4539,7 +4542,7 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
4539 4542
4540 err = request_irq(pdev->irq, sky2_intr, 4543 err = request_irq(pdev->irq, sky2_intr,
4541 (hw->flags & SKY2_HW_USE_MSI) ? 0 : IRQF_SHARED, 4544 (hw->flags & SKY2_HW_USE_MSI) ? 0 : IRQF_SHARED,
4542 dev->name, hw); 4545 hw->irq_name, hw);
4543 if (err) { 4546 if (err) {
4544 dev_err(&pdev->dev, "cannot assign irq %d\n", pdev->irq); 4547 dev_err(&pdev->dev, "cannot assign irq %d\n", pdev->irq);
4545 goto err_out_unregister; 4548 goto err_out_unregister;
diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h
index e0f23a101043..ed54129698b4 100644
--- a/drivers/net/sky2.h
+++ b/drivers/net/sky2.h
@@ -2085,6 +2085,8 @@ struct sky2_hw {
2085 struct timer_list watchdog_timer; 2085 struct timer_list watchdog_timer;
2086 struct work_struct restart_work; 2086 struct work_struct restart_work;
2087 wait_queue_head_t msi_wait; 2087 wait_queue_head_t msi_wait;
2088
2089 char irq_name[0];
2088}; 2090};
2089 2091
2090static inline int sky2_is_copper(const struct sky2_hw *hw) 2092static inline int sky2_is_copper(const struct sky2_hw *hw)
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index 82b45d8797b4..524691cd9896 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -2412,7 +2412,6 @@ struct ring_info {
2412 2412
2413struct tx_ring_info { 2413struct tx_ring_info {
2414 struct sk_buff *skb; 2414 struct sk_buff *skb;
2415 u32 prev_vlan_tag;
2416}; 2415};
2417 2416
2418struct tg3_config_info { 2417struct tg3_config_info {
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index d445845f2779..8d009760277c 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -948,7 +948,7 @@ free:
948 return err; 948 return err;
949} 949}
950 950
951static void virtnet_remove(struct virtio_device *vdev) 951static void __devexit virtnet_remove(struct virtio_device *vdev)
952{ 952{
953 struct virtnet_info *vi = vdev->priv; 953 struct virtnet_info *vi = vdev->priv;
954 struct sk_buff *skb; 954 struct sk_buff *skb;
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig
index 49ea9c92b7e6..d7a764a2fc1a 100644
--- a/drivers/net/wireless/Kconfig
+++ b/drivers/net/wireless/Kconfig
@@ -31,13 +31,12 @@ config STRIP
31 ---help--- 31 ---help---
32 Say Y if you have a Metricom radio and intend to use Starmode Radio 32 Say Y if you have a Metricom radio and intend to use Starmode Radio
33 IP. STRIP is a radio protocol developed for the MosquitoNet project 33 IP. STRIP is a radio protocol developed for the MosquitoNet project
34 (on the WWW at <http://mosquitonet.stanford.edu/>) to send Internet 34 to send Internet traffic using Metricom radios. Metricom radios are
35 traffic using Metricom radios. Metricom radios are small, battery 35 small, battery powered, 100kbit/sec packet radio transceivers, about
36 powered, 100kbit/sec packet radio transceivers, about the size and 36 the size and weight of a cellular telephone. (You may also have heard
37 weight of a cellular telephone. (You may also have heard them called 37 them called "Metricom modems" but we avoid the term "modem" because
38 "Metricom modems" but we avoid the term "modem" because it misleads 38 it misleads many people into thinking that you can plug a Metricom
39 many people into thinking that you can plug a Metricom modem into a 39 modem into a phone line and use it as a modem.)
40 phone line and use it as a modem.)
41 40
42 You can use STRIP on any Linux machine with a serial port, although 41 You can use STRIP on any Linux machine with a serial port, although
43 it is obviously most useful for people with laptop computers. If you 42 it is obviously most useful for people with laptop computers. If you
diff --git a/drivers/net/wireless/ath/ar9170/phy.c b/drivers/net/wireless/ath/ar9170/phy.c
index b3e5cf3735b0..dbd488da18b1 100644
--- a/drivers/net/wireless/ath/ar9170/phy.c
+++ b/drivers/net/wireless/ath/ar9170/phy.c
@@ -1141,7 +1141,8 @@ static int ar9170_set_freq_cal_data(struct ar9170 *ar,
1141 u8 vpds[2][AR5416_PD_GAIN_ICEPTS]; 1141 u8 vpds[2][AR5416_PD_GAIN_ICEPTS];
1142 u8 pwrs[2][AR5416_PD_GAIN_ICEPTS]; 1142 u8 pwrs[2][AR5416_PD_GAIN_ICEPTS];
1143 int chain, idx, i; 1143 int chain, idx, i;
1144 u8 f; 1144 u32 phy_data = 0;
1145 u8 f, tmp;
1145 1146
1146 switch (channel->band) { 1147 switch (channel->band) {
1147 case IEEE80211_BAND_2GHZ: 1148 case IEEE80211_BAND_2GHZ:
@@ -1208,9 +1209,6 @@ static int ar9170_set_freq_cal_data(struct ar9170 *ar,
1208 } 1209 }
1209 1210
1210 for (i = 0; i < 76; i++) { 1211 for (i = 0; i < 76; i++) {
1211 u32 phy_data;
1212 u8 tmp;
1213
1214 if (i < 25) { 1212 if (i < 25) {
1215 tmp = ar9170_interpolate_val(i, &pwrs[0][0], 1213 tmp = ar9170_interpolate_val(i, &pwrs[0][0],
1216 &vpds[0][0]); 1214 &vpds[0][0]);
diff --git a/drivers/net/wireless/b43/pio.c b/drivers/net/wireless/b43/pio.c
index e96091b31499..9c1397996e0a 100644
--- a/drivers/net/wireless/b43/pio.c
+++ b/drivers/net/wireless/b43/pio.c
@@ -340,10 +340,15 @@ static u16 tx_write_2byte_queue(struct b43_pio_txqueue *q,
340 q->mmio_base + B43_PIO_TXDATA, 340 q->mmio_base + B43_PIO_TXDATA,
341 sizeof(u16)); 341 sizeof(u16));
342 if (data_len & 1) { 342 if (data_len & 1) {
343 u8 tail[2] = { 0, };
344
343 /* Write the last byte. */ 345 /* Write the last byte. */
344 ctl &= ~B43_PIO_TXCTL_WRITEHI; 346 ctl &= ~B43_PIO_TXCTL_WRITEHI;
345 b43_piotx_write16(q, B43_PIO_TXCTL, ctl); 347 b43_piotx_write16(q, B43_PIO_TXCTL, ctl);
346 b43_piotx_write16(q, B43_PIO_TXDATA, data[data_len - 1]); 348 tail[0] = data[data_len - 1];
349 ssb_block_write(dev->dev, tail, 2,
350 q->mmio_base + B43_PIO_TXDATA,
351 sizeof(u16));
347 } 352 }
348 353
349 return ctl; 354 return ctl;
@@ -386,26 +391,31 @@ static u32 tx_write_4byte_queue(struct b43_pio_txqueue *q,
386 q->mmio_base + B43_PIO8_TXDATA, 391 q->mmio_base + B43_PIO8_TXDATA,
387 sizeof(u32)); 392 sizeof(u32));
388 if (data_len & 3) { 393 if (data_len & 3) {
389 u32 value = 0; 394 u8 tail[4] = { 0, };
390 395
391 /* Write the last few bytes. */ 396 /* Write the last few bytes. */
392 ctl &= ~(B43_PIO8_TXCTL_8_15 | B43_PIO8_TXCTL_16_23 | 397 ctl &= ~(B43_PIO8_TXCTL_8_15 | B43_PIO8_TXCTL_16_23 |
393 B43_PIO8_TXCTL_24_31); 398 B43_PIO8_TXCTL_24_31);
394 data = &(data[data_len - 1]);
395 switch (data_len & 3) { 399 switch (data_len & 3) {
396 case 3: 400 case 3:
397 ctl |= B43_PIO8_TXCTL_16_23; 401 ctl |= B43_PIO8_TXCTL_16_23 | B43_PIO8_TXCTL_8_15;
398 value |= (u32)(*data) << 16; 402 tail[0] = data[data_len - 3];
399 data--; 403 tail[1] = data[data_len - 2];
404 tail[2] = data[data_len - 1];
405 break;
400 case 2: 406 case 2:
401 ctl |= B43_PIO8_TXCTL_8_15; 407 ctl |= B43_PIO8_TXCTL_8_15;
402 value |= (u32)(*data) << 8; 408 tail[0] = data[data_len - 2];
403 data--; 409 tail[1] = data[data_len - 1];
410 break;
404 case 1: 411 case 1:
405 value |= (u32)(*data); 412 tail[0] = data[data_len - 1];
413 break;
406 } 414 }
407 b43_piotx_write32(q, B43_PIO8_TXCTL, ctl); 415 b43_piotx_write32(q, B43_PIO8_TXCTL, ctl);
408 b43_piotx_write32(q, B43_PIO8_TXDATA, value); 416 ssb_block_write(dev->dev, tail, 4,
417 q->mmio_base + B43_PIO8_TXDATA,
418 sizeof(u32));
409 } 419 }
410 420
411 return ctl; 421 return ctl;
@@ -693,21 +703,25 @@ data_ready:
693 q->mmio_base + B43_PIO8_RXDATA, 703 q->mmio_base + B43_PIO8_RXDATA,
694 sizeof(u32)); 704 sizeof(u32));
695 if (len & 3) { 705 if (len & 3) {
696 u32 value; 706 u8 tail[4] = { 0, };
697 char *data;
698 707
699 /* Read the last few bytes. */ 708 /* Read the last few bytes. */
700 value = b43_piorx_read32(q, B43_PIO8_RXDATA); 709 ssb_block_read(dev->dev, tail, 4,
701 data = &(skb->data[len + padding - 1]); 710 q->mmio_base + B43_PIO8_RXDATA,
711 sizeof(u32));
702 switch (len & 3) { 712 switch (len & 3) {
703 case 3: 713 case 3:
704 *data = (value >> 16); 714 skb->data[len + padding - 3] = tail[0];
705 data--; 715 skb->data[len + padding - 2] = tail[1];
716 skb->data[len + padding - 1] = tail[2];
717 break;
706 case 2: 718 case 2:
707 *data = (value >> 8); 719 skb->data[len + padding - 2] = tail[0];
708 data--; 720 skb->data[len + padding - 1] = tail[1];
721 break;
709 case 1: 722 case 1:
710 *data = value; 723 skb->data[len + padding - 1] = tail[0];
724 break;
711 } 725 }
712 } 726 }
713 } else { 727 } else {
@@ -715,11 +729,13 @@ data_ready:
715 q->mmio_base + B43_PIO_RXDATA, 729 q->mmio_base + B43_PIO_RXDATA,
716 sizeof(u16)); 730 sizeof(u16));
717 if (len & 1) { 731 if (len & 1) {
718 u16 value; 732 u8 tail[2] = { 0, };
719 733
720 /* Read the last byte. */ 734 /* Read the last byte. */
721 value = b43_piorx_read16(q, B43_PIO_RXDATA); 735 ssb_block_read(dev->dev, tail, 2,
722 skb->data[len + padding - 1] = value; 736 q->mmio_base + B43_PIO_RXDATA,
737 sizeof(u16));
738 skb->data[len + padding - 1] = tail[0];
723 } 739 }
724 } 740 }
725 741
diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c
index a95caa014143..2716b91ba9fa 100644
--- a/drivers/net/wireless/iwlwifi/iwl-1000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-1000.c
@@ -99,6 +99,8 @@ static struct iwl_lib_ops iwl1000_lib = {
99 .setup_deferred_work = iwl5000_setup_deferred_work, 99 .setup_deferred_work = iwl5000_setup_deferred_work,
100 .is_valid_rtc_data_addr = iwl5000_hw_valid_rtc_data_addr, 100 .is_valid_rtc_data_addr = iwl5000_hw_valid_rtc_data_addr,
101 .load_ucode = iwl5000_load_ucode, 101 .load_ucode = iwl5000_load_ucode,
102 .dump_nic_event_log = iwl_dump_nic_event_log,
103 .dump_nic_error_log = iwl_dump_nic_error_log,
102 .init_alive_start = iwl5000_init_alive_start, 104 .init_alive_start = iwl5000_init_alive_start,
103 .alive_notify = iwl5000_alive_notify, 105 .alive_notify = iwl5000_alive_notify,
104 .send_tx_power = iwl5000_send_tx_power, 106 .send_tx_power = iwl5000_send_tx_power,
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c
index e9a685d8e3a1..e70c5b0af364 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945.c
@@ -2839,6 +2839,8 @@ static struct iwl_lib_ops iwl3945_lib = {
2839 .txq_free_tfd = iwl3945_hw_txq_free_tfd, 2839 .txq_free_tfd = iwl3945_hw_txq_free_tfd,
2840 .txq_init = iwl3945_hw_tx_queue_init, 2840 .txq_init = iwl3945_hw_tx_queue_init,
2841 .load_ucode = iwl3945_load_bsm, 2841 .load_ucode = iwl3945_load_bsm,
2842 .dump_nic_event_log = iwl3945_dump_nic_event_log,
2843 .dump_nic_error_log = iwl3945_dump_nic_error_log,
2842 .apm_ops = { 2844 .apm_ops = {
2843 .init = iwl3945_apm_init, 2845 .init = iwl3945_apm_init,
2844 .reset = iwl3945_apm_reset, 2846 .reset = iwl3945_apm_reset,
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.h b/drivers/net/wireless/iwlwifi/iwl-3945.h
index f24036909916..21679bf3a1aa 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.h
+++ b/drivers/net/wireless/iwlwifi/iwl-3945.h
@@ -209,6 +209,8 @@ extern int __must_check iwl3945_send_cmd(struct iwl_priv *priv,
209 struct iwl_host_cmd *cmd); 209 struct iwl_host_cmd *cmd);
210extern unsigned int iwl3945_fill_beacon_frame(struct iwl_priv *priv, 210extern unsigned int iwl3945_fill_beacon_frame(struct iwl_priv *priv,
211 struct ieee80211_hdr *hdr,int left); 211 struct ieee80211_hdr *hdr,int left);
212extern void iwl3945_dump_nic_event_log(struct iwl_priv *priv);
213extern void iwl3945_dump_nic_error_log(struct iwl_priv *priv);
212 214
213/* 215/*
214 * Currently used by iwl-3945-rs... look at restructuring so that it doesn't 216 * Currently used by iwl-3945-rs... look at restructuring so that it doesn't
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c
index 3259b8841544..a22a0501c190 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965.c
+++ b/drivers/net/wireless/iwlwifi/iwl-4965.c
@@ -2298,6 +2298,8 @@ static struct iwl_lib_ops iwl4965_lib = {
2298 .alive_notify = iwl4965_alive_notify, 2298 .alive_notify = iwl4965_alive_notify,
2299 .init_alive_start = iwl4965_init_alive_start, 2299 .init_alive_start = iwl4965_init_alive_start,
2300 .load_ucode = iwl4965_load_bsm, 2300 .load_ucode = iwl4965_load_bsm,
2301 .dump_nic_event_log = iwl_dump_nic_event_log,
2302 .dump_nic_error_log = iwl_dump_nic_error_log,
2301 .apm_ops = { 2303 .apm_ops = {
2302 .init = iwl4965_apm_init, 2304 .init = iwl4965_apm_init,
2303 .reset = iwl4965_apm_reset, 2305 .reset = iwl4965_apm_reset,
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index a6391c7fea53..eb08f4411000 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -1535,6 +1535,8 @@ struct iwl_lib_ops iwl5000_lib = {
1535 .rx_handler_setup = iwl5000_rx_handler_setup, 1535 .rx_handler_setup = iwl5000_rx_handler_setup,
1536 .setup_deferred_work = iwl5000_setup_deferred_work, 1536 .setup_deferred_work = iwl5000_setup_deferred_work,
1537 .is_valid_rtc_data_addr = iwl5000_hw_valid_rtc_data_addr, 1537 .is_valid_rtc_data_addr = iwl5000_hw_valid_rtc_data_addr,
1538 .dump_nic_event_log = iwl_dump_nic_event_log,
1539 .dump_nic_error_log = iwl_dump_nic_error_log,
1538 .load_ucode = iwl5000_load_ucode, 1540 .load_ucode = iwl5000_load_ucode,
1539 .init_alive_start = iwl5000_init_alive_start, 1541 .init_alive_start = iwl5000_init_alive_start,
1540 .alive_notify = iwl5000_alive_notify, 1542 .alive_notify = iwl5000_alive_notify,
@@ -1585,6 +1587,8 @@ static struct iwl_lib_ops iwl5150_lib = {
1585 .rx_handler_setup = iwl5000_rx_handler_setup, 1587 .rx_handler_setup = iwl5000_rx_handler_setup,
1586 .setup_deferred_work = iwl5000_setup_deferred_work, 1588 .setup_deferred_work = iwl5000_setup_deferred_work,
1587 .is_valid_rtc_data_addr = iwl5000_hw_valid_rtc_data_addr, 1589 .is_valid_rtc_data_addr = iwl5000_hw_valid_rtc_data_addr,
1590 .dump_nic_event_log = iwl_dump_nic_event_log,
1591 .dump_nic_error_log = iwl_dump_nic_error_log,
1588 .load_ucode = iwl5000_load_ucode, 1592 .load_ucode = iwl5000_load_ucode,
1589 .init_alive_start = iwl5000_init_alive_start, 1593 .init_alive_start = iwl5000_init_alive_start,
1590 .alive_notify = iwl5000_alive_notify, 1594 .alive_notify = iwl5000_alive_notify,
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
index 82b9c93dff54..c295b8ee9228 100644
--- a/drivers/net/wireless/iwlwifi/iwl-6000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
@@ -100,6 +100,8 @@ static struct iwl_lib_ops iwl6000_lib = {
100 .setup_deferred_work = iwl5000_setup_deferred_work, 100 .setup_deferred_work = iwl5000_setup_deferred_work,
101 .is_valid_rtc_data_addr = iwl5000_hw_valid_rtc_data_addr, 101 .is_valid_rtc_data_addr = iwl5000_hw_valid_rtc_data_addr,
102 .load_ucode = iwl5000_load_ucode, 102 .load_ucode = iwl5000_load_ucode,
103 .dump_nic_event_log = iwl_dump_nic_event_log,
104 .dump_nic_error_log = iwl_dump_nic_error_log,
103 .init_alive_start = iwl5000_init_alive_start, 105 .init_alive_start = iwl5000_init_alive_start,
104 .alive_notify = iwl5000_alive_notify, 106 .alive_notify = iwl5000_alive_notify,
105 .send_tx_power = iwl5000_send_tx_power, 107 .send_tx_power = iwl5000_send_tx_power,
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index 00457bff1ed1..cdc07c477457 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -1526,6 +1526,191 @@ static int iwl_read_ucode(struct iwl_priv *priv)
1526 return ret; 1526 return ret;
1527} 1527}
1528 1528
1529#ifdef CONFIG_IWLWIFI_DEBUG
1530static const char *desc_lookup_text[] = {
1531 "OK",
1532 "FAIL",
1533 "BAD_PARAM",
1534 "BAD_CHECKSUM",
1535 "NMI_INTERRUPT_WDG",
1536 "SYSASSERT",
1537 "FATAL_ERROR",
1538 "BAD_COMMAND",
1539 "HW_ERROR_TUNE_LOCK",
1540 "HW_ERROR_TEMPERATURE",
1541 "ILLEGAL_CHAN_FREQ",
1542 "VCC_NOT_STABLE",
1543 "FH_ERROR",
1544 "NMI_INTERRUPT_HOST",
1545 "NMI_INTERRUPT_ACTION_PT",
1546 "NMI_INTERRUPT_UNKNOWN",
1547 "UCODE_VERSION_MISMATCH",
1548 "HW_ERROR_ABS_LOCK",
1549 "HW_ERROR_CAL_LOCK_FAIL",
1550 "NMI_INTERRUPT_INST_ACTION_PT",
1551 "NMI_INTERRUPT_DATA_ACTION_PT",
1552 "NMI_TRM_HW_ER",
1553 "NMI_INTERRUPT_TRM",
1554 "NMI_INTERRUPT_BREAK_POINT"
1555 "DEBUG_0",
1556 "DEBUG_1",
1557 "DEBUG_2",
1558 "DEBUG_3",
1559 "UNKNOWN"
1560};
1561
1562static const char *desc_lookup(int i)
1563{
1564 int max = ARRAY_SIZE(desc_lookup_text) - 1;
1565
1566 if (i < 0 || i > max)
1567 i = max;
1568
1569 return desc_lookup_text[i];
1570}
1571
1572#define ERROR_START_OFFSET (1 * sizeof(u32))
1573#define ERROR_ELEM_SIZE (7 * sizeof(u32))
1574
1575void iwl_dump_nic_error_log(struct iwl_priv *priv)
1576{
1577 u32 data2, line;
1578 u32 desc, time, count, base, data1;
1579 u32 blink1, blink2, ilink1, ilink2;
1580
1581 if (priv->ucode_type == UCODE_INIT)
1582 base = le32_to_cpu(priv->card_alive_init.error_event_table_ptr);
1583 else
1584 base = le32_to_cpu(priv->card_alive.error_event_table_ptr);
1585
1586 if (!priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) {
1587 IWL_ERR(priv, "Not valid error log pointer 0x%08X\n", base);
1588 return;
1589 }
1590
1591 count = iwl_read_targ_mem(priv, base);
1592
1593 if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) {
1594 IWL_ERR(priv, "Start IWL Error Log Dump:\n");
1595 IWL_ERR(priv, "Status: 0x%08lX, count: %d\n",
1596 priv->status, count);
1597 }
1598
1599 desc = iwl_read_targ_mem(priv, base + 1 * sizeof(u32));
1600 blink1 = iwl_read_targ_mem(priv, base + 3 * sizeof(u32));
1601 blink2 = iwl_read_targ_mem(priv, base + 4 * sizeof(u32));
1602 ilink1 = iwl_read_targ_mem(priv, base + 5 * sizeof(u32));
1603 ilink2 = iwl_read_targ_mem(priv, base + 6 * sizeof(u32));
1604 data1 = iwl_read_targ_mem(priv, base + 7 * sizeof(u32));
1605 data2 = iwl_read_targ_mem(priv, base + 8 * sizeof(u32));
1606 line = iwl_read_targ_mem(priv, base + 9 * sizeof(u32));
1607 time = iwl_read_targ_mem(priv, base + 11 * sizeof(u32));
1608
1609 IWL_ERR(priv, "Desc Time "
1610 "data1 data2 line\n");
1611 IWL_ERR(priv, "%-28s (#%02d) %010u 0x%08X 0x%08X %u\n",
1612 desc_lookup(desc), desc, time, data1, data2, line);
1613 IWL_ERR(priv, "blink1 blink2 ilink1 ilink2\n");
1614 IWL_ERR(priv, "0x%05X 0x%05X 0x%05X 0x%05X\n", blink1, blink2,
1615 ilink1, ilink2);
1616
1617}
1618
1619#define EVENT_START_OFFSET (4 * sizeof(u32))
1620
1621/**
1622 * iwl_print_event_log - Dump error event log to syslog
1623 *
1624 */
1625static void iwl_print_event_log(struct iwl_priv *priv, u32 start_idx,
1626 u32 num_events, u32 mode)
1627{
1628 u32 i;
1629 u32 base; /* SRAM byte address of event log header */
1630 u32 event_size; /* 2 u32s, or 3 u32s if timestamp recorded */
1631 u32 ptr; /* SRAM byte address of log data */
1632 u32 ev, time, data; /* event log data */
1633
1634 if (num_events == 0)
1635 return;
1636 if (priv->ucode_type == UCODE_INIT)
1637 base = le32_to_cpu(priv->card_alive_init.log_event_table_ptr);
1638 else
1639 base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
1640
1641 if (mode == 0)
1642 event_size = 2 * sizeof(u32);
1643 else
1644 event_size = 3 * sizeof(u32);
1645
1646 ptr = base + EVENT_START_OFFSET + (start_idx * event_size);
1647
1648 /* "time" is actually "data" for mode 0 (no timestamp).
1649 * place event id # at far right for easier visual parsing. */
1650 for (i = 0; i < num_events; i++) {
1651 ev = iwl_read_targ_mem(priv, ptr);
1652 ptr += sizeof(u32);
1653 time = iwl_read_targ_mem(priv, ptr);
1654 ptr += sizeof(u32);
1655 if (mode == 0) {
1656 /* data, ev */
1657 IWL_ERR(priv, "EVT_LOG:0x%08x:%04u\n", time, ev);
1658 } else {
1659 data = iwl_read_targ_mem(priv, ptr);
1660 ptr += sizeof(u32);
1661 IWL_ERR(priv, "EVT_LOGT:%010u:0x%08x:%04u\n",
1662 time, data, ev);
1663 }
1664 }
1665}
1666
1667void iwl_dump_nic_event_log(struct iwl_priv *priv)
1668{
1669 u32 base; /* SRAM byte address of event log header */
1670 u32 capacity; /* event log capacity in # entries */
1671 u32 mode; /* 0 - no timestamp, 1 - timestamp recorded */
1672 u32 num_wraps; /* # times uCode wrapped to top of log */
1673 u32 next_entry; /* index of next entry to be written by uCode */
1674 u32 size; /* # entries that we'll print */
1675
1676 if (priv->ucode_type == UCODE_INIT)
1677 base = le32_to_cpu(priv->card_alive_init.log_event_table_ptr);
1678 else
1679 base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
1680
1681 if (!priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) {
1682 IWL_ERR(priv, "Invalid event log pointer 0x%08X\n", base);
1683 return;
1684 }
1685
1686 /* event log header */
1687 capacity = iwl_read_targ_mem(priv, base);
1688 mode = iwl_read_targ_mem(priv, base + (1 * sizeof(u32)));
1689 num_wraps = iwl_read_targ_mem(priv, base + (2 * sizeof(u32)));
1690 next_entry = iwl_read_targ_mem(priv, base + (3 * sizeof(u32)));
1691
1692 size = num_wraps ? capacity : next_entry;
1693
1694 /* bail out if nothing in log */
1695 if (size == 0) {
1696 IWL_ERR(priv, "Start IWL Event Log Dump: nothing in log\n");
1697 return;
1698 }
1699
1700 IWL_ERR(priv, "Start IWL Event Log Dump: display count %d, wraps %d\n",
1701 size, num_wraps);
1702
1703 /* if uCode has wrapped back to top of log, start at the oldest entry,
1704 * i.e the next one that uCode would fill. */
1705 if (num_wraps)
1706 iwl_print_event_log(priv, next_entry,
1707 capacity - next_entry, mode);
1708 /* (then/else) start at top of log */
1709 iwl_print_event_log(priv, 0, next_entry, mode);
1710
1711}
1712#endif
1713
1529/** 1714/**
1530 * iwl_alive_start - called after REPLY_ALIVE notification received 1715 * iwl_alive_start - called after REPLY_ALIVE notification received
1531 * from protocol/runtime uCode (initialization uCode's 1716 * from protocol/runtime uCode (initialization uCode's
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
index fd26c0dc9c54..484d5c1a7312 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.c
+++ b/drivers/net/wireless/iwlwifi/iwl-core.c
@@ -1309,189 +1309,6 @@ static void iwl_print_rx_config_cmd(struct iwl_priv *priv)
1309 IWL_DEBUG_RADIO(priv, "u8[6] bssid_addr: %pM\n", rxon->bssid_addr); 1309 IWL_DEBUG_RADIO(priv, "u8[6] bssid_addr: %pM\n", rxon->bssid_addr);
1310 IWL_DEBUG_RADIO(priv, "u16 assoc_id: 0x%x\n", le16_to_cpu(rxon->assoc_id)); 1310 IWL_DEBUG_RADIO(priv, "u16 assoc_id: 0x%x\n", le16_to_cpu(rxon->assoc_id));
1311} 1311}
1312
1313static const char *desc_lookup_text[] = {
1314 "OK",
1315 "FAIL",
1316 "BAD_PARAM",
1317 "BAD_CHECKSUM",
1318 "NMI_INTERRUPT_WDG",
1319 "SYSASSERT",
1320 "FATAL_ERROR",
1321 "BAD_COMMAND",
1322 "HW_ERROR_TUNE_LOCK",
1323 "HW_ERROR_TEMPERATURE",
1324 "ILLEGAL_CHAN_FREQ",
1325 "VCC_NOT_STABLE",
1326 "FH_ERROR",
1327 "NMI_INTERRUPT_HOST",
1328 "NMI_INTERRUPT_ACTION_PT",
1329 "NMI_INTERRUPT_UNKNOWN",
1330 "UCODE_VERSION_MISMATCH",
1331 "HW_ERROR_ABS_LOCK",
1332 "HW_ERROR_CAL_LOCK_FAIL",
1333 "NMI_INTERRUPT_INST_ACTION_PT",
1334 "NMI_INTERRUPT_DATA_ACTION_PT",
1335 "NMI_TRM_HW_ER",
1336 "NMI_INTERRUPT_TRM",
1337 "NMI_INTERRUPT_BREAK_POINT"
1338 "DEBUG_0",
1339 "DEBUG_1",
1340 "DEBUG_2",
1341 "DEBUG_3",
1342 "UNKNOWN"
1343};
1344
1345static const char *desc_lookup(int i)
1346{
1347 int max = ARRAY_SIZE(desc_lookup_text) - 1;
1348
1349 if (i < 0 || i > max)
1350 i = max;
1351
1352 return desc_lookup_text[i];
1353}
1354
1355#define ERROR_START_OFFSET (1 * sizeof(u32))
1356#define ERROR_ELEM_SIZE (7 * sizeof(u32))
1357
1358static void iwl_dump_nic_error_log(struct iwl_priv *priv)
1359{
1360 u32 data2, line;
1361 u32 desc, time, count, base, data1;
1362 u32 blink1, blink2, ilink1, ilink2;
1363
1364 if (priv->ucode_type == UCODE_INIT)
1365 base = le32_to_cpu(priv->card_alive_init.error_event_table_ptr);
1366 else
1367 base = le32_to_cpu(priv->card_alive.error_event_table_ptr);
1368
1369 if (!priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) {
1370 IWL_ERR(priv, "Not valid error log pointer 0x%08X\n", base);
1371 return;
1372 }
1373
1374 count = iwl_read_targ_mem(priv, base);
1375
1376 if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) {
1377 IWL_ERR(priv, "Start IWL Error Log Dump:\n");
1378 IWL_ERR(priv, "Status: 0x%08lX, count: %d\n",
1379 priv->status, count);
1380 }
1381
1382 desc = iwl_read_targ_mem(priv, base + 1 * sizeof(u32));
1383 blink1 = iwl_read_targ_mem(priv, base + 3 * sizeof(u32));
1384 blink2 = iwl_read_targ_mem(priv, base + 4 * sizeof(u32));
1385 ilink1 = iwl_read_targ_mem(priv, base + 5 * sizeof(u32));
1386 ilink2 = iwl_read_targ_mem(priv, base + 6 * sizeof(u32));
1387 data1 = iwl_read_targ_mem(priv, base + 7 * sizeof(u32));
1388 data2 = iwl_read_targ_mem(priv, base + 8 * sizeof(u32));
1389 line = iwl_read_targ_mem(priv, base + 9 * sizeof(u32));
1390 time = iwl_read_targ_mem(priv, base + 11 * sizeof(u32));
1391
1392 IWL_ERR(priv, "Desc Time "
1393 "data1 data2 line\n");
1394 IWL_ERR(priv, "%-28s (#%02d) %010u 0x%08X 0x%08X %u\n",
1395 desc_lookup(desc), desc, time, data1, data2, line);
1396 IWL_ERR(priv, "blink1 blink2 ilink1 ilink2\n");
1397 IWL_ERR(priv, "0x%05X 0x%05X 0x%05X 0x%05X\n", blink1, blink2,
1398 ilink1, ilink2);
1399
1400}
1401
1402#define EVENT_START_OFFSET (4 * sizeof(u32))
1403
1404/**
1405 * iwl_print_event_log - Dump error event log to syslog
1406 *
1407 */
1408static void iwl_print_event_log(struct iwl_priv *priv, u32 start_idx,
1409 u32 num_events, u32 mode)
1410{
1411 u32 i;
1412 u32 base; /* SRAM byte address of event log header */
1413 u32 event_size; /* 2 u32s, or 3 u32s if timestamp recorded */
1414 u32 ptr; /* SRAM byte address of log data */
1415 u32 ev, time, data; /* event log data */
1416
1417 if (num_events == 0)
1418 return;
1419 if (priv->ucode_type == UCODE_INIT)
1420 base = le32_to_cpu(priv->card_alive_init.log_event_table_ptr);
1421 else
1422 base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
1423
1424 if (mode == 0)
1425 event_size = 2 * sizeof(u32);
1426 else
1427 event_size = 3 * sizeof(u32);
1428
1429 ptr = base + EVENT_START_OFFSET + (start_idx * event_size);
1430
1431 /* "time" is actually "data" for mode 0 (no timestamp).
1432 * place event id # at far right for easier visual parsing. */
1433 for (i = 0; i < num_events; i++) {
1434 ev = iwl_read_targ_mem(priv, ptr);
1435 ptr += sizeof(u32);
1436 time = iwl_read_targ_mem(priv, ptr);
1437 ptr += sizeof(u32);
1438 if (mode == 0) {
1439 /* data, ev */
1440 IWL_ERR(priv, "EVT_LOG:0x%08x:%04u\n", time, ev);
1441 } else {
1442 data = iwl_read_targ_mem(priv, ptr);
1443 ptr += sizeof(u32);
1444 IWL_ERR(priv, "EVT_LOGT:%010u:0x%08x:%04u\n",
1445 time, data, ev);
1446 }
1447 }
1448}
1449
1450void iwl_dump_nic_event_log(struct iwl_priv *priv)
1451{
1452 u32 base; /* SRAM byte address of event log header */
1453 u32 capacity; /* event log capacity in # entries */
1454 u32 mode; /* 0 - no timestamp, 1 - timestamp recorded */
1455 u32 num_wraps; /* # times uCode wrapped to top of log */
1456 u32 next_entry; /* index of next entry to be written by uCode */
1457 u32 size; /* # entries that we'll print */
1458
1459 if (priv->ucode_type == UCODE_INIT)
1460 base = le32_to_cpu(priv->card_alive_init.log_event_table_ptr);
1461 else
1462 base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
1463
1464 if (!priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) {
1465 IWL_ERR(priv, "Invalid event log pointer 0x%08X\n", base);
1466 return;
1467 }
1468
1469 /* event log header */
1470 capacity = iwl_read_targ_mem(priv, base);
1471 mode = iwl_read_targ_mem(priv, base + (1 * sizeof(u32)));
1472 num_wraps = iwl_read_targ_mem(priv, base + (2 * sizeof(u32)));
1473 next_entry = iwl_read_targ_mem(priv, base + (3 * sizeof(u32)));
1474
1475 size = num_wraps ? capacity : next_entry;
1476
1477 /* bail out if nothing in log */
1478 if (size == 0) {
1479 IWL_ERR(priv, "Start IWL Event Log Dump: nothing in log\n");
1480 return;
1481 }
1482
1483 IWL_ERR(priv, "Start IWL Event Log Dump: display count %d, wraps %d\n",
1484 size, num_wraps);
1485
1486 /* if uCode has wrapped back to top of log, start at the oldest entry,
1487 * i.e the next one that uCode would fill. */
1488 if (num_wraps)
1489 iwl_print_event_log(priv, next_entry,
1490 capacity - next_entry, mode);
1491 /* (then/else) start at top of log */
1492 iwl_print_event_log(priv, 0, next_entry, mode);
1493
1494}
1495#endif 1312#endif
1496/** 1313/**
1497 * iwl_irq_handle_error - called for HW or SW error interrupt from card 1314 * iwl_irq_handle_error - called for HW or SW error interrupt from card
@@ -1506,8 +1323,8 @@ void iwl_irq_handle_error(struct iwl_priv *priv)
1506 1323
1507#ifdef CONFIG_IWLWIFI_DEBUG 1324#ifdef CONFIG_IWLWIFI_DEBUG
1508 if (iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS) { 1325 if (iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS) {
1509 iwl_dump_nic_error_log(priv); 1326 priv->cfg->ops->lib->dump_nic_error_log(priv);
1510 iwl_dump_nic_event_log(priv); 1327 priv->cfg->ops->lib->dump_nic_event_log(priv);
1511 iwl_print_rx_config_cmd(priv); 1328 iwl_print_rx_config_cmd(priv);
1512 } 1329 }
1513#endif 1330#endif
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h
index 7ff9ffb2b702..e50103a956b1 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.h
+++ b/drivers/net/wireless/iwlwifi/iwl-core.h
@@ -166,6 +166,8 @@ struct iwl_lib_ops {
166 int (*is_valid_rtc_data_addr)(u32 addr); 166 int (*is_valid_rtc_data_addr)(u32 addr);
167 /* 1st ucode load */ 167 /* 1st ucode load */
168 int (*load_ucode)(struct iwl_priv *priv); 168 int (*load_ucode)(struct iwl_priv *priv);
169 void (*dump_nic_event_log)(struct iwl_priv *priv);
170 void (*dump_nic_error_log)(struct iwl_priv *priv);
169 /* power management */ 171 /* power management */
170 struct iwl_apm_ops apm_ops; 172 struct iwl_apm_ops apm_ops;
171 173
@@ -540,7 +542,19 @@ int iwl_pci_resume(struct pci_dev *pdev);
540/***************************************************** 542/*****************************************************
541* Error Handling Debugging 543* Error Handling Debugging
542******************************************************/ 544******************************************************/
545#ifdef CONFIG_IWLWIFI_DEBUG
543void iwl_dump_nic_event_log(struct iwl_priv *priv); 546void iwl_dump_nic_event_log(struct iwl_priv *priv);
547void iwl_dump_nic_error_log(struct iwl_priv *priv);
548#else
549static inline void iwl_dump_nic_event_log(struct iwl_priv *priv)
550{
551}
552
553static inline void iwl_dump_nic_error_log(struct iwl_priv *priv)
554{
555}
556#endif
557
544void iwl_clear_isr_stats(struct iwl_priv *priv); 558void iwl_clear_isr_stats(struct iwl_priv *priv);
545 559
546/***************************************************** 560/*****************************************************
diff --git a/drivers/net/wireless/iwlwifi/iwl-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
index fb844859a443..a198bcf61022 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debugfs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
@@ -410,7 +410,7 @@ static ssize_t iwl_dbgfs_nvm_read(struct file *file,
410 pos += scnprintf(buf + pos, buf_size - pos, "0x%.4x ", ofs); 410 pos += scnprintf(buf + pos, buf_size - pos, "0x%.4x ", ofs);
411 hex_dump_to_buffer(ptr + ofs, 16 , 16, 2, buf + pos, 411 hex_dump_to_buffer(ptr + ofs, 16 , 16, 2, buf + pos,
412 buf_size - pos, 0); 412 buf_size - pos, 0);
413 pos += strlen(buf); 413 pos += strlen(buf + pos);
414 if (buf_size - pos > 0) 414 if (buf_size - pos > 0)
415 buf[pos++] = '\n'; 415 buf[pos++] = '\n';
416 } 416 }
@@ -436,7 +436,7 @@ static ssize_t iwl_dbgfs_log_event_write(struct file *file,
436 if (sscanf(buf, "%d", &event_log_flag) != 1) 436 if (sscanf(buf, "%d", &event_log_flag) != 1)
437 return -EFAULT; 437 return -EFAULT;
438 if (event_log_flag == 1) 438 if (event_log_flag == 1)
439 iwl_dump_nic_event_log(priv); 439 priv->cfg->ops->lib->dump_nic_event_log(priv);
440 440
441 return count; 441 return count;
442} 442}
@@ -909,7 +909,7 @@ static ssize_t iwl_dbgfs_traffic_log_read(struct file *file,
909 "0x%.4x ", ofs); 909 "0x%.4x ", ofs);
910 hex_dump_to_buffer(ptr + ofs, 16, 16, 2, 910 hex_dump_to_buffer(ptr + ofs, 16, 16, 2,
911 buf + pos, bufsz - pos, 0); 911 buf + pos, bufsz - pos, 0);
912 pos += strlen(buf); 912 pos += strlen(buf + pos);
913 if (bufsz - pos > 0) 913 if (bufsz - pos > 0)
914 buf[pos++] = '\n'; 914 buf[pos++] = '\n';
915 } 915 }
@@ -932,7 +932,7 @@ static ssize_t iwl_dbgfs_traffic_log_read(struct file *file,
932 "0x%.4x ", ofs); 932 "0x%.4x ", ofs);
933 hex_dump_to_buffer(ptr + ofs, 16, 16, 2, 933 hex_dump_to_buffer(ptr + ofs, 16, 16, 2,
934 buf + pos, bufsz - pos, 0); 934 buf + pos, bufsz - pos, 0);
935 pos += strlen(buf); 935 pos += strlen(buf + pos);
936 if (bufsz - pos > 0) 936 if (bufsz - pos > 0)
937 buf[pos++] = '\n'; 937 buf[pos++] = '\n';
938 } 938 }
diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c
index a7422e52d883..c18907544701 100644
--- a/drivers/net/wireless/iwlwifi/iwl-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-tx.c
@@ -197,6 +197,12 @@ void iwl_cmd_queue_free(struct iwl_priv *priv)
197 pci_free_consistent(dev, priv->hw_params.tfd_size * 197 pci_free_consistent(dev, priv->hw_params.tfd_size *
198 txq->q.n_bd, txq->tfds, txq->q.dma_addr); 198 txq->q.n_bd, txq->tfds, txq->q.dma_addr);
199 199
200 /* deallocate arrays */
201 kfree(txq->cmd);
202 kfree(txq->meta);
203 txq->cmd = NULL;
204 txq->meta = NULL;
205
200 /* 0-fill queue descriptor structure */ 206 /* 0-fill queue descriptor structure */
201 memset(txq, 0, sizeof(*txq)); 207 memset(txq, 0, sizeof(*txq));
202} 208}
diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
index 4f2d43937283..c390dbd877e4 100644
--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
+++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
@@ -1481,6 +1481,7 @@ static inline void iwl_synchronize_irq(struct iwl_priv *priv)
1481 tasklet_kill(&priv->irq_tasklet); 1481 tasklet_kill(&priv->irq_tasklet);
1482} 1482}
1483 1483
1484#ifdef CONFIG_IWLWIFI_DEBUG
1484static const char *desc_lookup(int i) 1485static const char *desc_lookup(int i)
1485{ 1486{
1486 switch (i) { 1487 switch (i) {
@@ -1504,7 +1505,7 @@ static const char *desc_lookup(int i)
1504#define ERROR_START_OFFSET (1 * sizeof(u32)) 1505#define ERROR_START_OFFSET (1 * sizeof(u32))
1505#define ERROR_ELEM_SIZE (7 * sizeof(u32)) 1506#define ERROR_ELEM_SIZE (7 * sizeof(u32))
1506 1507
1507static void iwl3945_dump_nic_error_log(struct iwl_priv *priv) 1508void iwl3945_dump_nic_error_log(struct iwl_priv *priv)
1508{ 1509{
1509 u32 i; 1510 u32 i;
1510 u32 desc, time, count, base, data1; 1511 u32 desc, time, count, base, data1;
@@ -1598,7 +1599,7 @@ static void iwl3945_print_event_log(struct iwl_priv *priv, u32 start_idx,
1598 } 1599 }
1599} 1600}
1600 1601
1601static void iwl3945_dump_nic_event_log(struct iwl_priv *priv) 1602void iwl3945_dump_nic_event_log(struct iwl_priv *priv)
1602{ 1603{
1603 u32 base; /* SRAM byte address of event log header */ 1604 u32 base; /* SRAM byte address of event log header */
1604 u32 capacity; /* event log capacity in # entries */ 1605 u32 capacity; /* event log capacity in # entries */
@@ -1640,6 +1641,16 @@ static void iwl3945_dump_nic_event_log(struct iwl_priv *priv)
1640 iwl3945_print_event_log(priv, 0, next_entry, mode); 1641 iwl3945_print_event_log(priv, 0, next_entry, mode);
1641 1642
1642} 1643}
1644#else
1645void iwl3945_dump_nic_event_log(struct iwl_priv *priv)
1646{
1647}
1648
1649void iwl3945_dump_nic_error_log(struct iwl_priv *priv)
1650{
1651}
1652
1653#endif
1643 1654
1644static void iwl3945_irq_tasklet(struct iwl_priv *priv) 1655static void iwl3945_irq_tasklet(struct iwl_priv *priv)
1645{ 1656{
@@ -3683,21 +3694,6 @@ static ssize_t dump_error_log(struct device *d,
3683 3694
3684static DEVICE_ATTR(dump_errors, S_IWUSR, NULL, dump_error_log); 3695static DEVICE_ATTR(dump_errors, S_IWUSR, NULL, dump_error_log);
3685 3696
3686static ssize_t dump_event_log(struct device *d,
3687 struct device_attribute *attr,
3688 const char *buf, size_t count)
3689{
3690 struct iwl_priv *priv = dev_get_drvdata(d);
3691 char *p = (char *)buf;
3692
3693 if (p[0] == '1')
3694 iwl3945_dump_nic_event_log(priv);
3695
3696 return strnlen(buf, count);
3697}
3698
3699static DEVICE_ATTR(dump_events, S_IWUSR, NULL, dump_event_log);
3700
3701/***************************************************************************** 3697/*****************************************************************************
3702 * 3698 *
3703 * driver setup and tear down 3699 * driver setup and tear down
@@ -3742,7 +3738,6 @@ static struct attribute *iwl3945_sysfs_entries[] = {
3742 &dev_attr_antenna.attr, 3738 &dev_attr_antenna.attr,
3743 &dev_attr_channels.attr, 3739 &dev_attr_channels.attr,
3744 &dev_attr_dump_errors.attr, 3740 &dev_attr_dump_errors.attr,
3745 &dev_attr_dump_events.attr,
3746 &dev_attr_flags.attr, 3741 &dev_attr_flags.attr,
3747 &dev_attr_filter_flags.attr, 3742 &dev_attr_filter_flags.attr,
3748#ifdef CONFIG_IWL3945_SPECTRUM_MEASUREMENT 3743#ifdef CONFIG_IWL3945_SPECTRUM_MEASUREMENT
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 896f532182f0..38cfd79e0590 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -631,6 +631,9 @@ static void mac80211_hwsim_bss_info_changed(struct ieee80211_hw *hw,
631 data->beacon_int = 1024 * info->beacon_int / 1000 * HZ / 1000; 631 data->beacon_int = 1024 * info->beacon_int / 1000 * HZ / 1000;
632 if (WARN_ON(!data->beacon_int)) 632 if (WARN_ON(!data->beacon_int))
633 data->beacon_int = 1; 633 data->beacon_int = 1;
634 if (data->started)
635 mod_timer(&data->beacon_timer,
636 jiffies + data->beacon_int);
634 } 637 }
635 638
636 if (changed & BSS_CHANGED_ERP_CTS_PROT) { 639 if (changed & BSS_CHANGED_ERP_CTS_PROT) {
diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c
index 1cbd9b4a3efc..b8f5ee33445e 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.c
+++ b/drivers/net/wireless/rt2x00/rt73usb.c
@@ -2381,6 +2381,7 @@ static struct usb_device_id rt73usb_device_table[] = {
2381 /* Huawei-3Com */ 2381 /* Huawei-3Com */
2382 { USB_DEVICE(0x1472, 0x0009), USB_DEVICE_DATA(&rt73usb_ops) }, 2382 { USB_DEVICE(0x1472, 0x0009), USB_DEVICE_DATA(&rt73usb_ops) },
2383 /* Hercules */ 2383 /* Hercules */
2384 { USB_DEVICE(0x06f8, 0xe002), USB_DEVICE_DATA(&rt73usb_ops) },
2384 { USB_DEVICE(0x06f8, 0xe010), USB_DEVICE_DATA(&rt73usb_ops) }, 2385 { USB_DEVICE(0x06f8, 0xe010), USB_DEVICE_DATA(&rt73usb_ops) },
2385 { USB_DEVICE(0x06f8, 0xe020), USB_DEVICE_DATA(&rt73usb_ops) }, 2386 { USB_DEVICE(0x06f8, 0xe020), USB_DEVICE_DATA(&rt73usb_ops) },
2386 /* Linksys */ 2387 /* Linksys */
diff --git a/drivers/pcmcia/Kconfig b/drivers/pcmcia/Kconfig
index fbf965b31c14..17f38a781d47 100644
--- a/drivers/pcmcia/Kconfig
+++ b/drivers/pcmcia/Kconfig
@@ -192,6 +192,10 @@ config PCMCIA_AU1X00
192 tristate "Au1x00 pcmcia support" 192 tristate "Au1x00 pcmcia support"
193 depends on SOC_AU1X00 && PCMCIA 193 depends on SOC_AU1X00 && PCMCIA
194 194
195config PCMCIA_BCM63XX
196 tristate "bcm63xx pcmcia support"
197 depends on BCM63XX && PCMCIA
198
195config PCMCIA_SA1100 199config PCMCIA_SA1100
196 tristate "SA1100 support" 200 tristate "SA1100 support"
197 depends on ARM && ARCH_SA1100 && PCMCIA 201 depends on ARM && ARCH_SA1100 && PCMCIA
diff --git a/drivers/pcmcia/Makefile b/drivers/pcmcia/Makefile
index 3247828aa203..a03a38acd77d 100644
--- a/drivers/pcmcia/Makefile
+++ b/drivers/pcmcia/Makefile
@@ -27,6 +27,7 @@ obj-$(CONFIG_PCMCIA_SA1111) += sa11xx_core.o sa1111_cs.o
27obj-$(CONFIG_M32R_PCC) += m32r_pcc.o 27obj-$(CONFIG_M32R_PCC) += m32r_pcc.o
28obj-$(CONFIG_M32R_CFC) += m32r_cfc.o 28obj-$(CONFIG_M32R_CFC) += m32r_cfc.o
29obj-$(CONFIG_PCMCIA_AU1X00) += au1x00_ss.o 29obj-$(CONFIG_PCMCIA_AU1X00) += au1x00_ss.o
30obj-$(CONFIG_PCMCIA_BCM63XX) += bcm63xx_pcmcia.o
30obj-$(CONFIG_PCMCIA_VRC4171) += vrc4171_card.o 31obj-$(CONFIG_PCMCIA_VRC4171) += vrc4171_card.o
31obj-$(CONFIG_PCMCIA_VRC4173) += vrc4173_cardu.o 32obj-$(CONFIG_PCMCIA_VRC4173) += vrc4173_cardu.o
32obj-$(CONFIG_OMAP_CF) += omap_cf.o 33obj-$(CONFIG_OMAP_CF) += omap_cf.o
diff --git a/drivers/pcmcia/at91_cf.c b/drivers/pcmcia/at91_cf.c
index 9e1140f085fd..e1dccedc5960 100644
--- a/drivers/pcmcia/at91_cf.c
+++ b/drivers/pcmcia/at91_cf.c
@@ -363,7 +363,7 @@ static int at91_cf_suspend(struct platform_device *pdev, pm_message_t mesg)
363 struct at91_cf_socket *cf = platform_get_drvdata(pdev); 363 struct at91_cf_socket *cf = platform_get_drvdata(pdev);
364 struct at91_cf_data *board = cf->board; 364 struct at91_cf_data *board = cf->board;
365 365
366 pcmcia_socket_dev_suspend(&pdev->dev, mesg); 366 pcmcia_socket_dev_suspend(&pdev->dev);
367 if (device_may_wakeup(&pdev->dev)) { 367 if (device_may_wakeup(&pdev->dev)) {
368 enable_irq_wake(board->det_pin); 368 enable_irq_wake(board->det_pin);
369 if (board->irq_pin) 369 if (board->irq_pin)
diff --git a/drivers/pcmcia/au1000_generic.c b/drivers/pcmcia/au1000_generic.c
index 90013341cd5f..02088704ac2c 100644
--- a/drivers/pcmcia/au1000_generic.c
+++ b/drivers/pcmcia/au1000_generic.c
@@ -515,7 +515,7 @@ static int au1x00_drv_pcmcia_probe(struct platform_device *dev)
515static int au1x00_drv_pcmcia_suspend(struct platform_device *dev, 515static int au1x00_drv_pcmcia_suspend(struct platform_device *dev,
516 pm_message_t state) 516 pm_message_t state)
517{ 517{
518 return pcmcia_socket_dev_suspend(&dev->dev, state); 518 return pcmcia_socket_dev_suspend(&dev->dev);
519} 519}
520 520
521static int au1x00_drv_pcmcia_resume(struct platform_device *dev) 521static int au1x00_drv_pcmcia_resume(struct platform_device *dev)
diff --git a/drivers/pcmcia/bcm63xx_pcmcia.c b/drivers/pcmcia/bcm63xx_pcmcia.c
new file mode 100644
index 000000000000..bc88a3b19bb3
--- /dev/null
+++ b/drivers/pcmcia/bcm63xx_pcmcia.c
@@ -0,0 +1,536 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
7 */
8
9#include <linux/kernel.h>
10#include <linux/module.h>
11#include <linux/ioport.h>
12#include <linux/timer.h>
13#include <linux/platform_device.h>
14#include <linux/delay.h>
15#include <linux/pci.h>
16#include <linux/gpio.h>
17
18#include <bcm63xx_regs.h>
19#include <bcm63xx_io.h>
20#include "bcm63xx_pcmcia.h"
21
22#define PFX "bcm63xx_pcmcia: "
23
24#ifdef CONFIG_CARDBUS
25/* if cardbus is used, platform device needs reference to actual pci
26 * device */
27static struct pci_dev *bcm63xx_cb_dev;
28#endif
29
30/*
31 * read/write helper for pcmcia regs
32 */
33static inline u32 pcmcia_readl(struct bcm63xx_pcmcia_socket *skt, u32 off)
34{
35 return bcm_readl(skt->base + off);
36}
37
38static inline void pcmcia_writel(struct bcm63xx_pcmcia_socket *skt,
39 u32 val, u32 off)
40{
41 bcm_writel(val, skt->base + off);
42}
43
44/*
45 * This callback should (re-)initialise the socket, turn on status
46 * interrupts and PCMCIA bus, and wait for power to stabilise so that
47 * the card status signals report correctly.
48 *
49 * Hardware cannot do that.
50 */
51static int bcm63xx_pcmcia_sock_init(struct pcmcia_socket *sock)
52{
53 return 0;
54}
55
56/*
57 * This callback should remove power on the socket, disable IRQs from
58 * the card, turn off status interrupts, and disable the PCMCIA bus.
59 *
60 * Hardware cannot do that.
61 */
62static int bcm63xx_pcmcia_suspend(struct pcmcia_socket *sock)
63{
64 return 0;
65}
66
67/*
68 * Implements the set_socket() operation for the in-kernel PCMCIA
69 * service (formerly SS_SetSocket in Card Services). We more or
70 * less punt all of this work and let the kernel handle the details
71 * of power configuration, reset, &c. We also record the value of
72 * `state' in order to regurgitate it to the PCMCIA core later.
73 */
74static int bcm63xx_pcmcia_set_socket(struct pcmcia_socket *sock,
75 socket_state_t *state)
76{
77 struct bcm63xx_pcmcia_socket *skt;
78 unsigned long flags;
79 u32 val;
80
81 skt = sock->driver_data;
82
83 spin_lock_irqsave(&skt->lock, flags);
84
85 /* note: hardware cannot control socket power, so we will
86 * always report SS_POWERON */
87
88 /* apply socket reset */
89 val = pcmcia_readl(skt, PCMCIA_C1_REG);
90 if (state->flags & SS_RESET)
91 val |= PCMCIA_C1_RESET_MASK;
92 else
93 val &= ~PCMCIA_C1_RESET_MASK;
94
95 /* reverse reset logic for cardbus card */
96 if (skt->card_detected && (skt->card_type & CARD_CARDBUS))
97 val ^= PCMCIA_C1_RESET_MASK;
98
99 pcmcia_writel(skt, val, PCMCIA_C1_REG);
100
101 /* keep requested state for event reporting */
102 skt->requested_state = *state;
103
104 spin_unlock_irqrestore(&skt->lock, flags);
105
106 return 0;
107}
108
109/*
110 * identity cardtype from VS[12] input, CD[12] input while only VS2 is
111 * floating, and CD[12] input while only VS1 is floating
112 */
113enum {
114 IN_VS1 = (1 << 0),
115 IN_VS2 = (1 << 1),
116 IN_CD1_VS2H = (1 << 2),
117 IN_CD2_VS2H = (1 << 3),
118 IN_CD1_VS1H = (1 << 4),
119 IN_CD2_VS1H = (1 << 5),
120};
121
122static const u8 vscd_to_cardtype[] = {
123
124 /* VS1 float, VS2 float */
125 [IN_VS1 | IN_VS2] = (CARD_PCCARD | CARD_5V),
126
127 /* VS1 grounded, VS2 float */
128 [IN_VS2] = (CARD_PCCARD | CARD_5V | CARD_3V),
129
130 /* VS1 grounded, VS2 grounded */
131 [0] = (CARD_PCCARD | CARD_5V | CARD_3V | CARD_XV),
132
133 /* VS1 tied to CD1, VS2 float */
134 [IN_VS1 | IN_VS2 | IN_CD1_VS1H] = (CARD_CARDBUS | CARD_3V),
135
136 /* VS1 grounded, VS2 tied to CD2 */
137 [IN_VS2 | IN_CD2_VS2H] = (CARD_CARDBUS | CARD_3V | CARD_XV),
138
139 /* VS1 tied to CD2, VS2 grounded */
140 [IN_VS1 | IN_CD2_VS1H] = (CARD_CARDBUS | CARD_3V | CARD_XV | CARD_YV),
141
142 /* VS1 float, VS2 grounded */
143 [IN_VS1] = (CARD_PCCARD | CARD_XV),
144
145 /* VS1 float, VS2 tied to CD2 */
146 [IN_VS1 | IN_VS2 | IN_CD2_VS2H] = (CARD_CARDBUS | CARD_3V),
147
148 /* VS1 float, VS2 tied to CD1 */
149 [IN_VS1 | IN_VS2 | IN_CD1_VS2H] = (CARD_CARDBUS | CARD_XV | CARD_YV),
150
151 /* VS1 tied to CD2, VS2 float */
152 [IN_VS1 | IN_VS2 | IN_CD2_VS1H] = (CARD_CARDBUS | CARD_YV),
153
154 /* VS2 grounded, VS1 is tied to CD1, CD2 is grounded */
155 [IN_VS1 | IN_CD1_VS1H] = 0, /* ignore cardbay */
156};
157
158/*
159 * poll hardware to check card insertion status
160 */
161static unsigned int __get_socket_status(struct bcm63xx_pcmcia_socket *skt)
162{
163 unsigned int stat;
164 u32 val;
165
166 stat = 0;
167
168 /* check CD for card presence */
169 val = pcmcia_readl(skt, PCMCIA_C1_REG);
170
171 if (!(val & PCMCIA_C1_CD1_MASK) && !(val & PCMCIA_C1_CD2_MASK))
172 stat |= SS_DETECT;
173
174 /* if new insertion, detect cardtype */
175 if ((stat & SS_DETECT) && !skt->card_detected) {
176 unsigned int stat = 0;
177
178 /* float VS1, float VS2 */
179 val |= PCMCIA_C1_VS1OE_MASK;
180 val |= PCMCIA_C1_VS2OE_MASK;
181 pcmcia_writel(skt, val, PCMCIA_C1_REG);
182
183 /* wait for output to stabilize and read VS[12] */
184 udelay(10);
185 val = pcmcia_readl(skt, PCMCIA_C1_REG);
186 stat |= (val & PCMCIA_C1_VS1_MASK) ? IN_VS1 : 0;
187 stat |= (val & PCMCIA_C1_VS2_MASK) ? IN_VS2 : 0;
188
189 /* drive VS1 low, float VS2 */
190 val &= ~PCMCIA_C1_VS1OE_MASK;
191 val |= PCMCIA_C1_VS2OE_MASK;
192 pcmcia_writel(skt, val, PCMCIA_C1_REG);
193
194 /* wait for output to stabilize and read CD[12] */
195 udelay(10);
196 val = pcmcia_readl(skt, PCMCIA_C1_REG);
197 stat |= (val & PCMCIA_C1_CD1_MASK) ? IN_CD1_VS2H : 0;
198 stat |= (val & PCMCIA_C1_CD2_MASK) ? IN_CD2_VS2H : 0;
199
200 /* float VS1, drive VS2 low */
201 val |= PCMCIA_C1_VS1OE_MASK;
202 val &= ~PCMCIA_C1_VS2OE_MASK;
203 pcmcia_writel(skt, val, PCMCIA_C1_REG);
204
205 /* wait for output to stabilize and read CD[12] */
206 udelay(10);
207 val = pcmcia_readl(skt, PCMCIA_C1_REG);
208 stat |= (val & PCMCIA_C1_CD1_MASK) ? IN_CD1_VS1H : 0;
209 stat |= (val & PCMCIA_C1_CD2_MASK) ? IN_CD2_VS1H : 0;
210
211 /* guess cardtype from all this */
212 skt->card_type = vscd_to_cardtype[stat];
213 if (!skt->card_type)
214 dev_err(&skt->socket.dev, "unsupported card type\n");
215
216 /* drive both VS pin to 0 again */
217 val &= ~(PCMCIA_C1_VS1OE_MASK | PCMCIA_C1_VS2OE_MASK);
218
219 /* enable correct logic */
220 val &= ~(PCMCIA_C1_EN_PCMCIA_MASK | PCMCIA_C1_EN_CARDBUS_MASK);
221 if (skt->card_type & CARD_PCCARD)
222 val |= PCMCIA_C1_EN_PCMCIA_MASK;
223 else
224 val |= PCMCIA_C1_EN_CARDBUS_MASK;
225
226 pcmcia_writel(skt, val, PCMCIA_C1_REG);
227 }
228 skt->card_detected = (stat & SS_DETECT) ? 1 : 0;
229
230 /* report card type/voltage */
231 if (skt->card_type & CARD_CARDBUS)
232 stat |= SS_CARDBUS;
233 if (skt->card_type & CARD_3V)
234 stat |= SS_3VCARD;
235 if (skt->card_type & CARD_XV)
236 stat |= SS_XVCARD;
237 stat |= SS_POWERON;
238
239 if (gpio_get_value(skt->pd->ready_gpio))
240 stat |= SS_READY;
241
242 return stat;
243}
244
245/*
246 * core request to get current socket status
247 */
248static int bcm63xx_pcmcia_get_status(struct pcmcia_socket *sock,
249 unsigned int *status)
250{
251 struct bcm63xx_pcmcia_socket *skt;
252
253 skt = sock->driver_data;
254
255 spin_lock_bh(&skt->lock);
256 *status = __get_socket_status(skt);
257 spin_unlock_bh(&skt->lock);
258
259 return 0;
260}
261
262/*
263 * socket polling timer callback
264 */
265static void bcm63xx_pcmcia_poll(unsigned long data)
266{
267 struct bcm63xx_pcmcia_socket *skt;
268 unsigned int stat, events;
269
270 skt = (struct bcm63xx_pcmcia_socket *)data;
271
272 spin_lock_bh(&skt->lock);
273
274 stat = __get_socket_status(skt);
275
276 /* keep only changed bits, and mask with required one from the
277 * core */
278 events = (stat ^ skt->old_status) & skt->requested_state.csc_mask;
279 skt->old_status = stat;
280 spin_unlock_bh(&skt->lock);
281
282 if (events)
283 pcmcia_parse_events(&skt->socket, events);
284
285 mod_timer(&skt->timer,
286 jiffies + msecs_to_jiffies(BCM63XX_PCMCIA_POLL_RATE));
287}
288
289static int bcm63xx_pcmcia_set_io_map(struct pcmcia_socket *sock,
290 struct pccard_io_map *map)
291{
292 /* this doesn't seem to be called by pcmcia layer if static
293 * mapping is used */
294 return 0;
295}
296
297static int bcm63xx_pcmcia_set_mem_map(struct pcmcia_socket *sock,
298 struct pccard_mem_map *map)
299{
300 struct bcm63xx_pcmcia_socket *skt;
301 struct resource *res;
302
303 skt = sock->driver_data;
304 if (map->flags & MAP_ATTRIB)
305 res = skt->attr_res;
306 else
307 res = skt->common_res;
308
309 map->static_start = res->start + map->card_start;
310 return 0;
311}
312
313static struct pccard_operations bcm63xx_pcmcia_operations = {
314 .init = bcm63xx_pcmcia_sock_init,
315 .suspend = bcm63xx_pcmcia_suspend,
316 .get_status = bcm63xx_pcmcia_get_status,
317 .set_socket = bcm63xx_pcmcia_set_socket,
318 .set_io_map = bcm63xx_pcmcia_set_io_map,
319 .set_mem_map = bcm63xx_pcmcia_set_mem_map,
320};
321
322/*
323 * register pcmcia socket to core
324 */
325static int __devinit bcm63xx_drv_pcmcia_probe(struct platform_device *pdev)
326{
327 struct bcm63xx_pcmcia_socket *skt;
328 struct pcmcia_socket *sock;
329 struct resource *res, *irq_res;
330 unsigned int regmem_size = 0, iomem_size = 0;
331 u32 val;
332 int ret;
333
334 skt = kzalloc(sizeof(*skt), GFP_KERNEL);
335 if (!skt)
336 return -ENOMEM;
337 spin_lock_init(&skt->lock);
338 sock = &skt->socket;
339 sock->driver_data = skt;
340
341 /* make sure we have all resources we need */
342 skt->common_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
343 skt->attr_res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
344 irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
345 skt->pd = pdev->dev.platform_data;
346 if (!skt->common_res || !skt->attr_res || !irq_res || !skt->pd) {
347 ret = -EINVAL;
348 goto err;
349 }
350
351 /* remap pcmcia registers */
352 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
353 regmem_size = resource_size(res);
354 if (!request_mem_region(res->start, regmem_size, "bcm63xx_pcmcia")) {
355 ret = -EINVAL;
356 goto err;
357 }
358 skt->reg_res = res;
359
360 skt->base = ioremap(res->start, regmem_size);
361 if (!skt->base) {
362 ret = -ENOMEM;
363 goto err;
364 }
365
366 /* remap io registers */
367 res = platform_get_resource(pdev, IORESOURCE_MEM, 3);
368 iomem_size = resource_size(res);
369 skt->io_base = ioremap(res->start, iomem_size);
370 if (!skt->io_base) {
371 ret = -ENOMEM;
372 goto err;
373 }
374
375 /* resources are static */
376 sock->resource_ops = &pccard_static_ops;
377 sock->ops = &bcm63xx_pcmcia_operations;
378 sock->owner = THIS_MODULE;
379 sock->dev.parent = &pdev->dev;
380 sock->features = SS_CAP_STATIC_MAP | SS_CAP_PCCARD;
381 sock->io_offset = (unsigned long)skt->io_base;
382 sock->pci_irq = irq_res->start;
383
384#ifdef CONFIG_CARDBUS
385 sock->cb_dev = bcm63xx_cb_dev;
386 if (bcm63xx_cb_dev)
387 sock->features |= SS_CAP_CARDBUS;
388#endif
389
390 /* assume common & attribute memory have the same size */
391 sock->map_size = resource_size(skt->common_res);
392
393 /* initialize polling timer */
394 setup_timer(&skt->timer, bcm63xx_pcmcia_poll, (unsigned long)skt);
395
396 /* initialize pcmcia control register, drive VS[12] to 0,
397 * leave CB IDSEL to the old value since it is set by the PCI
398 * layer */
399 val = pcmcia_readl(skt, PCMCIA_C1_REG);
400 val &= PCMCIA_C1_CBIDSEL_MASK;
401 val |= PCMCIA_C1_EN_PCMCIA_GPIO_MASK;
402 pcmcia_writel(skt, val, PCMCIA_C1_REG);
403
404 /*
405 * Hardware has only one set of timings registers, not one for
406 * each memory access type, so we configure them for the
407 * slowest one: attribute memory.
408 */
409 val = PCMCIA_C2_DATA16_MASK;
410 val |= 10 << PCMCIA_C2_RWCOUNT_SHIFT;
411 val |= 6 << PCMCIA_C2_INACTIVE_SHIFT;
412 val |= 3 << PCMCIA_C2_SETUP_SHIFT;
413 val |= 3 << PCMCIA_C2_HOLD_SHIFT;
414 pcmcia_writel(skt, val, PCMCIA_C2_REG);
415
416 ret = pcmcia_register_socket(sock);
417 if (ret)
418 goto err;
419
420 /* start polling socket */
421 mod_timer(&skt->timer,
422 jiffies + msecs_to_jiffies(BCM63XX_PCMCIA_POLL_RATE));
423
424 platform_set_drvdata(pdev, skt);
425 return 0;
426
427err:
428 if (skt->io_base)
429 iounmap(skt->io_base);
430 if (skt->base)
431 iounmap(skt->base);
432 if (skt->reg_res)
433 release_mem_region(skt->reg_res->start, regmem_size);
434 kfree(skt);
435 return ret;
436}
437
438static int __devexit bcm63xx_drv_pcmcia_remove(struct platform_device *pdev)
439{
440 struct bcm63xx_pcmcia_socket *skt;
441 struct resource *res;
442
443 skt = platform_get_drvdata(pdev);
444 del_timer_sync(&skt->timer);
445 iounmap(skt->base);
446 iounmap(skt->io_base);
447 res = skt->reg_res;
448 release_mem_region(res->start, resource_size(res));
449 kfree(skt);
450 return 0;
451}
452
453struct platform_driver bcm63xx_pcmcia_driver = {
454 .probe = bcm63xx_drv_pcmcia_probe,
455 .remove = __devexit_p(bcm63xx_drv_pcmcia_remove),
456 .driver = {
457 .name = "bcm63xx_pcmcia",
458 .owner = THIS_MODULE,
459 },
460};
461
462#ifdef CONFIG_CARDBUS
463static int __devinit bcm63xx_cb_probe(struct pci_dev *dev,
464 const struct pci_device_id *id)
465{
466 /* keep pci device */
467 bcm63xx_cb_dev = dev;
468 return platform_driver_register(&bcm63xx_pcmcia_driver);
469}
470
471static void __devexit bcm63xx_cb_exit(struct pci_dev *dev)
472{
473 platform_driver_unregister(&bcm63xx_pcmcia_driver);
474 bcm63xx_cb_dev = NULL;
475}
476
477static struct pci_device_id bcm63xx_cb_table[] = {
478 {
479 .vendor = PCI_VENDOR_ID_BROADCOM,
480 .device = BCM6348_CPU_ID,
481 .subvendor = PCI_VENDOR_ID_BROADCOM,
482 .subdevice = PCI_ANY_ID,
483 .class = PCI_CLASS_BRIDGE_CARDBUS << 8,
484 .class_mask = ~0,
485 },
486
487 {
488 .vendor = PCI_VENDOR_ID_BROADCOM,
489 .device = BCM6358_CPU_ID,
490 .subvendor = PCI_VENDOR_ID_BROADCOM,
491 .subdevice = PCI_ANY_ID,
492 .class = PCI_CLASS_BRIDGE_CARDBUS << 8,
493 .class_mask = ~0,
494 },
495
496 { },
497};
498
499MODULE_DEVICE_TABLE(pci, bcm63xx_cb_table);
500
501static struct pci_driver bcm63xx_cardbus_driver = {
502 .name = "bcm63xx_cardbus",
503 .id_table = bcm63xx_cb_table,
504 .probe = bcm63xx_cb_probe,
505 .remove = __devexit_p(bcm63xx_cb_exit),
506};
507#endif
508
509/*
510 * if cardbus support is enabled, register our platform device after
511 * our fake cardbus bridge has been registered
512 */
513static int __init bcm63xx_pcmcia_init(void)
514{
515#ifdef CONFIG_CARDBUS
516 return pci_register_driver(&bcm63xx_cardbus_driver);
517#else
518 return platform_driver_register(&bcm63xx_pcmcia_driver);
519#endif
520}
521
522static void __exit bcm63xx_pcmcia_exit(void)
523{
524#ifdef CONFIG_CARDBUS
525 return pci_unregister_driver(&bcm63xx_cardbus_driver);
526#else
527 platform_driver_unregister(&bcm63xx_pcmcia_driver);
528#endif
529}
530
531module_init(bcm63xx_pcmcia_init);
532module_exit(bcm63xx_pcmcia_exit);
533
534MODULE_LICENSE("GPL");
535MODULE_AUTHOR("Maxime Bizon <mbizon@freebox.fr>");
536MODULE_DESCRIPTION("Linux PCMCIA Card Services: bcm63xx Socket Controller");
diff --git a/drivers/pcmcia/bcm63xx_pcmcia.h b/drivers/pcmcia/bcm63xx_pcmcia.h
new file mode 100644
index 000000000000..ed957399d863
--- /dev/null
+++ b/drivers/pcmcia/bcm63xx_pcmcia.h
@@ -0,0 +1,60 @@
1#ifndef BCM63XX_PCMCIA_H_
2#define BCM63XX_PCMCIA_H_
3
4#include <linux/types.h>
5#include <linux/timer.h>
6#include <pcmcia/ss.h>
7#include <bcm63xx_dev_pcmcia.h>
8
9/* socket polling rate in ms */
10#define BCM63XX_PCMCIA_POLL_RATE 500
11
12enum {
13 CARD_CARDBUS = (1 << 0),
14 CARD_PCCARD = (1 << 1),
15 CARD_5V = (1 << 2),
16 CARD_3V = (1 << 3),
17 CARD_XV = (1 << 4),
18 CARD_YV = (1 << 5),
19};
20
21struct bcm63xx_pcmcia_socket {
22 struct pcmcia_socket socket;
23
24 /* platform specific data */
25 struct bcm63xx_pcmcia_platform_data *pd;
26
27 /* all regs access are protected by this spinlock */
28 spinlock_t lock;
29
30 /* pcmcia registers resource */
31 struct resource *reg_res;
32
33 /* base remapped address of registers */
34 void __iomem *base;
35
36 /* whether a card is detected at the moment */
37 int card_detected;
38
39 /* type of detected card (mask of above enum) */
40 u8 card_type;
41
42 /* keep last socket status to implement event reporting */
43 unsigned int old_status;
44
45 /* backup of requested socket state */
46 socket_state_t requested_state;
47
48 /* timer used for socket status polling */
49 struct timer_list timer;
50
51 /* attribute/common memory resources */
52 struct resource *attr_res;
53 struct resource *common_res;
54 struct resource *io_res;
55
56 /* base address of io memory */
57 void __iomem *io_base;
58};
59
60#endif /* BCM63XX_PCMCIA_H_ */
diff --git a/drivers/pcmcia/bfin_cf_pcmcia.c b/drivers/pcmcia/bfin_cf_pcmcia.c
index b59d4115d20f..300b368605c9 100644
--- a/drivers/pcmcia/bfin_cf_pcmcia.c
+++ b/drivers/pcmcia/bfin_cf_pcmcia.c
@@ -302,7 +302,7 @@ static int __devexit bfin_cf_remove(struct platform_device *pdev)
302 302
303static int bfin_cf_suspend(struct platform_device *pdev, pm_message_t mesg) 303static int bfin_cf_suspend(struct platform_device *pdev, pm_message_t mesg)
304{ 304{
305 return pcmcia_socket_dev_suspend(&pdev->dev, mesg); 305 return pcmcia_socket_dev_suspend(&pdev->dev);
306} 306}
307 307
308static int bfin_cf_resume(struct platform_device *pdev) 308static int bfin_cf_resume(struct platform_device *pdev)
diff --git a/drivers/pcmcia/cs.c b/drivers/pcmcia/cs.c
index 0660ad182589..934d4bee39a0 100644
--- a/drivers/pcmcia/cs.c
+++ b/drivers/pcmcia/cs.c
@@ -101,7 +101,7 @@ EXPORT_SYMBOL(pcmcia_socket_list_rwsem);
101static int socket_resume(struct pcmcia_socket *skt); 101static int socket_resume(struct pcmcia_socket *skt);
102static int socket_suspend(struct pcmcia_socket *skt); 102static int socket_suspend(struct pcmcia_socket *skt);
103 103
104int pcmcia_socket_dev_suspend(struct device *dev, pm_message_t state) 104int pcmcia_socket_dev_suspend(struct device *dev)
105{ 105{
106 struct pcmcia_socket *socket; 106 struct pcmcia_socket *socket;
107 107
diff --git a/drivers/pcmcia/i82092.c b/drivers/pcmcia/i82092.c
index 46561face128..a04f21c8170f 100644
--- a/drivers/pcmcia/i82092.c
+++ b/drivers/pcmcia/i82092.c
@@ -42,7 +42,7 @@ MODULE_DEVICE_TABLE(pci, i82092aa_pci_ids);
42#ifdef CONFIG_PM 42#ifdef CONFIG_PM
43static int i82092aa_socket_suspend (struct pci_dev *dev, pm_message_t state) 43static int i82092aa_socket_suspend (struct pci_dev *dev, pm_message_t state)
44{ 44{
45 return pcmcia_socket_dev_suspend(&dev->dev, state); 45 return pcmcia_socket_dev_suspend(&dev->dev);
46} 46}
47 47
48static int i82092aa_socket_resume (struct pci_dev *dev) 48static int i82092aa_socket_resume (struct pci_dev *dev)
diff --git a/drivers/pcmcia/i82365.c b/drivers/pcmcia/i82365.c
index 40d4953e4b12..b906abe26ad0 100644
--- a/drivers/pcmcia/i82365.c
+++ b/drivers/pcmcia/i82365.c
@@ -1241,7 +1241,7 @@ static int pcic_init(struct pcmcia_socket *s)
1241static int i82365_drv_pcmcia_suspend(struct platform_device *dev, 1241static int i82365_drv_pcmcia_suspend(struct platform_device *dev,
1242 pm_message_t state) 1242 pm_message_t state)
1243{ 1243{
1244 return pcmcia_socket_dev_suspend(&dev->dev, state); 1244 return pcmcia_socket_dev_suspend(&dev->dev);
1245} 1245}
1246 1246
1247static int i82365_drv_pcmcia_resume(struct platform_device *dev) 1247static int i82365_drv_pcmcia_resume(struct platform_device *dev)
diff --git a/drivers/pcmcia/m32r_cfc.c b/drivers/pcmcia/m32r_cfc.c
index 62b4ecc97c46..d1d89c4491ad 100644
--- a/drivers/pcmcia/m32r_cfc.c
+++ b/drivers/pcmcia/m32r_cfc.c
@@ -699,7 +699,7 @@ static struct pccard_operations pcc_operations = {
699static int cfc_drv_pcmcia_suspend(struct platform_device *dev, 699static int cfc_drv_pcmcia_suspend(struct platform_device *dev,
700 pm_message_t state) 700 pm_message_t state)
701{ 701{
702 return pcmcia_socket_dev_suspend(&dev->dev, state); 702 return pcmcia_socket_dev_suspend(&dev->dev);
703} 703}
704 704
705static int cfc_drv_pcmcia_resume(struct platform_device *dev) 705static int cfc_drv_pcmcia_resume(struct platform_device *dev)
diff --git a/drivers/pcmcia/m32r_pcc.c b/drivers/pcmcia/m32r_pcc.c
index 12034b41d196..a0655839c8d3 100644
--- a/drivers/pcmcia/m32r_pcc.c
+++ b/drivers/pcmcia/m32r_pcc.c
@@ -675,7 +675,7 @@ static struct pccard_operations pcc_operations = {
675static int pcc_drv_pcmcia_suspend(struct platform_device *dev, 675static int pcc_drv_pcmcia_suspend(struct platform_device *dev,
676 pm_message_t state) 676 pm_message_t state)
677{ 677{
678 return pcmcia_socket_dev_suspend(&dev->dev, state); 678 return pcmcia_socket_dev_suspend(&dev->dev);
679} 679}
680 680
681static int pcc_drv_pcmcia_resume(struct platform_device *dev) 681static int pcc_drv_pcmcia_resume(struct platform_device *dev)
diff --git a/drivers/pcmcia/m8xx_pcmcia.c b/drivers/pcmcia/m8xx_pcmcia.c
index d1ad0966392d..c69f2c4fe520 100644
--- a/drivers/pcmcia/m8xx_pcmcia.c
+++ b/drivers/pcmcia/m8xx_pcmcia.c
@@ -1296,7 +1296,7 @@ static int m8xx_remove(struct of_device *ofdev)
1296#ifdef CONFIG_PM 1296#ifdef CONFIG_PM
1297static int m8xx_suspend(struct platform_device *pdev, pm_message_t state) 1297static int m8xx_suspend(struct platform_device *pdev, pm_message_t state)
1298{ 1298{
1299 return pcmcia_socket_dev_suspend(&pdev->dev, state); 1299 return pcmcia_socket_dev_suspend(&pdev->dev);
1300} 1300}
1301 1301
1302static int m8xx_resume(struct platform_device *pdev) 1302static int m8xx_resume(struct platform_device *pdev)
diff --git a/drivers/pcmcia/omap_cf.c b/drivers/pcmcia/omap_cf.c
index f3736398900e..68570bc3ac86 100644
--- a/drivers/pcmcia/omap_cf.c
+++ b/drivers/pcmcia/omap_cf.c
@@ -334,7 +334,7 @@ static int __exit omap_cf_remove(struct platform_device *pdev)
334 334
335static int omap_cf_suspend(struct platform_device *pdev, pm_message_t mesg) 335static int omap_cf_suspend(struct platform_device *pdev, pm_message_t mesg)
336{ 336{
337 return pcmcia_socket_dev_suspend(&pdev->dev, mesg); 337 return pcmcia_socket_dev_suspend(&pdev->dev);
338} 338}
339 339
340static int omap_cf_resume(struct platform_device *pdev) 340static int omap_cf_resume(struct platform_device *pdev)
diff --git a/drivers/pcmcia/pd6729.c b/drivers/pcmcia/pd6729.c
index 8bed1dab9039..1c39d3438f20 100644
--- a/drivers/pcmcia/pd6729.c
+++ b/drivers/pcmcia/pd6729.c
@@ -758,7 +758,7 @@ static void __devexit pd6729_pci_remove(struct pci_dev *dev)
758#ifdef CONFIG_PM 758#ifdef CONFIG_PM
759static int pd6729_socket_suspend(struct pci_dev *dev, pm_message_t state) 759static int pd6729_socket_suspend(struct pci_dev *dev, pm_message_t state)
760{ 760{
761 return pcmcia_socket_dev_suspend(&dev->dev, state); 761 return pcmcia_socket_dev_suspend(&dev->dev);
762} 762}
763 763
764static int pd6729_socket_resume(struct pci_dev *dev) 764static int pd6729_socket_resume(struct pci_dev *dev)
diff --git a/drivers/pcmcia/pxa2xx_base.c b/drivers/pcmcia/pxa2xx_base.c
index 87e22ef8eb02..0e35acb1366b 100644
--- a/drivers/pcmcia/pxa2xx_base.c
+++ b/drivers/pcmcia/pxa2xx_base.c
@@ -302,7 +302,7 @@ static int pxa2xx_drv_pcmcia_remove(struct platform_device *dev)
302 302
303static int pxa2xx_drv_pcmcia_suspend(struct device *dev) 303static int pxa2xx_drv_pcmcia_suspend(struct device *dev)
304{ 304{
305 return pcmcia_socket_dev_suspend(dev, PMSG_SUSPEND); 305 return pcmcia_socket_dev_suspend(dev);
306} 306}
307 307
308static int pxa2xx_drv_pcmcia_resume(struct device *dev) 308static int pxa2xx_drv_pcmcia_resume(struct device *dev)
diff --git a/drivers/pcmcia/sa1100_generic.c b/drivers/pcmcia/sa1100_generic.c
index d8da5ac844e9..2d0e99751530 100644
--- a/drivers/pcmcia/sa1100_generic.c
+++ b/drivers/pcmcia/sa1100_generic.c
@@ -89,7 +89,7 @@ static int sa11x0_drv_pcmcia_remove(struct platform_device *dev)
89static int sa11x0_drv_pcmcia_suspend(struct platform_device *dev, 89static int sa11x0_drv_pcmcia_suspend(struct platform_device *dev,
90 pm_message_t state) 90 pm_message_t state)
91{ 91{
92 return pcmcia_socket_dev_suspend(&dev->dev, state); 92 return pcmcia_socket_dev_suspend(&dev->dev);
93} 93}
94 94
95static int sa11x0_drv_pcmcia_resume(struct platform_device *dev) 95static int sa11x0_drv_pcmcia_resume(struct platform_device *dev)
diff --git a/drivers/pcmcia/sa1111_generic.c b/drivers/pcmcia/sa1111_generic.c
index 401052a21ce8..4be4e172ffa1 100644
--- a/drivers/pcmcia/sa1111_generic.c
+++ b/drivers/pcmcia/sa1111_generic.c
@@ -159,7 +159,7 @@ static int __devexit pcmcia_remove(struct sa1111_dev *dev)
159 159
160static int pcmcia_suspend(struct sa1111_dev *dev, pm_message_t state) 160static int pcmcia_suspend(struct sa1111_dev *dev, pm_message_t state)
161{ 161{
162 return pcmcia_socket_dev_suspend(&dev->dev, state); 162 return pcmcia_socket_dev_suspend(&dev->dev);
163} 163}
164 164
165static int pcmcia_resume(struct sa1111_dev *dev) 165static int pcmcia_resume(struct sa1111_dev *dev)
diff --git a/drivers/pcmcia/tcic.c b/drivers/pcmcia/tcic.c
index 8eb04230fec7..582413fcb62f 100644
--- a/drivers/pcmcia/tcic.c
+++ b/drivers/pcmcia/tcic.c
@@ -366,7 +366,7 @@ static int __init get_tcic_id(void)
366static int tcic_drv_pcmcia_suspend(struct platform_device *dev, 366static int tcic_drv_pcmcia_suspend(struct platform_device *dev,
367 pm_message_t state) 367 pm_message_t state)
368{ 368{
369 return pcmcia_socket_dev_suspend(&dev->dev, state); 369 return pcmcia_socket_dev_suspend(&dev->dev);
370} 370}
371 371
372static int tcic_drv_pcmcia_resume(struct platform_device *dev) 372static int tcic_drv_pcmcia_resume(struct platform_device *dev)
diff --git a/drivers/pcmcia/vrc4171_card.c b/drivers/pcmcia/vrc4171_card.c
index d4ad50d737b0..c9fcbdc164ea 100644
--- a/drivers/pcmcia/vrc4171_card.c
+++ b/drivers/pcmcia/vrc4171_card.c
@@ -707,7 +707,7 @@ __setup("vrc4171_card=", vrc4171_card_setup);
707static int vrc4171_card_suspend(struct platform_device *dev, 707static int vrc4171_card_suspend(struct platform_device *dev,
708 pm_message_t state) 708 pm_message_t state)
709{ 709{
710 return pcmcia_socket_dev_suspend(&dev->dev, state); 710 return pcmcia_socket_dev_suspend(&dev->dev);
711} 711}
712 712
713static int vrc4171_card_resume(struct platform_device *dev) 713static int vrc4171_card_resume(struct platform_device *dev)
diff --git a/drivers/pcmcia/yenta_socket.c b/drivers/pcmcia/yenta_socket.c
index b459e87a30ac..abe0e44c6e9e 100644
--- a/drivers/pcmcia/yenta_socket.c
+++ b/drivers/pcmcia/yenta_socket.c
@@ -1225,60 +1225,71 @@ static int __devinit yenta_probe (struct pci_dev *dev, const struct pci_device_i
1225} 1225}
1226 1226
1227#ifdef CONFIG_PM 1227#ifdef CONFIG_PM
1228static int yenta_dev_suspend (struct pci_dev *dev, pm_message_t state) 1228static int yenta_dev_suspend_noirq(struct device *dev)
1229{ 1229{
1230 struct yenta_socket *socket = pci_get_drvdata(dev); 1230 struct pci_dev *pdev = to_pci_dev(dev);
1231 struct yenta_socket *socket = pci_get_drvdata(pdev);
1231 int ret; 1232 int ret;
1232 1233
1233 ret = pcmcia_socket_dev_suspend(&dev->dev, state); 1234 ret = pcmcia_socket_dev_suspend(dev);
1234 1235
1235 if (socket) { 1236 if (!socket)
1236 if (socket->type && socket->type->save_state) 1237 return ret;
1237 socket->type->save_state(socket);
1238 1238
1239 /* FIXME: pci_save_state needs to have a better interface */ 1239 if (socket->type && socket->type->save_state)
1240 pci_save_state(dev); 1240 socket->type->save_state(socket);
1241 pci_read_config_dword(dev, 16*4, &socket->saved_state[0]);
1242 pci_read_config_dword(dev, 17*4, &socket->saved_state[1]);
1243 pci_disable_device(dev);
1244 1241
1245 /* 1242 pci_save_state(pdev);
1246 * Some laptops (IBM T22) do not like us putting the Cardbus 1243 pci_read_config_dword(pdev, 16*4, &socket->saved_state[0]);
1247 * bridge into D3. At a guess, some other laptop will 1244 pci_read_config_dword(pdev, 17*4, &socket->saved_state[1]);
1248 * probably require this, so leave it commented out for now. 1245 pci_disable_device(pdev);
1249 */ 1246
1250 /* pci_set_power_state(dev, 3); */ 1247 /*
1251 } 1248 * Some laptops (IBM T22) do not like us putting the Cardbus
1249 * bridge into D3. At a guess, some other laptop will
1250 * probably require this, so leave it commented out for now.
1251 */
1252 /* pci_set_power_state(dev, 3); */
1252 1253
1253 return ret; 1254 return ret;
1254} 1255}
1255 1256
1256 1257static int yenta_dev_resume_noirq(struct device *dev)
1257static int yenta_dev_resume (struct pci_dev *dev)
1258{ 1258{
1259 struct yenta_socket *socket = pci_get_drvdata(dev); 1259 struct pci_dev *pdev = to_pci_dev(dev);
1260 struct yenta_socket *socket = pci_get_drvdata(pdev);
1261 int ret;
1260 1262
1261 if (socket) { 1263 if (!socket)
1262 int rc; 1264 return 0;
1263 1265
1264 pci_set_power_state(dev, 0); 1266 pci_write_config_dword(pdev, 16*4, socket->saved_state[0]);
1265 /* FIXME: pci_restore_state needs to have a better interface */ 1267 pci_write_config_dword(pdev, 17*4, socket->saved_state[1]);
1266 pci_restore_state(dev);
1267 pci_write_config_dword(dev, 16*4, socket->saved_state[0]);
1268 pci_write_config_dword(dev, 17*4, socket->saved_state[1]);
1269 1268
1270 rc = pci_enable_device(dev); 1269 ret = pci_enable_device(pdev);
1271 if (rc) 1270 if (ret)
1272 return rc; 1271 return ret;
1273 1272
1274 pci_set_master(dev); 1273 pci_set_master(pdev);
1275 1274
1276 if (socket->type && socket->type->restore_state) 1275 if (socket->type && socket->type->restore_state)
1277 socket->type->restore_state(socket); 1276 socket->type->restore_state(socket);
1278 }
1279 1277
1280 return pcmcia_socket_dev_resume(&dev->dev); 1278 return pcmcia_socket_dev_resume(dev);
1281} 1279}
1280
1281static struct dev_pm_ops yenta_pm_ops = {
1282 .suspend_noirq = yenta_dev_suspend_noirq,
1283 .resume_noirq = yenta_dev_resume_noirq,
1284 .freeze_noirq = yenta_dev_suspend_noirq,
1285 .thaw_noirq = yenta_dev_resume_noirq,
1286 .poweroff_noirq = yenta_dev_suspend_noirq,
1287 .restore_noirq = yenta_dev_resume_noirq,
1288};
1289
1290#define YENTA_PM_OPS (&yenta_pm_ops)
1291#else
1292#define YENTA_PM_OPS NULL
1282#endif 1293#endif
1283 1294
1284#define CB_ID(vend,dev,type) \ 1295#define CB_ID(vend,dev,type) \
@@ -1376,10 +1387,7 @@ static struct pci_driver yenta_cardbus_driver = {
1376 .id_table = yenta_table, 1387 .id_table = yenta_table,
1377 .probe = yenta_probe, 1388 .probe = yenta_probe,
1378 .remove = __devexit_p(yenta_close), 1389 .remove = __devexit_p(yenta_close),
1379#ifdef CONFIG_PM 1390 .driver.pm = YENTA_PM_OPS,
1380 .suspend = yenta_dev_suspend,
1381 .resume = yenta_dev_resume,
1382#endif
1383}; 1391};
1384 1392
1385 1393
diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
index f9f68e0e7344..afdbdaaf80cb 100644
--- a/drivers/platform/x86/sony-laptop.c
+++ b/drivers/platform/x86/sony-laptop.c
@@ -1041,6 +1041,9 @@ static int sony_nc_resume(struct acpi_device *device)
1041 sony_backlight_update_status(sony_backlight_device) < 0) 1041 sony_backlight_update_status(sony_backlight_device) < 0)
1042 printk(KERN_WARNING DRV_PFX "unable to restore brightness level\n"); 1042 printk(KERN_WARNING DRV_PFX "unable to restore brightness level\n");
1043 1043
1044 /* re-read rfkill state */
1045 sony_nc_rfkill_update();
1046
1044 return 0; 1047 return 0;
1045} 1048}
1046 1049
@@ -1078,6 +1081,8 @@ static int sony_nc_setup_rfkill(struct acpi_device *device,
1078 struct rfkill *rfk; 1081 struct rfkill *rfk;
1079 enum rfkill_type type; 1082 enum rfkill_type type;
1080 const char *name; 1083 const char *name;
1084 int result;
1085 bool hwblock;
1081 1086
1082 switch (nc_type) { 1087 switch (nc_type) {
1083 case SONY_WIFI: 1088 case SONY_WIFI:
@@ -1105,6 +1110,10 @@ static int sony_nc_setup_rfkill(struct acpi_device *device,
1105 if (!rfk) 1110 if (!rfk)
1106 return -ENOMEM; 1111 return -ENOMEM;
1107 1112
1113 sony_call_snc_handle(0x124, 0x200, &result);
1114 hwblock = !(result & 0x1);
1115 rfkill_set_hw_state(rfk, hwblock);
1116
1108 err = rfkill_register(rfk); 1117 err = rfkill_register(rfk);
1109 if (err) { 1118 if (err) {
1110 rfkill_destroy(rfk); 1119 rfkill_destroy(rfk);
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index 3910f2f3eada..d93108d148fc 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -2235,7 +2235,9 @@ static int tpacpi_hotkey_driver_mask_set(const u32 mask)
2235 2235
2236 HOTKEY_CONFIG_CRITICAL_START 2236 HOTKEY_CONFIG_CRITICAL_START
2237 hotkey_driver_mask = mask; 2237 hotkey_driver_mask = mask;
2238#ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL
2238 hotkey_source_mask |= (mask & ~hotkey_all_mask); 2239 hotkey_source_mask |= (mask & ~hotkey_all_mask);
2240#endif
2239 HOTKEY_CONFIG_CRITICAL_END 2241 HOTKEY_CONFIG_CRITICAL_END
2240 2242
2241 rc = hotkey_mask_set((hotkey_acpi_mask | hotkey_driver_mask) & 2243 rc = hotkey_mask_set((hotkey_acpi_mask | hotkey_driver_mask) &
diff --git a/drivers/s390/cio/qdio_debug.c b/drivers/s390/cio/qdio_debug.c
index 1b78f639ead3..76769978285f 100644
--- a/drivers/s390/cio/qdio_debug.c
+++ b/drivers/s390/cio/qdio_debug.c
@@ -125,7 +125,7 @@ static int qstat_seq_open(struct inode *inode, struct file *filp)
125 filp->f_path.dentry->d_inode->i_private); 125 filp->f_path.dentry->d_inode->i_private);
126} 126}
127 127
128static struct file_operations debugfs_fops = { 128static const struct file_operations debugfs_fops = {
129 .owner = THIS_MODULE, 129 .owner = THIS_MODULE,
130 .open = qstat_seq_open, 130 .open = qstat_seq_open,
131 .read = seq_read, 131 .read = seq_read,
diff --git a/drivers/s390/cio/qdio_perf.c b/drivers/s390/cio/qdio_perf.c
index eff943923c6f..968e3c7c2632 100644
--- a/drivers/s390/cio/qdio_perf.c
+++ b/drivers/s390/cio/qdio_perf.c
@@ -84,7 +84,7 @@ static int qdio_perf_seq_open(struct inode *inode, struct file *filp)
84 return single_open(filp, qdio_perf_proc_show, NULL); 84 return single_open(filp, qdio_perf_proc_show, NULL);
85} 85}
86 86
87static struct file_operations qdio_perf_proc_fops = { 87static const struct file_operations qdio_perf_proc_fops = {
88 .owner = THIS_MODULE, 88 .owner = THIS_MODULE,
89 .open = qdio_perf_seq_open, 89 .open = qdio_perf_seq_open,
90 .read = seq_read, 90 .read = seq_read,
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 848b59466850..747a5e5c1276 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -1185,7 +1185,7 @@ sg_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1185 return VM_FAULT_SIGBUS; 1185 return VM_FAULT_SIGBUS;
1186} 1186}
1187 1187
1188static struct vm_operations_struct sg_mmap_vm_ops = { 1188static const struct vm_operations_struct sg_mmap_vm_ops = {
1189 .fault = sg_vma_fault, 1189 .fault = sg_vma_fault,
1190}; 1190};
1191 1191
@@ -1317,7 +1317,7 @@ static void sg_rq_end_io(struct request *rq, int uptodate)
1317 } 1317 }
1318} 1318}
1319 1319
1320static struct file_operations sg_fops = { 1320static const struct file_operations sg_fops = {
1321 .owner = THIS_MODULE, 1321 .owner = THIS_MODULE,
1322 .read = sg_read, 1322 .read = sg_read,
1323 .write = sg_write, 1323 .write = sg_write,
@@ -2194,9 +2194,11 @@ static int sg_proc_seq_show_int(struct seq_file *s, void *v);
2194static int sg_proc_single_open_adio(struct inode *inode, struct file *file); 2194static int sg_proc_single_open_adio(struct inode *inode, struct file *file);
2195static ssize_t sg_proc_write_adio(struct file *filp, const char __user *buffer, 2195static ssize_t sg_proc_write_adio(struct file *filp, const char __user *buffer,
2196 size_t count, loff_t *off); 2196 size_t count, loff_t *off);
2197static struct file_operations adio_fops = { 2197static const struct file_operations adio_fops = {
2198 /* .owner, .read and .llseek added in sg_proc_init() */ 2198 .owner = THIS_MODULE,
2199 .open = sg_proc_single_open_adio, 2199 .open = sg_proc_single_open_adio,
2200 .read = seq_read,
2201 .llseek = seq_lseek,
2200 .write = sg_proc_write_adio, 2202 .write = sg_proc_write_adio,
2201 .release = single_release, 2203 .release = single_release,
2202}; 2204};
@@ -2204,23 +2206,32 @@ static struct file_operations adio_fops = {
2204static int sg_proc_single_open_dressz(struct inode *inode, struct file *file); 2206static int sg_proc_single_open_dressz(struct inode *inode, struct file *file);
2205static ssize_t sg_proc_write_dressz(struct file *filp, 2207static ssize_t sg_proc_write_dressz(struct file *filp,
2206 const char __user *buffer, size_t count, loff_t *off); 2208 const char __user *buffer, size_t count, loff_t *off);
2207static struct file_operations dressz_fops = { 2209static const struct file_operations dressz_fops = {
2210 .owner = THIS_MODULE,
2208 .open = sg_proc_single_open_dressz, 2211 .open = sg_proc_single_open_dressz,
2212 .read = seq_read,
2213 .llseek = seq_lseek,
2209 .write = sg_proc_write_dressz, 2214 .write = sg_proc_write_dressz,
2210 .release = single_release, 2215 .release = single_release,
2211}; 2216};
2212 2217
2213static int sg_proc_seq_show_version(struct seq_file *s, void *v); 2218static int sg_proc_seq_show_version(struct seq_file *s, void *v);
2214static int sg_proc_single_open_version(struct inode *inode, struct file *file); 2219static int sg_proc_single_open_version(struct inode *inode, struct file *file);
2215static struct file_operations version_fops = { 2220static const struct file_operations version_fops = {
2221 .owner = THIS_MODULE,
2216 .open = sg_proc_single_open_version, 2222 .open = sg_proc_single_open_version,
2223 .read = seq_read,
2224 .llseek = seq_lseek,
2217 .release = single_release, 2225 .release = single_release,
2218}; 2226};
2219 2227
2220static int sg_proc_seq_show_devhdr(struct seq_file *s, void *v); 2228static int sg_proc_seq_show_devhdr(struct seq_file *s, void *v);
2221static int sg_proc_single_open_devhdr(struct inode *inode, struct file *file); 2229static int sg_proc_single_open_devhdr(struct inode *inode, struct file *file);
2222static struct file_operations devhdr_fops = { 2230static const struct file_operations devhdr_fops = {
2231 .owner = THIS_MODULE,
2223 .open = sg_proc_single_open_devhdr, 2232 .open = sg_proc_single_open_devhdr,
2233 .read = seq_read,
2234 .llseek = seq_lseek,
2224 .release = single_release, 2235 .release = single_release,
2225}; 2236};
2226 2237
@@ -2229,8 +2240,11 @@ static int sg_proc_open_dev(struct inode *inode, struct file *file);
2229static void * dev_seq_start(struct seq_file *s, loff_t *pos); 2240static void * dev_seq_start(struct seq_file *s, loff_t *pos);
2230static void * dev_seq_next(struct seq_file *s, void *v, loff_t *pos); 2241static void * dev_seq_next(struct seq_file *s, void *v, loff_t *pos);
2231static void dev_seq_stop(struct seq_file *s, void *v); 2242static void dev_seq_stop(struct seq_file *s, void *v);
2232static struct file_operations dev_fops = { 2243static const struct file_operations dev_fops = {
2244 .owner = THIS_MODULE,
2233 .open = sg_proc_open_dev, 2245 .open = sg_proc_open_dev,
2246 .read = seq_read,
2247 .llseek = seq_lseek,
2234 .release = seq_release, 2248 .release = seq_release,
2235}; 2249};
2236static const struct seq_operations dev_seq_ops = { 2250static const struct seq_operations dev_seq_ops = {
@@ -2242,8 +2256,11 @@ static const struct seq_operations dev_seq_ops = {
2242 2256
2243static int sg_proc_seq_show_devstrs(struct seq_file *s, void *v); 2257static int sg_proc_seq_show_devstrs(struct seq_file *s, void *v);
2244static int sg_proc_open_devstrs(struct inode *inode, struct file *file); 2258static int sg_proc_open_devstrs(struct inode *inode, struct file *file);
2245static struct file_operations devstrs_fops = { 2259static const struct file_operations devstrs_fops = {
2260 .owner = THIS_MODULE,
2246 .open = sg_proc_open_devstrs, 2261 .open = sg_proc_open_devstrs,
2262 .read = seq_read,
2263 .llseek = seq_lseek,
2247 .release = seq_release, 2264 .release = seq_release,
2248}; 2265};
2249static const struct seq_operations devstrs_seq_ops = { 2266static const struct seq_operations devstrs_seq_ops = {
@@ -2255,8 +2272,11 @@ static const struct seq_operations devstrs_seq_ops = {
2255 2272
2256static int sg_proc_seq_show_debug(struct seq_file *s, void *v); 2273static int sg_proc_seq_show_debug(struct seq_file *s, void *v);
2257static int sg_proc_open_debug(struct inode *inode, struct file *file); 2274static int sg_proc_open_debug(struct inode *inode, struct file *file);
2258static struct file_operations debug_fops = { 2275static const struct file_operations debug_fops = {
2276 .owner = THIS_MODULE,
2259 .open = sg_proc_open_debug, 2277 .open = sg_proc_open_debug,
2278 .read = seq_read,
2279 .llseek = seq_lseek,
2260 .release = seq_release, 2280 .release = seq_release,
2261}; 2281};
2262static const struct seq_operations debug_seq_ops = { 2282static const struct seq_operations debug_seq_ops = {
@@ -2269,7 +2289,7 @@ static const struct seq_operations debug_seq_ops = {
2269 2289
2270struct sg_proc_leaf { 2290struct sg_proc_leaf {
2271 const char * name; 2291 const char * name;
2272 struct file_operations * fops; 2292 const struct file_operations * fops;
2273}; 2293};
2274 2294
2275static struct sg_proc_leaf sg_proc_leaf_arr[] = { 2295static struct sg_proc_leaf sg_proc_leaf_arr[] = {
@@ -2295,9 +2315,6 @@ sg_proc_init(void)
2295 for (k = 0; k < num_leaves; ++k) { 2315 for (k = 0; k < num_leaves; ++k) {
2296 leaf = &sg_proc_leaf_arr[k]; 2316 leaf = &sg_proc_leaf_arr[k];
2297 mask = leaf->fops->write ? S_IRUGO | S_IWUSR : S_IRUGO; 2317 mask = leaf->fops->write ? S_IRUGO | S_IWUSR : S_IRUGO;
2298 leaf->fops->owner = THIS_MODULE;
2299 leaf->fops->read = seq_read;
2300 leaf->fops->llseek = seq_lseek;
2301 proc_create(leaf->name, mask, sg_proc_sgp, leaf->fops); 2318 proc_create(leaf->name, mask, sg_proc_sgp, leaf->fops);
2302 } 2319 }
2303 return 0; 2320 return 0;
diff --git a/drivers/serial/8250.c b/drivers/serial/8250.c
index 2209620d2349..b1ae774016f1 100644
--- a/drivers/serial/8250.c
+++ b/drivers/serial/8250.c
@@ -64,6 +64,8 @@ static int serial_index(struct uart_port *port)
64 return (serial8250_reg.minor - 64) + port->line; 64 return (serial8250_reg.minor - 64) + port->line;
65} 65}
66 66
67static unsigned int skip_txen_test; /* force skip of txen test at init time */
68
67/* 69/*
68 * Debugging. 70 * Debugging.
69 */ 71 */
@@ -2108,7 +2110,7 @@ static int serial8250_startup(struct uart_port *port)
2108 is variable. So, let's just don't test if we receive 2110 is variable. So, let's just don't test if we receive
2109 TX irq. This way, we'll never enable UART_BUG_TXEN. 2111 TX irq. This way, we'll never enable UART_BUG_TXEN.
2110 */ 2112 */
2111 if (up->port.flags & UPF_NO_TXEN_TEST) 2113 if (skip_txen_test || up->port.flags & UPF_NO_TXEN_TEST)
2112 goto dont_test_tx_en; 2114 goto dont_test_tx_en;
2113 2115
2114 /* 2116 /*
@@ -3248,6 +3250,9 @@ MODULE_PARM_DESC(share_irqs, "Share IRQs with other non-8250/16x50 devices"
3248module_param(nr_uarts, uint, 0644); 3250module_param(nr_uarts, uint, 0644);
3249MODULE_PARM_DESC(nr_uarts, "Maximum number of UARTs supported. (1-" __MODULE_STRING(CONFIG_SERIAL_8250_NR_UARTS) ")"); 3251MODULE_PARM_DESC(nr_uarts, "Maximum number of UARTs supported. (1-" __MODULE_STRING(CONFIG_SERIAL_8250_NR_UARTS) ")");
3250 3252
3253module_param(skip_txen_test, uint, 0644);
3254MODULE_PARM_DESC(skip_txen_test, "Skip checking for the TXEN bug at init time");
3255
3251#ifdef CONFIG_SERIAL_8250_RSA 3256#ifdef CONFIG_SERIAL_8250_RSA
3252module_param_array(probe_rsa, ulong, &probe_rsa_count, 0444); 3257module_param_array(probe_rsa, ulong, &probe_rsa_count, 0444);
3253MODULE_PARM_DESC(probe_rsa, "Probe I/O ports for RSA"); 3258MODULE_PARM_DESC(probe_rsa, "Probe I/O ports for RSA");
diff --git a/drivers/serial/Kconfig b/drivers/serial/Kconfig
index 03422ce878cf..e52257257279 100644
--- a/drivers/serial/Kconfig
+++ b/drivers/serial/Kconfig
@@ -862,7 +862,7 @@ config SERIAL_IMX_CONSOLE
862 862
863config SERIAL_UARTLITE 863config SERIAL_UARTLITE
864 tristate "Xilinx uartlite serial port support" 864 tristate "Xilinx uartlite serial port support"
865 depends on PPC32 || MICROBLAZE 865 depends on PPC32 || MICROBLAZE || MFD_TIMBERDALE
866 select SERIAL_CORE 866 select SERIAL_CORE
867 help 867 help
868 Say Y here if you want to use the Xilinx uartlite serial controller. 868 Say Y here if you want to use the Xilinx uartlite serial controller.
@@ -1458,4 +1458,23 @@ config SERIAL_TIMBERDALE
1458 ---help--- 1458 ---help---
1459 Add support for UART controller on timberdale. 1459 Add support for UART controller on timberdale.
1460 1460
1461config SERIAL_BCM63XX
1462 tristate "bcm63xx serial port support"
1463 select SERIAL_CORE
1464 depends on BCM63XX
1465 help
1466 If you have a bcm63xx CPU, you can enable its onboard
1467 serial port by enabling this options.
1468
1469 To compile this driver as a module, choose M here: the
1470 module will be called bcm963xx_uart.
1471
1472config SERIAL_BCM63XX_CONSOLE
1473 bool "Console on bcm63xx serial port"
1474 depends on SERIAL_BCM63XX=y
1475 select SERIAL_CORE_CONSOLE
1476 help
1477 If you have enabled the serial port on the bcm63xx CPU
1478 you can make it the console by answering Y to this option.
1479
1461endmenu 1480endmenu
diff --git a/drivers/serial/Makefile b/drivers/serial/Makefile
index 97f6fcc8b432..d21d5dd5d048 100644
--- a/drivers/serial/Makefile
+++ b/drivers/serial/Makefile
@@ -34,6 +34,7 @@ obj-$(CONFIG_SERIAL_CLPS711X) += clps711x.o
34obj-$(CONFIG_SERIAL_PXA) += pxa.o 34obj-$(CONFIG_SERIAL_PXA) += pxa.o
35obj-$(CONFIG_SERIAL_PNX8XXX) += pnx8xxx_uart.o 35obj-$(CONFIG_SERIAL_PNX8XXX) += pnx8xxx_uart.o
36obj-$(CONFIG_SERIAL_SA1100) += sa1100.o 36obj-$(CONFIG_SERIAL_SA1100) += sa1100.o
37obj-$(CONFIG_SERIAL_BCM63XX) += bcm63xx_uart.o
37obj-$(CONFIG_SERIAL_BFIN) += bfin_5xx.o 38obj-$(CONFIG_SERIAL_BFIN) += bfin_5xx.o
38obj-$(CONFIG_SERIAL_BFIN_SPORT) += bfin_sport_uart.o 39obj-$(CONFIG_SERIAL_BFIN_SPORT) += bfin_sport_uart.o
39obj-$(CONFIG_SERIAL_SAMSUNG) += samsung.o 40obj-$(CONFIG_SERIAL_SAMSUNG) += samsung.o
diff --git a/drivers/serial/bcm63xx_uart.c b/drivers/serial/bcm63xx_uart.c
new file mode 100644
index 000000000000..beddaa6e9069
--- /dev/null
+++ b/drivers/serial/bcm63xx_uart.c
@@ -0,0 +1,890 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Derived from many drivers using generic_serial interface.
7 *
8 * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
9 *
10 * Serial driver for BCM63xx integrated UART.
11 *
12 * Hardware flow control was _not_ tested since I only have RX/TX on
13 * my board.
14 */
15
16#if defined(CONFIG_SERIAL_BCM63XX_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
17#define SUPPORT_SYSRQ
18#endif
19
20#include <linux/kernel.h>
21#include <linux/platform_device.h>
22#include <linux/init.h>
23#include <linux/delay.h>
24#include <linux/module.h>
25#include <linux/console.h>
26#include <linux/clk.h>
27#include <linux/tty.h>
28#include <linux/tty_flip.h>
29#include <linux/sysrq.h>
30#include <linux/serial.h>
31#include <linux/serial_core.h>
32
33#include <bcm63xx_clk.h>
34#include <bcm63xx_irq.h>
35#include <bcm63xx_regs.h>
36#include <bcm63xx_io.h>
37
38#define BCM63XX_NR_UARTS 1
39
40static struct uart_port ports[BCM63XX_NR_UARTS];
41
42/*
43 * rx interrupt mask / stat
44 *
45 * mask:
46 * - rx fifo full
47 * - rx fifo above threshold
48 * - rx fifo not empty for too long
49 */
50#define UART_RX_INT_MASK (UART_IR_MASK(UART_IR_RXOVER) | \
51 UART_IR_MASK(UART_IR_RXTHRESH) | \
52 UART_IR_MASK(UART_IR_RXTIMEOUT))
53
54#define UART_RX_INT_STAT (UART_IR_STAT(UART_IR_RXOVER) | \
55 UART_IR_STAT(UART_IR_RXTHRESH) | \
56 UART_IR_STAT(UART_IR_RXTIMEOUT))
57
58/*
59 * tx interrupt mask / stat
60 *
61 * mask:
62 * - tx fifo empty
63 * - tx fifo below threshold
64 */
65#define UART_TX_INT_MASK (UART_IR_MASK(UART_IR_TXEMPTY) | \
66 UART_IR_MASK(UART_IR_TXTRESH))
67
68#define UART_TX_INT_STAT (UART_IR_STAT(UART_IR_TXEMPTY) | \
69 UART_IR_STAT(UART_IR_TXTRESH))
70
71/*
72 * external input interrupt
73 *
74 * mask: any edge on CTS, DCD
75 */
76#define UART_EXTINP_INT_MASK (UART_EXTINP_IRMASK(UART_EXTINP_IR_CTS) | \
77 UART_EXTINP_IRMASK(UART_EXTINP_IR_DCD))
78
79/*
80 * handy uart register accessor
81 */
82static inline unsigned int bcm_uart_readl(struct uart_port *port,
83 unsigned int offset)
84{
85 return bcm_readl(port->membase + offset);
86}
87
88static inline void bcm_uart_writel(struct uart_port *port,
89 unsigned int value, unsigned int offset)
90{
91 bcm_writel(value, port->membase + offset);
92}
93
94/*
95 * serial core request to check if uart tx fifo is empty
96 */
97static unsigned int bcm_uart_tx_empty(struct uart_port *port)
98{
99 unsigned int val;
100
101 val = bcm_uart_readl(port, UART_IR_REG);
102 return (val & UART_IR_STAT(UART_IR_TXEMPTY)) ? 1 : 0;
103}
104
105/*
106 * serial core request to set RTS and DTR pin state and loopback mode
107 */
108static void bcm_uart_set_mctrl(struct uart_port *port, unsigned int mctrl)
109{
110 unsigned int val;
111
112 val = bcm_uart_readl(port, UART_MCTL_REG);
113 val &= ~(UART_MCTL_DTR_MASK | UART_MCTL_RTS_MASK);
114 /* invert of written value is reflected on the pin */
115 if (!(mctrl & TIOCM_DTR))
116 val |= UART_MCTL_DTR_MASK;
117 if (!(mctrl & TIOCM_RTS))
118 val |= UART_MCTL_RTS_MASK;
119 bcm_uart_writel(port, val, UART_MCTL_REG);
120
121 val = bcm_uart_readl(port, UART_CTL_REG);
122 if (mctrl & TIOCM_LOOP)
123 val |= UART_CTL_LOOPBACK_MASK;
124 else
125 val &= ~UART_CTL_LOOPBACK_MASK;
126 bcm_uart_writel(port, val, UART_CTL_REG);
127}
128
129/*
130 * serial core request to return RI, CTS, DCD and DSR pin state
131 */
132static unsigned int bcm_uart_get_mctrl(struct uart_port *port)
133{
134 unsigned int val, mctrl;
135
136 mctrl = 0;
137 val = bcm_uart_readl(port, UART_EXTINP_REG);
138 if (val & UART_EXTINP_RI_MASK)
139 mctrl |= TIOCM_RI;
140 if (val & UART_EXTINP_CTS_MASK)
141 mctrl |= TIOCM_CTS;
142 if (val & UART_EXTINP_DCD_MASK)
143 mctrl |= TIOCM_CD;
144 if (val & UART_EXTINP_DSR_MASK)
145 mctrl |= TIOCM_DSR;
146 return mctrl;
147}
148
149/*
150 * serial core request to disable tx ASAP (used for flow control)
151 */
152static void bcm_uart_stop_tx(struct uart_port *port)
153{
154 unsigned int val;
155
156 val = bcm_uart_readl(port, UART_CTL_REG);
157 val &= ~(UART_CTL_TXEN_MASK);
158 bcm_uart_writel(port, val, UART_CTL_REG);
159
160 val = bcm_uart_readl(port, UART_IR_REG);
161 val &= ~UART_TX_INT_MASK;
162 bcm_uart_writel(port, val, UART_IR_REG);
163}
164
165/*
166 * serial core request to (re)enable tx
167 */
168static void bcm_uart_start_tx(struct uart_port *port)
169{
170 unsigned int val;
171
172 val = bcm_uart_readl(port, UART_IR_REG);
173 val |= UART_TX_INT_MASK;
174 bcm_uart_writel(port, val, UART_IR_REG);
175
176 val = bcm_uart_readl(port, UART_CTL_REG);
177 val |= UART_CTL_TXEN_MASK;
178 bcm_uart_writel(port, val, UART_CTL_REG);
179}
180
181/*
182 * serial core request to stop rx, called before port shutdown
183 */
184static void bcm_uart_stop_rx(struct uart_port *port)
185{
186 unsigned int val;
187
188 val = bcm_uart_readl(port, UART_IR_REG);
189 val &= ~UART_RX_INT_MASK;
190 bcm_uart_writel(port, val, UART_IR_REG);
191}
192
193/*
194 * serial core request to enable modem status interrupt reporting
195 */
196static void bcm_uart_enable_ms(struct uart_port *port)
197{
198 unsigned int val;
199
200 val = bcm_uart_readl(port, UART_IR_REG);
201 val |= UART_IR_MASK(UART_IR_EXTIP);
202 bcm_uart_writel(port, val, UART_IR_REG);
203}
204
205/*
206 * serial core request to start/stop emitting break char
207 */
208static void bcm_uart_break_ctl(struct uart_port *port, int ctl)
209{
210 unsigned long flags;
211 unsigned int val;
212
213 spin_lock_irqsave(&port->lock, flags);
214
215 val = bcm_uart_readl(port, UART_CTL_REG);
216 if (ctl)
217 val |= UART_CTL_XMITBRK_MASK;
218 else
219 val &= ~UART_CTL_XMITBRK_MASK;
220 bcm_uart_writel(port, val, UART_CTL_REG);
221
222 spin_unlock_irqrestore(&port->lock, flags);
223}
224
225/*
226 * return port type in string format
227 */
228static const char *bcm_uart_type(struct uart_port *port)
229{
230 return (port->type == PORT_BCM63XX) ? "bcm63xx_uart" : NULL;
231}
232
233/*
234 * read all chars in rx fifo and send them to core
235 */
236static void bcm_uart_do_rx(struct uart_port *port)
237{
238 struct tty_struct *tty;
239 unsigned int max_count;
240
241 /* limit number of char read in interrupt, should not be
242 * higher than fifo size anyway since we're much faster than
243 * serial port */
244 max_count = 32;
245 tty = port->info->port.tty;
246 do {
247 unsigned int iestat, c, cstat;
248 char flag;
249
250 /* get overrun/fifo empty information from ier
251 * register */
252 iestat = bcm_uart_readl(port, UART_IR_REG);
253 if (!(iestat & UART_IR_STAT(UART_IR_RXNOTEMPTY)))
254 break;
255
256 cstat = c = bcm_uart_readl(port, UART_FIFO_REG);
257 port->icount.rx++;
258 flag = TTY_NORMAL;
259 c &= 0xff;
260
261 if (unlikely((cstat & UART_FIFO_ANYERR_MASK))) {
262 /* do stats first */
263 if (cstat & UART_FIFO_BRKDET_MASK) {
264 port->icount.brk++;
265 if (uart_handle_break(port))
266 continue;
267 }
268
269 if (cstat & UART_FIFO_PARERR_MASK)
270 port->icount.parity++;
271 if (cstat & UART_FIFO_FRAMEERR_MASK)
272 port->icount.frame++;
273
274 /* update flag wrt read_status_mask */
275 cstat &= port->read_status_mask;
276 if (cstat & UART_FIFO_BRKDET_MASK)
277 flag = TTY_BREAK;
278 if (cstat & UART_FIFO_FRAMEERR_MASK)
279 flag = TTY_FRAME;
280 if (cstat & UART_FIFO_PARERR_MASK)
281 flag = TTY_PARITY;
282 }
283
284 if (uart_handle_sysrq_char(port, c))
285 continue;
286
287 if (unlikely(iestat & UART_IR_STAT(UART_IR_RXOVER))) {
288 port->icount.overrun++;
289 tty_insert_flip_char(tty, 0, TTY_OVERRUN);
290 }
291
292 if ((cstat & port->ignore_status_mask) == 0)
293 tty_insert_flip_char(tty, c, flag);
294
295 } while (--max_count);
296
297 tty_flip_buffer_push(tty);
298}
299
300/*
301 * fill tx fifo with chars to send, stop when fifo is about to be full
302 * or when all chars have been sent.
303 */
304static void bcm_uart_do_tx(struct uart_port *port)
305{
306 struct circ_buf *xmit;
307 unsigned int val, max_count;
308
309 if (port->x_char) {
310 bcm_uart_writel(port, port->x_char, UART_FIFO_REG);
311 port->icount.tx++;
312 port->x_char = 0;
313 return;
314 }
315
316 if (uart_tx_stopped(port)) {
317 bcm_uart_stop_tx(port);
318 return;
319 }
320
321 xmit = &port->info->xmit;
322 if (uart_circ_empty(xmit))
323 goto txq_empty;
324
325 val = bcm_uart_readl(port, UART_MCTL_REG);
326 val = (val & UART_MCTL_TXFIFOFILL_MASK) >> UART_MCTL_TXFIFOFILL_SHIFT;
327 max_count = port->fifosize - val;
328
329 while (max_count--) {
330 unsigned int c;
331
332 c = xmit->buf[xmit->tail];
333 bcm_uart_writel(port, c, UART_FIFO_REG);
334 xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
335 port->icount.tx++;
336 if (uart_circ_empty(xmit))
337 break;
338 }
339
340 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
341 uart_write_wakeup(port);
342
343 if (uart_circ_empty(xmit))
344 goto txq_empty;
345 return;
346
347txq_empty:
348 /* nothing to send, disable transmit interrupt */
349 val = bcm_uart_readl(port, UART_IR_REG);
350 val &= ~UART_TX_INT_MASK;
351 bcm_uart_writel(port, val, UART_IR_REG);
352 return;
353}
354
355/*
356 * process uart interrupt
357 */
358static irqreturn_t bcm_uart_interrupt(int irq, void *dev_id)
359{
360 struct uart_port *port;
361 unsigned int irqstat;
362
363 port = dev_id;
364 spin_lock(&port->lock);
365
366 irqstat = bcm_uart_readl(port, UART_IR_REG);
367 if (irqstat & UART_RX_INT_STAT)
368 bcm_uart_do_rx(port);
369
370 if (irqstat & UART_TX_INT_STAT)
371 bcm_uart_do_tx(port);
372
373 if (irqstat & UART_IR_MASK(UART_IR_EXTIP)) {
374 unsigned int estat;
375
376 estat = bcm_uart_readl(port, UART_EXTINP_REG);
377 if (estat & UART_EXTINP_IRSTAT(UART_EXTINP_IR_CTS))
378 uart_handle_cts_change(port,
379 estat & UART_EXTINP_CTS_MASK);
380 if (estat & UART_EXTINP_IRSTAT(UART_EXTINP_IR_DCD))
381 uart_handle_dcd_change(port,
382 estat & UART_EXTINP_DCD_MASK);
383 }
384
385 spin_unlock(&port->lock);
386 return IRQ_HANDLED;
387}
388
389/*
390 * enable rx & tx operation on uart
391 */
392static void bcm_uart_enable(struct uart_port *port)
393{
394 unsigned int val;
395
396 val = bcm_uart_readl(port, UART_CTL_REG);
397 val |= (UART_CTL_BRGEN_MASK | UART_CTL_TXEN_MASK | UART_CTL_RXEN_MASK);
398 bcm_uart_writel(port, val, UART_CTL_REG);
399}
400
401/*
402 * disable rx & tx operation on uart
403 */
404static void bcm_uart_disable(struct uart_port *port)
405{
406 unsigned int val;
407
408 val = bcm_uart_readl(port, UART_CTL_REG);
409 val &= ~(UART_CTL_BRGEN_MASK | UART_CTL_TXEN_MASK |
410 UART_CTL_RXEN_MASK);
411 bcm_uart_writel(port, val, UART_CTL_REG);
412}
413
414/*
415 * clear all unread data in rx fifo and unsent data in tx fifo
416 */
417static void bcm_uart_flush(struct uart_port *port)
418{
419 unsigned int val;
420
421 /* empty rx and tx fifo */
422 val = bcm_uart_readl(port, UART_CTL_REG);
423 val |= UART_CTL_RSTRXFIFO_MASK | UART_CTL_RSTTXFIFO_MASK;
424 bcm_uart_writel(port, val, UART_CTL_REG);
425
426 /* read any pending char to make sure all irq status are
427 * cleared */
428 (void)bcm_uart_readl(port, UART_FIFO_REG);
429}
430
431/*
432 * serial core request to initialize uart and start rx operation
433 */
434static int bcm_uart_startup(struct uart_port *port)
435{
436 unsigned int val;
437 int ret;
438
439 /* mask all irq and flush port */
440 bcm_uart_disable(port);
441 bcm_uart_writel(port, 0, UART_IR_REG);
442 bcm_uart_flush(port);
443
444 /* clear any pending external input interrupt */
445 (void)bcm_uart_readl(port, UART_EXTINP_REG);
446
447 /* set rx/tx fifo thresh to fifo half size */
448 val = bcm_uart_readl(port, UART_MCTL_REG);
449 val &= ~(UART_MCTL_RXFIFOTHRESH_MASK | UART_MCTL_TXFIFOTHRESH_MASK);
450 val |= (port->fifosize / 2) << UART_MCTL_RXFIFOTHRESH_SHIFT;
451 val |= (port->fifosize / 2) << UART_MCTL_TXFIFOTHRESH_SHIFT;
452 bcm_uart_writel(port, val, UART_MCTL_REG);
453
454 /* set rx fifo timeout to 1 char time */
455 val = bcm_uart_readl(port, UART_CTL_REG);
456 val &= ~UART_CTL_RXTMOUTCNT_MASK;
457 val |= 1 << UART_CTL_RXTMOUTCNT_SHIFT;
458 bcm_uart_writel(port, val, UART_CTL_REG);
459
460 /* report any edge on dcd and cts */
461 val = UART_EXTINP_INT_MASK;
462 val |= UART_EXTINP_DCD_NOSENSE_MASK;
463 val |= UART_EXTINP_CTS_NOSENSE_MASK;
464 bcm_uart_writel(port, val, UART_EXTINP_REG);
465
466 /* register irq and enable rx interrupts */
467 ret = request_irq(port->irq, bcm_uart_interrupt, 0,
468 bcm_uart_type(port), port);
469 if (ret)
470 return ret;
471 bcm_uart_writel(port, UART_RX_INT_MASK, UART_IR_REG);
472 bcm_uart_enable(port);
473 return 0;
474}
475
476/*
477 * serial core request to flush & disable uart
478 */
479static void bcm_uart_shutdown(struct uart_port *port)
480{
481 unsigned long flags;
482
483 spin_lock_irqsave(&port->lock, flags);
484 bcm_uart_writel(port, 0, UART_IR_REG);
485 spin_unlock_irqrestore(&port->lock, flags);
486
487 bcm_uart_disable(port);
488 bcm_uart_flush(port);
489 free_irq(port->irq, port);
490}
491
492/*
493 * serial core request to change current uart setting
494 */
495static void bcm_uart_set_termios(struct uart_port *port,
496 struct ktermios *new,
497 struct ktermios *old)
498{
499 unsigned int ctl, baud, quot, ier;
500 unsigned long flags;
501
502 spin_lock_irqsave(&port->lock, flags);
503
504 /* disable uart while changing speed */
505 bcm_uart_disable(port);
506 bcm_uart_flush(port);
507
508 /* update Control register */
509 ctl = bcm_uart_readl(port, UART_CTL_REG);
510 ctl &= ~UART_CTL_BITSPERSYM_MASK;
511
512 switch (new->c_cflag & CSIZE) {
513 case CS5:
514 ctl |= (0 << UART_CTL_BITSPERSYM_SHIFT);
515 break;
516 case CS6:
517 ctl |= (1 << UART_CTL_BITSPERSYM_SHIFT);
518 break;
519 case CS7:
520 ctl |= (2 << UART_CTL_BITSPERSYM_SHIFT);
521 break;
522 default:
523 ctl |= (3 << UART_CTL_BITSPERSYM_SHIFT);
524 break;
525 }
526
527 ctl &= ~UART_CTL_STOPBITS_MASK;
528 if (new->c_cflag & CSTOPB)
529 ctl |= UART_CTL_STOPBITS_2;
530 else
531 ctl |= UART_CTL_STOPBITS_1;
532
533 ctl &= ~(UART_CTL_RXPAREN_MASK | UART_CTL_TXPAREN_MASK);
534 if (new->c_cflag & PARENB)
535 ctl |= (UART_CTL_RXPAREN_MASK | UART_CTL_TXPAREN_MASK);
536 ctl &= ~(UART_CTL_RXPAREVEN_MASK | UART_CTL_TXPAREVEN_MASK);
537 if (new->c_cflag & PARODD)
538 ctl |= (UART_CTL_RXPAREVEN_MASK | UART_CTL_TXPAREVEN_MASK);
539 bcm_uart_writel(port, ctl, UART_CTL_REG);
540
541 /* update Baudword register */
542 baud = uart_get_baud_rate(port, new, old, 0, port->uartclk / 16);
543 quot = uart_get_divisor(port, baud) - 1;
544 bcm_uart_writel(port, quot, UART_BAUD_REG);
545
546 /* update Interrupt register */
547 ier = bcm_uart_readl(port, UART_IR_REG);
548
549 ier &= ~UART_IR_MASK(UART_IR_EXTIP);
550 if (UART_ENABLE_MS(port, new->c_cflag))
551 ier |= UART_IR_MASK(UART_IR_EXTIP);
552
553 bcm_uart_writel(port, ier, UART_IR_REG);
554
555 /* update read/ignore mask */
556 port->read_status_mask = UART_FIFO_VALID_MASK;
557 if (new->c_iflag & INPCK) {
558 port->read_status_mask |= UART_FIFO_FRAMEERR_MASK;
559 port->read_status_mask |= UART_FIFO_PARERR_MASK;
560 }
561 if (new->c_iflag & (BRKINT))
562 port->read_status_mask |= UART_FIFO_BRKDET_MASK;
563
564 port->ignore_status_mask = 0;
565 if (new->c_iflag & IGNPAR)
566 port->ignore_status_mask |= UART_FIFO_PARERR_MASK;
567 if (new->c_iflag & IGNBRK)
568 port->ignore_status_mask |= UART_FIFO_BRKDET_MASK;
569 if (!(new->c_cflag & CREAD))
570 port->ignore_status_mask |= UART_FIFO_VALID_MASK;
571
572 uart_update_timeout(port, new->c_cflag, baud);
573 bcm_uart_enable(port);
574 spin_unlock_irqrestore(&port->lock, flags);
575}
576
577/*
578 * serial core request to claim uart iomem
579 */
580static int bcm_uart_request_port(struct uart_port *port)
581{
582 unsigned int size;
583
584 size = RSET_UART_SIZE;
585 if (!request_mem_region(port->mapbase, size, "bcm63xx")) {
586 dev_err(port->dev, "Memory region busy\n");
587 return -EBUSY;
588 }
589
590 port->membase = ioremap(port->mapbase, size);
591 if (!port->membase) {
592 dev_err(port->dev, "Unable to map registers\n");
593 release_mem_region(port->mapbase, size);
594 return -EBUSY;
595 }
596 return 0;
597}
598
599/*
600 * serial core request to release uart iomem
601 */
602static void bcm_uart_release_port(struct uart_port *port)
603{
604 release_mem_region(port->mapbase, RSET_UART_SIZE);
605 iounmap(port->membase);
606}
607
608/*
609 * serial core request to do any port required autoconfiguration
610 */
611static void bcm_uart_config_port(struct uart_port *port, int flags)
612{
613 if (flags & UART_CONFIG_TYPE) {
614 if (bcm_uart_request_port(port))
615 return;
616 port->type = PORT_BCM63XX;
617 }
618}
619
620/*
621 * serial core request to check that port information in serinfo are
622 * suitable
623 */
624static int bcm_uart_verify_port(struct uart_port *port,
625 struct serial_struct *serinfo)
626{
627 if (port->type != PORT_BCM63XX)
628 return -EINVAL;
629 if (port->irq != serinfo->irq)
630 return -EINVAL;
631 if (port->iotype != serinfo->io_type)
632 return -EINVAL;
633 if (port->mapbase != (unsigned long)serinfo->iomem_base)
634 return -EINVAL;
635 return 0;
636}
637
638/* serial core callbacks */
639static struct uart_ops bcm_uart_ops = {
640 .tx_empty = bcm_uart_tx_empty,
641 .get_mctrl = bcm_uart_get_mctrl,
642 .set_mctrl = bcm_uart_set_mctrl,
643 .start_tx = bcm_uart_start_tx,
644 .stop_tx = bcm_uart_stop_tx,
645 .stop_rx = bcm_uart_stop_rx,
646 .enable_ms = bcm_uart_enable_ms,
647 .break_ctl = bcm_uart_break_ctl,
648 .startup = bcm_uart_startup,
649 .shutdown = bcm_uart_shutdown,
650 .set_termios = bcm_uart_set_termios,
651 .type = bcm_uart_type,
652 .release_port = bcm_uart_release_port,
653 .request_port = bcm_uart_request_port,
654 .config_port = bcm_uart_config_port,
655 .verify_port = bcm_uart_verify_port,
656};
657
658
659
660#ifdef CONFIG_SERIAL_BCM63XX_CONSOLE
661static inline void wait_for_xmitr(struct uart_port *port)
662{
663 unsigned int tmout;
664
665 /* Wait up to 10ms for the character(s) to be sent. */
666 tmout = 10000;
667 while (--tmout) {
668 unsigned int val;
669
670 val = bcm_uart_readl(port, UART_IR_REG);
671 if (val & UART_IR_STAT(UART_IR_TXEMPTY))
672 break;
673 udelay(1);
674 }
675
676 /* Wait up to 1s for flow control if necessary */
677 if (port->flags & UPF_CONS_FLOW) {
678 tmout = 1000000;
679 while (--tmout) {
680 unsigned int val;
681
682 val = bcm_uart_readl(port, UART_EXTINP_REG);
683 if (val & UART_EXTINP_CTS_MASK)
684 break;
685 udelay(1);
686 }
687 }
688}
689
690/*
691 * output given char
692 */
693static void bcm_console_putchar(struct uart_port *port, int ch)
694{
695 wait_for_xmitr(port);
696 bcm_uart_writel(port, ch, UART_FIFO_REG);
697}
698
699/*
700 * console core request to output given string
701 */
702static void bcm_console_write(struct console *co, const char *s,
703 unsigned int count)
704{
705 struct uart_port *port;
706 unsigned long flags;
707 int locked;
708
709 port = &ports[co->index];
710
711 local_irq_save(flags);
712 if (port->sysrq) {
713 /* bcm_uart_interrupt() already took the lock */
714 locked = 0;
715 } else if (oops_in_progress) {
716 locked = spin_trylock(&port->lock);
717 } else {
718 spin_lock(&port->lock);
719 locked = 1;
720 }
721
722 /* call helper to deal with \r\n */
723 uart_console_write(port, s, count, bcm_console_putchar);
724
725 /* and wait for char to be transmitted */
726 wait_for_xmitr(port);
727
728 if (locked)
729 spin_unlock(&port->lock);
730 local_irq_restore(flags);
731}
732
733/*
734 * console core request to setup given console, find matching uart
735 * port and setup it.
736 */
737static int bcm_console_setup(struct console *co, char *options)
738{
739 struct uart_port *port;
740 int baud = 9600;
741 int bits = 8;
742 int parity = 'n';
743 int flow = 'n';
744
745 if (co->index < 0 || co->index >= BCM63XX_NR_UARTS)
746 return -EINVAL;
747 port = &ports[co->index];
748 if (!port->membase)
749 return -ENODEV;
750 if (options)
751 uart_parse_options(options, &baud, &parity, &bits, &flow);
752
753 return uart_set_options(port, co, baud, parity, bits, flow);
754}
755
756static struct uart_driver bcm_uart_driver;
757
758static struct console bcm63xx_console = {
759 .name = "ttyS",
760 .write = bcm_console_write,
761 .device = uart_console_device,
762 .setup = bcm_console_setup,
763 .flags = CON_PRINTBUFFER,
764 .index = -1,
765 .data = &bcm_uart_driver,
766};
767
768static int __init bcm63xx_console_init(void)
769{
770 register_console(&bcm63xx_console);
771 return 0;
772}
773
774console_initcall(bcm63xx_console_init);
775
776#define BCM63XX_CONSOLE (&bcm63xx_console)
777#else
778#define BCM63XX_CONSOLE NULL
779#endif /* CONFIG_SERIAL_BCM63XX_CONSOLE */
780
781static struct uart_driver bcm_uart_driver = {
782 .owner = THIS_MODULE,
783 .driver_name = "bcm63xx_uart",
784 .dev_name = "ttyS",
785 .major = TTY_MAJOR,
786 .minor = 64,
787 .nr = 1,
788 .cons = BCM63XX_CONSOLE,
789};
790
791/*
792 * platform driver probe/remove callback
793 */
794static int __devinit bcm_uart_probe(struct platform_device *pdev)
795{
796 struct resource *res_mem, *res_irq;
797 struct uart_port *port;
798 struct clk *clk;
799 int ret;
800
801 if (pdev->id < 0 || pdev->id >= BCM63XX_NR_UARTS)
802 return -EINVAL;
803
804 if (ports[pdev->id].membase)
805 return -EBUSY;
806
807 res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
808 if (!res_mem)
809 return -ENODEV;
810
811 res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
812 if (!res_irq)
813 return -ENODEV;
814
815 clk = clk_get(&pdev->dev, "periph");
816 if (IS_ERR(clk))
817 return -ENODEV;
818
819 port = &ports[pdev->id];
820 memset(port, 0, sizeof(*port));
821 port->iotype = UPIO_MEM;
822 port->mapbase = res_mem->start;
823 port->irq = res_irq->start;
824 port->ops = &bcm_uart_ops;
825 port->flags = UPF_BOOT_AUTOCONF;
826 port->dev = &pdev->dev;
827 port->fifosize = 16;
828 port->uartclk = clk_get_rate(clk) / 2;
829 clk_put(clk);
830
831 ret = uart_add_one_port(&bcm_uart_driver, port);
832 if (ret) {
833 kfree(port);
834 return ret;
835 }
836 platform_set_drvdata(pdev, port);
837 return 0;
838}
839
840static int __devexit bcm_uart_remove(struct platform_device *pdev)
841{
842 struct uart_port *port;
843
844 port = platform_get_drvdata(pdev);
845 uart_remove_one_port(&bcm_uart_driver, port);
846 platform_set_drvdata(pdev, NULL);
847 /* mark port as free */
848 ports[pdev->id].membase = 0;
849 return 0;
850}
851
852/*
853 * platform driver stuff
854 */
855static struct platform_driver bcm_uart_platform_driver = {
856 .probe = bcm_uart_probe,
857 .remove = __devexit_p(bcm_uart_remove),
858 .driver = {
859 .owner = THIS_MODULE,
860 .name = "bcm63xx_uart",
861 },
862};
863
864static int __init bcm_uart_init(void)
865{
866 int ret;
867
868 ret = uart_register_driver(&bcm_uart_driver);
869 if (ret)
870 return ret;
871
872 ret = platform_driver_register(&bcm_uart_platform_driver);
873 if (ret)
874 uart_unregister_driver(&bcm_uart_driver);
875
876 return ret;
877}
878
879static void __exit bcm_uart_exit(void)
880{
881 platform_driver_unregister(&bcm_uart_platform_driver);
882 uart_unregister_driver(&bcm_uart_driver);
883}
884
885module_init(bcm_uart_init);
886module_exit(bcm_uart_exit);
887
888MODULE_AUTHOR("Maxime Bizon <mbizon@freebox.fr>");
889MODULE_DESCRIPTION("Broadcom 63<xx integrated uart driver");
890MODULE_LICENSE("GPL");
diff --git a/drivers/serial/icom.c b/drivers/serial/icom.c
index 2d7feecaf492..0028b6f89ce6 100644
--- a/drivers/serial/icom.c
+++ b/drivers/serial/icom.c
@@ -307,7 +307,7 @@ static void stop_processor(struct icom_port *icom_port)
307 if (port < 4) { 307 if (port < 4) {
308 temp = readl(stop_proc[port].global_control_reg); 308 temp = readl(stop_proc[port].global_control_reg);
309 temp = 309 temp =
310 (temp & ~start_proc[port].processor_id) | stop_proc[port].processor_id; 310 (temp & ~start_proc[port].processor_id) | stop_proc[port].processor_id;
311 writel(temp, stop_proc[port].global_control_reg); 311 writel(temp, stop_proc[port].global_control_reg);
312 312
313 /* write flush */ 313 /* write flush */
@@ -336,7 +336,7 @@ static void start_processor(struct icom_port *icom_port)
336 if (port < 4) { 336 if (port < 4) {
337 temp = readl(start_proc[port].global_control_reg); 337 temp = readl(start_proc[port].global_control_reg);
338 temp = 338 temp =
339 (temp & ~stop_proc[port].processor_id) | start_proc[port].processor_id; 339 (temp & ~stop_proc[port].processor_id) | start_proc[port].processor_id;
340 writel(temp, start_proc[port].global_control_reg); 340 writel(temp, start_proc[port].global_control_reg);
341 341
342 /* write flush */ 342 /* write flush */
@@ -509,8 +509,8 @@ static void load_code(struct icom_port *icom_port)
509 dev_err(&icom_port->adapter->pci_dev->dev,"Port not opertional\n"); 509 dev_err(&icom_port->adapter->pci_dev->dev,"Port not opertional\n");
510 } 510 }
511 511
512 if (new_page != NULL) 512 if (new_page != NULL)
513 pci_free_consistent(dev, 4096, new_page, temp_pci); 513 pci_free_consistent(dev, 4096, new_page, temp_pci);
514} 514}
515 515
516static int startup(struct icom_port *icom_port) 516static int startup(struct icom_port *icom_port)
@@ -1493,15 +1493,15 @@ static int __devinit icom_probe(struct pci_dev *dev,
1493 const struct pci_device_id *ent) 1493 const struct pci_device_id *ent)
1494{ 1494{
1495 int index; 1495 int index;
1496 unsigned int command_reg; 1496 unsigned int command_reg;
1497 int retval; 1497 int retval;
1498 struct icom_adapter *icom_adapter; 1498 struct icom_adapter *icom_adapter;
1499 struct icom_port *icom_port; 1499 struct icom_port *icom_port;
1500 1500
1501 retval = pci_enable_device(dev); 1501 retval = pci_enable_device(dev);
1502 if (retval) { 1502 if (retval) {
1503 dev_err(&dev->dev, "Device enable FAILED\n"); 1503 dev_err(&dev->dev, "Device enable FAILED\n");
1504 return retval; 1504 return retval;
1505 } 1505 }
1506 1506
1507 if ( (retval = pci_request_regions(dev, "icom"))) { 1507 if ( (retval = pci_request_regions(dev, "icom"))) {
@@ -1510,23 +1510,23 @@ static int __devinit icom_probe(struct pci_dev *dev,
1510 return retval; 1510 return retval;
1511 } 1511 }
1512 1512
1513 pci_set_master(dev); 1513 pci_set_master(dev);
1514 1514
1515 if ( (retval = pci_read_config_dword(dev, PCI_COMMAND, &command_reg))) { 1515 if ( (retval = pci_read_config_dword(dev, PCI_COMMAND, &command_reg))) {
1516 dev_err(&dev->dev, "PCI Config read FAILED\n"); 1516 dev_err(&dev->dev, "PCI Config read FAILED\n");
1517 return retval; 1517 return retval;
1518 } 1518 }
1519 1519
1520 pci_write_config_dword(dev, PCI_COMMAND, 1520 pci_write_config_dword(dev, PCI_COMMAND,
1521 command_reg | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER 1521 command_reg | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER
1522 | PCI_COMMAND_PARITY | PCI_COMMAND_SERR); 1522 | PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
1523 1523
1524 if (ent->driver_data == ADAPTER_V1) { 1524 if (ent->driver_data == ADAPTER_V1) {
1525 pci_write_config_dword(dev, 0x44, 0x8300830A); 1525 pci_write_config_dword(dev, 0x44, 0x8300830A);
1526 } else { 1526 } else {
1527 pci_write_config_dword(dev, 0x44, 0x42004200); 1527 pci_write_config_dword(dev, 0x44, 0x42004200);
1528 pci_write_config_dword(dev, 0x48, 0x42004200); 1528 pci_write_config_dword(dev, 0x48, 0x42004200);
1529 } 1529 }
1530 1530
1531 1531
1532 retval = icom_alloc_adapter(&icom_adapter); 1532 retval = icom_alloc_adapter(&icom_adapter);
@@ -1536,10 +1536,10 @@ static int __devinit icom_probe(struct pci_dev *dev,
1536 goto probe_exit0; 1536 goto probe_exit0;
1537 } 1537 }
1538 1538
1539 icom_adapter->base_addr_pci = pci_resource_start(dev, 0); 1539 icom_adapter->base_addr_pci = pci_resource_start(dev, 0);
1540 icom_adapter->pci_dev = dev; 1540 icom_adapter->pci_dev = dev;
1541 icom_adapter->version = ent->driver_data; 1541 icom_adapter->version = ent->driver_data;
1542 icom_adapter->subsystem_id = ent->subdevice; 1542 icom_adapter->subsystem_id = ent->subdevice;
1543 1543
1544 1544
1545 retval = icom_init_ports(icom_adapter); 1545 retval = icom_init_ports(icom_adapter);
@@ -1548,7 +1548,7 @@ static int __devinit icom_probe(struct pci_dev *dev,
1548 goto probe_exit1; 1548 goto probe_exit1;
1549 } 1549 }
1550 1550
1551 icom_adapter->base_addr = pci_ioremap_bar(dev, 0); 1551 icom_adapter->base_addr = pci_ioremap_bar(dev, 0);
1552 1552
1553 if (!icom_adapter->base_addr) 1553 if (!icom_adapter->base_addr)
1554 goto probe_exit1; 1554 goto probe_exit1;
@@ -1562,7 +1562,7 @@ static int __devinit icom_probe(struct pci_dev *dev,
1562 1562
1563 retval = icom_load_ports(icom_adapter); 1563 retval = icom_load_ports(icom_adapter);
1564 1564
1565 for (index = 0; index < icom_adapter->numb_ports; index++) { 1565 for (index = 0; index < icom_adapter->numb_ports; index++) {
1566 icom_port = &icom_adapter->port_info[index]; 1566 icom_port = &icom_adapter->port_info[index];
1567 1567
1568 if (icom_port->status == ICOM_PORT_ACTIVE) { 1568 if (icom_port->status == ICOM_PORT_ACTIVE) {
@@ -1579,7 +1579,7 @@ static int __devinit icom_probe(struct pci_dev *dev,
1579 icom_port->status = ICOM_PORT_OFF; 1579 icom_port->status = ICOM_PORT_OFF;
1580 dev_err(&dev->dev, "Device add failed\n"); 1580 dev_err(&dev->dev, "Device add failed\n");
1581 } else 1581 } else
1582 dev_info(&dev->dev, "Device added\n"); 1582 dev_info(&dev->dev, "Device added\n");
1583 } 1583 }
1584 } 1584 }
1585 1585
@@ -1595,9 +1595,7 @@ probe_exit0:
1595 pci_release_regions(dev); 1595 pci_release_regions(dev);
1596 pci_disable_device(dev); 1596 pci_disable_device(dev);
1597 1597
1598 return retval; 1598 return retval;
1599
1600
1601} 1599}
1602 1600
1603static void __devexit icom_remove(struct pci_dev *dev) 1601static void __devexit icom_remove(struct pci_dev *dev)
diff --git a/drivers/serial/serial_txx9.c b/drivers/serial/serial_txx9.c
index 0f7cf4c453e6..c50e9fbbf743 100644
--- a/drivers/serial/serial_txx9.c
+++ b/drivers/serial/serial_txx9.c
@@ -221,21 +221,26 @@ sio_quot_set(struct uart_txx9_port *up, int quot)
221 sio_out(up, TXX9_SIBGR, 0xff | TXX9_SIBGR_BCLK_T6); 221 sio_out(up, TXX9_SIBGR, 0xff | TXX9_SIBGR_BCLK_T6);
222} 222}
223 223
224static struct uart_txx9_port *to_uart_txx9_port(struct uart_port *port)
225{
226 return container_of(port, struct uart_txx9_port, port);
227}
228
224static void serial_txx9_stop_tx(struct uart_port *port) 229static void serial_txx9_stop_tx(struct uart_port *port)
225{ 230{
226 struct uart_txx9_port *up = (struct uart_txx9_port *)port; 231 struct uart_txx9_port *up = to_uart_txx9_port(port);
227 sio_mask(up, TXX9_SIDICR, TXX9_SIDICR_TIE); 232 sio_mask(up, TXX9_SIDICR, TXX9_SIDICR_TIE);
228} 233}
229 234
230static void serial_txx9_start_tx(struct uart_port *port) 235static void serial_txx9_start_tx(struct uart_port *port)
231{ 236{
232 struct uart_txx9_port *up = (struct uart_txx9_port *)port; 237 struct uart_txx9_port *up = to_uart_txx9_port(port);
233 sio_set(up, TXX9_SIDICR, TXX9_SIDICR_TIE); 238 sio_set(up, TXX9_SIDICR, TXX9_SIDICR_TIE);
234} 239}
235 240
236static void serial_txx9_stop_rx(struct uart_port *port) 241static void serial_txx9_stop_rx(struct uart_port *port)
237{ 242{
238 struct uart_txx9_port *up = (struct uart_txx9_port *)port; 243 struct uart_txx9_port *up = to_uart_txx9_port(port);
239 up->port.read_status_mask &= ~TXX9_SIDISR_RDIS; 244 up->port.read_status_mask &= ~TXX9_SIDISR_RDIS;
240} 245}
241 246
@@ -246,7 +251,7 @@ static void serial_txx9_enable_ms(struct uart_port *port)
246 251
247static void serial_txx9_initialize(struct uart_port *port) 252static void serial_txx9_initialize(struct uart_port *port)
248{ 253{
249 struct uart_txx9_port *up = (struct uart_txx9_port *)port; 254 struct uart_txx9_port *up = to_uart_txx9_port(port);
250 unsigned int tmout = 10000; 255 unsigned int tmout = 10000;
251 256
252 sio_out(up, TXX9_SIFCR, TXX9_SIFCR_SWRST); 257 sio_out(up, TXX9_SIFCR, TXX9_SIFCR_SWRST);
@@ -414,7 +419,7 @@ static irqreturn_t serial_txx9_interrupt(int irq, void *dev_id)
414 419
415static unsigned int serial_txx9_tx_empty(struct uart_port *port) 420static unsigned int serial_txx9_tx_empty(struct uart_port *port)
416{ 421{
417 struct uart_txx9_port *up = (struct uart_txx9_port *)port; 422 struct uart_txx9_port *up = to_uart_txx9_port(port);
418 unsigned long flags; 423 unsigned long flags;
419 unsigned int ret; 424 unsigned int ret;
420 425
@@ -427,7 +432,7 @@ static unsigned int serial_txx9_tx_empty(struct uart_port *port)
427 432
428static unsigned int serial_txx9_get_mctrl(struct uart_port *port) 433static unsigned int serial_txx9_get_mctrl(struct uart_port *port)
429{ 434{
430 struct uart_txx9_port *up = (struct uart_txx9_port *)port; 435 struct uart_txx9_port *up = to_uart_txx9_port(port);
431 unsigned int ret; 436 unsigned int ret;
432 437
433 /* no modem control lines */ 438 /* no modem control lines */
@@ -440,7 +445,7 @@ static unsigned int serial_txx9_get_mctrl(struct uart_port *port)
440 445
441static void serial_txx9_set_mctrl(struct uart_port *port, unsigned int mctrl) 446static void serial_txx9_set_mctrl(struct uart_port *port, unsigned int mctrl)
442{ 447{
443 struct uart_txx9_port *up = (struct uart_txx9_port *)port; 448 struct uart_txx9_port *up = to_uart_txx9_port(port);
444 449
445 if (mctrl & TIOCM_RTS) 450 if (mctrl & TIOCM_RTS)
446 sio_mask(up, TXX9_SIFLCR, TXX9_SIFLCR_RTSSC); 451 sio_mask(up, TXX9_SIFLCR, TXX9_SIFLCR_RTSSC);
@@ -450,7 +455,7 @@ static void serial_txx9_set_mctrl(struct uart_port *port, unsigned int mctrl)
450 455
451static void serial_txx9_break_ctl(struct uart_port *port, int break_state) 456static void serial_txx9_break_ctl(struct uart_port *port, int break_state)
452{ 457{
453 struct uart_txx9_port *up = (struct uart_txx9_port *)port; 458 struct uart_txx9_port *up = to_uart_txx9_port(port);
454 unsigned long flags; 459 unsigned long flags;
455 460
456 spin_lock_irqsave(&up->port.lock, flags); 461 spin_lock_irqsave(&up->port.lock, flags);
@@ -494,7 +499,7 @@ static int serial_txx9_get_poll_char(struct uart_port *port)
494{ 499{
495 unsigned int ier; 500 unsigned int ier;
496 unsigned char c; 501 unsigned char c;
497 struct uart_txx9_port *up = (struct uart_txx9_port *)port; 502 struct uart_txx9_port *up = to_uart_txx9_port(port);
498 503
499 /* 504 /*
500 * First save the IER then disable the interrupts 505 * First save the IER then disable the interrupts
@@ -520,7 +525,7 @@ static int serial_txx9_get_poll_char(struct uart_port *port)
520static void serial_txx9_put_poll_char(struct uart_port *port, unsigned char c) 525static void serial_txx9_put_poll_char(struct uart_port *port, unsigned char c)
521{ 526{
522 unsigned int ier; 527 unsigned int ier;
523 struct uart_txx9_port *up = (struct uart_txx9_port *)port; 528 struct uart_txx9_port *up = to_uart_txx9_port(port);
524 529
525 /* 530 /*
526 * First save the IER then disable the interrupts 531 * First save the IER then disable the interrupts
@@ -551,7 +556,7 @@ static void serial_txx9_put_poll_char(struct uart_port *port, unsigned char c)
551 556
552static int serial_txx9_startup(struct uart_port *port) 557static int serial_txx9_startup(struct uart_port *port)
553{ 558{
554 struct uart_txx9_port *up = (struct uart_txx9_port *)port; 559 struct uart_txx9_port *up = to_uart_txx9_port(port);
555 unsigned long flags; 560 unsigned long flags;
556 int retval; 561 int retval;
557 562
@@ -596,7 +601,7 @@ static int serial_txx9_startup(struct uart_port *port)
596 601
597static void serial_txx9_shutdown(struct uart_port *port) 602static void serial_txx9_shutdown(struct uart_port *port)
598{ 603{
599 struct uart_txx9_port *up = (struct uart_txx9_port *)port; 604 struct uart_txx9_port *up = to_uart_txx9_port(port);
600 unsigned long flags; 605 unsigned long flags;
601 606
602 /* 607 /*
@@ -636,7 +641,7 @@ static void
636serial_txx9_set_termios(struct uart_port *port, struct ktermios *termios, 641serial_txx9_set_termios(struct uart_port *port, struct ktermios *termios,
637 struct ktermios *old) 642 struct ktermios *old)
638{ 643{
639 struct uart_txx9_port *up = (struct uart_txx9_port *)port; 644 struct uart_txx9_port *up = to_uart_txx9_port(port);
640 unsigned int cval, fcr = 0; 645 unsigned int cval, fcr = 0;
641 unsigned long flags; 646 unsigned long flags;
642 unsigned int baud, quot; 647 unsigned int baud, quot;
@@ -814,19 +819,19 @@ static void serial_txx9_release_resource(struct uart_txx9_port *up)
814 819
815static void serial_txx9_release_port(struct uart_port *port) 820static void serial_txx9_release_port(struct uart_port *port)
816{ 821{
817 struct uart_txx9_port *up = (struct uart_txx9_port *)port; 822 struct uart_txx9_port *up = to_uart_txx9_port(port);
818 serial_txx9_release_resource(up); 823 serial_txx9_release_resource(up);
819} 824}
820 825
821static int serial_txx9_request_port(struct uart_port *port) 826static int serial_txx9_request_port(struct uart_port *port)
822{ 827{
823 struct uart_txx9_port *up = (struct uart_txx9_port *)port; 828 struct uart_txx9_port *up = to_uart_txx9_port(port);
824 return serial_txx9_request_resource(up); 829 return serial_txx9_request_resource(up);
825} 830}
826 831
827static void serial_txx9_config_port(struct uart_port *port, int uflags) 832static void serial_txx9_config_port(struct uart_port *port, int uflags)
828{ 833{
829 struct uart_txx9_port *up = (struct uart_txx9_port *)port; 834 struct uart_txx9_port *up = to_uart_txx9_port(port);
830 int ret; 835 int ret;
831 836
832 /* 837 /*
@@ -897,7 +902,7 @@ static void __init serial_txx9_register_ports(struct uart_driver *drv,
897 902
898static void serial_txx9_console_putchar(struct uart_port *port, int ch) 903static void serial_txx9_console_putchar(struct uart_port *port, int ch)
899{ 904{
900 struct uart_txx9_port *up = (struct uart_txx9_port *)port; 905 struct uart_txx9_port *up = to_uart_txx9_port(port);
901 906
902 wait_for_xmitr(up); 907 wait_for_xmitr(up);
903 sio_out(up, TXX9_SITFIFO, ch); 908 sio_out(up, TXX9_SITFIFO, ch);
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index 6d7a3f82c54b..21a118269cac 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -17,7 +17,7 @@ obj-$(CONFIG_SPI_BITBANG) += spi_bitbang.o
17obj-$(CONFIG_SPI_AU1550) += au1550_spi.o 17obj-$(CONFIG_SPI_AU1550) += au1550_spi.o
18obj-$(CONFIG_SPI_BUTTERFLY) += spi_butterfly.o 18obj-$(CONFIG_SPI_BUTTERFLY) += spi_butterfly.o
19obj-$(CONFIG_SPI_GPIO) += spi_gpio.o 19obj-$(CONFIG_SPI_GPIO) += spi_gpio.o
20obj-$(CONFIG_SPI_IMX) += mxc_spi.o 20obj-$(CONFIG_SPI_IMX) += spi_imx.o
21obj-$(CONFIG_SPI_LM70_LLP) += spi_lm70llp.o 21obj-$(CONFIG_SPI_LM70_LLP) += spi_lm70llp.o
22obj-$(CONFIG_SPI_PXA2XX) += pxa2xx_spi.o 22obj-$(CONFIG_SPI_PXA2XX) += pxa2xx_spi.o
23obj-$(CONFIG_SPI_OMAP_UWIRE) += omap_uwire.o 23obj-$(CONFIG_SPI_OMAP_UWIRE) += omap_uwire.o
diff --git a/drivers/spi/mxc_spi.c b/drivers/spi/spi_imx.c
index b1447236ae81..89c22efedfb0 100644
--- a/drivers/spi/mxc_spi.c
+++ b/drivers/spi/spi_imx.c
@@ -48,14 +48,14 @@
48#define MXC_INT_RR (1 << 0) /* Receive data ready interrupt */ 48#define MXC_INT_RR (1 << 0) /* Receive data ready interrupt */
49#define MXC_INT_TE (1 << 1) /* Transmit FIFO empty interrupt */ 49#define MXC_INT_TE (1 << 1) /* Transmit FIFO empty interrupt */
50 50
51struct mxc_spi_config { 51struct spi_imx_config {
52 unsigned int speed_hz; 52 unsigned int speed_hz;
53 unsigned int bpw; 53 unsigned int bpw;
54 unsigned int mode; 54 unsigned int mode;
55 int cs; 55 int cs;
56}; 56};
57 57
58struct mxc_spi_data { 58struct spi_imx_data {
59 struct spi_bitbang bitbang; 59 struct spi_bitbang bitbang;
60 60
61 struct completion xfer_done; 61 struct completion xfer_done;
@@ -66,43 +66,43 @@ struct mxc_spi_data {
66 int *chipselect; 66 int *chipselect;
67 67
68 unsigned int count; 68 unsigned int count;
69 void (*tx)(struct mxc_spi_data *); 69 void (*tx)(struct spi_imx_data *);
70 void (*rx)(struct mxc_spi_data *); 70 void (*rx)(struct spi_imx_data *);
71 void *rx_buf; 71 void *rx_buf;
72 const void *tx_buf; 72 const void *tx_buf;
73 unsigned int txfifo; /* number of words pushed in tx FIFO */ 73 unsigned int txfifo; /* number of words pushed in tx FIFO */
74 74
75 /* SoC specific functions */ 75 /* SoC specific functions */
76 void (*intctrl)(struct mxc_spi_data *, int); 76 void (*intctrl)(struct spi_imx_data *, int);
77 int (*config)(struct mxc_spi_data *, struct mxc_spi_config *); 77 int (*config)(struct spi_imx_data *, struct spi_imx_config *);
78 void (*trigger)(struct mxc_spi_data *); 78 void (*trigger)(struct spi_imx_data *);
79 int (*rx_available)(struct mxc_spi_data *); 79 int (*rx_available)(struct spi_imx_data *);
80}; 80};
81 81
82#define MXC_SPI_BUF_RX(type) \ 82#define MXC_SPI_BUF_RX(type) \
83static void mxc_spi_buf_rx_##type(struct mxc_spi_data *mxc_spi) \ 83static void spi_imx_buf_rx_##type(struct spi_imx_data *spi_imx) \
84{ \ 84{ \
85 unsigned int val = readl(mxc_spi->base + MXC_CSPIRXDATA); \ 85 unsigned int val = readl(spi_imx->base + MXC_CSPIRXDATA); \
86 \ 86 \
87 if (mxc_spi->rx_buf) { \ 87 if (spi_imx->rx_buf) { \
88 *(type *)mxc_spi->rx_buf = val; \ 88 *(type *)spi_imx->rx_buf = val; \
89 mxc_spi->rx_buf += sizeof(type); \ 89 spi_imx->rx_buf += sizeof(type); \
90 } \ 90 } \
91} 91}
92 92
93#define MXC_SPI_BUF_TX(type) \ 93#define MXC_SPI_BUF_TX(type) \
94static void mxc_spi_buf_tx_##type(struct mxc_spi_data *mxc_spi) \ 94static void spi_imx_buf_tx_##type(struct spi_imx_data *spi_imx) \
95{ \ 95{ \
96 type val = 0; \ 96 type val = 0; \
97 \ 97 \
98 if (mxc_spi->tx_buf) { \ 98 if (spi_imx->tx_buf) { \
99 val = *(type *)mxc_spi->tx_buf; \ 99 val = *(type *)spi_imx->tx_buf; \
100 mxc_spi->tx_buf += sizeof(type); \ 100 spi_imx->tx_buf += sizeof(type); \
101 } \ 101 } \
102 \ 102 \
103 mxc_spi->count -= sizeof(type); \ 103 spi_imx->count -= sizeof(type); \
104 \ 104 \
105 writel(val, mxc_spi->base + MXC_CSPITXDATA); \ 105 writel(val, spi_imx->base + MXC_CSPITXDATA); \
106} 106}
107 107
108MXC_SPI_BUF_RX(u8) 108MXC_SPI_BUF_RX(u8)
@@ -119,7 +119,7 @@ static int mxc_clkdivs[] = {0, 3, 4, 6, 8, 12, 16, 24, 32, 48, 64, 96, 128, 192,
119 256, 384, 512, 768, 1024}; 119 256, 384, 512, 768, 1024};
120 120
121/* MX21, MX27 */ 121/* MX21, MX27 */
122static unsigned int mxc_spi_clkdiv_1(unsigned int fin, 122static unsigned int spi_imx_clkdiv_1(unsigned int fin,
123 unsigned int fspi) 123 unsigned int fspi)
124{ 124{
125 int i, max; 125 int i, max;
@@ -137,7 +137,7 @@ static unsigned int mxc_spi_clkdiv_1(unsigned int fin,
137} 137}
138 138
139/* MX1, MX31, MX35 */ 139/* MX1, MX31, MX35 */
140static unsigned int mxc_spi_clkdiv_2(unsigned int fin, 140static unsigned int spi_imx_clkdiv_2(unsigned int fin,
141 unsigned int fspi) 141 unsigned int fspi)
142{ 142{
143 int i, div = 4; 143 int i, div = 4;
@@ -174,7 +174,7 @@ static unsigned int mxc_spi_clkdiv_2(unsigned int fin,
174 * the i.MX35 has a slightly different register layout for bits 174 * the i.MX35 has a slightly different register layout for bits
175 * we do not use here. 175 * we do not use here.
176 */ 176 */
177static void mx31_intctrl(struct mxc_spi_data *mxc_spi, int enable) 177static void mx31_intctrl(struct spi_imx_data *spi_imx, int enable)
178{ 178{
179 unsigned int val = 0; 179 unsigned int val = 0;
180 180
@@ -183,24 +183,24 @@ static void mx31_intctrl(struct mxc_spi_data *mxc_spi, int enable)
183 if (enable & MXC_INT_RR) 183 if (enable & MXC_INT_RR)
184 val |= MX31_INTREG_RREN; 184 val |= MX31_INTREG_RREN;
185 185
186 writel(val, mxc_spi->base + MXC_CSPIINT); 186 writel(val, spi_imx->base + MXC_CSPIINT);
187} 187}
188 188
189static void mx31_trigger(struct mxc_spi_data *mxc_spi) 189static void mx31_trigger(struct spi_imx_data *spi_imx)
190{ 190{
191 unsigned int reg; 191 unsigned int reg;
192 192
193 reg = readl(mxc_spi->base + MXC_CSPICTRL); 193 reg = readl(spi_imx->base + MXC_CSPICTRL);
194 reg |= MX31_CSPICTRL_XCH; 194 reg |= MX31_CSPICTRL_XCH;
195 writel(reg, mxc_spi->base + MXC_CSPICTRL); 195 writel(reg, spi_imx->base + MXC_CSPICTRL);
196} 196}
197 197
198static int mx31_config(struct mxc_spi_data *mxc_spi, 198static int mx31_config(struct spi_imx_data *spi_imx,
199 struct mxc_spi_config *config) 199 struct spi_imx_config *config)
200{ 200{
201 unsigned int reg = MX31_CSPICTRL_ENABLE | MX31_CSPICTRL_MASTER; 201 unsigned int reg = MX31_CSPICTRL_ENABLE | MX31_CSPICTRL_MASTER;
202 202
203 reg |= mxc_spi_clkdiv_2(mxc_spi->spi_clk, config->speed_hz) << 203 reg |= spi_imx_clkdiv_2(spi_imx->spi_clk, config->speed_hz) <<
204 MX31_CSPICTRL_DR_SHIFT; 204 MX31_CSPICTRL_DR_SHIFT;
205 205
206 if (cpu_is_mx31()) 206 if (cpu_is_mx31())
@@ -223,14 +223,14 @@ static int mx31_config(struct mxc_spi_data *mxc_spi,
223 reg |= (config->cs + 32) << MX35_CSPICTRL_CS_SHIFT; 223 reg |= (config->cs + 32) << MX35_CSPICTRL_CS_SHIFT;
224 } 224 }
225 225
226 writel(reg, mxc_spi->base + MXC_CSPICTRL); 226 writel(reg, spi_imx->base + MXC_CSPICTRL);
227 227
228 return 0; 228 return 0;
229} 229}
230 230
231static int mx31_rx_available(struct mxc_spi_data *mxc_spi) 231static int mx31_rx_available(struct spi_imx_data *spi_imx)
232{ 232{
233 return readl(mxc_spi->base + MX31_CSPISTATUS) & MX31_STATUS_RR; 233 return readl(spi_imx->base + MX31_CSPISTATUS) & MX31_STATUS_RR;
234} 234}
235 235
236#define MX27_INTREG_RR (1 << 4) 236#define MX27_INTREG_RR (1 << 4)
@@ -246,7 +246,7 @@ static int mx31_rx_available(struct mxc_spi_data *mxc_spi)
246#define MX27_CSPICTRL_DR_SHIFT 14 246#define MX27_CSPICTRL_DR_SHIFT 14
247#define MX27_CSPICTRL_CS_SHIFT 19 247#define MX27_CSPICTRL_CS_SHIFT 19
248 248
249static void mx27_intctrl(struct mxc_spi_data *mxc_spi, int enable) 249static void mx27_intctrl(struct spi_imx_data *spi_imx, int enable)
250{ 250{
251 unsigned int val = 0; 251 unsigned int val = 0;
252 252
@@ -255,24 +255,24 @@ static void mx27_intctrl(struct mxc_spi_data *mxc_spi, int enable)
255 if (enable & MXC_INT_RR) 255 if (enable & MXC_INT_RR)
256 val |= MX27_INTREG_RREN; 256 val |= MX27_INTREG_RREN;
257 257
258 writel(val, mxc_spi->base + MXC_CSPIINT); 258 writel(val, spi_imx->base + MXC_CSPIINT);
259} 259}
260 260
261static void mx27_trigger(struct mxc_spi_data *mxc_spi) 261static void mx27_trigger(struct spi_imx_data *spi_imx)
262{ 262{
263 unsigned int reg; 263 unsigned int reg;
264 264
265 reg = readl(mxc_spi->base + MXC_CSPICTRL); 265 reg = readl(spi_imx->base + MXC_CSPICTRL);
266 reg |= MX27_CSPICTRL_XCH; 266 reg |= MX27_CSPICTRL_XCH;
267 writel(reg, mxc_spi->base + MXC_CSPICTRL); 267 writel(reg, spi_imx->base + MXC_CSPICTRL);
268} 268}
269 269
270static int mx27_config(struct mxc_spi_data *mxc_spi, 270static int mx27_config(struct spi_imx_data *spi_imx,
271 struct mxc_spi_config *config) 271 struct spi_imx_config *config)
272{ 272{
273 unsigned int reg = MX27_CSPICTRL_ENABLE | MX27_CSPICTRL_MASTER; 273 unsigned int reg = MX27_CSPICTRL_ENABLE | MX27_CSPICTRL_MASTER;
274 274
275 reg |= mxc_spi_clkdiv_1(mxc_spi->spi_clk, config->speed_hz) << 275 reg |= spi_imx_clkdiv_1(spi_imx->spi_clk, config->speed_hz) <<
276 MX27_CSPICTRL_DR_SHIFT; 276 MX27_CSPICTRL_DR_SHIFT;
277 reg |= config->bpw - 1; 277 reg |= config->bpw - 1;
278 278
@@ -285,14 +285,14 @@ static int mx27_config(struct mxc_spi_data *mxc_spi,
285 if (config->cs < 0) 285 if (config->cs < 0)
286 reg |= (config->cs + 32) << MX27_CSPICTRL_CS_SHIFT; 286 reg |= (config->cs + 32) << MX27_CSPICTRL_CS_SHIFT;
287 287
288 writel(reg, mxc_spi->base + MXC_CSPICTRL); 288 writel(reg, spi_imx->base + MXC_CSPICTRL);
289 289
290 return 0; 290 return 0;
291} 291}
292 292
293static int mx27_rx_available(struct mxc_spi_data *mxc_spi) 293static int mx27_rx_available(struct spi_imx_data *spi_imx)
294{ 294{
295 return readl(mxc_spi->base + MXC_CSPIINT) & MX27_INTREG_RR; 295 return readl(spi_imx->base + MXC_CSPIINT) & MX27_INTREG_RR;
296} 296}
297 297
298#define MX1_INTREG_RR (1 << 3) 298#define MX1_INTREG_RR (1 << 3)
@@ -306,7 +306,7 @@ static int mx27_rx_available(struct mxc_spi_data *mxc_spi)
306#define MX1_CSPICTRL_MASTER (1 << 10) 306#define MX1_CSPICTRL_MASTER (1 << 10)
307#define MX1_CSPICTRL_DR_SHIFT 13 307#define MX1_CSPICTRL_DR_SHIFT 13
308 308
309static void mx1_intctrl(struct mxc_spi_data *mxc_spi, int enable) 309static void mx1_intctrl(struct spi_imx_data *spi_imx, int enable)
310{ 310{
311 unsigned int val = 0; 311 unsigned int val = 0;
312 312
@@ -315,24 +315,24 @@ static void mx1_intctrl(struct mxc_spi_data *mxc_spi, int enable)
315 if (enable & MXC_INT_RR) 315 if (enable & MXC_INT_RR)
316 val |= MX1_INTREG_RREN; 316 val |= MX1_INTREG_RREN;
317 317
318 writel(val, mxc_spi->base + MXC_CSPIINT); 318 writel(val, spi_imx->base + MXC_CSPIINT);
319} 319}
320 320
321static void mx1_trigger(struct mxc_spi_data *mxc_spi) 321static void mx1_trigger(struct spi_imx_data *spi_imx)
322{ 322{
323 unsigned int reg; 323 unsigned int reg;
324 324
325 reg = readl(mxc_spi->base + MXC_CSPICTRL); 325 reg = readl(spi_imx->base + MXC_CSPICTRL);
326 reg |= MX1_CSPICTRL_XCH; 326 reg |= MX1_CSPICTRL_XCH;
327 writel(reg, mxc_spi->base + MXC_CSPICTRL); 327 writel(reg, spi_imx->base + MXC_CSPICTRL);
328} 328}
329 329
330static int mx1_config(struct mxc_spi_data *mxc_spi, 330static int mx1_config(struct spi_imx_data *spi_imx,
331 struct mxc_spi_config *config) 331 struct spi_imx_config *config)
332{ 332{
333 unsigned int reg = MX1_CSPICTRL_ENABLE | MX1_CSPICTRL_MASTER; 333 unsigned int reg = MX1_CSPICTRL_ENABLE | MX1_CSPICTRL_MASTER;
334 334
335 reg |= mxc_spi_clkdiv_2(mxc_spi->spi_clk, config->speed_hz) << 335 reg |= spi_imx_clkdiv_2(spi_imx->spi_clk, config->speed_hz) <<
336 MX1_CSPICTRL_DR_SHIFT; 336 MX1_CSPICTRL_DR_SHIFT;
337 reg |= config->bpw - 1; 337 reg |= config->bpw - 1;
338 338
@@ -341,156 +341,151 @@ static int mx1_config(struct mxc_spi_data *mxc_spi,
341 if (config->mode & SPI_CPOL) 341 if (config->mode & SPI_CPOL)
342 reg |= MX1_CSPICTRL_POL; 342 reg |= MX1_CSPICTRL_POL;
343 343
344 writel(reg, mxc_spi->base + MXC_CSPICTRL); 344 writel(reg, spi_imx->base + MXC_CSPICTRL);
345 345
346 return 0; 346 return 0;
347} 347}
348 348
349static int mx1_rx_available(struct mxc_spi_data *mxc_spi) 349static int mx1_rx_available(struct spi_imx_data *spi_imx)
350{ 350{
351 return readl(mxc_spi->base + MXC_CSPIINT) & MX1_INTREG_RR; 351 return readl(spi_imx->base + MXC_CSPIINT) & MX1_INTREG_RR;
352} 352}
353 353
354static void mxc_spi_chipselect(struct spi_device *spi, int is_active) 354static void spi_imx_chipselect(struct spi_device *spi, int is_active)
355{ 355{
356 struct mxc_spi_data *mxc_spi = spi_master_get_devdata(spi->master); 356 struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
357 unsigned int cs = 0; 357 int gpio = spi_imx->chipselect[spi->chip_select];
358 int gpio = mxc_spi->chipselect[spi->chip_select]; 358 int active = is_active != BITBANG_CS_INACTIVE;
359 struct mxc_spi_config config; 359 int dev_is_lowactive = !(spi->mode & SPI_CS_HIGH);
360 360
361 if (spi->mode & SPI_CS_HIGH) 361 if (gpio < 0)
362 cs = 1;
363
364 if (is_active == BITBANG_CS_INACTIVE) {
365 if (gpio >= 0)
366 gpio_set_value(gpio, !cs);
367 return; 362 return;
368 }
369
370 config.bpw = spi->bits_per_word;
371 config.speed_hz = spi->max_speed_hz;
372 config.mode = spi->mode;
373 config.cs = mxc_spi->chipselect[spi->chip_select];
374
375 mxc_spi->config(mxc_spi, &config);
376
377 /* Initialize the functions for transfer */
378 if (config.bpw <= 8) {
379 mxc_spi->rx = mxc_spi_buf_rx_u8;
380 mxc_spi->tx = mxc_spi_buf_tx_u8;
381 } else if (config.bpw <= 16) {
382 mxc_spi->rx = mxc_spi_buf_rx_u16;
383 mxc_spi->tx = mxc_spi_buf_tx_u16;
384 } else if (config.bpw <= 32) {
385 mxc_spi->rx = mxc_spi_buf_rx_u32;
386 mxc_spi->tx = mxc_spi_buf_tx_u32;
387 } else
388 BUG();
389 363
390 if (gpio >= 0) 364 gpio_set_value(gpio, dev_is_lowactive ^ active);
391 gpio_set_value(gpio, cs);
392
393 return;
394} 365}
395 366
396static void mxc_spi_push(struct mxc_spi_data *mxc_spi) 367static void spi_imx_push(struct spi_imx_data *spi_imx)
397{ 368{
398 while (mxc_spi->txfifo < 8) { 369 while (spi_imx->txfifo < 8) {
399 if (!mxc_spi->count) 370 if (!spi_imx->count)
400 break; 371 break;
401 mxc_spi->tx(mxc_spi); 372 spi_imx->tx(spi_imx);
402 mxc_spi->txfifo++; 373 spi_imx->txfifo++;
403 } 374 }
404 375
405 mxc_spi->trigger(mxc_spi); 376 spi_imx->trigger(spi_imx);
406} 377}
407 378
408static irqreturn_t mxc_spi_isr(int irq, void *dev_id) 379static irqreturn_t spi_imx_isr(int irq, void *dev_id)
409{ 380{
410 struct mxc_spi_data *mxc_spi = dev_id; 381 struct spi_imx_data *spi_imx = dev_id;
411 382
412 while (mxc_spi->rx_available(mxc_spi)) { 383 while (spi_imx->rx_available(spi_imx)) {
413 mxc_spi->rx(mxc_spi); 384 spi_imx->rx(spi_imx);
414 mxc_spi->txfifo--; 385 spi_imx->txfifo--;
415 } 386 }
416 387
417 if (mxc_spi->count) { 388 if (spi_imx->count) {
418 mxc_spi_push(mxc_spi); 389 spi_imx_push(spi_imx);
419 return IRQ_HANDLED; 390 return IRQ_HANDLED;
420 } 391 }
421 392
422 if (mxc_spi->txfifo) { 393 if (spi_imx->txfifo) {
423 /* No data left to push, but still waiting for rx data, 394 /* No data left to push, but still waiting for rx data,
424 * enable receive data available interrupt. 395 * enable receive data available interrupt.
425 */ 396 */
426 mxc_spi->intctrl(mxc_spi, MXC_INT_RR); 397 spi_imx->intctrl(spi_imx, MXC_INT_RR);
427 return IRQ_HANDLED; 398 return IRQ_HANDLED;
428 } 399 }
429 400
430 mxc_spi->intctrl(mxc_spi, 0); 401 spi_imx->intctrl(spi_imx, 0);
431 complete(&mxc_spi->xfer_done); 402 complete(&spi_imx->xfer_done);
432 403
433 return IRQ_HANDLED; 404 return IRQ_HANDLED;
434} 405}
435 406
436static int mxc_spi_setupxfer(struct spi_device *spi, 407static int spi_imx_setupxfer(struct spi_device *spi,
437 struct spi_transfer *t) 408 struct spi_transfer *t)
438{ 409{
439 struct mxc_spi_data *mxc_spi = spi_master_get_devdata(spi->master); 410 struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
440 struct mxc_spi_config config; 411 struct spi_imx_config config;
441 412
442 config.bpw = t ? t->bits_per_word : spi->bits_per_word; 413 config.bpw = t ? t->bits_per_word : spi->bits_per_word;
443 config.speed_hz = t ? t->speed_hz : spi->max_speed_hz; 414 config.speed_hz = t ? t->speed_hz : spi->max_speed_hz;
444 config.mode = spi->mode; 415 config.mode = spi->mode;
416 config.cs = spi_imx->chipselect[spi->chip_select];
417
418 if (!config.speed_hz)
419 config.speed_hz = spi->max_speed_hz;
420 if (!config.bpw)
421 config.bpw = spi->bits_per_word;
422 if (!config.speed_hz)
423 config.speed_hz = spi->max_speed_hz;
424
425 /* Initialize the functions for transfer */
426 if (config.bpw <= 8) {
427 spi_imx->rx = spi_imx_buf_rx_u8;
428 spi_imx->tx = spi_imx_buf_tx_u8;
429 } else if (config.bpw <= 16) {
430 spi_imx->rx = spi_imx_buf_rx_u16;
431 spi_imx->tx = spi_imx_buf_tx_u16;
432 } else if (config.bpw <= 32) {
433 spi_imx->rx = spi_imx_buf_rx_u32;
434 spi_imx->tx = spi_imx_buf_tx_u32;
435 } else
436 BUG();
445 437
446 mxc_spi->config(mxc_spi, &config); 438 spi_imx->config(spi_imx, &config);
447 439
448 return 0; 440 return 0;
449} 441}
450 442
451static int mxc_spi_transfer(struct spi_device *spi, 443static int spi_imx_transfer(struct spi_device *spi,
452 struct spi_transfer *transfer) 444 struct spi_transfer *transfer)
453{ 445{
454 struct mxc_spi_data *mxc_spi = spi_master_get_devdata(spi->master); 446 struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
455 447
456 mxc_spi->tx_buf = transfer->tx_buf; 448 spi_imx->tx_buf = transfer->tx_buf;
457 mxc_spi->rx_buf = transfer->rx_buf; 449 spi_imx->rx_buf = transfer->rx_buf;
458 mxc_spi->count = transfer->len; 450 spi_imx->count = transfer->len;
459 mxc_spi->txfifo = 0; 451 spi_imx->txfifo = 0;
460 452
461 init_completion(&mxc_spi->xfer_done); 453 init_completion(&spi_imx->xfer_done);
462 454
463 mxc_spi_push(mxc_spi); 455 spi_imx_push(spi_imx);
464 456
465 mxc_spi->intctrl(mxc_spi, MXC_INT_TE); 457 spi_imx->intctrl(spi_imx, MXC_INT_TE);
466 458
467 wait_for_completion(&mxc_spi->xfer_done); 459 wait_for_completion(&spi_imx->xfer_done);
468 460
469 return transfer->len; 461 return transfer->len;
470} 462}
471 463
472static int mxc_spi_setup(struct spi_device *spi) 464static int spi_imx_setup(struct spi_device *spi)
473{ 465{
474 if (!spi->bits_per_word) 466 struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
475 spi->bits_per_word = 8; 467 int gpio = spi_imx->chipselect[spi->chip_select];
476 468
477 pr_debug("%s: mode %d, %u bpw, %d hz\n", __func__, 469 pr_debug("%s: mode %d, %u bpw, %d hz\n", __func__,
478 spi->mode, spi->bits_per_word, spi->max_speed_hz); 470 spi->mode, spi->bits_per_word, spi->max_speed_hz);
479 471
480 mxc_spi_chipselect(spi, BITBANG_CS_INACTIVE); 472 if (gpio >= 0)
473 gpio_direction_output(gpio, spi->mode & SPI_CS_HIGH ? 0 : 1);
474
475 spi_imx_chipselect(spi, BITBANG_CS_INACTIVE);
481 476
482 return 0; 477 return 0;
483} 478}
484 479
485static void mxc_spi_cleanup(struct spi_device *spi) 480static void spi_imx_cleanup(struct spi_device *spi)
486{ 481{
487} 482}
488 483
489static int __init mxc_spi_probe(struct platform_device *pdev) 484static int __init spi_imx_probe(struct platform_device *pdev)
490{ 485{
491 struct spi_imx_master *mxc_platform_info; 486 struct spi_imx_master *mxc_platform_info;
492 struct spi_master *master; 487 struct spi_master *master;
493 struct mxc_spi_data *mxc_spi; 488 struct spi_imx_data *spi_imx;
494 struct resource *res; 489 struct resource *res;
495 int i, ret; 490 int i, ret;
496 491
@@ -500,7 +495,7 @@ static int __init mxc_spi_probe(struct platform_device *pdev)
500 return -EINVAL; 495 return -EINVAL;
501 } 496 }
502 497
503 master = spi_alloc_master(&pdev->dev, sizeof(struct mxc_spi_data)); 498 master = spi_alloc_master(&pdev->dev, sizeof(struct spi_imx_data));
504 if (!master) 499 if (!master)
505 return -ENOMEM; 500 return -ENOMEM;
506 501
@@ -509,32 +504,32 @@ static int __init mxc_spi_probe(struct platform_device *pdev)
509 master->bus_num = pdev->id; 504 master->bus_num = pdev->id;
510 master->num_chipselect = mxc_platform_info->num_chipselect; 505 master->num_chipselect = mxc_platform_info->num_chipselect;
511 506
512 mxc_spi = spi_master_get_devdata(master); 507 spi_imx = spi_master_get_devdata(master);
513 mxc_spi->bitbang.master = spi_master_get(master); 508 spi_imx->bitbang.master = spi_master_get(master);
514 mxc_spi->chipselect = mxc_platform_info->chipselect; 509 spi_imx->chipselect = mxc_platform_info->chipselect;
515 510
516 for (i = 0; i < master->num_chipselect; i++) { 511 for (i = 0; i < master->num_chipselect; i++) {
517 if (mxc_spi->chipselect[i] < 0) 512 if (spi_imx->chipselect[i] < 0)
518 continue; 513 continue;
519 ret = gpio_request(mxc_spi->chipselect[i], DRIVER_NAME); 514 ret = gpio_request(spi_imx->chipselect[i], DRIVER_NAME);
520 if (ret) { 515 if (ret) {
521 i--; 516 i--;
522 while (i > 0) 517 while (i > 0)
523 if (mxc_spi->chipselect[i] >= 0) 518 if (spi_imx->chipselect[i] >= 0)
524 gpio_free(mxc_spi->chipselect[i--]); 519 gpio_free(spi_imx->chipselect[i--]);
525 dev_err(&pdev->dev, "can't get cs gpios"); 520 dev_err(&pdev->dev, "can't get cs gpios");
526 goto out_master_put; 521 goto out_master_put;
527 } 522 }
528 gpio_direction_output(mxc_spi->chipselect[i], 1);
529 } 523 }
530 524
531 mxc_spi->bitbang.chipselect = mxc_spi_chipselect; 525 spi_imx->bitbang.chipselect = spi_imx_chipselect;
532 mxc_spi->bitbang.setup_transfer = mxc_spi_setupxfer; 526 spi_imx->bitbang.setup_transfer = spi_imx_setupxfer;
533 mxc_spi->bitbang.txrx_bufs = mxc_spi_transfer; 527 spi_imx->bitbang.txrx_bufs = spi_imx_transfer;
534 mxc_spi->bitbang.master->setup = mxc_spi_setup; 528 spi_imx->bitbang.master->setup = spi_imx_setup;
535 mxc_spi->bitbang.master->cleanup = mxc_spi_cleanup; 529 spi_imx->bitbang.master->cleanup = spi_imx_cleanup;
530 spi_imx->bitbang.master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
536 531
537 init_completion(&mxc_spi->xfer_done); 532 init_completion(&spi_imx->xfer_done);
538 533
539 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 534 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
540 if (!res) { 535 if (!res) {
@@ -549,58 +544,58 @@ static int __init mxc_spi_probe(struct platform_device *pdev)
549 goto out_gpio_free; 544 goto out_gpio_free;
550 } 545 }
551 546
552 mxc_spi->base = ioremap(res->start, resource_size(res)); 547 spi_imx->base = ioremap(res->start, resource_size(res));
553 if (!mxc_spi->base) { 548 if (!spi_imx->base) {
554 ret = -EINVAL; 549 ret = -EINVAL;
555 goto out_release_mem; 550 goto out_release_mem;
556 } 551 }
557 552
558 mxc_spi->irq = platform_get_irq(pdev, 0); 553 spi_imx->irq = platform_get_irq(pdev, 0);
559 if (!mxc_spi->irq) { 554 if (!spi_imx->irq) {
560 ret = -EINVAL; 555 ret = -EINVAL;
561 goto out_iounmap; 556 goto out_iounmap;
562 } 557 }
563 558
564 ret = request_irq(mxc_spi->irq, mxc_spi_isr, 0, DRIVER_NAME, mxc_spi); 559 ret = request_irq(spi_imx->irq, spi_imx_isr, 0, DRIVER_NAME, spi_imx);
565 if (ret) { 560 if (ret) {
566 dev_err(&pdev->dev, "can't get irq%d: %d\n", mxc_spi->irq, ret); 561 dev_err(&pdev->dev, "can't get irq%d: %d\n", spi_imx->irq, ret);
567 goto out_iounmap; 562 goto out_iounmap;
568 } 563 }
569 564
570 if (cpu_is_mx31() || cpu_is_mx35()) { 565 if (cpu_is_mx31() || cpu_is_mx35()) {
571 mxc_spi->intctrl = mx31_intctrl; 566 spi_imx->intctrl = mx31_intctrl;
572 mxc_spi->config = mx31_config; 567 spi_imx->config = mx31_config;
573 mxc_spi->trigger = mx31_trigger; 568 spi_imx->trigger = mx31_trigger;
574 mxc_spi->rx_available = mx31_rx_available; 569 spi_imx->rx_available = mx31_rx_available;
575 } else if (cpu_is_mx27() || cpu_is_mx21()) { 570 } else if (cpu_is_mx27() || cpu_is_mx21()) {
576 mxc_spi->intctrl = mx27_intctrl; 571 spi_imx->intctrl = mx27_intctrl;
577 mxc_spi->config = mx27_config; 572 spi_imx->config = mx27_config;
578 mxc_spi->trigger = mx27_trigger; 573 spi_imx->trigger = mx27_trigger;
579 mxc_spi->rx_available = mx27_rx_available; 574 spi_imx->rx_available = mx27_rx_available;
580 } else if (cpu_is_mx1()) { 575 } else if (cpu_is_mx1()) {
581 mxc_spi->intctrl = mx1_intctrl; 576 spi_imx->intctrl = mx1_intctrl;
582 mxc_spi->config = mx1_config; 577 spi_imx->config = mx1_config;
583 mxc_spi->trigger = mx1_trigger; 578 spi_imx->trigger = mx1_trigger;
584 mxc_spi->rx_available = mx1_rx_available; 579 spi_imx->rx_available = mx1_rx_available;
585 } else 580 } else
586 BUG(); 581 BUG();
587 582
588 mxc_spi->clk = clk_get(&pdev->dev, NULL); 583 spi_imx->clk = clk_get(&pdev->dev, NULL);
589 if (IS_ERR(mxc_spi->clk)) { 584 if (IS_ERR(spi_imx->clk)) {
590 dev_err(&pdev->dev, "unable to get clock\n"); 585 dev_err(&pdev->dev, "unable to get clock\n");
591 ret = PTR_ERR(mxc_spi->clk); 586 ret = PTR_ERR(spi_imx->clk);
592 goto out_free_irq; 587 goto out_free_irq;
593 } 588 }
594 589
595 clk_enable(mxc_spi->clk); 590 clk_enable(spi_imx->clk);
596 mxc_spi->spi_clk = clk_get_rate(mxc_spi->clk); 591 spi_imx->spi_clk = clk_get_rate(spi_imx->clk);
597 592
598 if (!cpu_is_mx31() || !cpu_is_mx35()) 593 if (!cpu_is_mx31() || !cpu_is_mx35())
599 writel(1, mxc_spi->base + MXC_RESET); 594 writel(1, spi_imx->base + MXC_RESET);
600 595
601 mxc_spi->intctrl(mxc_spi, 0); 596 spi_imx->intctrl(spi_imx, 0);
602 597
603 ret = spi_bitbang_start(&mxc_spi->bitbang); 598 ret = spi_bitbang_start(&spi_imx->bitbang);
604 if (ret) { 599 if (ret) {
605 dev_err(&pdev->dev, "bitbang start failed with %d\n", ret); 600 dev_err(&pdev->dev, "bitbang start failed with %d\n", ret);
606 goto out_clk_put; 601 goto out_clk_put;
@@ -611,18 +606,18 @@ static int __init mxc_spi_probe(struct platform_device *pdev)
611 return ret; 606 return ret;
612 607
613out_clk_put: 608out_clk_put:
614 clk_disable(mxc_spi->clk); 609 clk_disable(spi_imx->clk);
615 clk_put(mxc_spi->clk); 610 clk_put(spi_imx->clk);
616out_free_irq: 611out_free_irq:
617 free_irq(mxc_spi->irq, mxc_spi); 612 free_irq(spi_imx->irq, spi_imx);
618out_iounmap: 613out_iounmap:
619 iounmap(mxc_spi->base); 614 iounmap(spi_imx->base);
620out_release_mem: 615out_release_mem:
621 release_mem_region(res->start, resource_size(res)); 616 release_mem_region(res->start, resource_size(res));
622out_gpio_free: 617out_gpio_free:
623 for (i = 0; i < master->num_chipselect; i++) 618 for (i = 0; i < master->num_chipselect; i++)
624 if (mxc_spi->chipselect[i] >= 0) 619 if (spi_imx->chipselect[i] >= 0)
625 gpio_free(mxc_spi->chipselect[i]); 620 gpio_free(spi_imx->chipselect[i]);
626out_master_put: 621out_master_put:
627 spi_master_put(master); 622 spi_master_put(master);
628 kfree(master); 623 kfree(master);
@@ -630,24 +625,24 @@ out_master_put:
630 return ret; 625 return ret;
631} 626}
632 627
633static int __exit mxc_spi_remove(struct platform_device *pdev) 628static int __exit spi_imx_remove(struct platform_device *pdev)
634{ 629{
635 struct spi_master *master = platform_get_drvdata(pdev); 630 struct spi_master *master = platform_get_drvdata(pdev);
636 struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 631 struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
637 struct mxc_spi_data *mxc_spi = spi_master_get_devdata(master); 632 struct spi_imx_data *spi_imx = spi_master_get_devdata(master);
638 int i; 633 int i;
639 634
640 spi_bitbang_stop(&mxc_spi->bitbang); 635 spi_bitbang_stop(&spi_imx->bitbang);
641 636
642 writel(0, mxc_spi->base + MXC_CSPICTRL); 637 writel(0, spi_imx->base + MXC_CSPICTRL);
643 clk_disable(mxc_spi->clk); 638 clk_disable(spi_imx->clk);
644 clk_put(mxc_spi->clk); 639 clk_put(spi_imx->clk);
645 free_irq(mxc_spi->irq, mxc_spi); 640 free_irq(spi_imx->irq, spi_imx);
646 iounmap(mxc_spi->base); 641 iounmap(spi_imx->base);
647 642
648 for (i = 0; i < master->num_chipselect; i++) 643 for (i = 0; i < master->num_chipselect; i++)
649 if (mxc_spi->chipselect[i] >= 0) 644 if (spi_imx->chipselect[i] >= 0)
650 gpio_free(mxc_spi->chipselect[i]); 645 gpio_free(spi_imx->chipselect[i]);
651 646
652 spi_master_put(master); 647 spi_master_put(master);
653 648
@@ -658,27 +653,27 @@ static int __exit mxc_spi_remove(struct platform_device *pdev)
658 return 0; 653 return 0;
659} 654}
660 655
661static struct platform_driver mxc_spi_driver = { 656static struct platform_driver spi_imx_driver = {
662 .driver = { 657 .driver = {
663 .name = DRIVER_NAME, 658 .name = DRIVER_NAME,
664 .owner = THIS_MODULE, 659 .owner = THIS_MODULE,
665 }, 660 },
666 .probe = mxc_spi_probe, 661 .probe = spi_imx_probe,
667 .remove = __exit_p(mxc_spi_remove), 662 .remove = __exit_p(spi_imx_remove),
668}; 663};
669 664
670static int __init mxc_spi_init(void) 665static int __init spi_imx_init(void)
671{ 666{
672 return platform_driver_register(&mxc_spi_driver); 667 return platform_driver_register(&spi_imx_driver);
673} 668}
674 669
675static void __exit mxc_spi_exit(void) 670static void __exit spi_imx_exit(void)
676{ 671{
677 platform_driver_unregister(&mxc_spi_driver); 672 platform_driver_unregister(&spi_imx_driver);
678} 673}
679 674
680module_init(mxc_spi_init); 675module_init(spi_imx_init);
681module_exit(mxc_spi_exit); 676module_exit(spi_imx_exit);
682 677
683MODULE_DESCRIPTION("SPI Master Controller driver"); 678MODULE_DESCRIPTION("SPI Master Controller driver");
684MODULE_AUTHOR("Sascha Hauer, Pengutronix"); 679MODULE_AUTHOR("Sascha Hauer, Pengutronix");
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index f921bd1109e1..5d23983f02fc 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -537,7 +537,7 @@ static int spidev_release(struct inode *inode, struct file *filp)
537 return status; 537 return status;
538} 538}
539 539
540static struct file_operations spidev_fops = { 540static const struct file_operations spidev_fops = {
541 .owner = THIS_MODULE, 541 .owner = THIS_MODULE,
542 /* REVISIT switch to aio primitives, so that userspace 542 /* REVISIT switch to aio primitives, so that userspace
543 * gets more complete API coverage. It'll simplify things 543 * gets more complete API coverage. It'll simplify things
diff --git a/drivers/staging/dst/dcore.c b/drivers/staging/dst/dcore.c
index ac8577358ba0..ee1601026fb0 100644
--- a/drivers/staging/dst/dcore.c
+++ b/drivers/staging/dst/dcore.c
@@ -847,7 +847,7 @@ static dst_command_func dst_commands[] = {
847/* 847/*
848 * Configuration parser. 848 * Configuration parser.
849 */ 849 */
850static void cn_dst_callback(struct cn_msg *msg) 850static void cn_dst_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
851{ 851{
852 struct dst_ctl *ctl; 852 struct dst_ctl *ctl;
853 int err; 853 int err;
@@ -855,6 +855,11 @@ static void cn_dst_callback(struct cn_msg *msg)
855 struct dst_node *n = NULL, *tmp; 855 struct dst_node *n = NULL, *tmp;
856 unsigned int hash; 856 unsigned int hash;
857 857
858 if (!cap_raised(nsp->eff_cap, CAP_SYS_ADMIN)) {
859 err = -EPERM;
860 goto out;
861 }
862
858 if (msg->len < sizeof(struct dst_ctl)) { 863 if (msg->len < sizeof(struct dst_ctl)) {
859 err = -EBADMSG; 864 err = -EBADMSG;
860 goto out; 865 goto out;
diff --git a/drivers/staging/pohmelfs/config.c b/drivers/staging/pohmelfs/config.c
index 90f962ee5fd8..5d04bf5b021a 100644
--- a/drivers/staging/pohmelfs/config.c
+++ b/drivers/staging/pohmelfs/config.c
@@ -527,10 +527,13 @@ out_unlock:
527 return err; 527 return err;
528} 528}
529 529
530static void pohmelfs_cn_callback(struct cn_msg *msg) 530static void pohmelfs_cn_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
531{ 531{
532 int err; 532 int err;
533 533
534 if (!cap_raised(nsp->eff_cap, CAP_SYS_ADMIN))
535 return;
536
534 switch (msg->flags) { 537 switch (msg->flags) {
535 case POHMELFS_FLAGS_ADD: 538 case POHMELFS_FLAGS_ADD:
536 case POHMELFS_FLAGS_DEL: 539 case POHMELFS_FLAGS_DEL:
diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
index 03efb065455f..a9d707047202 100644
--- a/drivers/uio/uio.c
+++ b/drivers/uio/uio.c
@@ -658,7 +658,7 @@ static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
658 return 0; 658 return 0;
659} 659}
660 660
661static struct vm_operations_struct uio_vm_ops = { 661static const struct vm_operations_struct uio_vm_ops = {
662 .open = uio_vma_open, 662 .open = uio_vma_open,
663 .close = uio_vma_close, 663 .close = uio_vma_close,
664 .fault = uio_vma_fault, 664 .fault = uio_vma_fault,
diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c
index 333ee02e7b2b..864f0ba6a344 100644
--- a/drivers/usb/class/usbtmc.c
+++ b/drivers/usb/class/usbtmc.c
@@ -993,7 +993,7 @@ skip_io_on_zombie:
993 return retval; 993 return retval;
994} 994}
995 995
996static struct file_operations fops = { 996static const struct file_operations fops = {
997 .owner = THIS_MODULE, 997 .owner = THIS_MODULE,
998 .read = usbtmc_read, 998 .read = usbtmc_read,
999 .write = usbtmc_write, 999 .write = usbtmc_write,
diff --git a/drivers/usb/gadget/printer.c b/drivers/usb/gadget/printer.c
index 29500154d00c..2d867fd22413 100644
--- a/drivers/usb/gadget/printer.c
+++ b/drivers/usb/gadget/printer.c
@@ -875,7 +875,7 @@ printer_ioctl(struct file *fd, unsigned int code, unsigned long arg)
875} 875}
876 876
877/* used after endpoint configuration */ 877/* used after endpoint configuration */
878static struct file_operations printer_io_operations = { 878static const struct file_operations printer_io_operations = {
879 .owner = THIS_MODULE, 879 .owner = THIS_MODULE,
880 .open = printer_open, 880 .open = printer_open,
881 .read = printer_read, 881 .read = printer_read,
diff --git a/drivers/usb/host/whci/debug.c b/drivers/usb/host/whci/debug.c
index cf2d45946c57..2273c815941f 100644
--- a/drivers/usb/host/whci/debug.c
+++ b/drivers/usb/host/whci/debug.c
@@ -134,7 +134,7 @@ static int pzl_open(struct inode *inode, struct file *file)
134 return single_open(file, pzl_print, inode->i_private); 134 return single_open(file, pzl_print, inode->i_private);
135} 135}
136 136
137static struct file_operations di_fops = { 137static const struct file_operations di_fops = {
138 .open = di_open, 138 .open = di_open,
139 .read = seq_read, 139 .read = seq_read,
140 .llseek = seq_lseek, 140 .llseek = seq_lseek,
@@ -142,7 +142,7 @@ static struct file_operations di_fops = {
142 .owner = THIS_MODULE, 142 .owner = THIS_MODULE,
143}; 143};
144 144
145static struct file_operations asl_fops = { 145static const struct file_operations asl_fops = {
146 .open = asl_open, 146 .open = asl_open,
147 .read = seq_read, 147 .read = seq_read,
148 .llseek = seq_lseek, 148 .llseek = seq_lseek,
@@ -150,7 +150,7 @@ static struct file_operations asl_fops = {
150 .owner = THIS_MODULE, 150 .owner = THIS_MODULE,
151}; 151};
152 152
153static struct file_operations pzl_fops = { 153static const struct file_operations pzl_fops = {
154 .open = pzl_open, 154 .open = pzl_open,
155 .read = seq_read, 155 .read = seq_read,
156 .llseek = seq_lseek, 156 .llseek = seq_lseek,
diff --git a/drivers/usb/misc/rio500.c b/drivers/usb/misc/rio500.c
index d645f3899fe1..32d0199d0c32 100644
--- a/drivers/usb/misc/rio500.c
+++ b/drivers/usb/misc/rio500.c
@@ -429,8 +429,7 @@ read_rio(struct file *file, char __user *buffer, size_t count, loff_t * ppos)
429 return read_count; 429 return read_count;
430} 430}
431 431
432static struct 432static const struct file_operations usb_rio_fops = {
433file_operations usb_rio_fops = {
434 .owner = THIS_MODULE, 433 .owner = THIS_MODULE,
435 .read = read_rio, 434 .read = read_rio,
436 .write = write_rio, 435 .write = write_rio,
diff --git a/drivers/usb/mon/mon_bin.c b/drivers/usb/mon/mon_bin.c
index dfdc43e2e00d..9ed3e741bee1 100644
--- a/drivers/usb/mon/mon_bin.c
+++ b/drivers/usb/mon/mon_bin.c
@@ -1174,7 +1174,7 @@ static int mon_bin_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1174 return 0; 1174 return 0;
1175} 1175}
1176 1176
1177static struct vm_operations_struct mon_bin_vm_ops = { 1177static const struct vm_operations_struct mon_bin_vm_ops = {
1178 .open = mon_bin_vma_open, 1178 .open = mon_bin_vma_open,
1179 .close = mon_bin_vma_close, 1179 .close = mon_bin_vma_close,
1180 .fault = mon_bin_vma_fault, 1180 .fault = mon_bin_vma_fault,
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
index ff75a3589e7e..aa6b2ae951ae 100644
--- a/drivers/usb/serial/usb-serial.c
+++ b/drivers/usb/serial/usb-serial.c
@@ -192,7 +192,7 @@ void usb_serial_put(struct usb_serial *serial)
192 * This is the first place a new tty gets used. Hence this is where we 192 * This is the first place a new tty gets used. Hence this is where we
193 * acquire references to the usb_serial structure and the driver module, 193 * acquire references to the usb_serial structure and the driver module,
194 * where we store a pointer to the port, and where we do an autoresume. 194 * where we store a pointer to the port, and where we do an autoresume.
195 * All these actions are reversed in serial_release(). 195 * All these actions are reversed in serial_cleanup().
196 */ 196 */
197static int serial_install(struct tty_driver *driver, struct tty_struct *tty) 197static int serial_install(struct tty_driver *driver, struct tty_struct *tty)
198{ 198{
@@ -339,15 +339,16 @@ static void serial_close(struct tty_struct *tty, struct file *filp)
339} 339}
340 340
341/** 341/**
342 * serial_release - free resources post close/hangup 342 * serial_cleanup - free resources post close/hangup
343 * @port: port to free up 343 * @port: port to free up
344 * 344 *
345 * Do the resource freeing and refcount dropping for the port. 345 * Do the resource freeing and refcount dropping for the port.
346 * Avoid freeing the console. 346 * Avoid freeing the console.
347 * 347 *
348 * Called when the last tty kref is dropped. 348 * Called asynchronously after the last tty kref is dropped,
349 * and the tty layer has already done the tty_shutdown(tty);
349 */ 350 */
350static void serial_release(struct tty_struct *tty) 351static void serial_cleanup(struct tty_struct *tty)
351{ 352{
352 struct usb_serial_port *port = tty->driver_data; 353 struct usb_serial_port *port = tty->driver_data;
353 struct usb_serial *serial; 354 struct usb_serial *serial;
@@ -361,9 +362,6 @@ static void serial_release(struct tty_struct *tty)
361 362
362 dbg("%s - port %d", __func__, port->number); 363 dbg("%s - port %d", __func__, port->number);
363 364
364 /* Standard shutdown processing */
365 tty_shutdown(tty);
366
367 tty->driver_data = NULL; 365 tty->driver_data = NULL;
368 366
369 serial = port->serial; 367 serial = port->serial;
@@ -1210,7 +1208,7 @@ static const struct tty_operations serial_ops = {
1210 .chars_in_buffer = serial_chars_in_buffer, 1208 .chars_in_buffer = serial_chars_in_buffer,
1211 .tiocmget = serial_tiocmget, 1209 .tiocmget = serial_tiocmget,
1212 .tiocmset = serial_tiocmset, 1210 .tiocmset = serial_tiocmset,
1213 .shutdown = serial_release, 1211 .cleanup = serial_cleanup,
1214 .install = serial_install, 1212 .install = serial_install,
1215 .proc_fops = &serial_proc_fops, 1213 .proc_fops = &serial_proc_fops,
1216}; 1214};
diff --git a/drivers/uwb/uwb-debug.c b/drivers/uwb/uwb-debug.c
index 4a42993700c1..2eecec0c13c9 100644
--- a/drivers/uwb/uwb-debug.c
+++ b/drivers/uwb/uwb-debug.c
@@ -205,7 +205,7 @@ static ssize_t command_write(struct file *file, const char __user *buf,
205 return ret < 0 ? ret : len; 205 return ret < 0 ? ret : len;
206} 206}
207 207
208static struct file_operations command_fops = { 208static const struct file_operations command_fops = {
209 .open = command_open, 209 .open = command_open,
210 .write = command_write, 210 .write = command_write,
211 .read = NULL, 211 .read = NULL,
@@ -255,7 +255,7 @@ static int reservations_open(struct inode *inode, struct file *file)
255 return single_open(file, reservations_print, inode->i_private); 255 return single_open(file, reservations_print, inode->i_private);
256} 256}
257 257
258static struct file_operations reservations_fops = { 258static const struct file_operations reservations_fops = {
259 .open = reservations_open, 259 .open = reservations_open,
260 .read = seq_read, 260 .read = seq_read,
261 .llseek = seq_lseek, 261 .llseek = seq_lseek,
@@ -283,7 +283,7 @@ static int drp_avail_open(struct inode *inode, struct file *file)
283 return single_open(file, drp_avail_print, inode->i_private); 283 return single_open(file, drp_avail_print, inode->i_private);
284} 284}
285 285
286static struct file_operations drp_avail_fops = { 286static const struct file_operations drp_avail_fops = {
287 .open = drp_avail_open, 287 .open = drp_avail_open,
288 .read = seq_read, 288 .read = seq_read,
289 .llseek = seq_lseek, 289 .llseek = seq_lseek,
diff --git a/drivers/video/fb_defio.c b/drivers/video/fb_defio.c
index 0a7a6679ee6e..c27ab1ed9604 100644
--- a/drivers/video/fb_defio.c
+++ b/drivers/video/fb_defio.c
@@ -125,7 +125,7 @@ page_already_added:
125 return 0; 125 return 0;
126} 126}
127 127
128static struct vm_operations_struct fb_deferred_io_vm_ops = { 128static const struct vm_operations_struct fb_deferred_io_vm_ops = {
129 .fault = fb_deferred_io_fault, 129 .fault = fb_deferred_io_fault,
130 .page_mkwrite = fb_deferred_io_mkwrite, 130 .page_mkwrite = fb_deferred_io_mkwrite,
131}; 131};
diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
index a1f2e7ce730b..99bbd282ce63 100644
--- a/drivers/video/fbmem.c
+++ b/drivers/video/fbmem.c
@@ -1800,7 +1800,7 @@ static int __init video_setup(char *options)
1800 global = 1; 1800 global = 1;
1801 } 1801 }
1802 1802
1803 if (!global && !strstr(options, "fb:")) { 1803 if (!global && !strchr(options, ':')) {
1804 fb_mode_option = options; 1804 fb_mode_option = options;
1805 global = 1; 1805 global = 1;
1806 } 1806 }
diff --git a/drivers/video/omap/dispc.c b/drivers/video/omap/dispc.c
index 80a11d078df4..f16e42154229 100644
--- a/drivers/video/omap/dispc.c
+++ b/drivers/video/omap/dispc.c
@@ -1035,7 +1035,7 @@ static void mmap_user_close(struct vm_area_struct *vma)
1035 atomic_dec(&dispc.map_count[plane]); 1035 atomic_dec(&dispc.map_count[plane]);
1036} 1036}
1037 1037
1038static struct vm_operations_struct mmap_user_ops = { 1038static const struct vm_operations_struct mmap_user_ops = {
1039 .open = mmap_user_open, 1039 .open = mmap_user_open,
1040 .close = mmap_user_close, 1040 .close = mmap_user_close,
1041}; 1041};
diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
index e98baf6916b8..e35232a18571 100644
--- a/drivers/video/uvesafb.c
+++ b/drivers/video/uvesafb.c
@@ -67,11 +67,14 @@ static DEFINE_MUTEX(uvfb_lock);
67 * find the kernel part of the task struct, copy the registers and 67 * find the kernel part of the task struct, copy the registers and
68 * the buffer contents and then complete the task. 68 * the buffer contents and then complete the task.
69 */ 69 */
70static void uvesafb_cn_callback(struct cn_msg *msg) 70static void uvesafb_cn_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
71{ 71{
72 struct uvesafb_task *utask; 72 struct uvesafb_task *utask;
73 struct uvesafb_ktask *task; 73 struct uvesafb_ktask *task;
74 74
75 if (!cap_raised(nsp->eff_cap, CAP_SYS_ADMIN))
76 return;
77
75 if (msg->seq >= UVESAFB_TASKS_MAX) 78 if (msg->seq >= UVESAFB_TASKS_MAX)
76 return; 79 return;
77 80
diff --git a/drivers/w1/w1_netlink.c b/drivers/w1/w1_netlink.c
index 52ccb3d3a963..45c126fea31d 100644
--- a/drivers/w1/w1_netlink.c
+++ b/drivers/w1/w1_netlink.c
@@ -306,7 +306,7 @@ static int w1_netlink_send_error(struct cn_msg *rcmsg, struct w1_netlink_msg *rm
306 return error; 306 return error;
307} 307}
308 308
309static void w1_cn_callback(struct cn_msg *msg) 309static void w1_cn_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
310{ 310{
311 struct w1_netlink_msg *m = (struct w1_netlink_msg *)(msg + 1); 311 struct w1_netlink_msg *m = (struct w1_netlink_msg *)(msg + 1);
312 struct w1_netlink_cmd *cmd; 312 struct w1_netlink_cmd *cmd;
diff --git a/fs/afs/cache.h b/fs/afs/cache.h
deleted file mode 100644
index 5c4f6b499e90..000000000000
--- a/fs/afs/cache.h
+++ /dev/null
@@ -1,12 +0,0 @@
1/* AFS local cache management interface
2 *
3 * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#include <linux/fscache.h>
diff --git a/fs/afs/internal.h b/fs/afs/internal.h
index 106be66dafd2..6ece2a13bf71 100644
--- a/fs/afs/internal.h
+++ b/fs/afs/internal.h
@@ -18,10 +18,10 @@
18#include <linux/key.h> 18#include <linux/key.h>
19#include <linux/workqueue.h> 19#include <linux/workqueue.h>
20#include <linux/sched.h> 20#include <linux/sched.h>
21#include <linux/fscache.h>
21 22
22#include "afs.h" 23#include "afs.h"
23#include "afs_vl.h" 24#include "afs_vl.h"
24#include "cache.h"
25 25
26#define AFS_CELL_MAX_ADDRS 15 26#define AFS_CELL_MAX_ADDRS 15
27 27
diff --git a/fs/btrfs/acl.c b/fs/btrfs/acl.c
index f128427b995b..69b355ae7f49 100644
--- a/fs/btrfs/acl.c
+++ b/fs/btrfs/acl.c
@@ -27,7 +27,7 @@
27#include "btrfs_inode.h" 27#include "btrfs_inode.h"
28#include "xattr.h" 28#include "xattr.h"
29 29
30#ifdef CONFIG_FS_POSIX_ACL 30#ifdef CONFIG_BTRFS_POSIX_ACL
31 31
32static struct posix_acl *btrfs_get_acl(struct inode *inode, int type) 32static struct posix_acl *btrfs_get_acl(struct inode *inode, int type)
33{ 33{
@@ -313,7 +313,7 @@ struct xattr_handler btrfs_xattr_acl_access_handler = {
313 .set = btrfs_xattr_acl_access_set, 313 .set = btrfs_xattr_acl_access_set,
314}; 314};
315 315
316#else /* CONFIG_FS_POSIX_ACL */ 316#else /* CONFIG_BTRFS_POSIX_ACL */
317 317
318int btrfs_acl_chmod(struct inode *inode) 318int btrfs_acl_chmod(struct inode *inode)
319{ 319{
@@ -325,4 +325,4 @@ int btrfs_init_acl(struct inode *inode, struct inode *dir)
325 return 0; 325 return 0;
326} 326}
327 327
328#endif /* CONFIG_FS_POSIX_ACL */ 328#endif /* CONFIG_BTRFS_POSIX_ACL */
diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h
index 82ee56bba299..a54d354cefcb 100644
--- a/fs/btrfs/btrfs_inode.h
+++ b/fs/btrfs/btrfs_inode.h
@@ -128,6 +128,14 @@ struct btrfs_inode {
128 u64 last_unlink_trans; 128 u64 last_unlink_trans;
129 129
130 /* 130 /*
131 * These two counters are for delalloc metadata reservations. We keep
132 * track of how many extents we've accounted for vs how many extents we
133 * have.
134 */
135 int delalloc_reserved_extents;
136 int delalloc_extents;
137
138 /*
131 * ordered_data_close is set by truncate when a file that used 139 * ordered_data_close is set by truncate when a file that used
132 * to have good data has been truncated to zero. When it is set 140 * to have good data has been truncated to zero. When it is set
133 * the btrfs file release call will add this inode to the 141 * the btrfs file release call will add this inode to the
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 80599b4e42bd..dd8ced9814c4 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -675,18 +675,19 @@ struct btrfs_space_info {
675 current allocations */ 675 current allocations */
676 u64 bytes_readonly; /* total bytes that are read only */ 676 u64 bytes_readonly; /* total bytes that are read only */
677 u64 bytes_super; /* total bytes reserved for the super blocks */ 677 u64 bytes_super; /* total bytes reserved for the super blocks */
678 678 u64 bytes_root; /* the number of bytes needed to commit a
679 /* delalloc accounting */ 679 transaction */
680 u64 bytes_delalloc; /* number of bytes reserved for allocation,
681 this space is not necessarily reserved yet
682 by the allocator */
683 u64 bytes_may_use; /* number of bytes that may be used for 680 u64 bytes_may_use; /* number of bytes that may be used for
684 delalloc */ 681 delalloc/allocations */
682 u64 bytes_delalloc; /* number of bytes currently reserved for
683 delayed allocation */
685 684
686 int full; /* indicates that we cannot allocate any more 685 int full; /* indicates that we cannot allocate any more
687 chunks for this space */ 686 chunks for this space */
688 int force_alloc; /* set if we need to force a chunk alloc for 687 int force_alloc; /* set if we need to force a chunk alloc for
689 this space */ 688 this space */
689 int force_delalloc; /* make people start doing filemap_flush until
690 we're under a threshold */
690 691
691 struct list_head list; 692 struct list_head list;
692 693
@@ -695,6 +696,9 @@ struct btrfs_space_info {
695 spinlock_t lock; 696 spinlock_t lock;
696 struct rw_semaphore groups_sem; 697 struct rw_semaphore groups_sem;
697 atomic_t caching_threads; 698 atomic_t caching_threads;
699
700 int allocating_chunk;
701 wait_queue_head_t wait;
698}; 702};
699 703
700/* 704/*
@@ -2022,7 +2026,12 @@ u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags);
2022void btrfs_set_inode_space_info(struct btrfs_root *root, struct inode *ionde); 2026void btrfs_set_inode_space_info(struct btrfs_root *root, struct inode *ionde);
2023void btrfs_clear_space_info_full(struct btrfs_fs_info *info); 2027void btrfs_clear_space_info_full(struct btrfs_fs_info *info);
2024 2028
2025int btrfs_check_metadata_free_space(struct btrfs_root *root); 2029int btrfs_reserve_metadata_space(struct btrfs_root *root, int num_items);
2030int btrfs_unreserve_metadata_space(struct btrfs_root *root, int num_items);
2031int btrfs_unreserve_metadata_for_delalloc(struct btrfs_root *root,
2032 struct inode *inode, int num_items);
2033int btrfs_reserve_metadata_for_delalloc(struct btrfs_root *root,
2034 struct inode *inode, int num_items);
2026int btrfs_check_data_free_space(struct btrfs_root *root, struct inode *inode, 2035int btrfs_check_data_free_space(struct btrfs_root *root, struct inode *inode,
2027 u64 bytes); 2036 u64 bytes);
2028void btrfs_free_reserved_data_space(struct btrfs_root *root, 2037void btrfs_free_reserved_data_space(struct btrfs_root *root,
@@ -2326,7 +2335,7 @@ int btrfs_sync_file(struct file *file, struct dentry *dentry, int datasync);
2326int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end, 2335int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
2327 int skip_pinned); 2336 int skip_pinned);
2328int btrfs_check_file(struct btrfs_root *root, struct inode *inode); 2337int btrfs_check_file(struct btrfs_root *root, struct inode *inode);
2329extern struct file_operations btrfs_file_operations; 2338extern const struct file_operations btrfs_file_operations;
2330int btrfs_drop_extents(struct btrfs_trans_handle *trans, 2339int btrfs_drop_extents(struct btrfs_trans_handle *trans,
2331 struct btrfs_root *root, struct inode *inode, 2340 struct btrfs_root *root, struct inode *inode,
2332 u64 start, u64 end, u64 locked_end, 2341 u64 start, u64 end, u64 locked_end,
@@ -2357,7 +2366,7 @@ int btrfs_parse_options(struct btrfs_root *root, char *options);
2357int btrfs_sync_fs(struct super_block *sb, int wait); 2366int btrfs_sync_fs(struct super_block *sb, int wait);
2358 2367
2359/* acl.c */ 2368/* acl.c */
2360#ifdef CONFIG_FS_POSIX_ACL 2369#ifdef CONFIG_BTRFS_POSIX_ACL
2361int btrfs_check_acl(struct inode *inode, int mask); 2370int btrfs_check_acl(struct inode *inode, int mask);
2362#else 2371#else
2363#define btrfs_check_acl NULL 2372#define btrfs_check_acl NULL
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 644e796fd643..af0435f79fa6 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -822,14 +822,14 @@ struct extent_buffer *btrfs_find_create_tree_block(struct btrfs_root *root,
822 822
823int btrfs_write_tree_block(struct extent_buffer *buf) 823int btrfs_write_tree_block(struct extent_buffer *buf)
824{ 824{
825 return btrfs_fdatawrite_range(buf->first_page->mapping, buf->start, 825 return filemap_fdatawrite_range(buf->first_page->mapping, buf->start,
826 buf->start + buf->len - 1, WB_SYNC_ALL); 826 buf->start + buf->len - 1);
827} 827}
828 828
829int btrfs_wait_tree_block_writeback(struct extent_buffer *buf) 829int btrfs_wait_tree_block_writeback(struct extent_buffer *buf)
830{ 830{
831 return btrfs_wait_on_page_writeback_range(buf->first_page->mapping, 831 return filemap_fdatawait_range(buf->first_page->mapping,
832 buf->start, buf->start + buf->len - 1); 832 buf->start, buf->start + buf->len - 1);
833} 833}
834 834
835struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr, 835struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr,
@@ -1630,7 +1630,7 @@ struct btrfs_root *open_ctree(struct super_block *sb,
1630 fs_info->sb = sb; 1630 fs_info->sb = sb;
1631 fs_info->max_extent = (u64)-1; 1631 fs_info->max_extent = (u64)-1;
1632 fs_info->max_inline = 8192 * 1024; 1632 fs_info->max_inline = 8192 * 1024;
1633 fs_info->metadata_ratio = 8; 1633 fs_info->metadata_ratio = 0;
1634 1634
1635 fs_info->thread_pool_size = min_t(unsigned long, 1635 fs_info->thread_pool_size = min_t(unsigned long,
1636 num_online_cpus() + 2, 8); 1636 num_online_cpus() + 2, 8);
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 993f93ff7ba6..359a754c782c 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -68,6 +68,8 @@ static int pin_down_bytes(struct btrfs_trans_handle *trans,
68 struct extent_buffer **must_clean); 68 struct extent_buffer **must_clean);
69static int find_next_key(struct btrfs_path *path, int level, 69static int find_next_key(struct btrfs_path *path, int level,
70 struct btrfs_key *key); 70 struct btrfs_key *key);
71static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
72 int dump_block_groups);
71 73
72static noinline int 74static noinline int
73block_group_cache_done(struct btrfs_block_group_cache *cache) 75block_group_cache_done(struct btrfs_block_group_cache *cache)
@@ -2765,67 +2767,346 @@ void btrfs_set_inode_space_info(struct btrfs_root *root, struct inode *inode)
2765 alloc_target); 2767 alloc_target);
2766} 2768}
2767 2769
2770static u64 calculate_bytes_needed(struct btrfs_root *root, int num_items)
2771{
2772 u64 num_bytes;
2773 int level;
2774
2775 level = BTRFS_MAX_LEVEL - 2;
2776 /*
2777 * NOTE: these calculations are absolutely the worst possible case.
2778 * This assumes that _every_ item we insert will require a new leaf, and
2779 * that the tree has grown to its maximum level size.
2780 */
2781
2782 /*
2783 * for every item we insert we could insert both an extent item and a
2784 * extent ref item. Then for ever item we insert, we will need to cow
2785 * both the original leaf, plus the leaf to the left and right of it.
2786 *
2787 * Unless we are talking about the extent root, then we just want the
2788 * number of items * 2, since we just need the extent item plus its ref.
2789 */
2790 if (root == root->fs_info->extent_root)
2791 num_bytes = num_items * 2;
2792 else
2793 num_bytes = (num_items + (2 * num_items)) * 3;
2794
2795 /*
2796 * num_bytes is total number of leaves we could need times the leaf
2797 * size, and then for every leaf we could end up cow'ing 2 nodes per
2798 * level, down to the leaf level.
2799 */
2800 num_bytes = (num_bytes * root->leafsize) +
2801 (num_bytes * (level * 2)) * root->nodesize;
2802
2803 return num_bytes;
2804}
2805
2768/* 2806/*
2769 * for now this just makes sure we have at least 5% of our metadata space free 2807 * Unreserve metadata space for delalloc. If we have less reserved credits than
2770 * for use. 2808 * we have extents, this function does nothing.
2771 */ 2809 */
2772int btrfs_check_metadata_free_space(struct btrfs_root *root) 2810int btrfs_unreserve_metadata_for_delalloc(struct btrfs_root *root,
2811 struct inode *inode, int num_items)
2773{ 2812{
2774 struct btrfs_fs_info *info = root->fs_info; 2813 struct btrfs_fs_info *info = root->fs_info;
2775 struct btrfs_space_info *meta_sinfo; 2814 struct btrfs_space_info *meta_sinfo;
2776 u64 alloc_target, thresh; 2815 u64 num_bytes;
2777 int committed = 0, ret; 2816 u64 alloc_target;
2817 bool bug = false;
2778 2818
2779 /* get the space info for where the metadata will live */ 2819 /* get the space info for where the metadata will live */
2780 alloc_target = btrfs_get_alloc_profile(root, 0); 2820 alloc_target = btrfs_get_alloc_profile(root, 0);
2781 meta_sinfo = __find_space_info(info, alloc_target); 2821 meta_sinfo = __find_space_info(info, alloc_target);
2782 if (!meta_sinfo)
2783 goto alloc;
2784 2822
2785again: 2823 num_bytes = calculate_bytes_needed(root->fs_info->extent_root,
2824 num_items);
2825
2786 spin_lock(&meta_sinfo->lock); 2826 spin_lock(&meta_sinfo->lock);
2787 if (!meta_sinfo->full) 2827 if (BTRFS_I(inode)->delalloc_reserved_extents <=
2788 thresh = meta_sinfo->total_bytes * 80; 2828 BTRFS_I(inode)->delalloc_extents) {
2789 else 2829 spin_unlock(&meta_sinfo->lock);
2790 thresh = meta_sinfo->total_bytes * 95; 2830 return 0;
2831 }
2832
2833 BTRFS_I(inode)->delalloc_reserved_extents--;
2834 BUG_ON(BTRFS_I(inode)->delalloc_reserved_extents < 0);
2835
2836 if (meta_sinfo->bytes_delalloc < num_bytes) {
2837 bug = true;
2838 meta_sinfo->bytes_delalloc = 0;
2839 } else {
2840 meta_sinfo->bytes_delalloc -= num_bytes;
2841 }
2842 spin_unlock(&meta_sinfo->lock);
2791 2843
2844 BUG_ON(bug);
2845
2846 return 0;
2847}
2848
2849static void check_force_delalloc(struct btrfs_space_info *meta_sinfo)
2850{
2851 u64 thresh;
2852
2853 thresh = meta_sinfo->bytes_used + meta_sinfo->bytes_reserved +
2854 meta_sinfo->bytes_pinned + meta_sinfo->bytes_readonly +
2855 meta_sinfo->bytes_super + meta_sinfo->bytes_root +
2856 meta_sinfo->bytes_may_use;
2857
2858 thresh = meta_sinfo->total_bytes - thresh;
2859 thresh *= 80;
2792 do_div(thresh, 100); 2860 do_div(thresh, 100);
2861 if (thresh <= meta_sinfo->bytes_delalloc)
2862 meta_sinfo->force_delalloc = 1;
2863 else
2864 meta_sinfo->force_delalloc = 0;
2865}
2793 2866
2794 if (meta_sinfo->bytes_used + meta_sinfo->bytes_reserved + 2867static int maybe_allocate_chunk(struct btrfs_root *root,
2795 meta_sinfo->bytes_pinned + meta_sinfo->bytes_readonly + 2868 struct btrfs_space_info *info)
2796 meta_sinfo->bytes_super > thresh) { 2869{
2797 struct btrfs_trans_handle *trans; 2870 struct btrfs_super_block *disk_super = &root->fs_info->super_copy;
2798 if (!meta_sinfo->full) { 2871 struct btrfs_trans_handle *trans;
2799 meta_sinfo->force_alloc = 1; 2872 bool wait = false;
2873 int ret = 0;
2874 u64 min_metadata;
2875 u64 free_space;
2876
2877 free_space = btrfs_super_total_bytes(disk_super);
2878 /*
2879 * we allow the metadata to grow to a max of either 5gb or 5% of the
2880 * space in the volume.
2881 */
2882 min_metadata = min((u64)5 * 1024 * 1024 * 1024,
2883 div64_u64(free_space * 5, 100));
2884 if (info->total_bytes >= min_metadata) {
2885 spin_unlock(&info->lock);
2886 return 0;
2887 }
2888
2889 if (info->full) {
2890 spin_unlock(&info->lock);
2891 return 0;
2892 }
2893
2894 if (!info->allocating_chunk) {
2895 info->force_alloc = 1;
2896 info->allocating_chunk = 1;
2897 init_waitqueue_head(&info->wait);
2898 } else {
2899 wait = true;
2900 }
2901
2902 spin_unlock(&info->lock);
2903
2904 if (wait) {
2905 wait_event(info->wait,
2906 !info->allocating_chunk);
2907 return 1;
2908 }
2909
2910 trans = btrfs_start_transaction(root, 1);
2911 if (!trans) {
2912 ret = -ENOMEM;
2913 goto out;
2914 }
2915
2916 ret = do_chunk_alloc(trans, root->fs_info->extent_root,
2917 4096 + 2 * 1024 * 1024,
2918 info->flags, 0);
2919 btrfs_end_transaction(trans, root);
2920 if (ret)
2921 goto out;
2922out:
2923 spin_lock(&info->lock);
2924 info->allocating_chunk = 0;
2925 spin_unlock(&info->lock);
2926 wake_up(&info->wait);
2927
2928 if (ret)
2929 return 0;
2930 return 1;
2931}
2932
2933/*
2934 * Reserve metadata space for delalloc.
2935 */
2936int btrfs_reserve_metadata_for_delalloc(struct btrfs_root *root,
2937 struct inode *inode, int num_items)
2938{
2939 struct btrfs_fs_info *info = root->fs_info;
2940 struct btrfs_space_info *meta_sinfo;
2941 u64 num_bytes;
2942 u64 used;
2943 u64 alloc_target;
2944 int flushed = 0;
2945 int force_delalloc;
2946
2947 /* get the space info for where the metadata will live */
2948 alloc_target = btrfs_get_alloc_profile(root, 0);
2949 meta_sinfo = __find_space_info(info, alloc_target);
2950
2951 num_bytes = calculate_bytes_needed(root->fs_info->extent_root,
2952 num_items);
2953again:
2954 spin_lock(&meta_sinfo->lock);
2955
2956 force_delalloc = meta_sinfo->force_delalloc;
2957
2958 if (unlikely(!meta_sinfo->bytes_root))
2959 meta_sinfo->bytes_root = calculate_bytes_needed(root, 6);
2960
2961 if (!flushed)
2962 meta_sinfo->bytes_delalloc += num_bytes;
2963
2964 used = meta_sinfo->bytes_used + meta_sinfo->bytes_reserved +
2965 meta_sinfo->bytes_pinned + meta_sinfo->bytes_readonly +
2966 meta_sinfo->bytes_super + meta_sinfo->bytes_root +
2967 meta_sinfo->bytes_may_use + meta_sinfo->bytes_delalloc;
2968
2969 if (used > meta_sinfo->total_bytes) {
2970 flushed++;
2971
2972 if (flushed == 1) {
2973 if (maybe_allocate_chunk(root, meta_sinfo))
2974 goto again;
2975 flushed++;
2976 } else {
2800 spin_unlock(&meta_sinfo->lock); 2977 spin_unlock(&meta_sinfo->lock);
2801alloc: 2978 }
2802 trans = btrfs_start_transaction(root, 1);
2803 if (!trans)
2804 return -ENOMEM;
2805 2979
2806 ret = do_chunk_alloc(trans, root->fs_info->extent_root, 2980 if (flushed == 2) {
2807 2 * 1024 * 1024, alloc_target, 0); 2981 filemap_flush(inode->i_mapping);
2808 btrfs_end_transaction(trans, root); 2982 goto again;
2809 if (!meta_sinfo) { 2983 } else if (flushed == 3) {
2810 meta_sinfo = __find_space_info(info, 2984 btrfs_start_delalloc_inodes(root);
2811 alloc_target); 2985 btrfs_wait_ordered_extents(root, 0);
2812 }
2813 goto again; 2986 goto again;
2814 } 2987 }
2988 spin_lock(&meta_sinfo->lock);
2989 meta_sinfo->bytes_delalloc -= num_bytes;
2815 spin_unlock(&meta_sinfo->lock); 2990 spin_unlock(&meta_sinfo->lock);
2991 printk(KERN_ERR "enospc, has %d, reserved %d\n",
2992 BTRFS_I(inode)->delalloc_extents,
2993 BTRFS_I(inode)->delalloc_reserved_extents);
2994 dump_space_info(meta_sinfo, 0, 0);
2995 return -ENOSPC;
2996 }
2816 2997
2817 if (!committed) { 2998 BTRFS_I(inode)->delalloc_reserved_extents++;
2818 committed = 1; 2999 check_force_delalloc(meta_sinfo);
2819 trans = btrfs_join_transaction(root, 1); 3000 spin_unlock(&meta_sinfo->lock);
2820 if (!trans) 3001
2821 return -ENOMEM; 3002 if (!flushed && force_delalloc)
2822 ret = btrfs_commit_transaction(trans, root); 3003 filemap_flush(inode->i_mapping);
2823 if (ret) 3004
2824 return ret; 3005 return 0;
3006}
3007
3008/*
3009 * unreserve num_items number of items worth of metadata space. This needs to
3010 * be paired with btrfs_reserve_metadata_space.
3011 *
3012 * NOTE: if you have the option, run this _AFTER_ you do a
3013 * btrfs_end_transaction, since btrfs_end_transaction will run delayed ref
3014 * oprations which will result in more used metadata, so we want to make sure we
3015 * can do that without issue.
3016 */
3017int btrfs_unreserve_metadata_space(struct btrfs_root *root, int num_items)
3018{
3019 struct btrfs_fs_info *info = root->fs_info;
3020 struct btrfs_space_info *meta_sinfo;
3021 u64 num_bytes;
3022 u64 alloc_target;
3023 bool bug = false;
3024
3025 /* get the space info for where the metadata will live */
3026 alloc_target = btrfs_get_alloc_profile(root, 0);
3027 meta_sinfo = __find_space_info(info, alloc_target);
3028
3029 num_bytes = calculate_bytes_needed(root, num_items);
3030
3031 spin_lock(&meta_sinfo->lock);
3032 if (meta_sinfo->bytes_may_use < num_bytes) {
3033 bug = true;
3034 meta_sinfo->bytes_may_use = 0;
3035 } else {
3036 meta_sinfo->bytes_may_use -= num_bytes;
3037 }
3038 spin_unlock(&meta_sinfo->lock);
3039
3040 BUG_ON(bug);
3041
3042 return 0;
3043}
3044
3045/*
3046 * Reserve some metadata space for use. We'll calculate the worste case number
3047 * of bytes that would be needed to modify num_items number of items. If we
3048 * have space, fantastic, if not, you get -ENOSPC. Please call
3049 * btrfs_unreserve_metadata_space when you are done for the _SAME_ number of
3050 * items you reserved, since whatever metadata you needed should have already
3051 * been allocated.
3052 *
3053 * This will commit the transaction to make more space if we don't have enough
3054 * metadata space. THe only time we don't do this is if we're reserving space
3055 * inside of a transaction, then we will just return -ENOSPC and it is the
3056 * callers responsibility to handle it properly.
3057 */
3058int btrfs_reserve_metadata_space(struct btrfs_root *root, int num_items)
3059{
3060 struct btrfs_fs_info *info = root->fs_info;
3061 struct btrfs_space_info *meta_sinfo;
3062 u64 num_bytes;
3063 u64 used;
3064 u64 alloc_target;
3065 int retries = 0;
3066
3067 /* get the space info for where the metadata will live */
3068 alloc_target = btrfs_get_alloc_profile(root, 0);
3069 meta_sinfo = __find_space_info(info, alloc_target);
3070
3071 num_bytes = calculate_bytes_needed(root, num_items);
3072again:
3073 spin_lock(&meta_sinfo->lock);
3074
3075 if (unlikely(!meta_sinfo->bytes_root))
3076 meta_sinfo->bytes_root = calculate_bytes_needed(root, 6);
3077
3078 if (!retries)
3079 meta_sinfo->bytes_may_use += num_bytes;
3080
3081 used = meta_sinfo->bytes_used + meta_sinfo->bytes_reserved +
3082 meta_sinfo->bytes_pinned + meta_sinfo->bytes_readonly +
3083 meta_sinfo->bytes_super + meta_sinfo->bytes_root +
3084 meta_sinfo->bytes_may_use + meta_sinfo->bytes_delalloc;
3085
3086 if (used > meta_sinfo->total_bytes) {
3087 retries++;
3088 if (retries == 1) {
3089 if (maybe_allocate_chunk(root, meta_sinfo))
3090 goto again;
3091 retries++;
3092 } else {
3093 spin_unlock(&meta_sinfo->lock);
3094 }
3095
3096 if (retries == 2) {
3097 btrfs_start_delalloc_inodes(root);
3098 btrfs_wait_ordered_extents(root, 0);
2825 goto again; 3099 goto again;
2826 } 3100 }
3101 spin_lock(&meta_sinfo->lock);
3102 meta_sinfo->bytes_may_use -= num_bytes;
3103 spin_unlock(&meta_sinfo->lock);
3104
3105 dump_space_info(meta_sinfo, 0, 0);
2827 return -ENOSPC; 3106 return -ENOSPC;
2828 } 3107 }
3108
3109 check_force_delalloc(meta_sinfo);
2829 spin_unlock(&meta_sinfo->lock); 3110 spin_unlock(&meta_sinfo->lock);
2830 3111
2831 return 0; 3112 return 0;
@@ -2888,7 +3169,7 @@ alloc:
2888 spin_unlock(&data_sinfo->lock); 3169 spin_unlock(&data_sinfo->lock);
2889 3170
2890 /* commit the current transaction and try again */ 3171 /* commit the current transaction and try again */
2891 if (!committed) { 3172 if (!committed && !root->fs_info->open_ioctl_trans) {
2892 committed = 1; 3173 committed = 1;
2893 trans = btrfs_join_transaction(root, 1); 3174 trans = btrfs_join_transaction(root, 1);
2894 if (!trans) 3175 if (!trans)
@@ -2916,7 +3197,7 @@ alloc:
2916 BTRFS_I(inode)->reserved_bytes += bytes; 3197 BTRFS_I(inode)->reserved_bytes += bytes;
2917 spin_unlock(&data_sinfo->lock); 3198 spin_unlock(&data_sinfo->lock);
2918 3199
2919 return btrfs_check_metadata_free_space(root); 3200 return 0;
2920} 3201}
2921 3202
2922/* 3203/*
@@ -3015,17 +3296,15 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans,
3015 BUG_ON(!space_info); 3296 BUG_ON(!space_info);
3016 3297
3017 spin_lock(&space_info->lock); 3298 spin_lock(&space_info->lock);
3018 if (space_info->force_alloc) { 3299 if (space_info->force_alloc)
3019 force = 1; 3300 force = 1;
3020 space_info->force_alloc = 0;
3021 }
3022 if (space_info->full) { 3301 if (space_info->full) {
3023 spin_unlock(&space_info->lock); 3302 spin_unlock(&space_info->lock);
3024 goto out; 3303 goto out;
3025 } 3304 }
3026 3305
3027 thresh = space_info->total_bytes - space_info->bytes_readonly; 3306 thresh = space_info->total_bytes - space_info->bytes_readonly;
3028 thresh = div_factor(thresh, 6); 3307 thresh = div_factor(thresh, 8);
3029 if (!force && 3308 if (!force &&
3030 (space_info->bytes_used + space_info->bytes_pinned + 3309 (space_info->bytes_used + space_info->bytes_pinned +
3031 space_info->bytes_reserved + alloc_bytes) < thresh) { 3310 space_info->bytes_reserved + alloc_bytes) < thresh) {
@@ -3039,7 +3318,7 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans,
3039 * we keep a reasonable number of metadata chunks allocated in the 3318 * we keep a reasonable number of metadata chunks allocated in the
3040 * FS as well. 3319 * FS as well.
3041 */ 3320 */
3042 if (flags & BTRFS_BLOCK_GROUP_DATA) { 3321 if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) {
3043 fs_info->data_chunk_allocations++; 3322 fs_info->data_chunk_allocations++;
3044 if (!(fs_info->data_chunk_allocations % 3323 if (!(fs_info->data_chunk_allocations %
3045 fs_info->metadata_ratio)) 3324 fs_info->metadata_ratio))
@@ -3047,8 +3326,11 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans,
3047 } 3326 }
3048 3327
3049 ret = btrfs_alloc_chunk(trans, extent_root, flags); 3328 ret = btrfs_alloc_chunk(trans, extent_root, flags);
3329 spin_lock(&space_info->lock);
3050 if (ret) 3330 if (ret)
3051 space_info->full = 1; 3331 space_info->full = 1;
3332 space_info->force_alloc = 0;
3333 spin_unlock(&space_info->lock);
3052out: 3334out:
3053 mutex_unlock(&extent_root->fs_info->chunk_mutex); 3335 mutex_unlock(&extent_root->fs_info->chunk_mutex);
3054 return ret; 3336 return ret;
@@ -4063,21 +4345,32 @@ loop:
4063 return ret; 4345 return ret;
4064} 4346}
4065 4347
4066static void dump_space_info(struct btrfs_space_info *info, u64 bytes) 4348static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
4349 int dump_block_groups)
4067{ 4350{
4068 struct btrfs_block_group_cache *cache; 4351 struct btrfs_block_group_cache *cache;
4069 4352
4353 spin_lock(&info->lock);
4070 printk(KERN_INFO "space_info has %llu free, is %sfull\n", 4354 printk(KERN_INFO "space_info has %llu free, is %sfull\n",
4071 (unsigned long long)(info->total_bytes - info->bytes_used - 4355 (unsigned long long)(info->total_bytes - info->bytes_used -
4072 info->bytes_pinned - info->bytes_reserved), 4356 info->bytes_pinned - info->bytes_reserved -
4357 info->bytes_super),
4073 (info->full) ? "" : "not "); 4358 (info->full) ? "" : "not ");
4074 printk(KERN_INFO "space_info total=%llu, pinned=%llu, delalloc=%llu," 4359 printk(KERN_INFO "space_info total=%llu, pinned=%llu, delalloc=%llu,"
4075 " may_use=%llu, used=%llu\n", 4360 " may_use=%llu, used=%llu, root=%llu, super=%llu, reserved=%llu"
4361 "\n",
4076 (unsigned long long)info->total_bytes, 4362 (unsigned long long)info->total_bytes,
4077 (unsigned long long)info->bytes_pinned, 4363 (unsigned long long)info->bytes_pinned,
4078 (unsigned long long)info->bytes_delalloc, 4364 (unsigned long long)info->bytes_delalloc,
4079 (unsigned long long)info->bytes_may_use, 4365 (unsigned long long)info->bytes_may_use,
4080 (unsigned long long)info->bytes_used); 4366 (unsigned long long)info->bytes_used,
4367 (unsigned long long)info->bytes_root,
4368 (unsigned long long)info->bytes_super,
4369 (unsigned long long)info->bytes_reserved);
4370 spin_unlock(&info->lock);
4371
4372 if (!dump_block_groups)
4373 return;
4081 4374
4082 down_read(&info->groups_sem); 4375 down_read(&info->groups_sem);
4083 list_for_each_entry(cache, &info->block_groups, list) { 4376 list_for_each_entry(cache, &info->block_groups, list) {
@@ -4145,7 +4438,7 @@ again:
4145 printk(KERN_ERR "btrfs allocation failed flags %llu, " 4438 printk(KERN_ERR "btrfs allocation failed flags %llu, "
4146 "wanted %llu\n", (unsigned long long)data, 4439 "wanted %llu\n", (unsigned long long)data,
4147 (unsigned long long)num_bytes); 4440 (unsigned long long)num_bytes);
4148 dump_space_info(sinfo, num_bytes); 4441 dump_space_info(sinfo, num_bytes, 1);
4149 } 4442 }
4150 4443
4151 return ret; 4444 return ret;
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 0cb88f8146ea..de1793ba004a 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -280,6 +280,14 @@ static struct extent_buffer *buffer_search(struct extent_io_tree *tree,
280 return NULL; 280 return NULL;
281} 281}
282 282
283static void merge_cb(struct extent_io_tree *tree, struct extent_state *new,
284 struct extent_state *other)
285{
286 if (tree->ops && tree->ops->merge_extent_hook)
287 tree->ops->merge_extent_hook(tree->mapping->host, new,
288 other);
289}
290
283/* 291/*
284 * utility function to look for merge candidates inside a given range. 292 * utility function to look for merge candidates inside a given range.
285 * Any extents with matching state are merged together into a single 293 * Any extents with matching state are merged together into a single
@@ -303,6 +311,7 @@ static int merge_state(struct extent_io_tree *tree,
303 other = rb_entry(other_node, struct extent_state, rb_node); 311 other = rb_entry(other_node, struct extent_state, rb_node);
304 if (other->end == state->start - 1 && 312 if (other->end == state->start - 1 &&
305 other->state == state->state) { 313 other->state == state->state) {
314 merge_cb(tree, state, other);
306 state->start = other->start; 315 state->start = other->start;
307 other->tree = NULL; 316 other->tree = NULL;
308 rb_erase(&other->rb_node, &tree->state); 317 rb_erase(&other->rb_node, &tree->state);
@@ -314,33 +323,37 @@ static int merge_state(struct extent_io_tree *tree,
314 other = rb_entry(other_node, struct extent_state, rb_node); 323 other = rb_entry(other_node, struct extent_state, rb_node);
315 if (other->start == state->end + 1 && 324 if (other->start == state->end + 1 &&
316 other->state == state->state) { 325 other->state == state->state) {
326 merge_cb(tree, state, other);
317 other->start = state->start; 327 other->start = state->start;
318 state->tree = NULL; 328 state->tree = NULL;
319 rb_erase(&state->rb_node, &tree->state); 329 rb_erase(&state->rb_node, &tree->state);
320 free_extent_state(state); 330 free_extent_state(state);
331 state = NULL;
321 } 332 }
322 } 333 }
334
323 return 0; 335 return 0;
324} 336}
325 337
326static void set_state_cb(struct extent_io_tree *tree, 338static int set_state_cb(struct extent_io_tree *tree,
327 struct extent_state *state, 339 struct extent_state *state,
328 unsigned long bits) 340 unsigned long bits)
329{ 341{
330 if (tree->ops && tree->ops->set_bit_hook) { 342 if (tree->ops && tree->ops->set_bit_hook) {
331 tree->ops->set_bit_hook(tree->mapping->host, state->start, 343 return tree->ops->set_bit_hook(tree->mapping->host,
332 state->end, state->state, bits); 344 state->start, state->end,
345 state->state, bits);
333 } 346 }
347
348 return 0;
334} 349}
335 350
336static void clear_state_cb(struct extent_io_tree *tree, 351static void clear_state_cb(struct extent_io_tree *tree,
337 struct extent_state *state, 352 struct extent_state *state,
338 unsigned long bits) 353 unsigned long bits)
339{ 354{
340 if (tree->ops && tree->ops->clear_bit_hook) { 355 if (tree->ops && tree->ops->clear_bit_hook)
341 tree->ops->clear_bit_hook(tree->mapping->host, state->start, 356 tree->ops->clear_bit_hook(tree->mapping->host, state, bits);
342 state->end, state->state, bits);
343 }
344} 357}
345 358
346/* 359/*
@@ -358,6 +371,7 @@ static int insert_state(struct extent_io_tree *tree,
358 int bits) 371 int bits)
359{ 372{
360 struct rb_node *node; 373 struct rb_node *node;
374 int ret;
361 375
362 if (end < start) { 376 if (end < start) {
363 printk(KERN_ERR "btrfs end < start %llu %llu\n", 377 printk(KERN_ERR "btrfs end < start %llu %llu\n",
@@ -365,11 +379,14 @@ static int insert_state(struct extent_io_tree *tree,
365 (unsigned long long)start); 379 (unsigned long long)start);
366 WARN_ON(1); 380 WARN_ON(1);
367 } 381 }
368 if (bits & EXTENT_DIRTY)
369 tree->dirty_bytes += end - start + 1;
370 state->start = start; 382 state->start = start;
371 state->end = end; 383 state->end = end;
372 set_state_cb(tree, state, bits); 384 ret = set_state_cb(tree, state, bits);
385 if (ret)
386 return ret;
387
388 if (bits & EXTENT_DIRTY)
389 tree->dirty_bytes += end - start + 1;
373 state->state |= bits; 390 state->state |= bits;
374 node = tree_insert(&tree->state, end, &state->rb_node); 391 node = tree_insert(&tree->state, end, &state->rb_node);
375 if (node) { 392 if (node) {
@@ -387,6 +404,15 @@ static int insert_state(struct extent_io_tree *tree,
387 return 0; 404 return 0;
388} 405}
389 406
407static int split_cb(struct extent_io_tree *tree, struct extent_state *orig,
408 u64 split)
409{
410 if (tree->ops && tree->ops->split_extent_hook)
411 return tree->ops->split_extent_hook(tree->mapping->host,
412 orig, split);
413 return 0;
414}
415
390/* 416/*
391 * split a given extent state struct in two, inserting the preallocated 417 * split a given extent state struct in two, inserting the preallocated
392 * struct 'prealloc' as the newly created second half. 'split' indicates an 418 * struct 'prealloc' as the newly created second half. 'split' indicates an
@@ -405,6 +431,9 @@ static int split_state(struct extent_io_tree *tree, struct extent_state *orig,
405 struct extent_state *prealloc, u64 split) 431 struct extent_state *prealloc, u64 split)
406{ 432{
407 struct rb_node *node; 433 struct rb_node *node;
434
435 split_cb(tree, orig, split);
436
408 prealloc->start = orig->start; 437 prealloc->start = orig->start;
409 prealloc->end = split - 1; 438 prealloc->end = split - 1;
410 prealloc->state = orig->state; 439 prealloc->state = orig->state;
@@ -542,8 +571,8 @@ hit_next:
542 if (err) 571 if (err)
543 goto out; 572 goto out;
544 if (state->end <= end) { 573 if (state->end <= end) {
545 set |= clear_state_bit(tree, state, bits, 574 set |= clear_state_bit(tree, state, bits, wake,
546 wake, delete); 575 delete);
547 if (last_end == (u64)-1) 576 if (last_end == (u64)-1)
548 goto out; 577 goto out;
549 start = last_end + 1; 578 start = last_end + 1;
@@ -561,12 +590,11 @@ hit_next:
561 prealloc = alloc_extent_state(GFP_ATOMIC); 590 prealloc = alloc_extent_state(GFP_ATOMIC);
562 err = split_state(tree, state, prealloc, end + 1); 591 err = split_state(tree, state, prealloc, end + 1);
563 BUG_ON(err == -EEXIST); 592 BUG_ON(err == -EEXIST);
564
565 if (wake) 593 if (wake)
566 wake_up(&state->wq); 594 wake_up(&state->wq);
567 595
568 set |= clear_state_bit(tree, prealloc, bits, 596 set |= clear_state_bit(tree, prealloc, bits, wake, delete);
569 wake, delete); 597
570 prealloc = NULL; 598 prealloc = NULL;
571 goto out; 599 goto out;
572 } 600 }
@@ -667,16 +695,23 @@ out:
667 return 0; 695 return 0;
668} 696}
669 697
670static void set_state_bits(struct extent_io_tree *tree, 698static int set_state_bits(struct extent_io_tree *tree,
671 struct extent_state *state, 699 struct extent_state *state,
672 int bits) 700 int bits)
673{ 701{
702 int ret;
703
704 ret = set_state_cb(tree, state, bits);
705 if (ret)
706 return ret;
707
674 if ((bits & EXTENT_DIRTY) && !(state->state & EXTENT_DIRTY)) { 708 if ((bits & EXTENT_DIRTY) && !(state->state & EXTENT_DIRTY)) {
675 u64 range = state->end - state->start + 1; 709 u64 range = state->end - state->start + 1;
676 tree->dirty_bytes += range; 710 tree->dirty_bytes += range;
677 } 711 }
678 set_state_cb(tree, state, bits);
679 state->state |= bits; 712 state->state |= bits;
713
714 return 0;
680} 715}
681 716
682static void cache_state(struct extent_state *state, 717static void cache_state(struct extent_state *state,
@@ -758,7 +793,10 @@ hit_next:
758 goto out; 793 goto out;
759 } 794 }
760 795
761 set_state_bits(tree, state, bits); 796 err = set_state_bits(tree, state, bits);
797 if (err)
798 goto out;
799
762 cache_state(state, cached_state); 800 cache_state(state, cached_state);
763 merge_state(tree, state); 801 merge_state(tree, state);
764 if (last_end == (u64)-1) 802 if (last_end == (u64)-1)
@@ -805,7 +843,9 @@ hit_next:
805 if (err) 843 if (err)
806 goto out; 844 goto out;
807 if (state->end <= end) { 845 if (state->end <= end) {
808 set_state_bits(tree, state, bits); 846 err = set_state_bits(tree, state, bits);
847 if (err)
848 goto out;
809 cache_state(state, cached_state); 849 cache_state(state, cached_state);
810 merge_state(tree, state); 850 merge_state(tree, state);
811 if (last_end == (u64)-1) 851 if (last_end == (u64)-1)
@@ -829,11 +869,13 @@ hit_next:
829 this_end = last_start - 1; 869 this_end = last_start - 1;
830 err = insert_state(tree, prealloc, start, this_end, 870 err = insert_state(tree, prealloc, start, this_end,
831 bits); 871 bits);
832 cache_state(prealloc, cached_state);
833 prealloc = NULL;
834 BUG_ON(err == -EEXIST); 872 BUG_ON(err == -EEXIST);
835 if (err) 873 if (err) {
874 prealloc = NULL;
836 goto out; 875 goto out;
876 }
877 cache_state(prealloc, cached_state);
878 prealloc = NULL;
837 start = this_end + 1; 879 start = this_end + 1;
838 goto search_again; 880 goto search_again;
839 } 881 }
@@ -852,7 +894,11 @@ hit_next:
852 err = split_state(tree, state, prealloc, end + 1); 894 err = split_state(tree, state, prealloc, end + 1);
853 BUG_ON(err == -EEXIST); 895 BUG_ON(err == -EEXIST);
854 896
855 set_state_bits(tree, prealloc, bits); 897 err = set_state_bits(tree, prealloc, bits);
898 if (err) {
899 prealloc = NULL;
900 goto out;
901 }
856 cache_state(prealloc, cached_state); 902 cache_state(prealloc, cached_state);
857 merge_state(tree, prealloc); 903 merge_state(tree, prealloc);
858 prealloc = NULL; 904 prealloc = NULL;
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index 14ed16fd862d..4794ec891fed 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -60,8 +60,13 @@ struct extent_io_ops {
60 struct extent_state *state, int uptodate); 60 struct extent_state *state, int uptodate);
61 int (*set_bit_hook)(struct inode *inode, u64 start, u64 end, 61 int (*set_bit_hook)(struct inode *inode, u64 start, u64 end,
62 unsigned long old, unsigned long bits); 62 unsigned long old, unsigned long bits);
63 int (*clear_bit_hook)(struct inode *inode, u64 start, u64 end, 63 int (*clear_bit_hook)(struct inode *inode, struct extent_state *state,
64 unsigned long old, unsigned long bits); 64 unsigned long bits);
65 int (*merge_extent_hook)(struct inode *inode,
66 struct extent_state *new,
67 struct extent_state *other);
68 int (*split_extent_hook)(struct inode *inode,
69 struct extent_state *orig, u64 split);
65 int (*write_cache_pages_lock_hook)(struct page *page); 70 int (*write_cache_pages_lock_hook)(struct page *page);
66}; 71};
67 72
@@ -79,10 +84,14 @@ struct extent_state {
79 u64 start; 84 u64 start;
80 u64 end; /* inclusive */ 85 u64 end; /* inclusive */
81 struct rb_node rb_node; 86 struct rb_node rb_node;
87
88 /* ADD NEW ELEMENTS AFTER THIS */
82 struct extent_io_tree *tree; 89 struct extent_io_tree *tree;
83 wait_queue_head_t wq; 90 wait_queue_head_t wq;
84 atomic_t refs; 91 atomic_t refs;
85 unsigned long state; 92 unsigned long state;
93 u64 split_start;
94 u64 split_end;
86 95
87 /* for use by the FS */ 96 /* for use by the FS */
88 u64 private; 97 u64 private;
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 571ad3c13b47..f19e1259a971 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -123,7 +123,10 @@ static noinline int dirty_and_release_pages(struct btrfs_trans_handle *trans,
123 root->sectorsize - 1) & ~((u64)root->sectorsize - 1); 123 root->sectorsize - 1) & ~((u64)root->sectorsize - 1);
124 124
125 end_of_last_block = start_pos + num_bytes - 1; 125 end_of_last_block = start_pos + num_bytes - 1;
126 btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block); 126 err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block);
127 if (err)
128 return err;
129
127 for (i = 0; i < num_pages; i++) { 130 for (i = 0; i < num_pages; i++) {
128 struct page *p = pages[i]; 131 struct page *p = pages[i];
129 SetPageUptodate(p); 132 SetPageUptodate(p);
@@ -917,21 +920,35 @@ static ssize_t btrfs_file_write(struct file *file, const char __user *buf,
917 start_pos = pos; 920 start_pos = pos;
918 921
919 vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE); 922 vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);
923
924 /* do the reserve before the mutex lock in case we have to do some
925 * flushing. We wouldn't deadlock, but this is more polite.
926 */
927 err = btrfs_reserve_metadata_for_delalloc(root, inode, 1);
928 if (err)
929 goto out_nolock;
930
931 mutex_lock(&inode->i_mutex);
932
920 current->backing_dev_info = inode->i_mapping->backing_dev_info; 933 current->backing_dev_info = inode->i_mapping->backing_dev_info;
921 err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode)); 934 err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
922 if (err) 935 if (err)
923 goto out_nolock; 936 goto out;
937
924 if (count == 0) 938 if (count == 0)
925 goto out_nolock; 939 goto out;
926 940
927 err = file_remove_suid(file); 941 err = file_remove_suid(file);
928 if (err) 942 if (err)
929 goto out_nolock; 943 goto out;
944
930 file_update_time(file); 945 file_update_time(file);
931 946
932 pages = kmalloc(nrptrs * sizeof(struct page *), GFP_KERNEL); 947 pages = kmalloc(nrptrs * sizeof(struct page *), GFP_KERNEL);
933 948
934 mutex_lock(&inode->i_mutex); 949 /* generic_write_checks can change our pos */
950 start_pos = pos;
951
935 BTRFS_I(inode)->sequence++; 952 BTRFS_I(inode)->sequence++;
936 first_index = pos >> PAGE_CACHE_SHIFT; 953 first_index = pos >> PAGE_CACHE_SHIFT;
937 last_index = (pos + count) >> PAGE_CACHE_SHIFT; 954 last_index = (pos + count) >> PAGE_CACHE_SHIFT;
@@ -1005,9 +1022,8 @@ static ssize_t btrfs_file_write(struct file *file, const char __user *buf,
1005 } 1022 }
1006 1023
1007 if (will_write) { 1024 if (will_write) {
1008 btrfs_fdatawrite_range(inode->i_mapping, pos, 1025 filemap_fdatawrite_range(inode->i_mapping, pos,
1009 pos + write_bytes - 1, 1026 pos + write_bytes - 1);
1010 WB_SYNC_ALL);
1011 } else { 1027 } else {
1012 balance_dirty_pages_ratelimited_nr(inode->i_mapping, 1028 balance_dirty_pages_ratelimited_nr(inode->i_mapping,
1013 num_pages); 1029 num_pages);
@@ -1028,6 +1044,7 @@ out:
1028 mutex_unlock(&inode->i_mutex); 1044 mutex_unlock(&inode->i_mutex);
1029 if (ret) 1045 if (ret)
1030 err = ret; 1046 err = ret;
1047 btrfs_unreserve_metadata_for_delalloc(root, inode, 1);
1031 1048
1032out_nolock: 1049out_nolock:
1033 kfree(pages); 1050 kfree(pages);
@@ -1184,7 +1201,7 @@ out:
1184 return ret > 0 ? EIO : ret; 1201 return ret > 0 ? EIO : ret;
1185} 1202}
1186 1203
1187static struct vm_operations_struct btrfs_file_vm_ops = { 1204static const struct vm_operations_struct btrfs_file_vm_ops = {
1188 .fault = filemap_fault, 1205 .fault = filemap_fault,
1189 .page_mkwrite = btrfs_page_mkwrite, 1206 .page_mkwrite = btrfs_page_mkwrite,
1190}; 1207};
@@ -1196,7 +1213,7 @@ static int btrfs_file_mmap(struct file *filp, struct vm_area_struct *vma)
1196 return 0; 1213 return 0;
1197} 1214}
1198 1215
1199struct file_operations btrfs_file_operations = { 1216const struct file_operations btrfs_file_operations = {
1200 .llseek = generic_file_llseek, 1217 .llseek = generic_file_llseek,
1201 .read = do_sync_read, 1218 .read = do_sync_read,
1202 .aio_read = generic_file_aio_read, 1219 .aio_read = generic_file_aio_read,
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index e9b76bcd1c12..112e5aa85892 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -62,7 +62,7 @@ static const struct inode_operations btrfs_special_inode_operations;
62static const struct inode_operations btrfs_file_inode_operations; 62static const struct inode_operations btrfs_file_inode_operations;
63static const struct address_space_operations btrfs_aops; 63static const struct address_space_operations btrfs_aops;
64static const struct address_space_operations btrfs_symlink_aops; 64static const struct address_space_operations btrfs_symlink_aops;
65static struct file_operations btrfs_dir_file_operations; 65static const struct file_operations btrfs_dir_file_operations;
66static struct extent_io_ops btrfs_extent_io_ops; 66static struct extent_io_ops btrfs_extent_io_ops;
67 67
68static struct kmem_cache *btrfs_inode_cachep; 68static struct kmem_cache *btrfs_inode_cachep;
@@ -1159,6 +1159,83 @@ static int run_delalloc_range(struct inode *inode, struct page *locked_page,
1159 return ret; 1159 return ret;
1160} 1160}
1161 1161
1162static int btrfs_split_extent_hook(struct inode *inode,
1163 struct extent_state *orig, u64 split)
1164{
1165 struct btrfs_root *root = BTRFS_I(inode)->root;
1166 u64 size;
1167
1168 if (!(orig->state & EXTENT_DELALLOC))
1169 return 0;
1170
1171 size = orig->end - orig->start + 1;
1172 if (size > root->fs_info->max_extent) {
1173 u64 num_extents;
1174 u64 new_size;
1175
1176 new_size = orig->end - split + 1;
1177 num_extents = div64_u64(size + root->fs_info->max_extent - 1,
1178 root->fs_info->max_extent);
1179
1180 /*
1181 * if we break a large extent up then leave delalloc_extents be,
1182 * since we've already accounted for the large extent.
1183 */
1184 if (div64_u64(new_size + root->fs_info->max_extent - 1,
1185 root->fs_info->max_extent) < num_extents)
1186 return 0;
1187 }
1188
1189 BTRFS_I(inode)->delalloc_extents++;
1190
1191 return 0;
1192}
1193
1194/*
1195 * extent_io.c merge_extent_hook, used to track merged delayed allocation
1196 * extents so we can keep track of new extents that are just merged onto old
1197 * extents, such as when we are doing sequential writes, so we can properly
1198 * account for the metadata space we'll need.
1199 */
1200static int btrfs_merge_extent_hook(struct inode *inode,
1201 struct extent_state *new,
1202 struct extent_state *other)
1203{
1204 struct btrfs_root *root = BTRFS_I(inode)->root;
1205 u64 new_size, old_size;
1206 u64 num_extents;
1207
1208 /* not delalloc, ignore it */
1209 if (!(other->state & EXTENT_DELALLOC))
1210 return 0;
1211
1212 old_size = other->end - other->start + 1;
1213 if (new->start < other->start)
1214 new_size = other->end - new->start + 1;
1215 else
1216 new_size = new->end - other->start + 1;
1217
1218 /* we're not bigger than the max, unreserve the space and go */
1219 if (new_size <= root->fs_info->max_extent) {
1220 BTRFS_I(inode)->delalloc_extents--;
1221 return 0;
1222 }
1223
1224 /*
1225 * If we grew by another max_extent, just return, we want to keep that
1226 * reserved amount.
1227 */
1228 num_extents = div64_u64(old_size + root->fs_info->max_extent - 1,
1229 root->fs_info->max_extent);
1230 if (div64_u64(new_size + root->fs_info->max_extent - 1,
1231 root->fs_info->max_extent) > num_extents)
1232 return 0;
1233
1234 BTRFS_I(inode)->delalloc_extents--;
1235
1236 return 0;
1237}
1238
1162/* 1239/*
1163 * extent_io.c set_bit_hook, used to track delayed allocation 1240 * extent_io.c set_bit_hook, used to track delayed allocation
1164 * bytes in this file, and to maintain the list of inodes that 1241 * bytes in this file, and to maintain the list of inodes that
@@ -1167,6 +1244,7 @@ static int run_delalloc_range(struct inode *inode, struct page *locked_page,
1167static int btrfs_set_bit_hook(struct inode *inode, u64 start, u64 end, 1244static int btrfs_set_bit_hook(struct inode *inode, u64 start, u64 end,
1168 unsigned long old, unsigned long bits) 1245 unsigned long old, unsigned long bits)
1169{ 1246{
1247
1170 /* 1248 /*
1171 * set_bit and clear bit hooks normally require _irqsave/restore 1249 * set_bit and clear bit hooks normally require _irqsave/restore
1172 * but in this case, we are only testeing for the DELALLOC 1250 * but in this case, we are only testeing for the DELALLOC
@@ -1174,6 +1252,8 @@ static int btrfs_set_bit_hook(struct inode *inode, u64 start, u64 end,
1174 */ 1252 */
1175 if (!(old & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) { 1253 if (!(old & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) {
1176 struct btrfs_root *root = BTRFS_I(inode)->root; 1254 struct btrfs_root *root = BTRFS_I(inode)->root;
1255
1256 BTRFS_I(inode)->delalloc_extents++;
1177 btrfs_delalloc_reserve_space(root, inode, end - start + 1); 1257 btrfs_delalloc_reserve_space(root, inode, end - start + 1);
1178 spin_lock(&root->fs_info->delalloc_lock); 1258 spin_lock(&root->fs_info->delalloc_lock);
1179 BTRFS_I(inode)->delalloc_bytes += end - start + 1; 1259 BTRFS_I(inode)->delalloc_bytes += end - start + 1;
@@ -1190,22 +1270,27 @@ static int btrfs_set_bit_hook(struct inode *inode, u64 start, u64 end,
1190/* 1270/*
1191 * extent_io.c clear_bit_hook, see set_bit_hook for why 1271 * extent_io.c clear_bit_hook, see set_bit_hook for why
1192 */ 1272 */
1193static int btrfs_clear_bit_hook(struct inode *inode, u64 start, u64 end, 1273static int btrfs_clear_bit_hook(struct inode *inode,
1194 unsigned long old, unsigned long bits) 1274 struct extent_state *state, unsigned long bits)
1195{ 1275{
1196 /* 1276 /*
1197 * set_bit and clear bit hooks normally require _irqsave/restore 1277 * set_bit and clear bit hooks normally require _irqsave/restore
1198 * but in this case, we are only testeing for the DELALLOC 1278 * but in this case, we are only testeing for the DELALLOC
1199 * bit, which is only set or cleared with irqs on 1279 * bit, which is only set or cleared with irqs on
1200 */ 1280 */
1201 if ((old & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) { 1281 if ((state->state & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) {
1202 struct btrfs_root *root = BTRFS_I(inode)->root; 1282 struct btrfs_root *root = BTRFS_I(inode)->root;
1203 1283
1284 BTRFS_I(inode)->delalloc_extents--;
1285 btrfs_unreserve_metadata_for_delalloc(root, inode, 1);
1286
1204 spin_lock(&root->fs_info->delalloc_lock); 1287 spin_lock(&root->fs_info->delalloc_lock);
1205 if (end - start + 1 > root->fs_info->delalloc_bytes) { 1288 if (state->end - state->start + 1 >
1289 root->fs_info->delalloc_bytes) {
1206 printk(KERN_INFO "btrfs warning: delalloc account " 1290 printk(KERN_INFO "btrfs warning: delalloc account "
1207 "%llu %llu\n", 1291 "%llu %llu\n",
1208 (unsigned long long)end - start + 1, 1292 (unsigned long long)
1293 state->end - state->start + 1,
1209 (unsigned long long) 1294 (unsigned long long)
1210 root->fs_info->delalloc_bytes); 1295 root->fs_info->delalloc_bytes);
1211 btrfs_delalloc_free_space(root, inode, (u64)-1); 1296 btrfs_delalloc_free_space(root, inode, (u64)-1);
@@ -1213,9 +1298,12 @@ static int btrfs_clear_bit_hook(struct inode *inode, u64 start, u64 end,
1213 BTRFS_I(inode)->delalloc_bytes = 0; 1298 BTRFS_I(inode)->delalloc_bytes = 0;
1214 } else { 1299 } else {
1215 btrfs_delalloc_free_space(root, inode, 1300 btrfs_delalloc_free_space(root, inode,
1216 end - start + 1); 1301 state->end -
1217 root->fs_info->delalloc_bytes -= end - start + 1; 1302 state->start + 1);
1218 BTRFS_I(inode)->delalloc_bytes -= end - start + 1; 1303 root->fs_info->delalloc_bytes -= state->end -
1304 state->start + 1;
1305 BTRFS_I(inode)->delalloc_bytes -= state->end -
1306 state->start + 1;
1219 } 1307 }
1220 if (BTRFS_I(inode)->delalloc_bytes == 0 && 1308 if (BTRFS_I(inode)->delalloc_bytes == 0 &&
1221 !list_empty(&BTRFS_I(inode)->delalloc_inodes)) { 1309 !list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
@@ -2950,7 +3038,12 @@ again:
2950 goto again; 3038 goto again;
2951 } 3039 }
2952 3040
2953 btrfs_set_extent_delalloc(inode, page_start, page_end); 3041 ret = btrfs_set_extent_delalloc(inode, page_start, page_end);
3042 if (ret) {
3043 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
3044 goto out_unlock;
3045 }
3046
2954 ret = 0; 3047 ret = 0;
2955 if (offset != PAGE_CACHE_SIZE) { 3048 if (offset != PAGE_CACHE_SIZE) {
2956 kaddr = kmap(page); 3049 kaddr = kmap(page);
@@ -2981,15 +3074,11 @@ int btrfs_cont_expand(struct inode *inode, loff_t size)
2981 u64 last_byte; 3074 u64 last_byte;
2982 u64 cur_offset; 3075 u64 cur_offset;
2983 u64 hole_size; 3076 u64 hole_size;
2984 int err; 3077 int err = 0;
2985 3078
2986 if (size <= hole_start) 3079 if (size <= hole_start)
2987 return 0; 3080 return 0;
2988 3081
2989 err = btrfs_check_metadata_free_space(root);
2990 if (err)
2991 return err;
2992
2993 btrfs_truncate_page(inode->i_mapping, inode->i_size); 3082 btrfs_truncate_page(inode->i_mapping, inode->i_size);
2994 3083
2995 while (1) { 3084 while (1) {
@@ -3024,12 +3113,18 @@ int btrfs_cont_expand(struct inode *inode, loff_t size)
3024 cur_offset, &hint_byte, 1); 3113 cur_offset, &hint_byte, 1);
3025 if (err) 3114 if (err)
3026 break; 3115 break;
3116
3117 err = btrfs_reserve_metadata_space(root, 1);
3118 if (err)
3119 break;
3120
3027 err = btrfs_insert_file_extent(trans, root, 3121 err = btrfs_insert_file_extent(trans, root,
3028 inode->i_ino, cur_offset, 0, 3122 inode->i_ino, cur_offset, 0,
3029 0, hole_size, 0, hole_size, 3123 0, hole_size, 0, hole_size,
3030 0, 0, 0); 3124 0, 0, 0);
3031 btrfs_drop_extent_cache(inode, hole_start, 3125 btrfs_drop_extent_cache(inode, hole_start,
3032 last_byte - 1, 0); 3126 last_byte - 1, 0);
3127 btrfs_unreserve_metadata_space(root, 1);
3033 } 3128 }
3034 free_extent_map(em); 3129 free_extent_map(em);
3035 cur_offset = last_byte; 3130 cur_offset = last_byte;
@@ -3990,11 +4085,18 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
3990 if (!new_valid_dev(rdev)) 4085 if (!new_valid_dev(rdev))
3991 return -EINVAL; 4086 return -EINVAL;
3992 4087
3993 err = btrfs_check_metadata_free_space(root); 4088 /*
4089 * 2 for inode item and ref
4090 * 2 for dir items
4091 * 1 for xattr if selinux is on
4092 */
4093 err = btrfs_reserve_metadata_space(root, 5);
3994 if (err) 4094 if (err)
3995 goto fail; 4095 return err;
3996 4096
3997 trans = btrfs_start_transaction(root, 1); 4097 trans = btrfs_start_transaction(root, 1);
4098 if (!trans)
4099 goto fail;
3998 btrfs_set_trans_block_group(trans, dir); 4100 btrfs_set_trans_block_group(trans, dir);
3999 4101
4000 err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid); 4102 err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
@@ -4032,6 +4134,7 @@ out_unlock:
4032 nr = trans->blocks_used; 4134 nr = trans->blocks_used;
4033 btrfs_end_transaction_throttle(trans, root); 4135 btrfs_end_transaction_throttle(trans, root);
4034fail: 4136fail:
4137 btrfs_unreserve_metadata_space(root, 5);
4035 if (drop_inode) { 4138 if (drop_inode) {
4036 inode_dec_link_count(inode); 4139 inode_dec_link_count(inode);
4037 iput(inode); 4140 iput(inode);
@@ -4052,10 +4155,18 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry,
4052 u64 objectid; 4155 u64 objectid;
4053 u64 index = 0; 4156 u64 index = 0;
4054 4157
4055 err = btrfs_check_metadata_free_space(root); 4158 /*
4159 * 2 for inode item and ref
4160 * 2 for dir items
4161 * 1 for xattr if selinux is on
4162 */
4163 err = btrfs_reserve_metadata_space(root, 5);
4056 if (err) 4164 if (err)
4057 goto fail; 4165 return err;
4166
4058 trans = btrfs_start_transaction(root, 1); 4167 trans = btrfs_start_transaction(root, 1);
4168 if (!trans)
4169 goto fail;
4059 btrfs_set_trans_block_group(trans, dir); 4170 btrfs_set_trans_block_group(trans, dir);
4060 4171
4061 err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid); 4172 err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
@@ -4096,6 +4207,7 @@ out_unlock:
4096 nr = trans->blocks_used; 4207 nr = trans->blocks_used;
4097 btrfs_end_transaction_throttle(trans, root); 4208 btrfs_end_transaction_throttle(trans, root);
4098fail: 4209fail:
4210 btrfs_unreserve_metadata_space(root, 5);
4099 if (drop_inode) { 4211 if (drop_inode) {
4100 inode_dec_link_count(inode); 4212 inode_dec_link_count(inode);
4101 iput(inode); 4213 iput(inode);
@@ -4118,10 +4230,16 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
4118 if (inode->i_nlink == 0) 4230 if (inode->i_nlink == 0)
4119 return -ENOENT; 4231 return -ENOENT;
4120 4232
4121 btrfs_inc_nlink(inode); 4233 /*
4122 err = btrfs_check_metadata_free_space(root); 4234 * 1 item for inode ref
4235 * 2 items for dir items
4236 */
4237 err = btrfs_reserve_metadata_space(root, 3);
4123 if (err) 4238 if (err)
4124 goto fail; 4239 return err;
4240
4241 btrfs_inc_nlink(inode);
4242
4125 err = btrfs_set_inode_index(dir, &index); 4243 err = btrfs_set_inode_index(dir, &index);
4126 if (err) 4244 if (err)
4127 goto fail; 4245 goto fail;
@@ -4145,6 +4263,7 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
4145 nr = trans->blocks_used; 4263 nr = trans->blocks_used;
4146 btrfs_end_transaction_throttle(trans, root); 4264 btrfs_end_transaction_throttle(trans, root);
4147fail: 4265fail:
4266 btrfs_unreserve_metadata_space(root, 3);
4148 if (drop_inode) { 4267 if (drop_inode) {
4149 inode_dec_link_count(inode); 4268 inode_dec_link_count(inode);
4150 iput(inode); 4269 iput(inode);
@@ -4164,17 +4283,21 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
4164 u64 index = 0; 4283 u64 index = 0;
4165 unsigned long nr = 1; 4284 unsigned long nr = 1;
4166 4285
4167 err = btrfs_check_metadata_free_space(root); 4286 /*
4287 * 2 items for inode and ref
4288 * 2 items for dir items
4289 * 1 for xattr if selinux is on
4290 */
4291 err = btrfs_reserve_metadata_space(root, 5);
4168 if (err) 4292 if (err)
4169 goto out_unlock; 4293 return err;
4170 4294
4171 trans = btrfs_start_transaction(root, 1); 4295 trans = btrfs_start_transaction(root, 1);
4172 btrfs_set_trans_block_group(trans, dir); 4296 if (!trans) {
4173 4297 err = -ENOMEM;
4174 if (IS_ERR(trans)) {
4175 err = PTR_ERR(trans);
4176 goto out_unlock; 4298 goto out_unlock;
4177 } 4299 }
4300 btrfs_set_trans_block_group(trans, dir);
4178 4301
4179 err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid); 4302 err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
4180 if (err) { 4303 if (err) {
@@ -4223,6 +4346,7 @@ out_fail:
4223 btrfs_end_transaction_throttle(trans, root); 4346 btrfs_end_transaction_throttle(trans, root);
4224 4347
4225out_unlock: 4348out_unlock:
4349 btrfs_unreserve_metadata_space(root, 5);
4226 if (drop_on_err) 4350 if (drop_on_err)
4227 iput(inode); 4351 iput(inode);
4228 btrfs_btree_balance_dirty(root, nr); 4352 btrfs_btree_balance_dirty(root, nr);
@@ -4747,6 +4871,13 @@ int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
4747 goto out; 4871 goto out;
4748 } 4872 }
4749 4873
4874 ret = btrfs_reserve_metadata_for_delalloc(root, inode, 1);
4875 if (ret) {
4876 btrfs_free_reserved_data_space(root, inode, PAGE_CACHE_SIZE);
4877 ret = VM_FAULT_SIGBUS;
4878 goto out;
4879 }
4880
4750 ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */ 4881 ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */
4751again: 4882again:
4752 lock_page(page); 4883 lock_page(page);
@@ -4778,7 +4909,23 @@ again:
4778 goto again; 4909 goto again;
4779 } 4910 }
4780 4911
4781 btrfs_set_extent_delalloc(inode, page_start, page_end); 4912 /*
4913 * XXX - page_mkwrite gets called every time the page is dirtied, even
4914 * if it was already dirty, so for space accounting reasons we need to
4915 * clear any delalloc bits for the range we are fixing to save. There
4916 * is probably a better way to do this, but for now keep consistent with
4917 * prepare_pages in the normal write path.
4918 */
4919 clear_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end,
4920 EXTENT_DIRTY | EXTENT_DELALLOC, GFP_NOFS);
4921
4922 ret = btrfs_set_extent_delalloc(inode, page_start, page_end);
4923 if (ret) {
4924 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
4925 ret = VM_FAULT_SIGBUS;
4926 btrfs_free_reserved_data_space(root, inode, PAGE_CACHE_SIZE);
4927 goto out_unlock;
4928 }
4782 ret = 0; 4929 ret = 0;
4783 4930
4784 /* page is wholly or partially inside EOF */ 4931 /* page is wholly or partially inside EOF */
@@ -4801,6 +4948,7 @@ again:
4801 unlock_extent(io_tree, page_start, page_end, GFP_NOFS); 4948 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
4802 4949
4803out_unlock: 4950out_unlock:
4951 btrfs_unreserve_metadata_for_delalloc(root, inode, 1);
4804 if (!ret) 4952 if (!ret)
4805 return VM_FAULT_LOCKED; 4953 return VM_FAULT_LOCKED;
4806 unlock_page(page); 4954 unlock_page(page);
@@ -4917,6 +5065,8 @@ struct inode *btrfs_alloc_inode(struct super_block *sb)
4917 return NULL; 5065 return NULL;
4918 ei->last_trans = 0; 5066 ei->last_trans = 0;
4919 ei->logged_trans = 0; 5067 ei->logged_trans = 0;
5068 ei->delalloc_extents = 0;
5069 ei->delalloc_reserved_extents = 0;
4920 btrfs_ordered_inode_tree_init(&ei->ordered_tree); 5070 btrfs_ordered_inode_tree_init(&ei->ordered_tree);
4921 INIT_LIST_HEAD(&ei->i_orphan); 5071 INIT_LIST_HEAD(&ei->i_orphan);
4922 INIT_LIST_HEAD(&ei->ordered_operations); 5072 INIT_LIST_HEAD(&ei->ordered_operations);
@@ -5070,7 +5220,12 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
5070 new_inode->i_size > BTRFS_EMPTY_DIR_SIZE) 5220 new_inode->i_size > BTRFS_EMPTY_DIR_SIZE)
5071 return -ENOTEMPTY; 5221 return -ENOTEMPTY;
5072 5222
5073 ret = btrfs_check_metadata_free_space(root); 5223 /*
5224 * 2 items for dir items
5225 * 1 item for orphan entry
5226 * 1 item for ref
5227 */
5228 ret = btrfs_reserve_metadata_space(root, 4);
5074 if (ret) 5229 if (ret)
5075 return ret; 5230 return ret;
5076 5231
@@ -5185,6 +5340,8 @@ out_fail:
5185 5340
5186 if (old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID) 5341 if (old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)
5187 up_read(&root->fs_info->subvol_sem); 5342 up_read(&root->fs_info->subvol_sem);
5343
5344 btrfs_unreserve_metadata_space(root, 4);
5188 return ret; 5345 return ret;
5189} 5346}
5190 5347
@@ -5256,11 +5413,18 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
5256 if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(root)) 5413 if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(root))
5257 return -ENAMETOOLONG; 5414 return -ENAMETOOLONG;
5258 5415
5259 err = btrfs_check_metadata_free_space(root); 5416 /*
5417 * 2 items for inode item and ref
5418 * 2 items for dir items
5419 * 1 item for xattr if selinux is on
5420 */
5421 err = btrfs_reserve_metadata_space(root, 5);
5260 if (err) 5422 if (err)
5261 goto out_fail; 5423 return err;
5262 5424
5263 trans = btrfs_start_transaction(root, 1); 5425 trans = btrfs_start_transaction(root, 1);
5426 if (!trans)
5427 goto out_fail;
5264 btrfs_set_trans_block_group(trans, dir); 5428 btrfs_set_trans_block_group(trans, dir);
5265 5429
5266 err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid); 5430 err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
@@ -5341,6 +5505,7 @@ out_unlock:
5341 nr = trans->blocks_used; 5505 nr = trans->blocks_used;
5342 btrfs_end_transaction_throttle(trans, root); 5506 btrfs_end_transaction_throttle(trans, root);
5343out_fail: 5507out_fail:
5508 btrfs_unreserve_metadata_space(root, 5);
5344 if (drop_inode) { 5509 if (drop_inode) {
5345 inode_dec_link_count(inode); 5510 inode_dec_link_count(inode);
5346 iput(inode); 5511 iput(inode);
@@ -5362,6 +5527,11 @@ static int prealloc_file_range(struct btrfs_trans_handle *trans,
5362 5527
5363 while (num_bytes > 0) { 5528 while (num_bytes > 0) {
5364 alloc_size = min(num_bytes, root->fs_info->max_extent); 5529 alloc_size = min(num_bytes, root->fs_info->max_extent);
5530
5531 ret = btrfs_reserve_metadata_space(root, 1);
5532 if (ret)
5533 goto out;
5534
5365 ret = btrfs_reserve_extent(trans, root, alloc_size, 5535 ret = btrfs_reserve_extent(trans, root, alloc_size,
5366 root->sectorsize, 0, alloc_hint, 5536 root->sectorsize, 0, alloc_hint,
5367 (u64)-1, &ins, 1); 5537 (u64)-1, &ins, 1);
@@ -5381,6 +5551,7 @@ static int prealloc_file_range(struct btrfs_trans_handle *trans,
5381 num_bytes -= ins.offset; 5551 num_bytes -= ins.offset;
5382 cur_offset += ins.offset; 5552 cur_offset += ins.offset;
5383 alloc_hint = ins.objectid + ins.offset; 5553 alloc_hint = ins.objectid + ins.offset;
5554 btrfs_unreserve_metadata_space(root, 1);
5384 } 5555 }
5385out: 5556out:
5386 if (cur_offset > start) { 5557 if (cur_offset > start) {
@@ -5544,7 +5715,7 @@ static const struct inode_operations btrfs_dir_ro_inode_operations = {
5544 .permission = btrfs_permission, 5715 .permission = btrfs_permission,
5545}; 5716};
5546 5717
5547static struct file_operations btrfs_dir_file_operations = { 5718static const struct file_operations btrfs_dir_file_operations = {
5548 .llseek = generic_file_llseek, 5719 .llseek = generic_file_llseek,
5549 .read = generic_read_dir, 5720 .read = generic_read_dir,
5550 .readdir = btrfs_real_readdir, 5721 .readdir = btrfs_real_readdir,
@@ -5566,6 +5737,8 @@ static struct extent_io_ops btrfs_extent_io_ops = {
5566 .readpage_io_failed_hook = btrfs_io_failed_hook, 5737 .readpage_io_failed_hook = btrfs_io_failed_hook,
5567 .set_bit_hook = btrfs_set_bit_hook, 5738 .set_bit_hook = btrfs_set_bit_hook,
5568 .clear_bit_hook = btrfs_clear_bit_hook, 5739 .clear_bit_hook = btrfs_clear_bit_hook,
5740 .merge_extent_hook = btrfs_merge_extent_hook,
5741 .split_extent_hook = btrfs_split_extent_hook,
5569}; 5742};
5570 5743
5571/* 5744/*
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index a8577a7f26ab..9a780c8d0ac8 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -239,7 +239,13 @@ static noinline int create_subvol(struct btrfs_root *root,
239 u64 index = 0; 239 u64 index = 0;
240 unsigned long nr = 1; 240 unsigned long nr = 1;
241 241
242 ret = btrfs_check_metadata_free_space(root); 242 /*
243 * 1 - inode item
244 * 2 - refs
245 * 1 - root item
246 * 2 - dir items
247 */
248 ret = btrfs_reserve_metadata_space(root, 6);
243 if (ret) 249 if (ret)
244 return ret; 250 return ret;
245 251
@@ -340,6 +346,9 @@ fail:
340 err = btrfs_commit_transaction(trans, root); 346 err = btrfs_commit_transaction(trans, root);
341 if (err && !ret) 347 if (err && !ret)
342 ret = err; 348 ret = err;
349
350 btrfs_unreserve_metadata_space(root, 6);
351 btrfs_btree_balance_dirty(root, nr);
343 return ret; 352 return ret;
344} 353}
345 354
@@ -355,19 +364,27 @@ static int create_snapshot(struct btrfs_root *root, struct dentry *dentry,
355 if (!root->ref_cows) 364 if (!root->ref_cows)
356 return -EINVAL; 365 return -EINVAL;
357 366
358 ret = btrfs_check_metadata_free_space(root); 367 /*
368 * 1 - inode item
369 * 2 - refs
370 * 1 - root item
371 * 2 - dir items
372 */
373 ret = btrfs_reserve_metadata_space(root, 6);
359 if (ret) 374 if (ret)
360 goto fail_unlock; 375 goto fail_unlock;
361 376
362 pending_snapshot = kzalloc(sizeof(*pending_snapshot), GFP_NOFS); 377 pending_snapshot = kzalloc(sizeof(*pending_snapshot), GFP_NOFS);
363 if (!pending_snapshot) { 378 if (!pending_snapshot) {
364 ret = -ENOMEM; 379 ret = -ENOMEM;
380 btrfs_unreserve_metadata_space(root, 6);
365 goto fail_unlock; 381 goto fail_unlock;
366 } 382 }
367 pending_snapshot->name = kmalloc(namelen + 1, GFP_NOFS); 383 pending_snapshot->name = kmalloc(namelen + 1, GFP_NOFS);
368 if (!pending_snapshot->name) { 384 if (!pending_snapshot->name) {
369 ret = -ENOMEM; 385 ret = -ENOMEM;
370 kfree(pending_snapshot); 386 kfree(pending_snapshot);
387 btrfs_unreserve_metadata_space(root, 6);
371 goto fail_unlock; 388 goto fail_unlock;
372 } 389 }
373 memcpy(pending_snapshot->name, name, namelen); 390 memcpy(pending_snapshot->name, name, namelen);
@@ -1215,15 +1232,15 @@ static long btrfs_ioctl_trans_start(struct file *file)
1215 struct inode *inode = fdentry(file)->d_inode; 1232 struct inode *inode = fdentry(file)->d_inode;
1216 struct btrfs_root *root = BTRFS_I(inode)->root; 1233 struct btrfs_root *root = BTRFS_I(inode)->root;
1217 struct btrfs_trans_handle *trans; 1234 struct btrfs_trans_handle *trans;
1218 int ret = 0; 1235 int ret;
1219 1236
1237 ret = -EPERM;
1220 if (!capable(CAP_SYS_ADMIN)) 1238 if (!capable(CAP_SYS_ADMIN))
1221 return -EPERM; 1239 goto out;
1222 1240
1223 if (file->private_data) { 1241 ret = -EINPROGRESS;
1224 ret = -EINPROGRESS; 1242 if (file->private_data)
1225 goto out; 1243 goto out;
1226 }
1227 1244
1228 ret = mnt_want_write(file->f_path.mnt); 1245 ret = mnt_want_write(file->f_path.mnt);
1229 if (ret) 1246 if (ret)
@@ -1233,12 +1250,19 @@ static long btrfs_ioctl_trans_start(struct file *file)
1233 root->fs_info->open_ioctl_trans++; 1250 root->fs_info->open_ioctl_trans++;
1234 mutex_unlock(&root->fs_info->trans_mutex); 1251 mutex_unlock(&root->fs_info->trans_mutex);
1235 1252
1253 ret = -ENOMEM;
1236 trans = btrfs_start_ioctl_transaction(root, 0); 1254 trans = btrfs_start_ioctl_transaction(root, 0);
1237 if (trans) 1255 if (!trans)
1238 file->private_data = trans; 1256 goto out_drop;
1239 else 1257
1240 ret = -ENOMEM; 1258 file->private_data = trans;
1241 /*printk(KERN_INFO "btrfs_ioctl_trans_start on %p\n", file);*/ 1259 return 0;
1260
1261out_drop:
1262 mutex_lock(&root->fs_info->trans_mutex);
1263 root->fs_info->open_ioctl_trans--;
1264 mutex_unlock(&root->fs_info->trans_mutex);
1265 mnt_drop_write(file->f_path.mnt);
1242out: 1266out:
1243 return ret; 1267 return ret;
1244} 1268}
@@ -1254,24 +1278,20 @@ long btrfs_ioctl_trans_end(struct file *file)
1254 struct inode *inode = fdentry(file)->d_inode; 1278 struct inode *inode = fdentry(file)->d_inode;
1255 struct btrfs_root *root = BTRFS_I(inode)->root; 1279 struct btrfs_root *root = BTRFS_I(inode)->root;
1256 struct btrfs_trans_handle *trans; 1280 struct btrfs_trans_handle *trans;
1257 int ret = 0;
1258 1281
1259 trans = file->private_data; 1282 trans = file->private_data;
1260 if (!trans) { 1283 if (!trans)
1261 ret = -EINVAL; 1284 return -EINVAL;
1262 goto out;
1263 }
1264 btrfs_end_transaction(trans, root);
1265 file->private_data = NULL; 1285 file->private_data = NULL;
1266 1286
1287 btrfs_end_transaction(trans, root);
1288
1267 mutex_lock(&root->fs_info->trans_mutex); 1289 mutex_lock(&root->fs_info->trans_mutex);
1268 root->fs_info->open_ioctl_trans--; 1290 root->fs_info->open_ioctl_trans--;
1269 mutex_unlock(&root->fs_info->trans_mutex); 1291 mutex_unlock(&root->fs_info->trans_mutex);
1270 1292
1271 mnt_drop_write(file->f_path.mnt); 1293 mnt_drop_write(file->f_path.mnt);
1272 1294 return 0;
1273out:
1274 return ret;
1275} 1295}
1276 1296
1277long btrfs_ioctl(struct file *file, unsigned int 1297long btrfs_ioctl(struct file *file, unsigned int
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index b5d6d24726b0..897fba835f89 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -458,7 +458,7 @@ void btrfs_start_ordered_extent(struct inode *inode,
458 * start IO on any dirty ones so the wait doesn't stall waiting 458 * start IO on any dirty ones so the wait doesn't stall waiting
459 * for pdflush to find them 459 * for pdflush to find them
460 */ 460 */
461 btrfs_fdatawrite_range(inode->i_mapping, start, end, WB_SYNC_ALL); 461 filemap_fdatawrite_range(inode->i_mapping, start, end);
462 if (wait) { 462 if (wait) {
463 wait_event(entry->wait, test_bit(BTRFS_ORDERED_COMPLETE, 463 wait_event(entry->wait, test_bit(BTRFS_ORDERED_COMPLETE,
464 &entry->flags)); 464 &entry->flags));
@@ -488,17 +488,15 @@ again:
488 /* start IO across the range first to instantiate any delalloc 488 /* start IO across the range first to instantiate any delalloc
489 * extents 489 * extents
490 */ 490 */
491 btrfs_fdatawrite_range(inode->i_mapping, start, orig_end, WB_SYNC_ALL); 491 filemap_fdatawrite_range(inode->i_mapping, start, orig_end);
492 492
493 /* The compression code will leave pages locked but return from 493 /* The compression code will leave pages locked but return from
494 * writepage without setting the page writeback. Starting again 494 * writepage without setting the page writeback. Starting again
495 * with WB_SYNC_ALL will end up waiting for the IO to actually start. 495 * with WB_SYNC_ALL will end up waiting for the IO to actually start.
496 */ 496 */
497 btrfs_fdatawrite_range(inode->i_mapping, start, orig_end, WB_SYNC_ALL); 497 filemap_fdatawrite_range(inode->i_mapping, start, orig_end);
498 498
499 btrfs_wait_on_page_writeback_range(inode->i_mapping, 499 filemap_fdatawait_range(inode->i_mapping, start, orig_end);
500 start >> PAGE_CACHE_SHIFT,
501 orig_end >> PAGE_CACHE_SHIFT);
502 500
503 end = orig_end; 501 end = orig_end;
504 found = 0; 502 found = 0;
@@ -716,89 +714,6 @@ out:
716} 714}
717 715
718 716
719/**
720 * taken from mm/filemap.c because it isn't exported
721 *
722 * __filemap_fdatawrite_range - start writeback on mapping dirty pages in range
723 * @mapping: address space structure to write
724 * @start: offset in bytes where the range starts
725 * @end: offset in bytes where the range ends (inclusive)
726 * @sync_mode: enable synchronous operation
727 *
728 * Start writeback against all of a mapping's dirty pages that lie
729 * within the byte offsets <start, end> inclusive.
730 *
731 * If sync_mode is WB_SYNC_ALL then this is a "data integrity" operation, as
732 * opposed to a regular memory cleansing writeback. The difference between
733 * these two operations is that if a dirty page/buffer is encountered, it must
734 * be waited upon, and not just skipped over.
735 */
736int btrfs_fdatawrite_range(struct address_space *mapping, loff_t start,
737 loff_t end, int sync_mode)
738{
739 struct writeback_control wbc = {
740 .sync_mode = sync_mode,
741 .nr_to_write = mapping->nrpages * 2,
742 .range_start = start,
743 .range_end = end,
744 };
745 return btrfs_writepages(mapping, &wbc);
746}
747
748/**
749 * taken from mm/filemap.c because it isn't exported
750 *
751 * wait_on_page_writeback_range - wait for writeback to complete
752 * @mapping: target address_space
753 * @start: beginning page index
754 * @end: ending page index
755 *
756 * Wait for writeback to complete against pages indexed by start->end
757 * inclusive
758 */
759int btrfs_wait_on_page_writeback_range(struct address_space *mapping,
760 pgoff_t start, pgoff_t end)
761{
762 struct pagevec pvec;
763 int nr_pages;
764 int ret = 0;
765 pgoff_t index;
766
767 if (end < start)
768 return 0;
769
770 pagevec_init(&pvec, 0);
771 index = start;
772 while ((index <= end) &&
773 (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
774 PAGECACHE_TAG_WRITEBACK,
775 min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1)) != 0) {
776 unsigned i;
777
778 for (i = 0; i < nr_pages; i++) {
779 struct page *page = pvec.pages[i];
780
781 /* until radix tree lookup accepts end_index */
782 if (page->index > end)
783 continue;
784
785 wait_on_page_writeback(page);
786 if (PageError(page))
787 ret = -EIO;
788 }
789 pagevec_release(&pvec);
790 cond_resched();
791 }
792
793 /* Check for outstanding write errors */
794 if (test_and_clear_bit(AS_ENOSPC, &mapping->flags))
795 ret = -ENOSPC;
796 if (test_and_clear_bit(AS_EIO, &mapping->flags))
797 ret = -EIO;
798
799 return ret;
800}
801
802/* 717/*
803 * add a given inode to the list of inodes that must be fully on 718 * add a given inode to the list of inodes that must be fully on
804 * disk before a transaction commit finishes. 719 * disk before a transaction commit finishes.
diff --git a/fs/btrfs/ordered-data.h b/fs/btrfs/ordered-data.h
index 993a7ea45c70..f82e87488ca8 100644
--- a/fs/btrfs/ordered-data.h
+++ b/fs/btrfs/ordered-data.h
@@ -153,10 +153,6 @@ btrfs_lookup_first_ordered_extent(struct inode * inode, u64 file_offset);
153int btrfs_ordered_update_i_size(struct inode *inode, 153int btrfs_ordered_update_i_size(struct inode *inode,
154 struct btrfs_ordered_extent *ordered); 154 struct btrfs_ordered_extent *ordered);
155int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr, u32 *sum); 155int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr, u32 *sum);
156int btrfs_wait_on_page_writeback_range(struct address_space *mapping,
157 pgoff_t start, pgoff_t end);
158int btrfs_fdatawrite_range(struct address_space *mapping, loff_t start,
159 loff_t end, int sync_mode);
160int btrfs_wait_ordered_extents(struct btrfs_root *root, int nocow_only); 156int btrfs_wait_ordered_extents(struct btrfs_root *root, int nocow_only);
161int btrfs_run_ordered_operations(struct btrfs_root *root, int wait); 157int btrfs_run_ordered_operations(struct btrfs_root *root, int wait);
162int btrfs_add_ordered_operation(struct btrfs_trans_handle *trans, 158int btrfs_add_ordered_operation(struct btrfs_trans_handle *trans,
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 67035385444c..9de9b2236419 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -344,7 +344,9 @@ static int btrfs_fill_super(struct super_block *sb,
344 sb->s_export_op = &btrfs_export_ops; 344 sb->s_export_op = &btrfs_export_ops;
345 sb->s_xattr = btrfs_xattr_handlers; 345 sb->s_xattr = btrfs_xattr_handlers;
346 sb->s_time_gran = 1; 346 sb->s_time_gran = 1;
347#ifdef CONFIG_BTRFS_POSIX_ACL
347 sb->s_flags |= MS_POSIXACL; 348 sb->s_flags |= MS_POSIXACL;
349#endif
348 350
349 tree_root = open_ctree(sb, fs_devices, (char *)data); 351 tree_root = open_ctree(sb, fs_devices, (char *)data);
350 352
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 88f866f85e7a..0b8f36d4400a 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -186,6 +186,9 @@ static struct btrfs_trans_handle *start_transaction(struct btrfs_root *root,
186 h->alloc_exclude_start = 0; 186 h->alloc_exclude_start = 0;
187 h->delayed_ref_updates = 0; 187 h->delayed_ref_updates = 0;
188 188
189 if (!current->journal_info)
190 current->journal_info = h;
191
189 root->fs_info->running_transaction->use_count++; 192 root->fs_info->running_transaction->use_count++;
190 record_root_in_trans(h, root); 193 record_root_in_trans(h, root);
191 mutex_unlock(&root->fs_info->trans_mutex); 194 mutex_unlock(&root->fs_info->trans_mutex);
@@ -317,6 +320,9 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
317 wake_up(&cur_trans->writer_wait); 320 wake_up(&cur_trans->writer_wait);
318 put_transaction(cur_trans); 321 put_transaction(cur_trans);
319 mutex_unlock(&info->trans_mutex); 322 mutex_unlock(&info->trans_mutex);
323
324 if (current->journal_info == trans)
325 current->journal_info = NULL;
320 memset(trans, 0, sizeof(*trans)); 326 memset(trans, 0, sizeof(*trans));
321 kmem_cache_free(btrfs_trans_handle_cachep, trans); 327 kmem_cache_free(btrfs_trans_handle_cachep, trans);
322 328
@@ -743,6 +749,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
743 memcpy(&pending->root_key, &key, sizeof(key)); 749 memcpy(&pending->root_key, &key, sizeof(key));
744fail: 750fail:
745 kfree(new_root_item); 751 kfree(new_root_item);
752 btrfs_unreserve_metadata_space(root, 6);
746 return ret; 753 return ret;
747} 754}
748 755
@@ -1059,6 +1066,9 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
1059 1066
1060 mutex_unlock(&root->fs_info->trans_mutex); 1067 mutex_unlock(&root->fs_info->trans_mutex);
1061 1068
1069 if (current->journal_info == trans)
1070 current->journal_info = NULL;
1071
1062 kmem_cache_free(btrfs_trans_handle_cachep, trans); 1072 kmem_cache_free(btrfs_trans_handle_cachep, trans);
1063 return ret; 1073 return ret;
1064} 1074}
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 23e7d36ff325..7eda483d7b5a 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -446,8 +446,10 @@ static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
446 goto error; 446 goto error;
447 447
448 device->name = kstrdup(orig_dev->name, GFP_NOFS); 448 device->name = kstrdup(orig_dev->name, GFP_NOFS);
449 if (!device->name) 449 if (!device->name) {
450 kfree(device);
450 goto error; 451 goto error;
452 }
451 453
452 device->devid = orig_dev->devid; 454 device->devid = orig_dev->devid;
453 device->work.func = pending_bios_fn; 455 device->work.func = pending_bios_fn;
diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c
index a9d3bf4d2689..b0fc93f95fd0 100644
--- a/fs/btrfs/xattr.c
+++ b/fs/btrfs/xattr.c
@@ -260,7 +260,7 @@ err:
260 * attributes are handled directly. 260 * attributes are handled directly.
261 */ 261 */
262struct xattr_handler *btrfs_xattr_handlers[] = { 262struct xattr_handler *btrfs_xattr_handlers[] = {
263#ifdef CONFIG_FS_POSIX_ACL 263#ifdef CONFIG_BTRFS_POSIX_ACL
264 &btrfs_xattr_acl_access_handler, 264 &btrfs_xattr_acl_access_handler,
265 &btrfs_xattr_acl_default_handler, 265 &btrfs_xattr_acl_default_handler,
266#endif 266#endif
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index e227eea23f05..984ca0cb38c3 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -65,6 +65,12 @@ typedef __u32 ext4_lblk_t;
65/* data type for block group number */ 65/* data type for block group number */
66typedef unsigned int ext4_group_t; 66typedef unsigned int ext4_group_t;
67 67
68/*
69 * Flags used in mballoc's allocation_context flags field.
70 *
71 * Also used to show what's going on for debugging purposes when the
72 * flag field is exported via the traceport interface
73 */
68 74
69/* prefer goal again. length */ 75/* prefer goal again. length */
70#define EXT4_MB_HINT_MERGE 0x0001 76#define EXT4_MB_HINT_MERGE 0x0001
@@ -127,6 +133,16 @@ struct mpage_da_data {
127 int pages_written; 133 int pages_written;
128 int retval; 134 int retval;
129}; 135};
136#define DIO_AIO_UNWRITTEN 0x1
137typedef struct ext4_io_end {
138 struct list_head list; /* per-file finished AIO list */
139 struct inode *inode; /* file being written to */
140 unsigned int flag; /* unwritten or not */
141 int error; /* I/O error code */
142 ext4_lblk_t offset; /* offset in the file */
143 size_t size; /* size of the extent */
144 struct work_struct work; /* data work queue */
145} ext4_io_end_t;
130 146
131/* 147/*
132 * Special inodes numbers 148 * Special inodes numbers
@@ -347,7 +363,16 @@ struct ext4_new_group_data {
347 /* Call ext4_da_update_reserve_space() after successfully 363 /* Call ext4_da_update_reserve_space() after successfully
348 allocating the blocks */ 364 allocating the blocks */
349#define EXT4_GET_BLOCKS_UPDATE_RESERVE_SPACE 0x0008 365#define EXT4_GET_BLOCKS_UPDATE_RESERVE_SPACE 0x0008
350 366 /* caller is from the direct IO path, request to creation of an
367 unitialized extents if not allocated, split the uninitialized
368 extent if blocks has been preallocated already*/
369#define EXT4_GET_BLOCKS_DIO 0x0010
370#define EXT4_GET_BLOCKS_CONVERT 0x0020
371#define EXT4_GET_BLOCKS_DIO_CREATE_EXT (EXT4_GET_BLOCKS_DIO|\
372 EXT4_GET_BLOCKS_CREATE_UNINIT_EXT)
373 /* Convert extent to initialized after direct IO complete */
374#define EXT4_GET_BLOCKS_DIO_CONVERT_EXT (EXT4_GET_BLOCKS_CONVERT|\
375 EXT4_GET_BLOCKS_DIO_CREATE_EXT)
351 376
352/* 377/*
353 * ioctl commands 378 * ioctl commands
@@ -500,8 +525,8 @@ struct move_extent {
500static inline __le32 ext4_encode_extra_time(struct timespec *time) 525static inline __le32 ext4_encode_extra_time(struct timespec *time)
501{ 526{
502 return cpu_to_le32((sizeof(time->tv_sec) > 4 ? 527 return cpu_to_le32((sizeof(time->tv_sec) > 4 ?
503 time->tv_sec >> 32 : 0) | 528 (time->tv_sec >> 32) & EXT4_EPOCH_MASK : 0) |
504 ((time->tv_nsec << 2) & EXT4_NSEC_MASK)); 529 ((time->tv_nsec << EXT4_EPOCH_BITS) & EXT4_NSEC_MASK));
505} 530}
506 531
507static inline void ext4_decode_extra_time(struct timespec *time, __le32 extra) 532static inline void ext4_decode_extra_time(struct timespec *time, __le32 extra)
@@ -509,7 +534,7 @@ static inline void ext4_decode_extra_time(struct timespec *time, __le32 extra)
509 if (sizeof(time->tv_sec) > 4) 534 if (sizeof(time->tv_sec) > 4)
510 time->tv_sec |= (__u64)(le32_to_cpu(extra) & EXT4_EPOCH_MASK) 535 time->tv_sec |= (__u64)(le32_to_cpu(extra) & EXT4_EPOCH_MASK)
511 << 32; 536 << 32;
512 time->tv_nsec = (le32_to_cpu(extra) & EXT4_NSEC_MASK) >> 2; 537 time->tv_nsec = (le32_to_cpu(extra) & EXT4_NSEC_MASK) >> EXT4_EPOCH_BITS;
513} 538}
514 539
515#define EXT4_INODE_SET_XTIME(xtime, inode, raw_inode) \ 540#define EXT4_INODE_SET_XTIME(xtime, inode, raw_inode) \
@@ -672,6 +697,11 @@ struct ext4_inode_info {
672 __u16 i_extra_isize; 697 __u16 i_extra_isize;
673 698
674 spinlock_t i_block_reservation_lock; 699 spinlock_t i_block_reservation_lock;
700
701 /* completed async DIOs that might need unwritten extents handling */
702 struct list_head i_aio_dio_complete_list;
703 /* current io_end structure for async DIO write*/
704 ext4_io_end_t *cur_aio_dio;
675}; 705};
676 706
677/* 707/*
@@ -942,18 +972,11 @@ struct ext4_sb_info {
942 unsigned int s_mb_stats; 972 unsigned int s_mb_stats;
943 unsigned int s_mb_order2_reqs; 973 unsigned int s_mb_order2_reqs;
944 unsigned int s_mb_group_prealloc; 974 unsigned int s_mb_group_prealloc;
975 unsigned int s_max_writeback_mb_bump;
945 /* where last allocation was done - for stream allocation */ 976 /* where last allocation was done - for stream allocation */
946 unsigned long s_mb_last_group; 977 unsigned long s_mb_last_group;
947 unsigned long s_mb_last_start; 978 unsigned long s_mb_last_start;
948 979
949 /* history to debug policy */
950 struct ext4_mb_history *s_mb_history;
951 int s_mb_history_cur;
952 int s_mb_history_max;
953 int s_mb_history_num;
954 spinlock_t s_mb_history_lock;
955 int s_mb_history_filter;
956
957 /* stats for buddy allocator */ 980 /* stats for buddy allocator */
958 spinlock_t s_mb_pa_lock; 981 spinlock_t s_mb_pa_lock;
959 atomic_t s_bal_reqs; /* number of reqs with len > 1 */ 982 atomic_t s_bal_reqs; /* number of reqs with len > 1 */
@@ -980,6 +1003,9 @@ struct ext4_sb_info {
980 1003
981 unsigned int s_log_groups_per_flex; 1004 unsigned int s_log_groups_per_flex;
982 struct flex_groups *s_flex_groups; 1005 struct flex_groups *s_flex_groups;
1006
1007 /* workqueue for dio unwritten */
1008 struct workqueue_struct *dio_unwritten_wq;
983}; 1009};
984 1010
985static inline struct ext4_sb_info *EXT4_SB(struct super_block *sb) 1011static inline struct ext4_sb_info *EXT4_SB(struct super_block *sb)
@@ -1397,7 +1423,7 @@ extern int ext4_block_truncate_page(handle_t *handle,
1397 struct address_space *mapping, loff_t from); 1423 struct address_space *mapping, loff_t from);
1398extern int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf); 1424extern int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf);
1399extern qsize_t ext4_get_reserved_space(struct inode *inode); 1425extern qsize_t ext4_get_reserved_space(struct inode *inode);
1400 1426extern int flush_aio_dio_completed_IO(struct inode *inode);
1401/* ioctl.c */ 1427/* ioctl.c */
1402extern long ext4_ioctl(struct file *, unsigned int, unsigned long); 1428extern long ext4_ioctl(struct file *, unsigned int, unsigned long);
1403extern long ext4_compat_ioctl(struct file *, unsigned int, unsigned long); 1429extern long ext4_compat_ioctl(struct file *, unsigned int, unsigned long);
@@ -1699,6 +1725,8 @@ extern void ext4_ext_init(struct super_block *);
1699extern void ext4_ext_release(struct super_block *); 1725extern void ext4_ext_release(struct super_block *);
1700extern long ext4_fallocate(struct inode *inode, int mode, loff_t offset, 1726extern long ext4_fallocate(struct inode *inode, int mode, loff_t offset,
1701 loff_t len); 1727 loff_t len);
1728extern int ext4_convert_unwritten_extents(struct inode *inode, loff_t offset,
1729 loff_t len);
1702extern int ext4_get_blocks(handle_t *handle, struct inode *inode, 1730extern int ext4_get_blocks(handle_t *handle, struct inode *inode,
1703 sector_t block, unsigned int max_blocks, 1731 sector_t block, unsigned int max_blocks,
1704 struct buffer_head *bh, int flags); 1732 struct buffer_head *bh, int flags);
diff --git a/fs/ext4/ext4_extents.h b/fs/ext4/ext4_extents.h
index 61652f1d15e6..2ca686454e87 100644
--- a/fs/ext4/ext4_extents.h
+++ b/fs/ext4/ext4_extents.h
@@ -220,6 +220,11 @@ static inline int ext4_ext_get_actual_len(struct ext4_extent *ext)
220 (le16_to_cpu(ext->ee_len) - EXT_INIT_MAX_LEN)); 220 (le16_to_cpu(ext->ee_len) - EXT_INIT_MAX_LEN));
221} 221}
222 222
223static inline void ext4_ext_mark_initialized(struct ext4_extent *ext)
224{
225 ext->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ext));
226}
227
223extern int ext4_ext_calc_metadata_amount(struct inode *inode, int blocks); 228extern int ext4_ext_calc_metadata_amount(struct inode *inode, int blocks);
224extern ext4_fsblk_t ext_pblock(struct ext4_extent *ex); 229extern ext4_fsblk_t ext_pblock(struct ext4_extent *ex);
225extern ext4_fsblk_t idx_pblock(struct ext4_extent_idx *); 230extern ext4_fsblk_t idx_pblock(struct ext4_extent_idx *);
@@ -235,7 +240,7 @@ extern int ext4_ext_try_to_merge(struct inode *inode,
235 struct ext4_ext_path *path, 240 struct ext4_ext_path *path,
236 struct ext4_extent *); 241 struct ext4_extent *);
237extern unsigned int ext4_ext_check_overlap(struct inode *, struct ext4_extent *, struct ext4_ext_path *); 242extern unsigned int ext4_ext_check_overlap(struct inode *, struct ext4_extent *, struct ext4_ext_path *);
238extern int ext4_ext_insert_extent(handle_t *, struct inode *, struct ext4_ext_path *, struct ext4_extent *); 243extern int ext4_ext_insert_extent(handle_t *, struct inode *, struct ext4_ext_path *, struct ext4_extent *, int);
239extern int ext4_ext_walk_space(struct inode *, ext4_lblk_t, ext4_lblk_t, 244extern int ext4_ext_walk_space(struct inode *, ext4_lblk_t, ext4_lblk_t,
240 ext_prepare_callback, void *); 245 ext_prepare_callback, void *);
241extern struct ext4_ext_path *ext4_ext_find_extent(struct inode *, ext4_lblk_t, 246extern struct ext4_ext_path *ext4_ext_find_extent(struct inode *, ext4_lblk_t,
diff --git a/fs/ext4/ext4_jbd2.h b/fs/ext4/ext4_jbd2.h
index 139fb8cb87e4..a2865980342f 100644
--- a/fs/ext4/ext4_jbd2.h
+++ b/fs/ext4/ext4_jbd2.h
@@ -161,11 +161,13 @@ int __ext4_handle_dirty_metadata(const char *where, handle_t *handle,
161handle_t *ext4_journal_start_sb(struct super_block *sb, int nblocks); 161handle_t *ext4_journal_start_sb(struct super_block *sb, int nblocks);
162int __ext4_journal_stop(const char *where, handle_t *handle); 162int __ext4_journal_stop(const char *where, handle_t *handle);
163 163
164#define EXT4_NOJOURNAL_HANDLE ((handle_t *) 0x1) 164#define EXT4_NOJOURNAL_MAX_REF_COUNT ((unsigned long) 4096)
165 165
166/* Note: Do not use this for NULL handles. This is only to determine if
167 * a properly allocated handle is using a journal or not. */
166static inline int ext4_handle_valid(handle_t *handle) 168static inline int ext4_handle_valid(handle_t *handle)
167{ 169{
168 if (handle == EXT4_NOJOURNAL_HANDLE) 170 if ((unsigned long)handle < EXT4_NOJOURNAL_MAX_REF_COUNT)
169 return 0; 171 return 0;
170 return 1; 172 return 1;
171} 173}
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 7a3832577923..10539e364283 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -723,7 +723,7 @@ err:
723 * insert new index [@logical;@ptr] into the block at @curp; 723 * insert new index [@logical;@ptr] into the block at @curp;
724 * check where to insert: before @curp or after @curp 724 * check where to insert: before @curp or after @curp
725 */ 725 */
726static int ext4_ext_insert_index(handle_t *handle, struct inode *inode, 726int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
727 struct ext4_ext_path *curp, 727 struct ext4_ext_path *curp,
728 int logical, ext4_fsblk_t ptr) 728 int logical, ext4_fsblk_t ptr)
729{ 729{
@@ -1586,7 +1586,7 @@ out:
1586 */ 1586 */
1587int ext4_ext_insert_extent(handle_t *handle, struct inode *inode, 1587int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
1588 struct ext4_ext_path *path, 1588 struct ext4_ext_path *path,
1589 struct ext4_extent *newext) 1589 struct ext4_extent *newext, int flag)
1590{ 1590{
1591 struct ext4_extent_header *eh; 1591 struct ext4_extent_header *eh;
1592 struct ext4_extent *ex, *fex; 1592 struct ext4_extent *ex, *fex;
@@ -1602,7 +1602,8 @@ int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
1602 BUG_ON(path[depth].p_hdr == NULL); 1602 BUG_ON(path[depth].p_hdr == NULL);
1603 1603
1604 /* try to insert block into found extent and return */ 1604 /* try to insert block into found extent and return */
1605 if (ex && ext4_can_extents_be_merged(inode, ex, newext)) { 1605 if (ex && (flag != EXT4_GET_BLOCKS_DIO_CREATE_EXT)
1606 && ext4_can_extents_be_merged(inode, ex, newext)) {
1606 ext_debug("append [%d]%d block to %d:[%d]%d (from %llu)\n", 1607 ext_debug("append [%d]%d block to %d:[%d]%d (from %llu)\n",
1607 ext4_ext_is_uninitialized(newext), 1608 ext4_ext_is_uninitialized(newext),
1608 ext4_ext_get_actual_len(newext), 1609 ext4_ext_get_actual_len(newext),
@@ -1722,7 +1723,8 @@ has_space:
1722 1723
1723merge: 1724merge:
1724 /* try to merge extents to the right */ 1725 /* try to merge extents to the right */
1725 ext4_ext_try_to_merge(inode, path, nearex); 1726 if (flag != EXT4_GET_BLOCKS_DIO_CREATE_EXT)
1727 ext4_ext_try_to_merge(inode, path, nearex);
1726 1728
1727 /* try to merge extents to the left */ 1729 /* try to merge extents to the left */
1728 1730
@@ -2378,6 +2380,7 @@ void ext4_ext_init(struct super_block *sb)
2378 */ 2380 */
2379 2381
2380 if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS)) { 2382 if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS)) {
2383#if defined(AGGRESSIVE_TEST) || defined(CHECK_BINSEARCH) || defined(EXTENTS_STATS)
2381 printk(KERN_INFO "EXT4-fs: file extents enabled"); 2384 printk(KERN_INFO "EXT4-fs: file extents enabled");
2382#ifdef AGGRESSIVE_TEST 2385#ifdef AGGRESSIVE_TEST
2383 printk(", aggressive tests"); 2386 printk(", aggressive tests");
@@ -2389,6 +2392,7 @@ void ext4_ext_init(struct super_block *sb)
2389 printk(", stats"); 2392 printk(", stats");
2390#endif 2393#endif
2391 printk("\n"); 2394 printk("\n");
2395#endif
2392#ifdef EXTENTS_STATS 2396#ifdef EXTENTS_STATS
2393 spin_lock_init(&EXT4_SB(sb)->s_ext_stats_lock); 2397 spin_lock_init(&EXT4_SB(sb)->s_ext_stats_lock);
2394 EXT4_SB(sb)->s_ext_min = 1 << 30; 2398 EXT4_SB(sb)->s_ext_min = 1 << 30;
@@ -2490,7 +2494,6 @@ static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex)
2490} 2494}
2491 2495
2492#define EXT4_EXT_ZERO_LEN 7 2496#define EXT4_EXT_ZERO_LEN 7
2493
2494/* 2497/*
2495 * This function is called by ext4_ext_get_blocks() if someone tries to write 2498 * This function is called by ext4_ext_get_blocks() if someone tries to write
2496 * to an uninitialized extent. It may result in splitting the uninitialized 2499 * to an uninitialized extent. It may result in splitting the uninitialized
@@ -2583,7 +2586,8 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
2583 ex3->ee_block = cpu_to_le32(iblock); 2586 ex3->ee_block = cpu_to_le32(iblock);
2584 ext4_ext_store_pblock(ex3, newblock); 2587 ext4_ext_store_pblock(ex3, newblock);
2585 ex3->ee_len = cpu_to_le16(allocated); 2588 ex3->ee_len = cpu_to_le16(allocated);
2586 err = ext4_ext_insert_extent(handle, inode, path, ex3); 2589 err = ext4_ext_insert_extent(handle, inode, path,
2590 ex3, 0);
2587 if (err == -ENOSPC) { 2591 if (err == -ENOSPC) {
2588 err = ext4_ext_zeroout(inode, &orig_ex); 2592 err = ext4_ext_zeroout(inode, &orig_ex);
2589 if (err) 2593 if (err)
@@ -2639,7 +2643,7 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
2639 ext4_ext_store_pblock(ex3, newblock + max_blocks); 2643 ext4_ext_store_pblock(ex3, newblock + max_blocks);
2640 ex3->ee_len = cpu_to_le16(allocated - max_blocks); 2644 ex3->ee_len = cpu_to_le16(allocated - max_blocks);
2641 ext4_ext_mark_uninitialized(ex3); 2645 ext4_ext_mark_uninitialized(ex3);
2642 err = ext4_ext_insert_extent(handle, inode, path, ex3); 2646 err = ext4_ext_insert_extent(handle, inode, path, ex3, 0);
2643 if (err == -ENOSPC) { 2647 if (err == -ENOSPC) {
2644 err = ext4_ext_zeroout(inode, &orig_ex); 2648 err = ext4_ext_zeroout(inode, &orig_ex);
2645 if (err) 2649 if (err)
@@ -2757,7 +2761,7 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
2757 err = ext4_ext_dirty(handle, inode, path + depth); 2761 err = ext4_ext_dirty(handle, inode, path + depth);
2758 goto out; 2762 goto out;
2759insert: 2763insert:
2760 err = ext4_ext_insert_extent(handle, inode, path, &newex); 2764 err = ext4_ext_insert_extent(handle, inode, path, &newex, 0);
2761 if (err == -ENOSPC) { 2765 if (err == -ENOSPC) {
2762 err = ext4_ext_zeroout(inode, &orig_ex); 2766 err = ext4_ext_zeroout(inode, &orig_ex);
2763 if (err) 2767 if (err)
@@ -2785,6 +2789,324 @@ fix_extent_len:
2785} 2789}
2786 2790
2787/* 2791/*
2792 * This function is called by ext4_ext_get_blocks() from
2793 * ext4_get_blocks_dio_write() when DIO to write
2794 * to an uninitialized extent.
2795 *
2796 * Writing to an uninitized extent may result in splitting the uninitialized
2797 * extent into multiple /intialized unintialized extents (up to three)
2798 * There are three possibilities:
2799 * a> There is no split required: Entire extent should be uninitialized
2800 * b> Splits in two extents: Write is happening at either end of the extent
2801 * c> Splits in three extents: Somone is writing in middle of the extent
2802 *
2803 * One of more index blocks maybe needed if the extent tree grow after
2804 * the unintialized extent split. To prevent ENOSPC occur at the IO
2805 * complete, we need to split the uninitialized extent before DIO submit
2806 * the IO. The uninitilized extent called at this time will be split
2807 * into three uninitialized extent(at most). After IO complete, the part
2808 * being filled will be convert to initialized by the end_io callback function
2809 * via ext4_convert_unwritten_extents().
2810 */
2811static int ext4_split_unwritten_extents(handle_t *handle,
2812 struct inode *inode,
2813 struct ext4_ext_path *path,
2814 ext4_lblk_t iblock,
2815 unsigned int max_blocks,
2816 int flags)
2817{
2818 struct ext4_extent *ex, newex, orig_ex;
2819 struct ext4_extent *ex1 = NULL;
2820 struct ext4_extent *ex2 = NULL;
2821 struct ext4_extent *ex3 = NULL;
2822 struct ext4_extent_header *eh;
2823 ext4_lblk_t ee_block;
2824 unsigned int allocated, ee_len, depth;
2825 ext4_fsblk_t newblock;
2826 int err = 0;
2827 int ret = 0;
2828
2829 ext_debug("ext4_split_unwritten_extents: inode %lu,"
2830 "iblock %llu, max_blocks %u\n", inode->i_ino,
2831 (unsigned long long)iblock, max_blocks);
2832 depth = ext_depth(inode);
2833 eh = path[depth].p_hdr;
2834 ex = path[depth].p_ext;
2835 ee_block = le32_to_cpu(ex->ee_block);
2836 ee_len = ext4_ext_get_actual_len(ex);
2837 allocated = ee_len - (iblock - ee_block);
2838 newblock = iblock - ee_block + ext_pblock(ex);
2839 ex2 = ex;
2840 orig_ex.ee_block = ex->ee_block;
2841 orig_ex.ee_len = cpu_to_le16(ee_len);
2842 ext4_ext_store_pblock(&orig_ex, ext_pblock(ex));
2843
2844 /*
2845 * if the entire unintialized extent length less than
2846 * the size of extent to write, there is no need to split
2847 * uninitialized extent
2848 */
2849 if (allocated <= max_blocks)
2850 return ret;
2851
2852 err = ext4_ext_get_access(handle, inode, path + depth);
2853 if (err)
2854 goto out;
2855 /* ex1: ee_block to iblock - 1 : uninitialized */
2856 if (iblock > ee_block) {
2857 ex1 = ex;
2858 ex1->ee_len = cpu_to_le16(iblock - ee_block);
2859 ext4_ext_mark_uninitialized(ex1);
2860 ex2 = &newex;
2861 }
2862 /*
2863 * for sanity, update the length of the ex2 extent before
2864 * we insert ex3, if ex1 is NULL. This is to avoid temporary
2865 * overlap of blocks.
2866 */
2867 if (!ex1 && allocated > max_blocks)
2868 ex2->ee_len = cpu_to_le16(max_blocks);
2869 /* ex3: to ee_block + ee_len : uninitialised */
2870 if (allocated > max_blocks) {
2871 unsigned int newdepth;
2872 ex3 = &newex;
2873 ex3->ee_block = cpu_to_le32(iblock + max_blocks);
2874 ext4_ext_store_pblock(ex3, newblock + max_blocks);
2875 ex3->ee_len = cpu_to_le16(allocated - max_blocks);
2876 ext4_ext_mark_uninitialized(ex3);
2877 err = ext4_ext_insert_extent(handle, inode, path, ex3, flags);
2878 if (err == -ENOSPC) {
2879 err = ext4_ext_zeroout(inode, &orig_ex);
2880 if (err)
2881 goto fix_extent_len;
2882 /* update the extent length and mark as initialized */
2883 ex->ee_block = orig_ex.ee_block;
2884 ex->ee_len = orig_ex.ee_len;
2885 ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
2886 ext4_ext_dirty(handle, inode, path + depth);
2887 /* zeroed the full extent */
2888 /* blocks available from iblock */
2889 return allocated;
2890
2891 } else if (err)
2892 goto fix_extent_len;
2893 /*
2894 * The depth, and hence eh & ex might change
2895 * as part of the insert above.
2896 */
2897 newdepth = ext_depth(inode);
2898 /*
2899 * update the extent length after successful insert of the
2900 * split extent
2901 */
2902 orig_ex.ee_len = cpu_to_le16(ee_len -
2903 ext4_ext_get_actual_len(ex3));
2904 depth = newdepth;
2905 ext4_ext_drop_refs(path);
2906 path = ext4_ext_find_extent(inode, iblock, path);
2907 if (IS_ERR(path)) {
2908 err = PTR_ERR(path);
2909 goto out;
2910 }
2911 eh = path[depth].p_hdr;
2912 ex = path[depth].p_ext;
2913 if (ex2 != &newex)
2914 ex2 = ex;
2915
2916 err = ext4_ext_get_access(handle, inode, path + depth);
2917 if (err)
2918 goto out;
2919
2920 allocated = max_blocks;
2921 }
2922 /*
2923 * If there was a change of depth as part of the
2924 * insertion of ex3 above, we need to update the length
2925 * of the ex1 extent again here
2926 */
2927 if (ex1 && ex1 != ex) {
2928 ex1 = ex;
2929 ex1->ee_len = cpu_to_le16(iblock - ee_block);
2930 ext4_ext_mark_uninitialized(ex1);
2931 ex2 = &newex;
2932 }
2933 /*
2934 * ex2: iblock to iblock + maxblocks-1 : to be direct IO written,
2935 * uninitialised still.
2936 */
2937 ex2->ee_block = cpu_to_le32(iblock);
2938 ext4_ext_store_pblock(ex2, newblock);
2939 ex2->ee_len = cpu_to_le16(allocated);
2940 ext4_ext_mark_uninitialized(ex2);
2941 if (ex2 != ex)
2942 goto insert;
2943 /* Mark modified extent as dirty */
2944 err = ext4_ext_dirty(handle, inode, path + depth);
2945 ext_debug("out here\n");
2946 goto out;
2947insert:
2948 err = ext4_ext_insert_extent(handle, inode, path, &newex, flags);
2949 if (err == -ENOSPC) {
2950 err = ext4_ext_zeroout(inode, &orig_ex);
2951 if (err)
2952 goto fix_extent_len;
2953 /* update the extent length and mark as initialized */
2954 ex->ee_block = orig_ex.ee_block;
2955 ex->ee_len = orig_ex.ee_len;
2956 ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
2957 ext4_ext_dirty(handle, inode, path + depth);
2958 /* zero out the first half */
2959 return allocated;
2960 } else if (err)
2961 goto fix_extent_len;
2962out:
2963 ext4_ext_show_leaf(inode, path);
2964 return err ? err : allocated;
2965
2966fix_extent_len:
2967 ex->ee_block = orig_ex.ee_block;
2968 ex->ee_len = orig_ex.ee_len;
2969 ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
2970 ext4_ext_mark_uninitialized(ex);
2971 ext4_ext_dirty(handle, inode, path + depth);
2972 return err;
2973}
2974static int ext4_convert_unwritten_extents_dio(handle_t *handle,
2975 struct inode *inode,
2976 struct ext4_ext_path *path)
2977{
2978 struct ext4_extent *ex;
2979 struct ext4_extent_header *eh;
2980 int depth;
2981 int err = 0;
2982 int ret = 0;
2983
2984 depth = ext_depth(inode);
2985 eh = path[depth].p_hdr;
2986 ex = path[depth].p_ext;
2987
2988 err = ext4_ext_get_access(handle, inode, path + depth);
2989 if (err)
2990 goto out;
2991 /* first mark the extent as initialized */
2992 ext4_ext_mark_initialized(ex);
2993
2994 /*
2995 * We have to see if it can be merged with the extent
2996 * on the left.
2997 */
2998 if (ex > EXT_FIRST_EXTENT(eh)) {
2999 /*
3000 * To merge left, pass "ex - 1" to try_to_merge(),
3001 * since it merges towards right _only_.
3002 */
3003 ret = ext4_ext_try_to_merge(inode, path, ex - 1);
3004 if (ret) {
3005 err = ext4_ext_correct_indexes(handle, inode, path);
3006 if (err)
3007 goto out;
3008 depth = ext_depth(inode);
3009 ex--;
3010 }
3011 }
3012 /*
3013 * Try to Merge towards right.
3014 */
3015 ret = ext4_ext_try_to_merge(inode, path, ex);
3016 if (ret) {
3017 err = ext4_ext_correct_indexes(handle, inode, path);
3018 if (err)
3019 goto out;
3020 depth = ext_depth(inode);
3021 }
3022 /* Mark modified extent as dirty */
3023 err = ext4_ext_dirty(handle, inode, path + depth);
3024out:
3025 ext4_ext_show_leaf(inode, path);
3026 return err;
3027}
3028
3029static int
3030ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode,
3031 ext4_lblk_t iblock, unsigned int max_blocks,
3032 struct ext4_ext_path *path, int flags,
3033 unsigned int allocated, struct buffer_head *bh_result,
3034 ext4_fsblk_t newblock)
3035{
3036 int ret = 0;
3037 int err = 0;
3038 ext4_io_end_t *io = EXT4_I(inode)->cur_aio_dio;
3039
3040 ext_debug("ext4_ext_handle_uninitialized_extents: inode %lu, logical"
3041 "block %llu, max_blocks %u, flags %d, allocated %u",
3042 inode->i_ino, (unsigned long long)iblock, max_blocks,
3043 flags, allocated);
3044 ext4_ext_show_leaf(inode, path);
3045
3046 /* DIO get_block() before submit the IO, split the extent */
3047 if (flags == EXT4_GET_BLOCKS_DIO_CREATE_EXT) {
3048 ret = ext4_split_unwritten_extents(handle,
3049 inode, path, iblock,
3050 max_blocks, flags);
3051 /* flag the io_end struct that we need convert when IO done */
3052 if (io)
3053 io->flag = DIO_AIO_UNWRITTEN;
3054 goto out;
3055 }
3056 /* DIO end_io complete, convert the filled extent to written */
3057 if (flags == EXT4_GET_BLOCKS_DIO_CONVERT_EXT) {
3058 ret = ext4_convert_unwritten_extents_dio(handle, inode,
3059 path);
3060 goto out2;
3061 }
3062 /* buffered IO case */
3063 /*
3064 * repeat fallocate creation request
3065 * we already have an unwritten extent
3066 */
3067 if (flags & EXT4_GET_BLOCKS_UNINIT_EXT)
3068 goto map_out;
3069
3070 /* buffered READ or buffered write_begin() lookup */
3071 if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
3072 /*
3073 * We have blocks reserved already. We
3074 * return allocated blocks so that delalloc
3075 * won't do block reservation for us. But
3076 * the buffer head will be unmapped so that
3077 * a read from the block returns 0s.
3078 */
3079 set_buffer_unwritten(bh_result);
3080 goto out1;
3081 }
3082
3083 /* buffered write, writepage time, convert*/
3084 ret = ext4_ext_convert_to_initialized(handle, inode,
3085 path, iblock,
3086 max_blocks);
3087out:
3088 if (ret <= 0) {
3089 err = ret;
3090 goto out2;
3091 } else
3092 allocated = ret;
3093 set_buffer_new(bh_result);
3094map_out:
3095 set_buffer_mapped(bh_result);
3096out1:
3097 if (allocated > max_blocks)
3098 allocated = max_blocks;
3099 ext4_ext_show_leaf(inode, path);
3100 bh_result->b_bdev = inode->i_sb->s_bdev;
3101 bh_result->b_blocknr = newblock;
3102out2:
3103 if (path) {
3104 ext4_ext_drop_refs(path);
3105 kfree(path);
3106 }
3107 return err ? err : allocated;
3108}
3109/*
2788 * Block allocation/map/preallocation routine for extents based files 3110 * Block allocation/map/preallocation routine for extents based files
2789 * 3111 *
2790 * 3112 *
@@ -2814,6 +3136,7 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
2814 int err = 0, depth, ret, cache_type; 3136 int err = 0, depth, ret, cache_type;
2815 unsigned int allocated = 0; 3137 unsigned int allocated = 0;
2816 struct ext4_allocation_request ar; 3138 struct ext4_allocation_request ar;
3139 ext4_io_end_t *io = EXT4_I(inode)->cur_aio_dio;
2817 3140
2818 __clear_bit(BH_New, &bh_result->b_state); 3141 __clear_bit(BH_New, &bh_result->b_state);
2819 ext_debug("blocks %u/%u requested for inode %lu\n", 3142 ext_debug("blocks %u/%u requested for inode %lu\n",
@@ -2889,33 +3212,10 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
2889 EXT4_EXT_CACHE_EXTENT); 3212 EXT4_EXT_CACHE_EXTENT);
2890 goto out; 3213 goto out;
2891 } 3214 }
2892 if (flags & EXT4_GET_BLOCKS_UNINIT_EXT) 3215 ret = ext4_ext_handle_uninitialized_extents(handle,
2893 goto out; 3216 inode, iblock, max_blocks, path,
2894 if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) { 3217 flags, allocated, bh_result, newblock);
2895 if (allocated > max_blocks) 3218 return ret;
2896 allocated = max_blocks;
2897 /*
2898 * We have blocks reserved already. We
2899 * return allocated blocks so that delalloc
2900 * won't do block reservation for us. But
2901 * the buffer head will be unmapped so that
2902 * a read from the block returns 0s.
2903 */
2904 set_buffer_unwritten(bh_result);
2905 bh_result->b_bdev = inode->i_sb->s_bdev;
2906 bh_result->b_blocknr = newblock;
2907 goto out2;
2908 }
2909
2910 ret = ext4_ext_convert_to_initialized(handle, inode,
2911 path, iblock,
2912 max_blocks);
2913 if (ret <= 0) {
2914 err = ret;
2915 goto out2;
2916 } else
2917 allocated = ret;
2918 goto outnew;
2919 } 3219 }
2920 } 3220 }
2921 3221
@@ -2986,9 +3286,21 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
2986 /* try to insert new extent into found leaf and return */ 3286 /* try to insert new extent into found leaf and return */
2987 ext4_ext_store_pblock(&newex, newblock); 3287 ext4_ext_store_pblock(&newex, newblock);
2988 newex.ee_len = cpu_to_le16(ar.len); 3288 newex.ee_len = cpu_to_le16(ar.len);
2989 if (flags & EXT4_GET_BLOCKS_UNINIT_EXT) /* Mark uninitialized */ 3289 /* Mark uninitialized */
3290 if (flags & EXT4_GET_BLOCKS_UNINIT_EXT){
2990 ext4_ext_mark_uninitialized(&newex); 3291 ext4_ext_mark_uninitialized(&newex);
2991 err = ext4_ext_insert_extent(handle, inode, path, &newex); 3292 /*
3293 * io_end structure was created for every async
3294 * direct IO write to the middle of the file.
3295 * To avoid unecessary convertion for every aio dio rewrite
3296 * to the mid of file, here we flag the IO that is really
3297 * need the convertion.
3298 *
3299 */
3300 if (io && flags == EXT4_GET_BLOCKS_DIO_CREATE_EXT)
3301 io->flag = DIO_AIO_UNWRITTEN;
3302 }
3303 err = ext4_ext_insert_extent(handle, inode, path, &newex, flags);
2992 if (err) { 3304 if (err) {
2993 /* free data blocks we just allocated */ 3305 /* free data blocks we just allocated */
2994 /* not a good idea to call discard here directly, 3306 /* not a good idea to call discard here directly,
@@ -3002,7 +3314,6 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
3002 /* previous routine could use block we allocated */ 3314 /* previous routine could use block we allocated */
3003 newblock = ext_pblock(&newex); 3315 newblock = ext_pblock(&newex);
3004 allocated = ext4_ext_get_actual_len(&newex); 3316 allocated = ext4_ext_get_actual_len(&newex);
3005outnew:
3006 set_buffer_new(bh_result); 3317 set_buffer_new(bh_result);
3007 3318
3008 /* Cache only when it is _not_ an uninitialized extent */ 3319 /* Cache only when it is _not_ an uninitialized extent */
@@ -3201,6 +3512,63 @@ retry:
3201} 3512}
3202 3513
3203/* 3514/*
3515 * This function convert a range of blocks to written extents
3516 * The caller of this function will pass the start offset and the size.
3517 * all unwritten extents within this range will be converted to
3518 * written extents.
3519 *
3520 * This function is called from the direct IO end io call back
3521 * function, to convert the fallocated extents after IO is completed.
3522 */
3523int ext4_convert_unwritten_extents(struct inode *inode, loff_t offset,
3524 loff_t len)
3525{
3526 handle_t *handle;
3527 ext4_lblk_t block;
3528 unsigned int max_blocks;
3529 int ret = 0;
3530 int ret2 = 0;
3531 struct buffer_head map_bh;
3532 unsigned int credits, blkbits = inode->i_blkbits;
3533
3534 block = offset >> blkbits;
3535 /*
3536 * We can't just convert len to max_blocks because
3537 * If blocksize = 4096 offset = 3072 and len = 2048
3538 */
3539 max_blocks = (EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits)
3540 - block;
3541 /*
3542 * credits to insert 1 extent into extent tree
3543 */
3544 credits = ext4_chunk_trans_blocks(inode, max_blocks);
3545 while (ret >= 0 && ret < max_blocks) {
3546 block = block + ret;
3547 max_blocks = max_blocks - ret;
3548 handle = ext4_journal_start(inode, credits);
3549 if (IS_ERR(handle)) {
3550 ret = PTR_ERR(handle);
3551 break;
3552 }
3553 map_bh.b_state = 0;
3554 ret = ext4_get_blocks(handle, inode, block,
3555 max_blocks, &map_bh,
3556 EXT4_GET_BLOCKS_DIO_CONVERT_EXT);
3557 if (ret <= 0) {
3558 WARN_ON(ret <= 0);
3559 printk(KERN_ERR "%s: ext4_ext_get_blocks "
3560 "returned error inode#%lu, block=%u, "
3561 "max_blocks=%u", __func__,
3562 inode->i_ino, block, max_blocks);
3563 }
3564 ext4_mark_inode_dirty(handle, inode);
3565 ret2 = ext4_journal_stop(handle);
3566 if (ret <= 0 || ret2 )
3567 break;
3568 }
3569 return ret > 0 ? ret2 : ret;
3570}
3571/*
3204 * Callback function called for each extent to gather FIEMAP information. 3572 * Callback function called for each extent to gather FIEMAP information.
3205 */ 3573 */
3206static int ext4_ext_fiemap_cb(struct inode *inode, struct ext4_ext_path *path, 3574static int ext4_ext_fiemap_cb(struct inode *inode, struct ext4_ext_path *path,
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index 5ca3eca70a1e..9630583cef28 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -81,7 +81,7 @@ ext4_file_write(struct kiocb *iocb, const struct iovec *iov,
81 return generic_file_aio_write(iocb, iov, nr_segs, pos); 81 return generic_file_aio_write(iocb, iov, nr_segs, pos);
82} 82}
83 83
84static struct vm_operations_struct ext4_file_vm_ops = { 84static const struct vm_operations_struct ext4_file_vm_ops = {
85 .fault = filemap_fault, 85 .fault = filemap_fault,
86 .page_mkwrite = ext4_page_mkwrite, 86 .page_mkwrite = ext4_page_mkwrite,
87}; 87};
diff --git a/fs/ext4/fsync.c b/fs/ext4/fsync.c
index 07475740b512..2b1531266ee2 100644
--- a/fs/ext4/fsync.c
+++ b/fs/ext4/fsync.c
@@ -44,6 +44,8 @@
44 * 44 *
45 * What we do is just kick off a commit and wait on it. This will snapshot the 45 * What we do is just kick off a commit and wait on it. This will snapshot the
46 * inode to disk. 46 * inode to disk.
47 *
48 * i_mutex lock is held when entering and exiting this function
47 */ 49 */
48 50
49int ext4_sync_file(struct file *file, struct dentry *dentry, int datasync) 51int ext4_sync_file(struct file *file, struct dentry *dentry, int datasync)
@@ -56,6 +58,9 @@ int ext4_sync_file(struct file *file, struct dentry *dentry, int datasync)
56 58
57 trace_ext4_sync_file(file, dentry, datasync); 59 trace_ext4_sync_file(file, dentry, datasync);
58 60
61 ret = flush_aio_dio_completed_IO(inode);
62 if (ret < 0)
63 goto out;
59 /* 64 /*
60 * data=writeback: 65 * data=writeback:
61 * The caller's filemap_fdatawrite()/wait will sync the data. 66 * The caller's filemap_fdatawrite()/wait will sync the data.
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 064746fad581..ec367bce7215 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -37,6 +37,7 @@
37#include <linux/namei.h> 37#include <linux/namei.h>
38#include <linux/uio.h> 38#include <linux/uio.h>
39#include <linux/bio.h> 39#include <linux/bio.h>
40#include <linux/workqueue.h>
40 41
41#include "ext4_jbd2.h" 42#include "ext4_jbd2.h"
42#include "xattr.h" 43#include "xattr.h"
@@ -1145,6 +1146,64 @@ static int check_block_validity(struct inode *inode, const char *msg,
1145} 1146}
1146 1147
1147/* 1148/*
1149 * Return the number of dirty pages in the given inode starting at
1150 * page frame idx.
1151 */
1152static pgoff_t ext4_num_dirty_pages(struct inode *inode, pgoff_t idx,
1153 unsigned int max_pages)
1154{
1155 struct address_space *mapping = inode->i_mapping;
1156 pgoff_t index;
1157 struct pagevec pvec;
1158 pgoff_t num = 0;
1159 int i, nr_pages, done = 0;
1160
1161 if (max_pages == 0)
1162 return 0;
1163 pagevec_init(&pvec, 0);
1164 while (!done) {
1165 index = idx;
1166 nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
1167 PAGECACHE_TAG_DIRTY,
1168 (pgoff_t)PAGEVEC_SIZE);
1169 if (nr_pages == 0)
1170 break;
1171 for (i = 0; i < nr_pages; i++) {
1172 struct page *page = pvec.pages[i];
1173 struct buffer_head *bh, *head;
1174
1175 lock_page(page);
1176 if (unlikely(page->mapping != mapping) ||
1177 !PageDirty(page) ||
1178 PageWriteback(page) ||
1179 page->index != idx) {
1180 done = 1;
1181 unlock_page(page);
1182 break;
1183 }
1184 head = page_buffers(page);
1185 bh = head;
1186 do {
1187 if (!buffer_delay(bh) &&
1188 !buffer_unwritten(bh)) {
1189 done = 1;
1190 break;
1191 }
1192 } while ((bh = bh->b_this_page) != head);
1193 unlock_page(page);
1194 if (done)
1195 break;
1196 idx++;
1197 num++;
1198 if (num >= max_pages)
1199 break;
1200 }
1201 pagevec_release(&pvec);
1202 }
1203 return num;
1204}
1205
1206/*
1148 * The ext4_get_blocks() function tries to look up the requested blocks, 1207 * The ext4_get_blocks() function tries to look up the requested blocks,
1149 * and returns if the blocks are already mapped. 1208 * and returns if the blocks are already mapped.
1150 * 1209 *
@@ -1175,6 +1234,9 @@ int ext4_get_blocks(handle_t *handle, struct inode *inode, sector_t block,
1175 clear_buffer_mapped(bh); 1234 clear_buffer_mapped(bh);
1176 clear_buffer_unwritten(bh); 1235 clear_buffer_unwritten(bh);
1177 1236
1237 ext_debug("ext4_get_blocks(): inode %lu, flag %d, max_blocks %u,"
1238 "logical block %lu\n", inode->i_ino, flags, max_blocks,
1239 (unsigned long)block);
1178 /* 1240 /*
1179 * Try to see if we can get the block without requesting a new 1241 * Try to see if we can get the block without requesting a new
1180 * file system block. 1242 * file system block.
@@ -1796,11 +1858,11 @@ repeat:
1796 1858
1797 if (ext4_claim_free_blocks(sbi, total)) { 1859 if (ext4_claim_free_blocks(sbi, total)) {
1798 spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); 1860 spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
1861 vfs_dq_release_reservation_block(inode, total);
1799 if (ext4_should_retry_alloc(inode->i_sb, &retries)) { 1862 if (ext4_should_retry_alloc(inode->i_sb, &retries)) {
1800 yield(); 1863 yield();
1801 goto repeat; 1864 goto repeat;
1802 } 1865 }
1803 vfs_dq_release_reservation_block(inode, total);
1804 return -ENOSPC; 1866 return -ENOSPC;
1805 } 1867 }
1806 EXT4_I(inode)->i_reserved_data_blocks += nrblocks; 1868 EXT4_I(inode)->i_reserved_data_blocks += nrblocks;
@@ -2092,18 +2154,18 @@ static void ext4_da_block_invalidatepages(struct mpage_da_data *mpd,
2092static void ext4_print_free_blocks(struct inode *inode) 2154static void ext4_print_free_blocks(struct inode *inode)
2093{ 2155{
2094 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 2156 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
2095 printk(KERN_EMERG "Total free blocks count %lld\n", 2157 printk(KERN_CRIT "Total free blocks count %lld\n",
2096 ext4_count_free_blocks(inode->i_sb)); 2158 ext4_count_free_blocks(inode->i_sb));
2097 printk(KERN_EMERG "Free/Dirty block details\n"); 2159 printk(KERN_CRIT "Free/Dirty block details\n");
2098 printk(KERN_EMERG "free_blocks=%lld\n", 2160 printk(KERN_CRIT "free_blocks=%lld\n",
2099 (long long)percpu_counter_sum(&sbi->s_freeblocks_counter)); 2161 (long long) percpu_counter_sum(&sbi->s_freeblocks_counter));
2100 printk(KERN_EMERG "dirty_blocks=%lld\n", 2162 printk(KERN_CRIT "dirty_blocks=%lld\n",
2101 (long long)percpu_counter_sum(&sbi->s_dirtyblocks_counter)); 2163 (long long) percpu_counter_sum(&sbi->s_dirtyblocks_counter));
2102 printk(KERN_EMERG "Block reservation details\n"); 2164 printk(KERN_CRIT "Block reservation details\n");
2103 printk(KERN_EMERG "i_reserved_data_blocks=%u\n", 2165 printk(KERN_CRIT "i_reserved_data_blocks=%u\n",
2104 EXT4_I(inode)->i_reserved_data_blocks); 2166 EXT4_I(inode)->i_reserved_data_blocks);
2105 printk(KERN_EMERG "i_reserved_meta_blocks=%u\n", 2167 printk(KERN_CRIT "i_reserved_meta_blocks=%u\n",
2106 EXT4_I(inode)->i_reserved_meta_blocks); 2168 EXT4_I(inode)->i_reserved_meta_blocks);
2107 return; 2169 return;
2108} 2170}
2109 2171
@@ -2189,14 +2251,14 @@ static int mpage_da_map_blocks(struct mpage_da_data *mpd)
2189 * writepage and writepages will again try to write 2251 * writepage and writepages will again try to write
2190 * the same. 2252 * the same.
2191 */ 2253 */
2192 printk(KERN_EMERG "%s block allocation failed for inode %lu " 2254 ext4_msg(mpd->inode->i_sb, KERN_CRIT,
2193 "at logical offset %llu with max blocks " 2255 "delayed block allocation failed for inode %lu at "
2194 "%zd with error %d\n", 2256 "logical offset %llu with max blocks %zd with "
2195 __func__, mpd->inode->i_ino, 2257 "error %d\n", mpd->inode->i_ino,
2196 (unsigned long long)next, 2258 (unsigned long long) next,
2197 mpd->b_size >> mpd->inode->i_blkbits, err); 2259 mpd->b_size >> mpd->inode->i_blkbits, err);
2198 printk(KERN_EMERG "This should not happen.!! " 2260 printk(KERN_CRIT "This should not happen!! "
2199 "Data will be lost\n"); 2261 "Data will be lost\n");
2200 if (err == -ENOSPC) { 2262 if (err == -ENOSPC) {
2201 ext4_print_free_blocks(mpd->inode); 2263 ext4_print_free_blocks(mpd->inode);
2202 } 2264 }
@@ -2743,8 +2805,10 @@ static int ext4_da_writepages(struct address_space *mapping,
2743 int no_nrwrite_index_update; 2805 int no_nrwrite_index_update;
2744 int pages_written = 0; 2806 int pages_written = 0;
2745 long pages_skipped; 2807 long pages_skipped;
2808 unsigned int max_pages;
2746 int range_cyclic, cycled = 1, io_done = 0; 2809 int range_cyclic, cycled = 1, io_done = 0;
2747 int needed_blocks, ret = 0, nr_to_writebump = 0; 2810 int needed_blocks, ret = 0;
2811 long desired_nr_to_write, nr_to_writebump = 0;
2748 loff_t range_start = wbc->range_start; 2812 loff_t range_start = wbc->range_start;
2749 struct ext4_sb_info *sbi = EXT4_SB(mapping->host->i_sb); 2813 struct ext4_sb_info *sbi = EXT4_SB(mapping->host->i_sb);
2750 2814
@@ -2771,16 +2835,6 @@ static int ext4_da_writepages(struct address_space *mapping,
2771 if (unlikely(sbi->s_mount_flags & EXT4_MF_FS_ABORTED)) 2835 if (unlikely(sbi->s_mount_flags & EXT4_MF_FS_ABORTED))
2772 return -EROFS; 2836 return -EROFS;
2773 2837
2774 /*
2775 * Make sure nr_to_write is >= sbi->s_mb_stream_request
2776 * This make sure small files blocks are allocated in
2777 * single attempt. This ensure that small files
2778 * get less fragmented.
2779 */
2780 if (wbc->nr_to_write < sbi->s_mb_stream_request) {
2781 nr_to_writebump = sbi->s_mb_stream_request - wbc->nr_to_write;
2782 wbc->nr_to_write = sbi->s_mb_stream_request;
2783 }
2784 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) 2838 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
2785 range_whole = 1; 2839 range_whole = 1;
2786 2840
@@ -2795,6 +2849,36 @@ static int ext4_da_writepages(struct address_space *mapping,
2795 } else 2849 } else
2796 index = wbc->range_start >> PAGE_CACHE_SHIFT; 2850 index = wbc->range_start >> PAGE_CACHE_SHIFT;
2797 2851
2852 /*
2853 * This works around two forms of stupidity. The first is in
2854 * the writeback code, which caps the maximum number of pages
2855 * written to be 1024 pages. This is wrong on multiple
2856 * levels; different architectues have a different page size,
2857 * which changes the maximum amount of data which gets
2858 * written. Secondly, 4 megabytes is way too small. XFS
2859 * forces this value to be 16 megabytes by multiplying
2860 * nr_to_write parameter by four, and then relies on its
2861 * allocator to allocate larger extents to make them
2862 * contiguous. Unfortunately this brings us to the second
2863 * stupidity, which is that ext4's mballoc code only allocates
2864 * at most 2048 blocks. So we force contiguous writes up to
2865 * the number of dirty blocks in the inode, or
2866 * sbi->max_writeback_mb_bump whichever is smaller.
2867 */
2868 max_pages = sbi->s_max_writeback_mb_bump << (20 - PAGE_CACHE_SHIFT);
2869 if (!range_cyclic && range_whole)
2870 desired_nr_to_write = wbc->nr_to_write * 8;
2871 else
2872 desired_nr_to_write = ext4_num_dirty_pages(inode, index,
2873 max_pages);
2874 if (desired_nr_to_write > max_pages)
2875 desired_nr_to_write = max_pages;
2876
2877 if (wbc->nr_to_write < desired_nr_to_write) {
2878 nr_to_writebump = desired_nr_to_write - wbc->nr_to_write;
2879 wbc->nr_to_write = desired_nr_to_write;
2880 }
2881
2798 mpd.wbc = wbc; 2882 mpd.wbc = wbc;
2799 mpd.inode = mapping->host; 2883 mpd.inode = mapping->host;
2800 2884
@@ -2822,10 +2906,9 @@ retry:
2822 handle = ext4_journal_start(inode, needed_blocks); 2906 handle = ext4_journal_start(inode, needed_blocks);
2823 if (IS_ERR(handle)) { 2907 if (IS_ERR(handle)) {
2824 ret = PTR_ERR(handle); 2908 ret = PTR_ERR(handle);
2825 printk(KERN_CRIT "%s: jbd2_start: " 2909 ext4_msg(inode->i_sb, KERN_CRIT, "%s: jbd2_start: "
2826 "%ld pages, ino %lu; err %d\n", __func__, 2910 "%ld pages, ino %lu; err %d\n", __func__,
2827 wbc->nr_to_write, inode->i_ino, ret); 2911 wbc->nr_to_write, inode->i_ino, ret);
2828 dump_stack();
2829 goto out_writepages; 2912 goto out_writepages;
2830 } 2913 }
2831 2914
@@ -2897,9 +2980,10 @@ retry:
2897 goto retry; 2980 goto retry;
2898 } 2981 }
2899 if (pages_skipped != wbc->pages_skipped) 2982 if (pages_skipped != wbc->pages_skipped)
2900 printk(KERN_EMERG "This should not happen leaving %s " 2983 ext4_msg(inode->i_sb, KERN_CRIT,
2901 "with nr_to_write = %ld ret = %d\n", 2984 "This should not happen leaving %s "
2902 __func__, wbc->nr_to_write, ret); 2985 "with nr_to_write = %ld ret = %d\n",
2986 __func__, wbc->nr_to_write, ret);
2903 2987
2904 /* Update index */ 2988 /* Update index */
2905 index += pages_written; 2989 index += pages_written;
@@ -2914,7 +2998,8 @@ retry:
2914out_writepages: 2998out_writepages:
2915 if (!no_nrwrite_index_update) 2999 if (!no_nrwrite_index_update)
2916 wbc->no_nrwrite_index_update = 0; 3000 wbc->no_nrwrite_index_update = 0;
2917 wbc->nr_to_write -= nr_to_writebump; 3001 if (wbc->nr_to_write > nr_to_writebump)
3002 wbc->nr_to_write -= nr_to_writebump;
2918 wbc->range_start = range_start; 3003 wbc->range_start = range_start;
2919 trace_ext4_da_writepages_result(inode, wbc, ret, pages_written); 3004 trace_ext4_da_writepages_result(inode, wbc, ret, pages_written);
2920 return ret; 3005 return ret;
@@ -3272,6 +3357,8 @@ static int ext4_releasepage(struct page *page, gfp_t wait)
3272} 3357}
3273 3358
3274/* 3359/*
3360 * O_DIRECT for ext3 (or indirect map) based files
3361 *
3275 * If the O_DIRECT write will extend the file then add this inode to the 3362 * If the O_DIRECT write will extend the file then add this inode to the
3276 * orphan list. So recovery will truncate it back to the original size 3363 * orphan list. So recovery will truncate it back to the original size
3277 * if the machine crashes during the write. 3364 * if the machine crashes during the write.
@@ -3280,7 +3367,7 @@ static int ext4_releasepage(struct page *page, gfp_t wait)
3280 * crashes then stale disk data _may_ be exposed inside the file. But current 3367 * crashes then stale disk data _may_ be exposed inside the file. But current
3281 * VFS code falls back into buffered path in that case so we are safe. 3368 * VFS code falls back into buffered path in that case so we are safe.
3282 */ 3369 */
3283static ssize_t ext4_direct_IO(int rw, struct kiocb *iocb, 3370static ssize_t ext4_ind_direct_IO(int rw, struct kiocb *iocb,
3284 const struct iovec *iov, loff_t offset, 3371 const struct iovec *iov, loff_t offset,
3285 unsigned long nr_segs) 3372 unsigned long nr_segs)
3286{ 3373{
@@ -3354,6 +3441,359 @@ out:
3354 return ret; 3441 return ret;
3355} 3442}
3356 3443
3444/* Maximum number of blocks we map for direct IO at once. */
3445
3446static int ext4_get_block_dio_write(struct inode *inode, sector_t iblock,
3447 struct buffer_head *bh_result, int create)
3448{
3449 handle_t *handle = NULL;
3450 int ret = 0;
3451 unsigned max_blocks = bh_result->b_size >> inode->i_blkbits;
3452 int dio_credits;
3453
3454 ext4_debug("ext4_get_block_dio_write: inode %lu, create flag %d\n",
3455 inode->i_ino, create);
3456 /*
3457 * DIO VFS code passes create = 0 flag for write to
3458 * the middle of file. It does this to avoid block
3459 * allocation for holes, to prevent expose stale data
3460 * out when there is parallel buffered read (which does
3461 * not hold the i_mutex lock) while direct IO write has
3462 * not completed. DIO request on holes finally falls back
3463 * to buffered IO for this reason.
3464 *
3465 * For ext4 extent based file, since we support fallocate,
3466 * new allocated extent as uninitialized, for holes, we
3467 * could fallocate blocks for holes, thus parallel
3468 * buffered IO read will zero out the page when read on
3469 * a hole while parallel DIO write to the hole has not completed.
3470 *
3471 * when we come here, we know it's a direct IO write to
3472 * to the middle of file (<i_size)
3473 * so it's safe to override the create flag from VFS.
3474 */
3475 create = EXT4_GET_BLOCKS_DIO_CREATE_EXT;
3476
3477 if (max_blocks > DIO_MAX_BLOCKS)
3478 max_blocks = DIO_MAX_BLOCKS;
3479 dio_credits = ext4_chunk_trans_blocks(inode, max_blocks);
3480 handle = ext4_journal_start(inode, dio_credits);
3481 if (IS_ERR(handle)) {
3482 ret = PTR_ERR(handle);
3483 goto out;
3484 }
3485 ret = ext4_get_blocks(handle, inode, iblock, max_blocks, bh_result,
3486 create);
3487 if (ret > 0) {
3488 bh_result->b_size = (ret << inode->i_blkbits);
3489 ret = 0;
3490 }
3491 ext4_journal_stop(handle);
3492out:
3493 return ret;
3494}
3495
3496static void ext4_free_io_end(ext4_io_end_t *io)
3497{
3498 BUG_ON(!io);
3499 iput(io->inode);
3500 kfree(io);
3501}
3502static void dump_aio_dio_list(struct inode * inode)
3503{
3504#ifdef EXT4_DEBUG
3505 struct list_head *cur, *before, *after;
3506 ext4_io_end_t *io, *io0, *io1;
3507
3508 if (list_empty(&EXT4_I(inode)->i_aio_dio_complete_list)){
3509 ext4_debug("inode %lu aio dio list is empty\n", inode->i_ino);
3510 return;
3511 }
3512
3513 ext4_debug("Dump inode %lu aio_dio_completed_IO list \n", inode->i_ino);
3514 list_for_each_entry(io, &EXT4_I(inode)->i_aio_dio_complete_list, list){
3515 cur = &io->list;
3516 before = cur->prev;
3517 io0 = container_of(before, ext4_io_end_t, list);
3518 after = cur->next;
3519 io1 = container_of(after, ext4_io_end_t, list);
3520
3521 ext4_debug("io 0x%p from inode %lu,prev 0x%p,next 0x%p\n",
3522 io, inode->i_ino, io0, io1);
3523 }
3524#endif
3525}
3526
3527/*
3528 * check a range of space and convert unwritten extents to written.
3529 */
3530static int ext4_end_aio_dio_nolock(ext4_io_end_t *io)
3531{
3532 struct inode *inode = io->inode;
3533 loff_t offset = io->offset;
3534 size_t size = io->size;
3535 int ret = 0;
3536
3537 ext4_debug("end_aio_dio_onlock: io 0x%p from inode %lu,list->next 0x%p,"
3538 "list->prev 0x%p\n",
3539 io, inode->i_ino, io->list.next, io->list.prev);
3540
3541 if (list_empty(&io->list))
3542 return ret;
3543
3544 if (io->flag != DIO_AIO_UNWRITTEN)
3545 return ret;
3546
3547 if (offset + size <= i_size_read(inode))
3548 ret = ext4_convert_unwritten_extents(inode, offset, size);
3549
3550 if (ret < 0) {
3551 printk(KERN_EMERG "%s: failed to convert unwritten"
3552 "extents to written extents, error is %d"
3553 " io is still on inode %lu aio dio list\n",
3554 __func__, ret, inode->i_ino);
3555 return ret;
3556 }
3557
3558 /* clear the DIO AIO unwritten flag */
3559 io->flag = 0;
3560 return ret;
3561}
3562/*
3563 * work on completed aio dio IO, to convert unwritten extents to extents
3564 */
3565static void ext4_end_aio_dio_work(struct work_struct *work)
3566{
3567 ext4_io_end_t *io = container_of(work, ext4_io_end_t, work);
3568 struct inode *inode = io->inode;
3569 int ret = 0;
3570
3571 mutex_lock(&inode->i_mutex);
3572 ret = ext4_end_aio_dio_nolock(io);
3573 if (ret >= 0) {
3574 if (!list_empty(&io->list))
3575 list_del_init(&io->list);
3576 ext4_free_io_end(io);
3577 }
3578 mutex_unlock(&inode->i_mutex);
3579}
3580/*
3581 * This function is called from ext4_sync_file().
3582 *
3583 * When AIO DIO IO is completed, the work to convert unwritten
3584 * extents to written is queued on workqueue but may not get immediately
3585 * scheduled. When fsync is called, we need to ensure the
3586 * conversion is complete before fsync returns.
3587 * The inode keeps track of a list of completed AIO from DIO path
3588 * that might needs to do the conversion. This function walks through
3589 * the list and convert the related unwritten extents to written.
3590 */
3591int flush_aio_dio_completed_IO(struct inode *inode)
3592{
3593 ext4_io_end_t *io;
3594 int ret = 0;
3595 int ret2 = 0;
3596
3597 if (list_empty(&EXT4_I(inode)->i_aio_dio_complete_list))
3598 return ret;
3599
3600 dump_aio_dio_list(inode);
3601 while (!list_empty(&EXT4_I(inode)->i_aio_dio_complete_list)){
3602 io = list_entry(EXT4_I(inode)->i_aio_dio_complete_list.next,
3603 ext4_io_end_t, list);
3604 /*
3605 * Calling ext4_end_aio_dio_nolock() to convert completed
3606 * IO to written.
3607 *
3608 * When ext4_sync_file() is called, run_queue() may already
3609 * about to flush the work corresponding to this io structure.
3610 * It will be upset if it founds the io structure related
3611 * to the work-to-be schedule is freed.
3612 *
3613 * Thus we need to keep the io structure still valid here after
3614 * convertion finished. The io structure has a flag to
3615 * avoid double converting from both fsync and background work
3616 * queue work.
3617 */
3618 ret = ext4_end_aio_dio_nolock(io);
3619 if (ret < 0)
3620 ret2 = ret;
3621 else
3622 list_del_init(&io->list);
3623 }
3624 return (ret2 < 0) ? ret2 : 0;
3625}
3626
3627static ext4_io_end_t *ext4_init_io_end (struct inode *inode)
3628{
3629 ext4_io_end_t *io = NULL;
3630
3631 io = kmalloc(sizeof(*io), GFP_NOFS);
3632
3633 if (io) {
3634 igrab(inode);
3635 io->inode = inode;
3636 io->flag = 0;
3637 io->offset = 0;
3638 io->size = 0;
3639 io->error = 0;
3640 INIT_WORK(&io->work, ext4_end_aio_dio_work);
3641 INIT_LIST_HEAD(&io->list);
3642 }
3643
3644 return io;
3645}
3646
3647static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
3648 ssize_t size, void *private)
3649{
3650 ext4_io_end_t *io_end = iocb->private;
3651 struct workqueue_struct *wq;
3652
3653 ext_debug("ext4_end_io_dio(): io_end 0x%p"
3654 "for inode %lu, iocb 0x%p, offset %llu, size %llu\n",
3655 iocb->private, io_end->inode->i_ino, iocb, offset,
3656 size);
3657 /* if not async direct IO or dio with 0 bytes write, just return */
3658 if (!io_end || !size)
3659 return;
3660
3661 /* if not aio dio with unwritten extents, just free io and return */
3662 if (io_end->flag != DIO_AIO_UNWRITTEN){
3663 ext4_free_io_end(io_end);
3664 iocb->private = NULL;
3665 return;
3666 }
3667
3668 io_end->offset = offset;
3669 io_end->size = size;
3670 wq = EXT4_SB(io_end->inode->i_sb)->dio_unwritten_wq;
3671
3672 /* queue the work to convert unwritten extents to written */
3673 queue_work(wq, &io_end->work);
3674
3675 /* Add the io_end to per-inode completed aio dio list*/
3676 list_add_tail(&io_end->list,
3677 &EXT4_I(io_end->inode)->i_aio_dio_complete_list);
3678 iocb->private = NULL;
3679}
3680/*
3681 * For ext4 extent files, ext4 will do direct-io write to holes,
3682 * preallocated extents, and those write extend the file, no need to
3683 * fall back to buffered IO.
3684 *
3685 * For holes, we fallocate those blocks, mark them as unintialized
3686 * If those blocks were preallocated, we mark sure they are splited, but
3687 * still keep the range to write as unintialized.
3688 *
3689 * The unwrritten extents will be converted to written when DIO is completed.
3690 * For async direct IO, since the IO may still pending when return, we
3691 * set up an end_io call back function, which will do the convertion
3692 * when async direct IO completed.
3693 *
3694 * If the O_DIRECT write will extend the file then add this inode to the
3695 * orphan list. So recovery will truncate it back to the original size
3696 * if the machine crashes during the write.
3697 *
3698 */
3699static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
3700 const struct iovec *iov, loff_t offset,
3701 unsigned long nr_segs)
3702{
3703 struct file *file = iocb->ki_filp;
3704 struct inode *inode = file->f_mapping->host;
3705 ssize_t ret;
3706 size_t count = iov_length(iov, nr_segs);
3707
3708 loff_t final_size = offset + count;
3709 if (rw == WRITE && final_size <= inode->i_size) {
3710 /*
3711 * We could direct write to holes and fallocate.
3712 *
3713 * Allocated blocks to fill the hole are marked as uninitialized
3714 * to prevent paralel buffered read to expose the stale data
3715 * before DIO complete the data IO.
3716 *
3717 * As to previously fallocated extents, ext4 get_block
3718 * will just simply mark the buffer mapped but still
3719 * keep the extents uninitialized.
3720 *
3721 * for non AIO case, we will convert those unwritten extents
3722 * to written after return back from blockdev_direct_IO.
3723 *
3724 * for async DIO, the conversion needs to be defered when
3725 * the IO is completed. The ext4 end_io callback function
3726 * will be called to take care of the conversion work.
3727 * Here for async case, we allocate an io_end structure to
3728 * hook to the iocb.
3729 */
3730 iocb->private = NULL;
3731 EXT4_I(inode)->cur_aio_dio = NULL;
3732 if (!is_sync_kiocb(iocb)) {
3733 iocb->private = ext4_init_io_end(inode);
3734 if (!iocb->private)
3735 return -ENOMEM;
3736 /*
3737 * we save the io structure for current async
3738 * direct IO, so that later ext4_get_blocks()
3739 * could flag the io structure whether there
3740 * is a unwritten extents needs to be converted
3741 * when IO is completed.
3742 */
3743 EXT4_I(inode)->cur_aio_dio = iocb->private;
3744 }
3745
3746 ret = blockdev_direct_IO(rw, iocb, inode,
3747 inode->i_sb->s_bdev, iov,
3748 offset, nr_segs,
3749 ext4_get_block_dio_write,
3750 ext4_end_io_dio);
3751 if (iocb->private)
3752 EXT4_I(inode)->cur_aio_dio = NULL;
3753 /*
3754 * The io_end structure takes a reference to the inode,
3755 * that structure needs to be destroyed and the
3756 * reference to the inode need to be dropped, when IO is
3757 * complete, even with 0 byte write, or failed.
3758 *
3759 * In the successful AIO DIO case, the io_end structure will be
3760 * desctroyed and the reference to the inode will be dropped
3761 * after the end_io call back function is called.
3762 *
3763 * In the case there is 0 byte write, or error case, since
3764 * VFS direct IO won't invoke the end_io call back function,
3765 * we need to free the end_io structure here.
3766 */
3767 if (ret != -EIOCBQUEUED && ret <= 0 && iocb->private) {
3768 ext4_free_io_end(iocb->private);
3769 iocb->private = NULL;
3770 } else if (ret > 0)
3771 /*
3772 * for non AIO case, since the IO is already
3773 * completed, we could do the convertion right here
3774 */
3775 ret = ext4_convert_unwritten_extents(inode,
3776 offset, ret);
3777 return ret;
3778 }
3779
3780 /* for write the the end of file case, we fall back to old way */
3781 return ext4_ind_direct_IO(rw, iocb, iov, offset, nr_segs);
3782}
3783
3784static ssize_t ext4_direct_IO(int rw, struct kiocb *iocb,
3785 const struct iovec *iov, loff_t offset,
3786 unsigned long nr_segs)
3787{
3788 struct file *file = iocb->ki_filp;
3789 struct inode *inode = file->f_mapping->host;
3790
3791 if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)
3792 return ext4_ext_direct_IO(rw, iocb, iov, offset, nr_segs);
3793
3794 return ext4_ind_direct_IO(rw, iocb, iov, offset, nr_segs);
3795}
3796
3357/* 3797/*
3358 * Pages can be marked dirty completely asynchronously from ext4's journalling 3798 * Pages can be marked dirty completely asynchronously from ext4's journalling
3359 * activity. By filemap_sync_pte(), try_to_unmap_one(), etc. We cannot do 3799 * activity. By filemap_sync_pte(), try_to_unmap_one(), etc. We cannot do
@@ -4551,8 +4991,7 @@ static int ext4_inode_blocks_set(handle_t *handle,
4551 */ 4991 */
4552static int ext4_do_update_inode(handle_t *handle, 4992static int ext4_do_update_inode(handle_t *handle,
4553 struct inode *inode, 4993 struct inode *inode,
4554 struct ext4_iloc *iloc, 4994 struct ext4_iloc *iloc)
4555 int do_sync)
4556{ 4995{
4557 struct ext4_inode *raw_inode = ext4_raw_inode(iloc); 4996 struct ext4_inode *raw_inode = ext4_raw_inode(iloc);
4558 struct ext4_inode_info *ei = EXT4_I(inode); 4997 struct ext4_inode_info *ei = EXT4_I(inode);
@@ -4653,22 +5092,10 @@ static int ext4_do_update_inode(handle_t *handle,
4653 raw_inode->i_extra_isize = cpu_to_le16(ei->i_extra_isize); 5092 raw_inode->i_extra_isize = cpu_to_le16(ei->i_extra_isize);
4654 } 5093 }
4655 5094
4656 /* 5095 BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
4657 * If we're not using a journal and we were called from 5096 rc = ext4_handle_dirty_metadata(handle, inode, bh);
4658 * ext4_write_inode() to sync the inode (making do_sync true), 5097 if (!err)
4659 * we can just use sync_dirty_buffer() directly to do our dirty 5098 err = rc;
4660 * work. Testing s_journal here is a bit redundant but it's
4661 * worth it to avoid potential future trouble.
4662 */
4663 if (EXT4_SB(inode->i_sb)->s_journal == NULL && do_sync) {
4664 BUFFER_TRACE(bh, "call sync_dirty_buffer");
4665 sync_dirty_buffer(bh);
4666 } else {
4667 BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
4668 rc = ext4_handle_dirty_metadata(handle, inode, bh);
4669 if (!err)
4670 err = rc;
4671 }
4672 ei->i_state &= ~EXT4_STATE_NEW; 5099 ei->i_state &= ~EXT4_STATE_NEW;
4673 5100
4674out_brelse: 5101out_brelse:
@@ -4736,8 +5163,16 @@ int ext4_write_inode(struct inode *inode, int wait)
4736 err = ext4_get_inode_loc(inode, &iloc); 5163 err = ext4_get_inode_loc(inode, &iloc);
4737 if (err) 5164 if (err)
4738 return err; 5165 return err;
4739 err = ext4_do_update_inode(EXT4_NOJOURNAL_HANDLE, 5166 if (wait)
4740 inode, &iloc, wait); 5167 sync_dirty_buffer(iloc.bh);
5168 if (buffer_req(iloc.bh) && !buffer_uptodate(iloc.bh)) {
5169 ext4_error(inode->i_sb, __func__,
5170 "IO error syncing inode, "
5171 "inode=%lu, block=%llu",
5172 inode->i_ino,
5173 (unsigned long long)iloc.bh->b_blocknr);
5174 err = -EIO;
5175 }
4741 } 5176 }
4742 return err; 5177 return err;
4743} 5178}
@@ -5033,7 +5468,7 @@ int ext4_mark_iloc_dirty(handle_t *handle,
5033 get_bh(iloc->bh); 5468 get_bh(iloc->bh);
5034 5469
5035 /* ext4_do_update_inode() does jbd2_journal_dirty_metadata */ 5470 /* ext4_do_update_inode() does jbd2_journal_dirty_metadata */
5036 err = ext4_do_update_inode(handle, inode, iloc, 0); 5471 err = ext4_do_update_inode(handle, inode, iloc);
5037 put_bh(iloc->bh); 5472 put_bh(iloc->bh);
5038 return err; 5473 return err;
5039} 5474}
@@ -5180,24 +5615,13 @@ void ext4_dirty_inode(struct inode *inode)
5180 handle_t *current_handle = ext4_journal_current_handle(); 5615 handle_t *current_handle = ext4_journal_current_handle();
5181 handle_t *handle; 5616 handle_t *handle;
5182 5617
5183 if (!ext4_handle_valid(current_handle)) {
5184 ext4_mark_inode_dirty(current_handle, inode);
5185 return;
5186 }
5187
5188 handle = ext4_journal_start(inode, 2); 5618 handle = ext4_journal_start(inode, 2);
5189 if (IS_ERR(handle)) 5619 if (IS_ERR(handle))
5190 goto out; 5620 goto out;
5191 if (current_handle && 5621
5192 current_handle->h_transaction != handle->h_transaction) { 5622 jbd_debug(5, "marking dirty. outer handle=%p\n", current_handle);
5193 /* This task has a transaction open against a different fs */ 5623 ext4_mark_inode_dirty(handle, inode);
5194 printk(KERN_EMERG "%s: transactions do not match!\n", 5624
5195 __func__);
5196 } else {
5197 jbd_debug(5, "marking dirty. outer handle=%p\n",
5198 current_handle);
5199 ext4_mark_inode_dirty(handle, inode);
5200 }
5201 ext4_journal_stop(handle); 5625 ext4_journal_stop(handle);
5202out: 5626out:
5203 return; 5627 return;
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index e9c61896d605..bba12824defa 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -2096,207 +2096,6 @@ out:
2096 return err; 2096 return err;
2097} 2097}
2098 2098
2099#ifdef EXT4_MB_HISTORY
2100struct ext4_mb_proc_session {
2101 struct ext4_mb_history *history;
2102 struct super_block *sb;
2103 int start;
2104 int max;
2105};
2106
2107static void *ext4_mb_history_skip_empty(struct ext4_mb_proc_session *s,
2108 struct ext4_mb_history *hs,
2109 int first)
2110{
2111 if (hs == s->history + s->max)
2112 hs = s->history;
2113 if (!first && hs == s->history + s->start)
2114 return NULL;
2115 while (hs->orig.fe_len == 0) {
2116 hs++;
2117 if (hs == s->history + s->max)
2118 hs = s->history;
2119 if (hs == s->history + s->start)
2120 return NULL;
2121 }
2122 return hs;
2123}
2124
2125static void *ext4_mb_seq_history_start(struct seq_file *seq, loff_t *pos)
2126{
2127 struct ext4_mb_proc_session *s = seq->private;
2128 struct ext4_mb_history *hs;
2129 int l = *pos;
2130
2131 if (l == 0)
2132 return SEQ_START_TOKEN;
2133 hs = ext4_mb_history_skip_empty(s, s->history + s->start, 1);
2134 if (!hs)
2135 return NULL;
2136 while (--l && (hs = ext4_mb_history_skip_empty(s, ++hs, 0)) != NULL);
2137 return hs;
2138}
2139
2140static void *ext4_mb_seq_history_next(struct seq_file *seq, void *v,
2141 loff_t *pos)
2142{
2143 struct ext4_mb_proc_session *s = seq->private;
2144 struct ext4_mb_history *hs = v;
2145
2146 ++*pos;
2147 if (v == SEQ_START_TOKEN)
2148 return ext4_mb_history_skip_empty(s, s->history + s->start, 1);
2149 else
2150 return ext4_mb_history_skip_empty(s, ++hs, 0);
2151}
2152
2153static int ext4_mb_seq_history_show(struct seq_file *seq, void *v)
2154{
2155 char buf[25], buf2[25], buf3[25], *fmt;
2156 struct ext4_mb_history *hs = v;
2157
2158 if (v == SEQ_START_TOKEN) {
2159 seq_printf(seq, "%-5s %-8s %-23s %-23s %-23s %-5s "
2160 "%-5s %-2s %-6s %-5s %-5s %-6s\n",
2161 "pid", "inode", "original", "goal", "result", "found",
2162 "grps", "cr", "flags", "merge", "tail", "broken");
2163 return 0;
2164 }
2165
2166 if (hs->op == EXT4_MB_HISTORY_ALLOC) {
2167 fmt = "%-5u %-8u %-23s %-23s %-23s %-5u %-5u %-2u "
2168 "0x%04x %-5s %-5u %-6u\n";
2169 sprintf(buf2, "%u/%d/%u@%u", hs->result.fe_group,
2170 hs->result.fe_start, hs->result.fe_len,
2171 hs->result.fe_logical);
2172 sprintf(buf, "%u/%d/%u@%u", hs->orig.fe_group,
2173 hs->orig.fe_start, hs->orig.fe_len,
2174 hs->orig.fe_logical);
2175 sprintf(buf3, "%u/%d/%u@%u", hs->goal.fe_group,
2176 hs->goal.fe_start, hs->goal.fe_len,
2177 hs->goal.fe_logical);
2178 seq_printf(seq, fmt, hs->pid, hs->ino, buf, buf3, buf2,
2179 hs->found, hs->groups, hs->cr, hs->flags,
2180 hs->merged ? "M" : "", hs->tail,
2181 hs->buddy ? 1 << hs->buddy : 0);
2182 } else if (hs->op == EXT4_MB_HISTORY_PREALLOC) {
2183 fmt = "%-5u %-8u %-23s %-23s %-23s\n";
2184 sprintf(buf2, "%u/%d/%u@%u", hs->result.fe_group,
2185 hs->result.fe_start, hs->result.fe_len,
2186 hs->result.fe_logical);
2187 sprintf(buf, "%u/%d/%u@%u", hs->orig.fe_group,
2188 hs->orig.fe_start, hs->orig.fe_len,
2189 hs->orig.fe_logical);
2190 seq_printf(seq, fmt, hs->pid, hs->ino, buf, "", buf2);
2191 } else if (hs->op == EXT4_MB_HISTORY_DISCARD) {
2192 sprintf(buf2, "%u/%d/%u", hs->result.fe_group,
2193 hs->result.fe_start, hs->result.fe_len);
2194 seq_printf(seq, "%-5u %-8u %-23s discard\n",
2195 hs->pid, hs->ino, buf2);
2196 } else if (hs->op == EXT4_MB_HISTORY_FREE) {
2197 sprintf(buf2, "%u/%d/%u", hs->result.fe_group,
2198 hs->result.fe_start, hs->result.fe_len);
2199 seq_printf(seq, "%-5u %-8u %-23s free\n",
2200 hs->pid, hs->ino, buf2);
2201 }
2202 return 0;
2203}
2204
2205static void ext4_mb_seq_history_stop(struct seq_file *seq, void *v)
2206{
2207}
2208
2209static const struct seq_operations ext4_mb_seq_history_ops = {
2210 .start = ext4_mb_seq_history_start,
2211 .next = ext4_mb_seq_history_next,
2212 .stop = ext4_mb_seq_history_stop,
2213 .show = ext4_mb_seq_history_show,
2214};
2215
2216static int ext4_mb_seq_history_open(struct inode *inode, struct file *file)
2217{
2218 struct super_block *sb = PDE(inode)->data;
2219 struct ext4_sb_info *sbi = EXT4_SB(sb);
2220 struct ext4_mb_proc_session *s;
2221 int rc;
2222 int size;
2223
2224 if (unlikely(sbi->s_mb_history == NULL))
2225 return -ENOMEM;
2226 s = kmalloc(sizeof(*s), GFP_KERNEL);
2227 if (s == NULL)
2228 return -ENOMEM;
2229 s->sb = sb;
2230 size = sizeof(struct ext4_mb_history) * sbi->s_mb_history_max;
2231 s->history = kmalloc(size, GFP_KERNEL);
2232 if (s->history == NULL) {
2233 kfree(s);
2234 return -ENOMEM;
2235 }
2236
2237 spin_lock(&sbi->s_mb_history_lock);
2238 memcpy(s->history, sbi->s_mb_history, size);
2239 s->max = sbi->s_mb_history_max;
2240 s->start = sbi->s_mb_history_cur % s->max;
2241 spin_unlock(&sbi->s_mb_history_lock);
2242
2243 rc = seq_open(file, &ext4_mb_seq_history_ops);
2244 if (rc == 0) {
2245 struct seq_file *m = (struct seq_file *)file->private_data;
2246 m->private = s;
2247 } else {
2248 kfree(s->history);
2249 kfree(s);
2250 }
2251 return rc;
2252
2253}
2254
2255static int ext4_mb_seq_history_release(struct inode *inode, struct file *file)
2256{
2257 struct seq_file *seq = (struct seq_file *)file->private_data;
2258 struct ext4_mb_proc_session *s = seq->private;
2259 kfree(s->history);
2260 kfree(s);
2261 return seq_release(inode, file);
2262}
2263
2264static ssize_t ext4_mb_seq_history_write(struct file *file,
2265 const char __user *buffer,
2266 size_t count, loff_t *ppos)
2267{
2268 struct seq_file *seq = (struct seq_file *)file->private_data;
2269 struct ext4_mb_proc_session *s = seq->private;
2270 struct super_block *sb = s->sb;
2271 char str[32];
2272 int value;
2273
2274 if (count >= sizeof(str)) {
2275 printk(KERN_ERR "EXT4-fs: %s string too long, max %u bytes\n",
2276 "mb_history", (int)sizeof(str));
2277 return -EOVERFLOW;
2278 }
2279
2280 if (copy_from_user(str, buffer, count))
2281 return -EFAULT;
2282
2283 value = simple_strtol(str, NULL, 0);
2284 if (value < 0)
2285 return -ERANGE;
2286 EXT4_SB(sb)->s_mb_history_filter = value;
2287
2288 return count;
2289}
2290
2291static const struct file_operations ext4_mb_seq_history_fops = {
2292 .owner = THIS_MODULE,
2293 .open = ext4_mb_seq_history_open,
2294 .read = seq_read,
2295 .write = ext4_mb_seq_history_write,
2296 .llseek = seq_lseek,
2297 .release = ext4_mb_seq_history_release,
2298};
2299
2300static void *ext4_mb_seq_groups_start(struct seq_file *seq, loff_t *pos) 2099static void *ext4_mb_seq_groups_start(struct seq_file *seq, loff_t *pos)
2301{ 2100{
2302 struct super_block *sb = seq->private; 2101 struct super_block *sb = seq->private;
@@ -2396,82 +2195,6 @@ static const struct file_operations ext4_mb_seq_groups_fops = {
2396 .release = seq_release, 2195 .release = seq_release,
2397}; 2196};
2398 2197
2399static void ext4_mb_history_release(struct super_block *sb)
2400{
2401 struct ext4_sb_info *sbi = EXT4_SB(sb);
2402
2403 if (sbi->s_proc != NULL) {
2404 remove_proc_entry("mb_groups", sbi->s_proc);
2405 if (sbi->s_mb_history_max)
2406 remove_proc_entry("mb_history", sbi->s_proc);
2407 }
2408 kfree(sbi->s_mb_history);
2409}
2410
2411static void ext4_mb_history_init(struct super_block *sb)
2412{
2413 struct ext4_sb_info *sbi = EXT4_SB(sb);
2414 int i;
2415
2416 if (sbi->s_proc != NULL) {
2417 if (sbi->s_mb_history_max)
2418 proc_create_data("mb_history", S_IRUGO, sbi->s_proc,
2419 &ext4_mb_seq_history_fops, sb);
2420 proc_create_data("mb_groups", S_IRUGO, sbi->s_proc,
2421 &ext4_mb_seq_groups_fops, sb);
2422 }
2423
2424 sbi->s_mb_history_cur = 0;
2425 spin_lock_init(&sbi->s_mb_history_lock);
2426 i = sbi->s_mb_history_max * sizeof(struct ext4_mb_history);
2427 sbi->s_mb_history = i ? kzalloc(i, GFP_KERNEL) : NULL;
2428 /* if we can't allocate history, then we simple won't use it */
2429}
2430
2431static noinline_for_stack void
2432ext4_mb_store_history(struct ext4_allocation_context *ac)
2433{
2434 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
2435 struct ext4_mb_history h;
2436
2437 if (sbi->s_mb_history == NULL)
2438 return;
2439
2440 if (!(ac->ac_op & sbi->s_mb_history_filter))
2441 return;
2442
2443 h.op = ac->ac_op;
2444 h.pid = current->pid;
2445 h.ino = ac->ac_inode ? ac->ac_inode->i_ino : 0;
2446 h.orig = ac->ac_o_ex;
2447 h.result = ac->ac_b_ex;
2448 h.flags = ac->ac_flags;
2449 h.found = ac->ac_found;
2450 h.groups = ac->ac_groups_scanned;
2451 h.cr = ac->ac_criteria;
2452 h.tail = ac->ac_tail;
2453 h.buddy = ac->ac_buddy;
2454 h.merged = 0;
2455 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC) {
2456 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
2457 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
2458 h.merged = 1;
2459 h.goal = ac->ac_g_ex;
2460 h.result = ac->ac_f_ex;
2461 }
2462
2463 spin_lock(&sbi->s_mb_history_lock);
2464 memcpy(sbi->s_mb_history + sbi->s_mb_history_cur, &h, sizeof(h));
2465 if (++sbi->s_mb_history_cur >= sbi->s_mb_history_max)
2466 sbi->s_mb_history_cur = 0;
2467 spin_unlock(&sbi->s_mb_history_lock);
2468}
2469
2470#else
2471#define ext4_mb_history_release(sb)
2472#define ext4_mb_history_init(sb)
2473#endif
2474
2475 2198
2476/* Create and initialize ext4_group_info data for the given group. */ 2199/* Create and initialize ext4_group_info data for the given group. */
2477int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group, 2200int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
@@ -2690,7 +2413,6 @@ int ext4_mb_init(struct super_block *sb, int needs_recovery)
2690 sbi->s_mb_stats = MB_DEFAULT_STATS; 2413 sbi->s_mb_stats = MB_DEFAULT_STATS;
2691 sbi->s_mb_stream_request = MB_DEFAULT_STREAM_THRESHOLD; 2414 sbi->s_mb_stream_request = MB_DEFAULT_STREAM_THRESHOLD;
2692 sbi->s_mb_order2_reqs = MB_DEFAULT_ORDER2_REQS; 2415 sbi->s_mb_order2_reqs = MB_DEFAULT_ORDER2_REQS;
2693 sbi->s_mb_history_filter = EXT4_MB_HISTORY_DEFAULT;
2694 sbi->s_mb_group_prealloc = MB_DEFAULT_GROUP_PREALLOC; 2416 sbi->s_mb_group_prealloc = MB_DEFAULT_GROUP_PREALLOC;
2695 2417
2696 sbi->s_locality_groups = alloc_percpu(struct ext4_locality_group); 2418 sbi->s_locality_groups = alloc_percpu(struct ext4_locality_group);
@@ -2708,12 +2430,12 @@ int ext4_mb_init(struct super_block *sb, int needs_recovery)
2708 spin_lock_init(&lg->lg_prealloc_lock); 2430 spin_lock_init(&lg->lg_prealloc_lock);
2709 } 2431 }
2710 2432
2711 ext4_mb_history_init(sb); 2433 if (sbi->s_proc)
2434 proc_create_data("mb_groups", S_IRUGO, sbi->s_proc,
2435 &ext4_mb_seq_groups_fops, sb);
2712 2436
2713 if (sbi->s_journal) 2437 if (sbi->s_journal)
2714 sbi->s_journal->j_commit_callback = release_blocks_on_commit; 2438 sbi->s_journal->j_commit_callback = release_blocks_on_commit;
2715
2716 printk(KERN_INFO "EXT4-fs: mballoc enabled\n");
2717 return 0; 2439 return 0;
2718} 2440}
2719 2441
@@ -2790,7 +2512,8 @@ int ext4_mb_release(struct super_block *sb)
2790 } 2512 }
2791 2513
2792 free_percpu(sbi->s_locality_groups); 2514 free_percpu(sbi->s_locality_groups);
2793 ext4_mb_history_release(sb); 2515 if (sbi->s_proc)
2516 remove_proc_entry("mb_groups", sbi->s_proc);
2794 2517
2795 return 0; 2518 return 0;
2796} 2519}
@@ -3276,7 +2999,10 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
3276 atomic_inc(&sbi->s_bal_breaks); 2999 atomic_inc(&sbi->s_bal_breaks);
3277 } 3000 }
3278 3001
3279 ext4_mb_store_history(ac); 3002 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
3003 trace_ext4_mballoc_alloc(ac);
3004 else
3005 trace_ext4_mballoc_prealloc(ac);
3280} 3006}
3281 3007
3282/* 3008/*
@@ -3776,7 +3502,6 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
3776 if (ac) { 3502 if (ac) {
3777 ac->ac_sb = sb; 3503 ac->ac_sb = sb;
3778 ac->ac_inode = pa->pa_inode; 3504 ac->ac_inode = pa->pa_inode;
3779 ac->ac_op = EXT4_MB_HISTORY_DISCARD;
3780 } 3505 }
3781 3506
3782 while (bit < end) { 3507 while (bit < end) {
@@ -3796,7 +3521,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
3796 ac->ac_b_ex.fe_start = bit; 3521 ac->ac_b_ex.fe_start = bit;
3797 ac->ac_b_ex.fe_len = next - bit; 3522 ac->ac_b_ex.fe_len = next - bit;
3798 ac->ac_b_ex.fe_logical = 0; 3523 ac->ac_b_ex.fe_logical = 0;
3799 ext4_mb_store_history(ac); 3524 trace_ext4_mballoc_discard(ac);
3800 } 3525 }
3801 3526
3802 trace_ext4_mb_release_inode_pa(ac, pa, grp_blk_start + bit, 3527 trace_ext4_mb_release_inode_pa(ac, pa, grp_blk_start + bit,
@@ -3831,9 +3556,6 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
3831 ext4_group_t group; 3556 ext4_group_t group;
3832 ext4_grpblk_t bit; 3557 ext4_grpblk_t bit;
3833 3558
3834 if (ac)
3835 ac->ac_op = EXT4_MB_HISTORY_DISCARD;
3836
3837 trace_ext4_mb_release_group_pa(ac, pa); 3559 trace_ext4_mb_release_group_pa(ac, pa);
3838 BUG_ON(pa->pa_deleted == 0); 3560 BUG_ON(pa->pa_deleted == 0);
3839 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit); 3561 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
@@ -3848,7 +3570,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
3848 ac->ac_b_ex.fe_start = bit; 3570 ac->ac_b_ex.fe_start = bit;
3849 ac->ac_b_ex.fe_len = pa->pa_len; 3571 ac->ac_b_ex.fe_len = pa->pa_len;
3850 ac->ac_b_ex.fe_logical = 0; 3572 ac->ac_b_ex.fe_logical = 0;
3851 ext4_mb_store_history(ac); 3573 trace_ext4_mballoc_discard(ac);
3852 } 3574 }
3853 3575
3854 return 0; 3576 return 0;
@@ -4189,7 +3911,6 @@ static void ext4_mb_group_or_file(struct ext4_allocation_context *ac)
4189 size = ac->ac_o_ex.fe_logical + ac->ac_o_ex.fe_len; 3911 size = ac->ac_o_ex.fe_logical + ac->ac_o_ex.fe_len;
4190 isize = (i_size_read(ac->ac_inode) + ac->ac_sb->s_blocksize - 1) 3912 isize = (i_size_read(ac->ac_inode) + ac->ac_sb->s_blocksize - 1)
4191 >> bsbits; 3913 >> bsbits;
4192 size = max(size, isize);
4193 3914
4194 if ((size == isize) && 3915 if ((size == isize) &&
4195 !ext4_fs_is_busy(sbi) && 3916 !ext4_fs_is_busy(sbi) &&
@@ -4199,6 +3920,7 @@ static void ext4_mb_group_or_file(struct ext4_allocation_context *ac)
4199 } 3920 }
4200 3921
4201 /* don't use group allocation for large files */ 3922 /* don't use group allocation for large files */
3923 size = max(size, isize);
4202 if (size >= sbi->s_mb_stream_request) { 3924 if (size >= sbi->s_mb_stream_request) {
4203 ac->ac_flags |= EXT4_MB_STREAM_ALLOC; 3925 ac->ac_flags |= EXT4_MB_STREAM_ALLOC;
4204 return; 3926 return;
@@ -4739,7 +4461,6 @@ void ext4_mb_free_blocks(handle_t *handle, struct inode *inode,
4739 4461
4740 ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS); 4462 ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS);
4741 if (ac) { 4463 if (ac) {
4742 ac->ac_op = EXT4_MB_HISTORY_FREE;
4743 ac->ac_inode = inode; 4464 ac->ac_inode = inode;
4744 ac->ac_sb = sb; 4465 ac->ac_sb = sb;
4745 } 4466 }
@@ -4806,7 +4527,7 @@ do_more:
4806 ac->ac_b_ex.fe_group = block_group; 4527 ac->ac_b_ex.fe_group = block_group;
4807 ac->ac_b_ex.fe_start = bit; 4528 ac->ac_b_ex.fe_start = bit;
4808 ac->ac_b_ex.fe_len = count; 4529 ac->ac_b_ex.fe_len = count;
4809 ext4_mb_store_history(ac); 4530 trace_ext4_mballoc_free(ac);
4810 } 4531 }
4811 4532
4812 err = ext4_mb_load_buddy(sb, block_group, &e4b); 4533 err = ext4_mb_load_buddy(sb, block_group, &e4b);
diff --git a/fs/ext4/mballoc.h b/fs/ext4/mballoc.h
index 188d3d709b24..0ca811061bc7 100644
--- a/fs/ext4/mballoc.h
+++ b/fs/ext4/mballoc.h
@@ -52,18 +52,8 @@ extern u8 mb_enable_debug;
52#define mb_debug(n, fmt, a...) 52#define mb_debug(n, fmt, a...)
53#endif 53#endif
54 54
55/*
56 * with EXT4_MB_HISTORY mballoc stores last N allocations in memory
57 * and you can monitor it in /proc/fs/ext4/<dev>/mb_history
58 */
59#define EXT4_MB_HISTORY
60#define EXT4_MB_HISTORY_ALLOC 1 /* allocation */ 55#define EXT4_MB_HISTORY_ALLOC 1 /* allocation */
61#define EXT4_MB_HISTORY_PREALLOC 2 /* preallocated blocks used */ 56#define EXT4_MB_HISTORY_PREALLOC 2 /* preallocated blocks used */
62#define EXT4_MB_HISTORY_DISCARD 4 /* preallocation discarded */
63#define EXT4_MB_HISTORY_FREE 8 /* free */
64
65#define EXT4_MB_HISTORY_DEFAULT (EXT4_MB_HISTORY_ALLOC | \
66 EXT4_MB_HISTORY_PREALLOC)
67 57
68/* 58/*
69 * How long mballoc can look for a best extent (in found extents) 59 * How long mballoc can look for a best extent (in found extents)
@@ -84,7 +74,7 @@ extern u8 mb_enable_debug;
84 * with 'ext4_mb_stats' allocator will collect stats that will be 74 * with 'ext4_mb_stats' allocator will collect stats that will be
85 * shown at umount. The collecting costs though! 75 * shown at umount. The collecting costs though!
86 */ 76 */
87#define MB_DEFAULT_STATS 1 77#define MB_DEFAULT_STATS 0
88 78
89/* 79/*
90 * files smaller than MB_DEFAULT_STREAM_THRESHOLD are served 80 * files smaller than MB_DEFAULT_STREAM_THRESHOLD are served
@@ -217,22 +207,6 @@ struct ext4_allocation_context {
217#define AC_STATUS_FOUND 2 207#define AC_STATUS_FOUND 2
218#define AC_STATUS_BREAK 3 208#define AC_STATUS_BREAK 3
219 209
220struct ext4_mb_history {
221 struct ext4_free_extent orig; /* orig allocation */
222 struct ext4_free_extent goal; /* goal allocation */
223 struct ext4_free_extent result; /* result allocation */
224 unsigned pid;
225 unsigned ino;
226 __u16 found; /* how many extents have been found */
227 __u16 groups; /* how many groups have been scanned */
228 __u16 tail; /* what tail broke some buddy */
229 __u16 buddy; /* buddy the tail ^^^ broke */
230 __u16 flags;
231 __u8 cr:3; /* which phase the result extent was found at */
232 __u8 op:4;
233 __u8 merged:1;
234};
235
236struct ext4_buddy { 210struct ext4_buddy {
237 struct page *bd_buddy_page; 211 struct page *bd_buddy_page;
238 void *bd_buddy; 212 void *bd_buddy;
@@ -247,13 +221,6 @@ struct ext4_buddy {
247#define EXT4_MB_BITMAP(e4b) ((e4b)->bd_bitmap) 221#define EXT4_MB_BITMAP(e4b) ((e4b)->bd_bitmap)
248#define EXT4_MB_BUDDY(e4b) ((e4b)->bd_buddy) 222#define EXT4_MB_BUDDY(e4b) ((e4b)->bd_buddy)
249 223
250#ifndef EXT4_MB_HISTORY
251static inline void ext4_mb_store_history(struct ext4_allocation_context *ac)
252{
253 return;
254}
255#endif
256
257#define in_range(b, first, len) ((b) >= (first) && (b) <= (first) + (len) - 1) 224#define in_range(b, first, len) ((b) >= (first) && (b) <= (first) + (len) - 1)
258 225
259static inline ext4_fsblk_t ext4_grp_offs_to_block(struct super_block *sb, 226static inline ext4_fsblk_t ext4_grp_offs_to_block(struct super_block *sb,
diff --git a/fs/ext4/migrate.c b/fs/ext4/migrate.c
index bf519f239ae6..a93d5b80f3e2 100644
--- a/fs/ext4/migrate.c
+++ b/fs/ext4/migrate.c
@@ -75,7 +75,7 @@ static int finish_range(handle_t *handle, struct inode *inode,
75 goto err_out; 75 goto err_out;
76 } 76 }
77 } 77 }
78 retval = ext4_ext_insert_extent(handle, inode, path, &newext); 78 retval = ext4_ext_insert_extent(handle, inode, path, &newext, 0);
79err_out: 79err_out:
80 if (path) { 80 if (path) {
81 ext4_ext_drop_refs(path); 81 ext4_ext_drop_refs(path);
diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c
index c07a2915e40b..25b6b1457360 100644
--- a/fs/ext4/move_extent.c
+++ b/fs/ext4/move_extent.c
@@ -322,7 +322,7 @@ mext_insert_across_blocks(handle_t *handle, struct inode *orig_inode,
322 goto out; 322 goto out;
323 323
324 if (ext4_ext_insert_extent(handle, orig_inode, 324 if (ext4_ext_insert_extent(handle, orig_inode,
325 orig_path, new_ext)) 325 orig_path, new_ext, 0))
326 goto out; 326 goto out;
327 } 327 }
328 328
@@ -333,7 +333,7 @@ mext_insert_across_blocks(handle_t *handle, struct inode *orig_inode,
333 goto out; 333 goto out;
334 334
335 if (ext4_ext_insert_extent(handle, orig_inode, 335 if (ext4_ext_insert_extent(handle, orig_inode,
336 orig_path, end_ext)) 336 orig_path, end_ext, 0))
337 goto out; 337 goto out;
338 } 338 }
339out: 339out:
@@ -1001,14 +1001,6 @@ mext_check_arguments(struct inode *orig_inode,
1001 return -EINVAL; 1001 return -EINVAL;
1002 } 1002 }
1003 1003
1004 /* orig and donor should be different file */
1005 if (orig_inode->i_ino == donor_inode->i_ino) {
1006 ext4_debug("ext4 move extent: The argument files should not "
1007 "be same file [ino:orig %lu, donor %lu]\n",
1008 orig_inode->i_ino, donor_inode->i_ino);
1009 return -EINVAL;
1010 }
1011
1012 /* Ext4 move extent supports only extent based file */ 1004 /* Ext4 move extent supports only extent based file */
1013 if (!(EXT4_I(orig_inode)->i_flags & EXT4_EXTENTS_FL)) { 1005 if (!(EXT4_I(orig_inode)->i_flags & EXT4_EXTENTS_FL)) {
1014 ext4_debug("ext4 move extent: orig file is not extents " 1006 ext4_debug("ext4 move extent: orig file is not extents "
@@ -1232,6 +1224,14 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp,
1232 int block_len_in_page; 1224 int block_len_in_page;
1233 int uninit; 1225 int uninit;
1234 1226
1227 /* orig and donor should be different file */
1228 if (orig_inode->i_ino == donor_inode->i_ino) {
1229 ext4_debug("ext4 move extent: The argument files should not "
1230 "be same file [ino:orig %lu, donor %lu]\n",
1231 orig_inode->i_ino, donor_inode->i_ino);
1232 return -EINVAL;
1233 }
1234
1235 /* protect orig and donor against a truncate */ 1235 /* protect orig and donor against a truncate */
1236 ret1 = mext_inode_double_lock(orig_inode, donor_inode); 1236 ret1 = mext_inode_double_lock(orig_inode, donor_inode);
1237 if (ret1 < 0) 1237 if (ret1 < 0)
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index 42f81d285cd5..7c8fe80bacdd 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -2076,7 +2076,8 @@ int ext4_orphan_del(handle_t *handle, struct inode *inode)
2076 struct ext4_iloc iloc; 2076 struct ext4_iloc iloc;
2077 int err = 0; 2077 int err = 0;
2078 2078
2079 if (!ext4_handle_valid(handle)) 2079 /* ext4_handle_valid() assumes a valid handle_t pointer */
2080 if (handle && !ext4_handle_valid(handle))
2080 return 0; 2081 return 0;
2081 2082
2082 mutex_lock(&EXT4_SB(inode->i_sb)->s_orphan_lock); 2083 mutex_lock(&EXT4_SB(inode->i_sb)->s_orphan_lock);
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index df539ba27779..12e726a7073f 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -50,13 +50,6 @@
50#define CREATE_TRACE_POINTS 50#define CREATE_TRACE_POINTS
51#include <trace/events/ext4.h> 51#include <trace/events/ext4.h>
52 52
53static int default_mb_history_length = 1000;
54
55module_param_named(default_mb_history_length, default_mb_history_length,
56 int, 0644);
57MODULE_PARM_DESC(default_mb_history_length,
58 "Default number of entries saved for mb_history");
59
60struct proc_dir_entry *ext4_proc_root; 53struct proc_dir_entry *ext4_proc_root;
61static struct kset *ext4_kset; 54static struct kset *ext4_kset;
62 55
@@ -189,6 +182,36 @@ void ext4_itable_unused_set(struct super_block *sb,
189 bg->bg_itable_unused_hi = cpu_to_le16(count >> 16); 182 bg->bg_itable_unused_hi = cpu_to_le16(count >> 16);
190} 183}
191 184
185
186/* Just increment the non-pointer handle value */
187static handle_t *ext4_get_nojournal(void)
188{
189 handle_t *handle = current->journal_info;
190 unsigned long ref_cnt = (unsigned long)handle;
191
192 BUG_ON(ref_cnt >= EXT4_NOJOURNAL_MAX_REF_COUNT);
193
194 ref_cnt++;
195 handle = (handle_t *)ref_cnt;
196
197 current->journal_info = handle;
198 return handle;
199}
200
201
202/* Decrement the non-pointer handle value */
203static void ext4_put_nojournal(handle_t *handle)
204{
205 unsigned long ref_cnt = (unsigned long)handle;
206
207 BUG_ON(ref_cnt == 0);
208
209 ref_cnt--;
210 handle = (handle_t *)ref_cnt;
211
212 current->journal_info = handle;
213}
214
192/* 215/*
193 * Wrappers for jbd2_journal_start/end. 216 * Wrappers for jbd2_journal_start/end.
194 * 217 *
@@ -215,11 +238,7 @@ handle_t *ext4_journal_start_sb(struct super_block *sb, int nblocks)
215 } 238 }
216 return jbd2_journal_start(journal, nblocks); 239 return jbd2_journal_start(journal, nblocks);
217 } 240 }
218 /* 241 return ext4_get_nojournal();
219 * We're not journaling, return the appropriate indication.
220 */
221 current->journal_info = EXT4_NOJOURNAL_HANDLE;
222 return current->journal_info;
223} 242}
224 243
225/* 244/*
@@ -235,11 +254,7 @@ int __ext4_journal_stop(const char *where, handle_t *handle)
235 int rc; 254 int rc;
236 255
237 if (!ext4_handle_valid(handle)) { 256 if (!ext4_handle_valid(handle)) {
238 /* 257 ext4_put_nojournal(handle);
239 * Do this here since we don't call jbd2_journal_stop() in
240 * no-journal mode.
241 */
242 current->journal_info = NULL;
243 return 0; 258 return 0;
244 } 259 }
245 sb = handle->h_transaction->t_journal->j_private; 260 sb = handle->h_transaction->t_journal->j_private;
@@ -580,6 +595,9 @@ static void ext4_put_super(struct super_block *sb)
580 struct ext4_super_block *es = sbi->s_es; 595 struct ext4_super_block *es = sbi->s_es;
581 int i, err; 596 int i, err;
582 597
598 flush_workqueue(sbi->dio_unwritten_wq);
599 destroy_workqueue(sbi->dio_unwritten_wq);
600
583 lock_super(sb); 601 lock_super(sb);
584 lock_kernel(); 602 lock_kernel();
585 if (sb->s_dirt) 603 if (sb->s_dirt)
@@ -684,6 +702,8 @@ static struct inode *ext4_alloc_inode(struct super_block *sb)
684 ei->i_allocated_meta_blocks = 0; 702 ei->i_allocated_meta_blocks = 0;
685 ei->i_delalloc_reserved_flag = 0; 703 ei->i_delalloc_reserved_flag = 0;
686 spin_lock_init(&(ei->i_block_reservation_lock)); 704 spin_lock_init(&(ei->i_block_reservation_lock));
705 INIT_LIST_HEAD(&ei->i_aio_dio_complete_list);
706 ei->cur_aio_dio = NULL;
687 707
688 return &ei->vfs_inode; 708 return &ei->vfs_inode;
689} 709}
@@ -1052,7 +1072,7 @@ enum {
1052 Opt_journal_update, Opt_journal_dev, 1072 Opt_journal_update, Opt_journal_dev,
1053 Opt_journal_checksum, Opt_journal_async_commit, 1073 Opt_journal_checksum, Opt_journal_async_commit,
1054 Opt_abort, Opt_data_journal, Opt_data_ordered, Opt_data_writeback, 1074 Opt_abort, Opt_data_journal, Opt_data_ordered, Opt_data_writeback,
1055 Opt_data_err_abort, Opt_data_err_ignore, Opt_mb_history_length, 1075 Opt_data_err_abort, Opt_data_err_ignore,
1056 Opt_usrjquota, Opt_grpjquota, Opt_offusrjquota, Opt_offgrpjquota, 1076 Opt_usrjquota, Opt_grpjquota, Opt_offusrjquota, Opt_offgrpjquota,
1057 Opt_jqfmt_vfsold, Opt_jqfmt_vfsv0, Opt_quota, Opt_noquota, 1077 Opt_jqfmt_vfsold, Opt_jqfmt_vfsv0, Opt_quota, Opt_noquota,
1058 Opt_ignore, Opt_barrier, Opt_nobarrier, Opt_err, Opt_resize, 1078 Opt_ignore, Opt_barrier, Opt_nobarrier, Opt_err, Opt_resize,
@@ -1099,7 +1119,6 @@ static const match_table_t tokens = {
1099 {Opt_data_writeback, "data=writeback"}, 1119 {Opt_data_writeback, "data=writeback"},
1100 {Opt_data_err_abort, "data_err=abort"}, 1120 {Opt_data_err_abort, "data_err=abort"},
1101 {Opt_data_err_ignore, "data_err=ignore"}, 1121 {Opt_data_err_ignore, "data_err=ignore"},
1102 {Opt_mb_history_length, "mb_history_length=%u"},
1103 {Opt_offusrjquota, "usrjquota="}, 1122 {Opt_offusrjquota, "usrjquota="},
1104 {Opt_usrjquota, "usrjquota=%s"}, 1123 {Opt_usrjquota, "usrjquota=%s"},
1105 {Opt_offgrpjquota, "grpjquota="}, 1124 {Opt_offgrpjquota, "grpjquota="},
@@ -1340,13 +1359,6 @@ static int parse_options(char *options, struct super_block *sb,
1340 case Opt_data_err_ignore: 1359 case Opt_data_err_ignore:
1341 clear_opt(sbi->s_mount_opt, DATA_ERR_ABORT); 1360 clear_opt(sbi->s_mount_opt, DATA_ERR_ABORT);
1342 break; 1361 break;
1343 case Opt_mb_history_length:
1344 if (match_int(&args[0], &option))
1345 return 0;
1346 if (option < 0)
1347 return 0;
1348 sbi->s_mb_history_max = option;
1349 break;
1350#ifdef CONFIG_QUOTA 1362#ifdef CONFIG_QUOTA
1351 case Opt_usrjquota: 1363 case Opt_usrjquota:
1352 qtype = USRQUOTA; 1364 qtype = USRQUOTA;
@@ -1646,13 +1658,6 @@ static int ext4_setup_super(struct super_block *sb, struct ext4_super_block *es,
1646 EXT4_INODES_PER_GROUP(sb), 1658 EXT4_INODES_PER_GROUP(sb),
1647 sbi->s_mount_opt); 1659 sbi->s_mount_opt);
1648 1660
1649 if (EXT4_SB(sb)->s_journal) {
1650 ext4_msg(sb, KERN_INFO, "%s journal on %s",
1651 EXT4_SB(sb)->s_journal->j_inode ? "internal" :
1652 "external", EXT4_SB(sb)->s_journal->j_devname);
1653 } else {
1654 ext4_msg(sb, KERN_INFO, "no journal");
1655 }
1656 return res; 1661 return res;
1657} 1662}
1658 1663
@@ -2197,6 +2202,7 @@ EXT4_RW_ATTR_SBI_UI(mb_min_to_scan, s_mb_min_to_scan);
2197EXT4_RW_ATTR_SBI_UI(mb_order2_req, s_mb_order2_reqs); 2202EXT4_RW_ATTR_SBI_UI(mb_order2_req, s_mb_order2_reqs);
2198EXT4_RW_ATTR_SBI_UI(mb_stream_req, s_mb_stream_request); 2203EXT4_RW_ATTR_SBI_UI(mb_stream_req, s_mb_stream_request);
2199EXT4_RW_ATTR_SBI_UI(mb_group_prealloc, s_mb_group_prealloc); 2204EXT4_RW_ATTR_SBI_UI(mb_group_prealloc, s_mb_group_prealloc);
2205EXT4_RW_ATTR_SBI_UI(max_writeback_mb_bump, s_max_writeback_mb_bump);
2200 2206
2201static struct attribute *ext4_attrs[] = { 2207static struct attribute *ext4_attrs[] = {
2202 ATTR_LIST(delayed_allocation_blocks), 2208 ATTR_LIST(delayed_allocation_blocks),
@@ -2210,6 +2216,7 @@ static struct attribute *ext4_attrs[] = {
2210 ATTR_LIST(mb_order2_req), 2216 ATTR_LIST(mb_order2_req),
2211 ATTR_LIST(mb_stream_req), 2217 ATTR_LIST(mb_stream_req),
2212 ATTR_LIST(mb_group_prealloc), 2218 ATTR_LIST(mb_group_prealloc),
2219 ATTR_LIST(max_writeback_mb_bump),
2213 NULL, 2220 NULL,
2214}; 2221};
2215 2222
@@ -2413,7 +2420,6 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
2413 sbi->s_commit_interval = JBD2_DEFAULT_MAX_COMMIT_AGE * HZ; 2420 sbi->s_commit_interval = JBD2_DEFAULT_MAX_COMMIT_AGE * HZ;
2414 sbi->s_min_batch_time = EXT4_DEF_MIN_BATCH_TIME; 2421 sbi->s_min_batch_time = EXT4_DEF_MIN_BATCH_TIME;
2415 sbi->s_max_batch_time = EXT4_DEF_MAX_BATCH_TIME; 2422 sbi->s_max_batch_time = EXT4_DEF_MAX_BATCH_TIME;
2416 sbi->s_mb_history_max = default_mb_history_length;
2417 2423
2418 set_opt(sbi->s_mount_opt, BARRIER); 2424 set_opt(sbi->s_mount_opt, BARRIER);
2419 2425
@@ -2679,6 +2685,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
2679 } 2685 }
2680 2686
2681 sbi->s_stripe = ext4_get_stripe_size(sbi); 2687 sbi->s_stripe = ext4_get_stripe_size(sbi);
2688 sbi->s_max_writeback_mb_bump = 128;
2682 2689
2683 /* 2690 /*
2684 * set up enough so that it can read an inode 2691 * set up enough so that it can read an inode
@@ -2798,6 +2805,12 @@ no_journal:
2798 clear_opt(sbi->s_mount_opt, NOBH); 2805 clear_opt(sbi->s_mount_opt, NOBH);
2799 } 2806 }
2800 } 2807 }
2808 EXT4_SB(sb)->dio_unwritten_wq = create_workqueue("ext4-dio-unwritten");
2809 if (!EXT4_SB(sb)->dio_unwritten_wq) {
2810 printk(KERN_ERR "EXT4-fs: failed to create DIO workqueue\n");
2811 goto failed_mount_wq;
2812 }
2813
2801 /* 2814 /*
2802 * The jbd2_journal_load will have done any necessary log recovery, 2815 * The jbd2_journal_load will have done any necessary log recovery,
2803 * so we can safely mount the rest of the filesystem now. 2816 * so we can safely mount the rest of the filesystem now.
@@ -2849,12 +2862,12 @@ no_journal:
2849 "available"); 2862 "available");
2850 } 2863 }
2851 2864
2852 if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) { 2865 if (test_opt(sb, DELALLOC) &&
2866 (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)) {
2853 ext4_msg(sb, KERN_WARNING, "Ignoring delalloc option - " 2867 ext4_msg(sb, KERN_WARNING, "Ignoring delalloc option - "
2854 "requested data journaling mode"); 2868 "requested data journaling mode");
2855 clear_opt(sbi->s_mount_opt, DELALLOC); 2869 clear_opt(sbi->s_mount_opt, DELALLOC);
2856 } else if (test_opt(sb, DELALLOC)) 2870 }
2857 ext4_msg(sb, KERN_INFO, "delayed allocation enabled");
2858 2871
2859 err = ext4_setup_system_zone(sb); 2872 err = ext4_setup_system_zone(sb);
2860 if (err) { 2873 if (err) {
@@ -2910,6 +2923,8 @@ cantfind_ext4:
2910 2923
2911failed_mount4: 2924failed_mount4:
2912 ext4_msg(sb, KERN_ERR, "mount failed"); 2925 ext4_msg(sb, KERN_ERR, "mount failed");
2926 destroy_workqueue(EXT4_SB(sb)->dio_unwritten_wq);
2927failed_mount_wq:
2913 ext4_release_system_zone(sb); 2928 ext4_release_system_zone(sb);
2914 if (sbi->s_journal) { 2929 if (sbi->s_journal) {
2915 jbd2_journal_destroy(sbi->s_journal); 2930 jbd2_journal_destroy(sbi->s_journal);
@@ -3164,9 +3179,7 @@ static int ext4_load_journal(struct super_block *sb,
3164 return -EINVAL; 3179 return -EINVAL;
3165 } 3180 }
3166 3181
3167 if (journal->j_flags & JBD2_BARRIER) 3182 if (!(journal->j_flags & JBD2_BARRIER))
3168 ext4_msg(sb, KERN_INFO, "barriers enabled");
3169 else
3170 ext4_msg(sb, KERN_INFO, "barriers disabled"); 3183 ext4_msg(sb, KERN_INFO, "barriers disabled");
3171 3184
3172 if (!really_read_only && test_opt(sb, UPDATE_JOURNAL)) { 3185 if (!really_read_only && test_opt(sb, UPDATE_JOURNAL)) {
@@ -3361,11 +3374,13 @@ static int ext4_sync_fs(struct super_block *sb, int wait)
3361{ 3374{
3362 int ret = 0; 3375 int ret = 0;
3363 tid_t target; 3376 tid_t target;
3377 struct ext4_sb_info *sbi = EXT4_SB(sb);
3364 3378
3365 trace_ext4_sync_fs(sb, wait); 3379 trace_ext4_sync_fs(sb, wait);
3366 if (jbd2_journal_start_commit(EXT4_SB(sb)->s_journal, &target)) { 3380 flush_workqueue(sbi->dio_unwritten_wq);
3381 if (jbd2_journal_start_commit(sbi->s_journal, &target)) {
3367 if (wait) 3382 if (wait)
3368 jbd2_log_wait_commit(EXT4_SB(sb)->s_journal, target); 3383 jbd2_log_wait_commit(sbi->s_journal, target);
3369 } 3384 }
3370 return ret; 3385 return ret;
3371} 3386}
diff --git a/fs/fat/fat.h b/fs/fat/fat.h
index adb0e72a176d..7db0979c6b72 100644
--- a/fs/fat/fat.h
+++ b/fs/fat/fat.h
@@ -323,7 +323,7 @@ extern int fat_flush_inodes(struct super_block *sb, struct inode *i1,
323/* fat/misc.c */ 323/* fat/misc.c */
324extern void fat_fs_error(struct super_block *s, const char *fmt, ...) 324extern void fat_fs_error(struct super_block *s, const char *fmt, ...)
325 __attribute__ ((format (printf, 2, 3))) __cold; 325 __attribute__ ((format (printf, 2, 3))) __cold;
326extern void fat_clusters_flush(struct super_block *sb); 326extern int fat_clusters_flush(struct super_block *sb);
327extern int fat_chain_add(struct inode *inode, int new_dclus, int nr_cluster); 327extern int fat_chain_add(struct inode *inode, int new_dclus, int nr_cluster);
328extern void fat_time_fat2unix(struct msdos_sb_info *sbi, struct timespec *ts, 328extern void fat_time_fat2unix(struct msdos_sb_info *sbi, struct timespec *ts,
329 __le16 __time, __le16 __date, u8 time_cs); 329 __le16 __time, __le16 __date, u8 time_cs);
diff --git a/fs/fat/inode.c b/fs/fat/inode.c
index 04629d1302fc..76b7961ab663 100644
--- a/fs/fat/inode.c
+++ b/fs/fat/inode.c
@@ -451,12 +451,16 @@ static void fat_write_super(struct super_block *sb)
451 451
452static int fat_sync_fs(struct super_block *sb, int wait) 452static int fat_sync_fs(struct super_block *sb, int wait)
453{ 453{
454 lock_super(sb); 454 int err = 0;
455 fat_clusters_flush(sb);
456 sb->s_dirt = 0;
457 unlock_super(sb);
458 455
459 return 0; 456 if (sb->s_dirt) {
457 lock_super(sb);
458 sb->s_dirt = 0;
459 err = fat_clusters_flush(sb);
460 unlock_super(sb);
461 }
462
463 return err;
460} 464}
461 465
462static void fat_put_super(struct super_block *sb) 466static void fat_put_super(struct super_block *sb)
@@ -812,7 +816,7 @@ static int fat_show_options(struct seq_file *m, struct vfsmount *mnt)
812 seq_puts(m, ",shortname=mixed"); 816 seq_puts(m, ",shortname=mixed");
813 break; 817 break;
814 case VFAT_SFN_DISPLAY_LOWER | VFAT_SFN_CREATE_WIN95: 818 case VFAT_SFN_DISPLAY_LOWER | VFAT_SFN_CREATE_WIN95:
815 /* seq_puts(m, ",shortname=lower"); */ 819 seq_puts(m, ",shortname=lower");
816 break; 820 break;
817 default: 821 default:
818 seq_puts(m, ",shortname=unknown"); 822 seq_puts(m, ",shortname=unknown");
@@ -963,7 +967,7 @@ static int parse_options(char *options, int is_vfat, int silent, int *debug,
963 opts->codepage = fat_default_codepage; 967 opts->codepage = fat_default_codepage;
964 opts->iocharset = fat_default_iocharset; 968 opts->iocharset = fat_default_iocharset;
965 if (is_vfat) { 969 if (is_vfat) {
966 opts->shortname = VFAT_SFN_DISPLAY_LOWER|VFAT_SFN_CREATE_WIN95; 970 opts->shortname = VFAT_SFN_DISPLAY_WINNT|VFAT_SFN_CREATE_WIN95;
967 opts->rodir = 0; 971 opts->rodir = 0;
968 } else { 972 } else {
969 opts->shortname = 0; 973 opts->shortname = 0;
diff --git a/fs/fat/misc.c b/fs/fat/misc.c
index 4e35be873e09..0f55f5cb732f 100644
--- a/fs/fat/misc.c
+++ b/fs/fat/misc.c
@@ -43,19 +43,19 @@ EXPORT_SYMBOL_GPL(fat_fs_error);
43 43
44/* Flushes the number of free clusters on FAT32 */ 44/* Flushes the number of free clusters on FAT32 */
45/* XXX: Need to write one per FSINFO block. Currently only writes 1 */ 45/* XXX: Need to write one per FSINFO block. Currently only writes 1 */
46void fat_clusters_flush(struct super_block *sb) 46int fat_clusters_flush(struct super_block *sb)
47{ 47{
48 struct msdos_sb_info *sbi = MSDOS_SB(sb); 48 struct msdos_sb_info *sbi = MSDOS_SB(sb);
49 struct buffer_head *bh; 49 struct buffer_head *bh;
50 struct fat_boot_fsinfo *fsinfo; 50 struct fat_boot_fsinfo *fsinfo;
51 51
52 if (sbi->fat_bits != 32) 52 if (sbi->fat_bits != 32)
53 return; 53 return 0;
54 54
55 bh = sb_bread(sb, sbi->fsinfo_sector); 55 bh = sb_bread(sb, sbi->fsinfo_sector);
56 if (bh == NULL) { 56 if (bh == NULL) {
57 printk(KERN_ERR "FAT: bread failed in fat_clusters_flush\n"); 57 printk(KERN_ERR "FAT: bread failed in fat_clusters_flush\n");
58 return; 58 return -EIO;
59 } 59 }
60 60
61 fsinfo = (struct fat_boot_fsinfo *)bh->b_data; 61 fsinfo = (struct fat_boot_fsinfo *)bh->b_data;
@@ -74,6 +74,8 @@ void fat_clusters_flush(struct super_block *sb)
74 mark_buffer_dirty(bh); 74 mark_buffer_dirty(bh);
75 } 75 }
76 brelse(bh); 76 brelse(bh);
77
78 return 0;
77} 79}
78 80
79/* 81/*
diff --git a/fs/fat/namei_vfat.c b/fs/fat/namei_vfat.c
index cb6e83557112..f565f24019b5 100644
--- a/fs/fat/namei_vfat.c
+++ b/fs/fat/namei_vfat.c
@@ -499,17 +499,10 @@ xlate_to_uni(const unsigned char *name, int len, unsigned char *outname,
499 int charlen; 499 int charlen;
500 500
501 if (utf8) { 501 if (utf8) {
502 int name_len = strlen(name); 502 *outlen = utf8s_to_utf16s(name, len, (wchar_t *)outname);
503 503 if (*outlen < 0)
504 *outlen = utf8s_to_utf16s(name, PATH_MAX, (wchar_t *) outname); 504 return *outlen;
505 505 else if (*outlen > 255)
506 /*
507 * We stripped '.'s before and set len appropriately,
508 * but utf8s_to_utf16s doesn't care about len
509 */
510 *outlen -= (name_len - len);
511
512 if (*outlen > 255)
513 return -ENAMETOOLONG; 506 return -ENAMETOOLONG;
514 507
515 op = &outname[*outlen * sizeof(wchar_t)]; 508 op = &outname[*outlen * sizeof(wchar_t)];
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index cbc464043b6f..a3492f7d207c 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -1313,7 +1313,7 @@ static int fuse_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
1313 return 0; 1313 return 0;
1314} 1314}
1315 1315
1316static struct vm_operations_struct fuse_file_vm_ops = { 1316static const struct vm_operations_struct fuse_file_vm_ops = {
1317 .close = fuse_vma_close, 1317 .close = fuse_vma_close,
1318 .fault = filemap_fault, 1318 .fault = filemap_fault,
1319 .page_mkwrite = fuse_page_mkwrite, 1319 .page_mkwrite = fuse_page_mkwrite,
diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
index 166f38fbd246..4eb308aa3234 100644
--- a/fs/gfs2/file.c
+++ b/fs/gfs2/file.c
@@ -418,7 +418,7 @@ out:
418 return ret; 418 return ret;
419} 419}
420 420
421static struct vm_operations_struct gfs2_vm_ops = { 421static const struct vm_operations_struct gfs2_vm_ops = {
422 .fault = filemap_fault, 422 .fault = filemap_fault,
423 .page_mkwrite = gfs2_page_mkwrite, 423 .page_mkwrite = gfs2_page_mkwrite,
424}; 424};
diff --git a/fs/jbd2/checkpoint.c b/fs/jbd2/checkpoint.c
index 5d70b3e6d49b..ca0f5eb62b20 100644
--- a/fs/jbd2/checkpoint.c
+++ b/fs/jbd2/checkpoint.c
@@ -643,6 +643,7 @@ out:
643 643
644int __jbd2_journal_remove_checkpoint(struct journal_head *jh) 644int __jbd2_journal_remove_checkpoint(struct journal_head *jh)
645{ 645{
646 struct transaction_chp_stats_s *stats;
646 transaction_t *transaction; 647 transaction_t *transaction;
647 journal_t *journal; 648 journal_t *journal;
648 int ret = 0; 649 int ret = 0;
@@ -679,6 +680,12 @@ int __jbd2_journal_remove_checkpoint(struct journal_head *jh)
679 680
680 /* OK, that was the last buffer for the transaction: we can now 681 /* OK, that was the last buffer for the transaction: we can now
681 safely remove this transaction from the log */ 682 safely remove this transaction from the log */
683 stats = &transaction->t_chp_stats;
684 if (stats->cs_chp_time)
685 stats->cs_chp_time = jbd2_time_diff(stats->cs_chp_time,
686 jiffies);
687 trace_jbd2_checkpoint_stats(journal->j_fs_dev->bd_dev,
688 transaction->t_tid, stats);
682 689
683 __jbd2_journal_drop_transaction(journal, transaction); 690 __jbd2_journal_drop_transaction(journal, transaction);
684 kfree(transaction); 691 kfree(transaction);
diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
index 26d991ddc1e6..d4cfd6d2779e 100644
--- a/fs/jbd2/commit.c
+++ b/fs/jbd2/commit.c
@@ -410,10 +410,10 @@ void jbd2_journal_commit_transaction(journal_t *journal)
410 if (commit_transaction->t_synchronous_commit) 410 if (commit_transaction->t_synchronous_commit)
411 write_op = WRITE_SYNC_PLUG; 411 write_op = WRITE_SYNC_PLUG;
412 trace_jbd2_commit_locking(journal, commit_transaction); 412 trace_jbd2_commit_locking(journal, commit_transaction);
413 stats.u.run.rs_wait = commit_transaction->t_max_wait; 413 stats.run.rs_wait = commit_transaction->t_max_wait;
414 stats.u.run.rs_locked = jiffies; 414 stats.run.rs_locked = jiffies;
415 stats.u.run.rs_running = jbd2_time_diff(commit_transaction->t_start, 415 stats.run.rs_running = jbd2_time_diff(commit_transaction->t_start,
416 stats.u.run.rs_locked); 416 stats.run.rs_locked);
417 417
418 spin_lock(&commit_transaction->t_handle_lock); 418 spin_lock(&commit_transaction->t_handle_lock);
419 while (commit_transaction->t_updates) { 419 while (commit_transaction->t_updates) {
@@ -486,9 +486,9 @@ void jbd2_journal_commit_transaction(journal_t *journal)
486 jbd2_journal_switch_revoke_table(journal); 486 jbd2_journal_switch_revoke_table(journal);
487 487
488 trace_jbd2_commit_flushing(journal, commit_transaction); 488 trace_jbd2_commit_flushing(journal, commit_transaction);
489 stats.u.run.rs_flushing = jiffies; 489 stats.run.rs_flushing = jiffies;
490 stats.u.run.rs_locked = jbd2_time_diff(stats.u.run.rs_locked, 490 stats.run.rs_locked = jbd2_time_diff(stats.run.rs_locked,
491 stats.u.run.rs_flushing); 491 stats.run.rs_flushing);
492 492
493 commit_transaction->t_state = T_FLUSH; 493 commit_transaction->t_state = T_FLUSH;
494 journal->j_committing_transaction = commit_transaction; 494 journal->j_committing_transaction = commit_transaction;
@@ -523,11 +523,11 @@ void jbd2_journal_commit_transaction(journal_t *journal)
523 spin_unlock(&journal->j_state_lock); 523 spin_unlock(&journal->j_state_lock);
524 524
525 trace_jbd2_commit_logging(journal, commit_transaction); 525 trace_jbd2_commit_logging(journal, commit_transaction);
526 stats.u.run.rs_logging = jiffies; 526 stats.run.rs_logging = jiffies;
527 stats.u.run.rs_flushing = jbd2_time_diff(stats.u.run.rs_flushing, 527 stats.run.rs_flushing = jbd2_time_diff(stats.run.rs_flushing,
528 stats.u.run.rs_logging); 528 stats.run.rs_logging);
529 stats.u.run.rs_blocks = commit_transaction->t_outstanding_credits; 529 stats.run.rs_blocks = commit_transaction->t_outstanding_credits;
530 stats.u.run.rs_blocks_logged = 0; 530 stats.run.rs_blocks_logged = 0;
531 531
532 J_ASSERT(commit_transaction->t_nr_buffers <= 532 J_ASSERT(commit_transaction->t_nr_buffers <=
533 commit_transaction->t_outstanding_credits); 533 commit_transaction->t_outstanding_credits);
@@ -695,7 +695,7 @@ start_journal_io:
695 submit_bh(write_op, bh); 695 submit_bh(write_op, bh);
696 } 696 }
697 cond_resched(); 697 cond_resched();
698 stats.u.run.rs_blocks_logged += bufs; 698 stats.run.rs_blocks_logged += bufs;
699 699
700 /* Force a new descriptor to be generated next 700 /* Force a new descriptor to be generated next
701 time round the loop. */ 701 time round the loop. */
@@ -988,33 +988,30 @@ restart_loop:
988 J_ASSERT(commit_transaction->t_state == T_COMMIT); 988 J_ASSERT(commit_transaction->t_state == T_COMMIT);
989 989
990 commit_transaction->t_start = jiffies; 990 commit_transaction->t_start = jiffies;
991 stats.u.run.rs_logging = jbd2_time_diff(stats.u.run.rs_logging, 991 stats.run.rs_logging = jbd2_time_diff(stats.run.rs_logging,
992 commit_transaction->t_start); 992 commit_transaction->t_start);
993 993
994 /* 994 /*
995 * File the transaction for history 995 * File the transaction statistics
996 */ 996 */
997 stats.ts_type = JBD2_STATS_RUN;
998 stats.ts_tid = commit_transaction->t_tid; 997 stats.ts_tid = commit_transaction->t_tid;
999 stats.u.run.rs_handle_count = commit_transaction->t_handle_count; 998 stats.run.rs_handle_count = commit_transaction->t_handle_count;
1000 spin_lock(&journal->j_history_lock); 999 trace_jbd2_run_stats(journal->j_fs_dev->bd_dev,
1001 memcpy(journal->j_history + journal->j_history_cur, &stats, 1000 commit_transaction->t_tid, &stats.run);
1002 sizeof(stats));
1003 if (++journal->j_history_cur == journal->j_history_max)
1004 journal->j_history_cur = 0;
1005 1001
1006 /* 1002 /*
1007 * Calculate overall stats 1003 * Calculate overall stats
1008 */ 1004 */
1005 spin_lock(&journal->j_history_lock);
1009 journal->j_stats.ts_tid++; 1006 journal->j_stats.ts_tid++;
1010 journal->j_stats.u.run.rs_wait += stats.u.run.rs_wait; 1007 journal->j_stats.run.rs_wait += stats.run.rs_wait;
1011 journal->j_stats.u.run.rs_running += stats.u.run.rs_running; 1008 journal->j_stats.run.rs_running += stats.run.rs_running;
1012 journal->j_stats.u.run.rs_locked += stats.u.run.rs_locked; 1009 journal->j_stats.run.rs_locked += stats.run.rs_locked;
1013 journal->j_stats.u.run.rs_flushing += stats.u.run.rs_flushing; 1010 journal->j_stats.run.rs_flushing += stats.run.rs_flushing;
1014 journal->j_stats.u.run.rs_logging += stats.u.run.rs_logging; 1011 journal->j_stats.run.rs_logging += stats.run.rs_logging;
1015 journal->j_stats.u.run.rs_handle_count += stats.u.run.rs_handle_count; 1012 journal->j_stats.run.rs_handle_count += stats.run.rs_handle_count;
1016 journal->j_stats.u.run.rs_blocks += stats.u.run.rs_blocks; 1013 journal->j_stats.run.rs_blocks += stats.run.rs_blocks;
1017 journal->j_stats.u.run.rs_blocks_logged += stats.u.run.rs_blocks_logged; 1014 journal->j_stats.run.rs_blocks_logged += stats.run.rs_blocks_logged;
1018 spin_unlock(&journal->j_history_lock); 1015 spin_unlock(&journal->j_history_lock);
1019 1016
1020 commit_transaction->t_state = T_FINISHED; 1017 commit_transaction->t_state = T_FINISHED;
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index 53b86e16e5fe..b0ab5219becb 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -136,10 +136,6 @@ static int kjournald2(void *arg)
136 journal->j_task = current; 136 journal->j_task = current;
137 wake_up(&journal->j_wait_done_commit); 137 wake_up(&journal->j_wait_done_commit);
138 138
139 printk(KERN_INFO "kjournald2 starting: pid %d, dev %s, "
140 "commit interval %ld seconds\n", current->pid,
141 journal->j_devname, journal->j_commit_interval / HZ);
142
143 /* 139 /*
144 * And now, wait forever for commit wakeup events. 140 * And now, wait forever for commit wakeup events.
145 */ 141 */
@@ -223,7 +219,8 @@ static int jbd2_journal_start_thread(journal_t *journal)
223{ 219{
224 struct task_struct *t; 220 struct task_struct *t;
225 221
226 t = kthread_run(kjournald2, journal, "kjournald2"); 222 t = kthread_run(kjournald2, journal, "jbd2/%s",
223 journal->j_devname);
227 if (IS_ERR(t)) 224 if (IS_ERR(t))
228 return PTR_ERR(t); 225 return PTR_ERR(t);
229 226
@@ -679,153 +676,6 @@ struct jbd2_stats_proc_session {
679 int max; 676 int max;
680}; 677};
681 678
682static void *jbd2_history_skip_empty(struct jbd2_stats_proc_session *s,
683 struct transaction_stats_s *ts,
684 int first)
685{
686 if (ts == s->stats + s->max)
687 ts = s->stats;
688 if (!first && ts == s->stats + s->start)
689 return NULL;
690 while (ts->ts_type == 0) {
691 ts++;
692 if (ts == s->stats + s->max)
693 ts = s->stats;
694 if (ts == s->stats + s->start)
695 return NULL;
696 }
697 return ts;
698
699}
700
701static void *jbd2_seq_history_start(struct seq_file *seq, loff_t *pos)
702{
703 struct jbd2_stats_proc_session *s = seq->private;
704 struct transaction_stats_s *ts;
705 int l = *pos;
706
707 if (l == 0)
708 return SEQ_START_TOKEN;
709 ts = jbd2_history_skip_empty(s, s->stats + s->start, 1);
710 if (!ts)
711 return NULL;
712 l--;
713 while (l) {
714 ts = jbd2_history_skip_empty(s, ++ts, 0);
715 if (!ts)
716 break;
717 l--;
718 }
719 return ts;
720}
721
722static void *jbd2_seq_history_next(struct seq_file *seq, void *v, loff_t *pos)
723{
724 struct jbd2_stats_proc_session *s = seq->private;
725 struct transaction_stats_s *ts = v;
726
727 ++*pos;
728 if (v == SEQ_START_TOKEN)
729 return jbd2_history_skip_empty(s, s->stats + s->start, 1);
730 else
731 return jbd2_history_skip_empty(s, ++ts, 0);
732}
733
734static int jbd2_seq_history_show(struct seq_file *seq, void *v)
735{
736 struct transaction_stats_s *ts = v;
737 if (v == SEQ_START_TOKEN) {
738 seq_printf(seq, "%-4s %-5s %-5s %-5s %-5s %-5s %-5s %-6s %-5s "
739 "%-5s %-5s %-5s %-5s %-5s\n", "R/C", "tid",
740 "wait", "run", "lock", "flush", "log", "hndls",
741 "block", "inlog", "ctime", "write", "drop",
742 "close");
743 return 0;
744 }
745 if (ts->ts_type == JBD2_STATS_RUN)
746 seq_printf(seq, "%-4s %-5lu %-5u %-5u %-5u %-5u %-5u "
747 "%-6lu %-5lu %-5lu\n", "R", ts->ts_tid,
748 jiffies_to_msecs(ts->u.run.rs_wait),
749 jiffies_to_msecs(ts->u.run.rs_running),
750 jiffies_to_msecs(ts->u.run.rs_locked),
751 jiffies_to_msecs(ts->u.run.rs_flushing),
752 jiffies_to_msecs(ts->u.run.rs_logging),
753 ts->u.run.rs_handle_count,
754 ts->u.run.rs_blocks,
755 ts->u.run.rs_blocks_logged);
756 else if (ts->ts_type == JBD2_STATS_CHECKPOINT)
757 seq_printf(seq, "%-4s %-5lu %48s %-5u %-5lu %-5lu %-5lu\n",
758 "C", ts->ts_tid, " ",
759 jiffies_to_msecs(ts->u.chp.cs_chp_time),
760 ts->u.chp.cs_written, ts->u.chp.cs_dropped,
761 ts->u.chp.cs_forced_to_close);
762 else
763 J_ASSERT(0);
764 return 0;
765}
766
767static void jbd2_seq_history_stop(struct seq_file *seq, void *v)
768{
769}
770
771static const struct seq_operations jbd2_seq_history_ops = {
772 .start = jbd2_seq_history_start,
773 .next = jbd2_seq_history_next,
774 .stop = jbd2_seq_history_stop,
775 .show = jbd2_seq_history_show,
776};
777
778static int jbd2_seq_history_open(struct inode *inode, struct file *file)
779{
780 journal_t *journal = PDE(inode)->data;
781 struct jbd2_stats_proc_session *s;
782 int rc, size;
783
784 s = kmalloc(sizeof(*s), GFP_KERNEL);
785 if (s == NULL)
786 return -ENOMEM;
787 size = sizeof(struct transaction_stats_s) * journal->j_history_max;
788 s->stats = kmalloc(size, GFP_KERNEL);
789 if (s->stats == NULL) {
790 kfree(s);
791 return -ENOMEM;
792 }
793 spin_lock(&journal->j_history_lock);
794 memcpy(s->stats, journal->j_history, size);
795 s->max = journal->j_history_max;
796 s->start = journal->j_history_cur % s->max;
797 spin_unlock(&journal->j_history_lock);
798
799 rc = seq_open(file, &jbd2_seq_history_ops);
800 if (rc == 0) {
801 struct seq_file *m = file->private_data;
802 m->private = s;
803 } else {
804 kfree(s->stats);
805 kfree(s);
806 }
807 return rc;
808
809}
810
811static int jbd2_seq_history_release(struct inode *inode, struct file *file)
812{
813 struct seq_file *seq = file->private_data;
814 struct jbd2_stats_proc_session *s = seq->private;
815
816 kfree(s->stats);
817 kfree(s);
818 return seq_release(inode, file);
819}
820
821static struct file_operations jbd2_seq_history_fops = {
822 .owner = THIS_MODULE,
823 .open = jbd2_seq_history_open,
824 .read = seq_read,
825 .llseek = seq_lseek,
826 .release = jbd2_seq_history_release,
827};
828
829static void *jbd2_seq_info_start(struct seq_file *seq, loff_t *pos) 679static void *jbd2_seq_info_start(struct seq_file *seq, loff_t *pos)
830{ 680{
831 return *pos ? NULL : SEQ_START_TOKEN; 681 return *pos ? NULL : SEQ_START_TOKEN;
@@ -842,29 +692,29 @@ static int jbd2_seq_info_show(struct seq_file *seq, void *v)
842 692
843 if (v != SEQ_START_TOKEN) 693 if (v != SEQ_START_TOKEN)
844 return 0; 694 return 0;
845 seq_printf(seq, "%lu transaction, each upto %u blocks\n", 695 seq_printf(seq, "%lu transaction, each up to %u blocks\n",
846 s->stats->ts_tid, 696 s->stats->ts_tid,
847 s->journal->j_max_transaction_buffers); 697 s->journal->j_max_transaction_buffers);
848 if (s->stats->ts_tid == 0) 698 if (s->stats->ts_tid == 0)
849 return 0; 699 return 0;
850 seq_printf(seq, "average: \n %ums waiting for transaction\n", 700 seq_printf(seq, "average: \n %ums waiting for transaction\n",
851 jiffies_to_msecs(s->stats->u.run.rs_wait / s->stats->ts_tid)); 701 jiffies_to_msecs(s->stats->run.rs_wait / s->stats->ts_tid));
852 seq_printf(seq, " %ums running transaction\n", 702 seq_printf(seq, " %ums running transaction\n",
853 jiffies_to_msecs(s->stats->u.run.rs_running / s->stats->ts_tid)); 703 jiffies_to_msecs(s->stats->run.rs_running / s->stats->ts_tid));
854 seq_printf(seq, " %ums transaction was being locked\n", 704 seq_printf(seq, " %ums transaction was being locked\n",
855 jiffies_to_msecs(s->stats->u.run.rs_locked / s->stats->ts_tid)); 705 jiffies_to_msecs(s->stats->run.rs_locked / s->stats->ts_tid));
856 seq_printf(seq, " %ums flushing data (in ordered mode)\n", 706 seq_printf(seq, " %ums flushing data (in ordered mode)\n",
857 jiffies_to_msecs(s->stats->u.run.rs_flushing / s->stats->ts_tid)); 707 jiffies_to_msecs(s->stats->run.rs_flushing / s->stats->ts_tid));
858 seq_printf(seq, " %ums logging transaction\n", 708 seq_printf(seq, " %ums logging transaction\n",
859 jiffies_to_msecs(s->stats->u.run.rs_logging / s->stats->ts_tid)); 709 jiffies_to_msecs(s->stats->run.rs_logging / s->stats->ts_tid));
860 seq_printf(seq, " %lluus average transaction commit time\n", 710 seq_printf(seq, " %lluus average transaction commit time\n",
861 div_u64(s->journal->j_average_commit_time, 1000)); 711 div_u64(s->journal->j_average_commit_time, 1000));
862 seq_printf(seq, " %lu handles per transaction\n", 712 seq_printf(seq, " %lu handles per transaction\n",
863 s->stats->u.run.rs_handle_count / s->stats->ts_tid); 713 s->stats->run.rs_handle_count / s->stats->ts_tid);
864 seq_printf(seq, " %lu blocks per transaction\n", 714 seq_printf(seq, " %lu blocks per transaction\n",
865 s->stats->u.run.rs_blocks / s->stats->ts_tid); 715 s->stats->run.rs_blocks / s->stats->ts_tid);
866 seq_printf(seq, " %lu logged blocks per transaction\n", 716 seq_printf(seq, " %lu logged blocks per transaction\n",
867 s->stats->u.run.rs_blocks_logged / s->stats->ts_tid); 717 s->stats->run.rs_blocks_logged / s->stats->ts_tid);
868 return 0; 718 return 0;
869} 719}
870 720
@@ -920,7 +770,7 @@ static int jbd2_seq_info_release(struct inode *inode, struct file *file)
920 return seq_release(inode, file); 770 return seq_release(inode, file);
921} 771}
922 772
923static struct file_operations jbd2_seq_info_fops = { 773static const struct file_operations jbd2_seq_info_fops = {
924 .owner = THIS_MODULE, 774 .owner = THIS_MODULE,
925 .open = jbd2_seq_info_open, 775 .open = jbd2_seq_info_open,
926 .read = seq_read, 776 .read = seq_read,
@@ -934,8 +784,6 @@ static void jbd2_stats_proc_init(journal_t *journal)
934{ 784{
935 journal->j_proc_entry = proc_mkdir(journal->j_devname, proc_jbd2_stats); 785 journal->j_proc_entry = proc_mkdir(journal->j_devname, proc_jbd2_stats);
936 if (journal->j_proc_entry) { 786 if (journal->j_proc_entry) {
937 proc_create_data("history", S_IRUGO, journal->j_proc_entry,
938 &jbd2_seq_history_fops, journal);
939 proc_create_data("info", S_IRUGO, journal->j_proc_entry, 787 proc_create_data("info", S_IRUGO, journal->j_proc_entry,
940 &jbd2_seq_info_fops, journal); 788 &jbd2_seq_info_fops, journal);
941 } 789 }
@@ -944,27 +792,9 @@ static void jbd2_stats_proc_init(journal_t *journal)
944static void jbd2_stats_proc_exit(journal_t *journal) 792static void jbd2_stats_proc_exit(journal_t *journal)
945{ 793{
946 remove_proc_entry("info", journal->j_proc_entry); 794 remove_proc_entry("info", journal->j_proc_entry);
947 remove_proc_entry("history", journal->j_proc_entry);
948 remove_proc_entry(journal->j_devname, proc_jbd2_stats); 795 remove_proc_entry(journal->j_devname, proc_jbd2_stats);
949} 796}
950 797
951static void journal_init_stats(journal_t *journal)
952{
953 int size;
954
955 if (!proc_jbd2_stats)
956 return;
957
958 journal->j_history_max = 100;
959 size = sizeof(struct transaction_stats_s) * journal->j_history_max;
960 journal->j_history = kzalloc(size, GFP_KERNEL);
961 if (!journal->j_history) {
962 journal->j_history_max = 0;
963 return;
964 }
965 spin_lock_init(&journal->j_history_lock);
966}
967
968/* 798/*
969 * Management for journal control blocks: functions to create and 799 * Management for journal control blocks: functions to create and
970 * destroy journal_t structures, and to initialise and read existing 800 * destroy journal_t structures, and to initialise and read existing
@@ -1009,7 +839,7 @@ static journal_t * journal_init_common (void)
1009 goto fail; 839 goto fail;
1010 } 840 }
1011 841
1012 journal_init_stats(journal); 842 spin_lock_init(&journal->j_history_lock);
1013 843
1014 return journal; 844 return journal;
1015fail: 845fail:
@@ -1115,7 +945,7 @@ journal_t * jbd2_journal_init_inode (struct inode *inode)
1115 while ((p = strchr(p, '/'))) 945 while ((p = strchr(p, '/')))
1116 *p = '!'; 946 *p = '!';
1117 p = journal->j_devname + strlen(journal->j_devname); 947 p = journal->j_devname + strlen(journal->j_devname);
1118 sprintf(p, ":%lu", journal->j_inode->i_ino); 948 sprintf(p, "-%lu", journal->j_inode->i_ino);
1119 jbd_debug(1, 949 jbd_debug(1,
1120 "journal %p: inode %s/%ld, size %Ld, bits %d, blksize %ld\n", 950 "journal %p: inode %s/%ld, size %Ld, bits %d, blksize %ld\n",
1121 journal, inode->i_sb->s_id, inode->i_ino, 951 journal, inode->i_sb->s_id, inode->i_ino,
diff --git a/fs/ncpfs/mmap.c b/fs/ncpfs/mmap.c
index 5d8dcb9ee326..15458decdb8a 100644
--- a/fs/ncpfs/mmap.c
+++ b/fs/ncpfs/mmap.c
@@ -95,7 +95,7 @@ static int ncp_file_mmap_fault(struct vm_area_struct *area,
95 return VM_FAULT_MAJOR; 95 return VM_FAULT_MAJOR;
96} 96}
97 97
98static struct vm_operations_struct ncp_file_mmap = 98static const struct vm_operations_struct ncp_file_mmap =
99{ 99{
100 .fault = ncp_file_mmap_fault, 100 .fault = ncp_file_mmap_fault,
101}; 101};
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index 86d6b4db1096..f5fdd39e037a 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -59,7 +59,7 @@ static int nfs_lock(struct file *filp, int cmd, struct file_lock *fl);
59static int nfs_flock(struct file *filp, int cmd, struct file_lock *fl); 59static int nfs_flock(struct file *filp, int cmd, struct file_lock *fl);
60static int nfs_setlease(struct file *file, long arg, struct file_lock **fl); 60static int nfs_setlease(struct file *file, long arg, struct file_lock **fl);
61 61
62static struct vm_operations_struct nfs_file_vm_ops; 62static const struct vm_operations_struct nfs_file_vm_ops;
63 63
64const struct file_operations nfs_file_operations = { 64const struct file_operations nfs_file_operations = {
65 .llseek = nfs_file_llseek, 65 .llseek = nfs_file_llseek,
@@ -572,7 +572,7 @@ out_unlock:
572 return VM_FAULT_SIGBUS; 572 return VM_FAULT_SIGBUS;
573} 573}
574 574
575static struct vm_operations_struct nfs_file_vm_ops = { 575static const struct vm_operations_struct nfs_file_vm_ops = {
576 .fault = filemap_fault, 576 .fault = filemap_fault,
577 .page_mkwrite = nfs_vm_page_mkwrite, 577 .page_mkwrite = nfs_vm_page_mkwrite,
578}; 578};
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
index 00388d2a3c99..5c01fc148ce8 100644
--- a/fs/nfsd/nfsctl.c
+++ b/fs/nfsd/nfsctl.c
@@ -176,7 +176,7 @@ static const struct file_operations exports_operations = {
176extern int nfsd_pool_stats_open(struct inode *inode, struct file *file); 176extern int nfsd_pool_stats_open(struct inode *inode, struct file *file);
177extern int nfsd_pool_stats_release(struct inode *inode, struct file *file); 177extern int nfsd_pool_stats_release(struct inode *inode, struct file *file);
178 178
179static struct file_operations pool_stats_operations = { 179static const struct file_operations pool_stats_operations = {
180 .open = nfsd_pool_stats_open, 180 .open = nfsd_pool_stats_open,
181 .read = seq_read, 181 .read = seq_read,
182 .llseek = seq_lseek, 182 .llseek = seq_lseek,
diff --git a/fs/nilfs2/btnode.c b/fs/nilfs2/btnode.c
index 6a2711f4c321..5941958f1e47 100644
--- a/fs/nilfs2/btnode.c
+++ b/fs/nilfs2/btnode.c
@@ -36,6 +36,7 @@
36 36
37void nilfs_btnode_cache_init_once(struct address_space *btnc) 37void nilfs_btnode_cache_init_once(struct address_space *btnc)
38{ 38{
39 memset(btnc, 0, sizeof(*btnc));
39 INIT_RADIX_TREE(&btnc->page_tree, GFP_ATOMIC); 40 INIT_RADIX_TREE(&btnc->page_tree, GFP_ATOMIC);
40 spin_lock_init(&btnc->tree_lock); 41 spin_lock_init(&btnc->tree_lock);
41 INIT_LIST_HEAD(&btnc->private_list); 42 INIT_LIST_HEAD(&btnc->private_list);
diff --git a/fs/nilfs2/dir.c b/fs/nilfs2/dir.c
index 1a4fa04cf071..e097099bfc8f 100644
--- a/fs/nilfs2/dir.c
+++ b/fs/nilfs2/dir.c
@@ -697,7 +697,7 @@ not_empty:
697 return 0; 697 return 0;
698} 698}
699 699
700struct file_operations nilfs_dir_operations = { 700const struct file_operations nilfs_dir_operations = {
701 .llseek = generic_file_llseek, 701 .llseek = generic_file_llseek,
702 .read = generic_read_dir, 702 .read = generic_read_dir,
703 .readdir = nilfs_readdir, 703 .readdir = nilfs_readdir,
diff --git a/fs/nilfs2/file.c b/fs/nilfs2/file.c
index fc8278c77cdd..30292df443ce 100644
--- a/fs/nilfs2/file.c
+++ b/fs/nilfs2/file.c
@@ -117,7 +117,7 @@ static int nilfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
117 return 0; 117 return 0;
118} 118}
119 119
120struct vm_operations_struct nilfs_file_vm_ops = { 120static const struct vm_operations_struct nilfs_file_vm_ops = {
121 .fault = filemap_fault, 121 .fault = filemap_fault,
122 .page_mkwrite = nilfs_page_mkwrite, 122 .page_mkwrite = nilfs_page_mkwrite,
123}; 123};
@@ -134,7 +134,7 @@ static int nilfs_file_mmap(struct file *file, struct vm_area_struct *vma)
134 * We have mostly NULL's here: the current defaults are ok for 134 * We have mostly NULL's here: the current defaults are ok for
135 * the nilfs filesystem. 135 * the nilfs filesystem.
136 */ 136 */
137struct file_operations nilfs_file_operations = { 137const struct file_operations nilfs_file_operations = {
138 .llseek = generic_file_llseek, 138 .llseek = generic_file_llseek,
139 .read = do_sync_read, 139 .read = do_sync_read,
140 .write = do_sync_write, 140 .write = do_sync_write,
diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c
index 2d2c501deb54..5040220c3732 100644
--- a/fs/nilfs2/inode.c
+++ b/fs/nilfs2/inode.c
@@ -400,6 +400,7 @@ int nilfs_read_inode_common(struct inode *inode,
400 ii->i_dir_acl = S_ISREG(inode->i_mode) ? 400 ii->i_dir_acl = S_ISREG(inode->i_mode) ?
401 0 : le32_to_cpu(raw_inode->i_dir_acl); 401 0 : le32_to_cpu(raw_inode->i_dir_acl);
402#endif 402#endif
403 ii->i_dir_start_lookup = 0;
403 ii->i_cno = 0; 404 ii->i_cno = 0;
404 inode->i_generation = le32_to_cpu(raw_inode->i_generation); 405 inode->i_generation = le32_to_cpu(raw_inode->i_generation);
405 406
diff --git a/fs/nilfs2/mdt.c b/fs/nilfs2/mdt.c
index b18c4998f8d0..f6326112d647 100644
--- a/fs/nilfs2/mdt.c
+++ b/fs/nilfs2/mdt.c
@@ -433,7 +433,7 @@ static const struct address_space_operations def_mdt_aops = {
433}; 433};
434 434
435static const struct inode_operations def_mdt_iops; 435static const struct inode_operations def_mdt_iops;
436static struct file_operations def_mdt_fops; 436static const struct file_operations def_mdt_fops;
437 437
438/* 438/*
439 * NILFS2 uses pseudo inodes for meta data files such as DAT, cpfile, sufile, 439 * NILFS2 uses pseudo inodes for meta data files such as DAT, cpfile, sufile,
diff --git a/fs/nilfs2/nilfs.h b/fs/nilfs2/nilfs.h
index bad7368782d0..4da6f67e9a91 100644
--- a/fs/nilfs2/nilfs.h
+++ b/fs/nilfs2/nilfs.h
@@ -294,9 +294,9 @@ void nilfs_clear_gcdat_inode(struct the_nilfs *);
294/* 294/*
295 * Inodes and files operations 295 * Inodes and files operations
296 */ 296 */
297extern struct file_operations nilfs_dir_operations; 297extern const struct file_operations nilfs_dir_operations;
298extern const struct inode_operations nilfs_file_inode_operations; 298extern const struct inode_operations nilfs_file_inode_operations;
299extern struct file_operations nilfs_file_operations; 299extern const struct file_operations nilfs_file_operations;
300extern const struct address_space_operations nilfs_aops; 300extern const struct address_space_operations nilfs_aops;
301extern const struct inode_operations nilfs_dir_inode_operations; 301extern const struct inode_operations nilfs_dir_inode_operations;
302extern const struct inode_operations nilfs_special_inode_operations; 302extern const struct inode_operations nilfs_special_inode_operations;
diff --git a/fs/nls/nls_base.c b/fs/nls/nls_base.c
index 2224b4d07bf0..44a88a9fa2c8 100644
--- a/fs/nls/nls_base.c
+++ b/fs/nls/nls_base.c
@@ -124,10 +124,10 @@ int utf8s_to_utf16s(const u8 *s, int len, wchar_t *pwcs)
124 while (*s && len > 0) { 124 while (*s && len > 0) {
125 if (*s & 0x80) { 125 if (*s & 0x80) {
126 size = utf8_to_utf32(s, len, &u); 126 size = utf8_to_utf32(s, len, &u);
127 if (size < 0) { 127 if (size < 0)
128 /* Ignore character and move on */ 128 return -EINVAL;
129 size = 1; 129
130 } else if (u >= PLANE_SIZE) { 130 if (u >= PLANE_SIZE) {
131 u -= PLANE_SIZE; 131 u -= PLANE_SIZE;
132 *op++ = (wchar_t) (SURROGATE_PAIR | 132 *op++ = (wchar_t) (SURROGATE_PAIR |
133 ((u >> 10) & SURROGATE_BITS)); 133 ((u >> 10) & SURROGATE_BITS));
diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c
index 09cc25d04611..c452d116b892 100644
--- a/fs/ocfs2/cluster/heartbeat.c
+++ b/fs/ocfs2/cluster/heartbeat.c
@@ -966,7 +966,7 @@ static ssize_t o2hb_debug_read(struct file *file, char __user *buf,
966} 966}
967#endif /* CONFIG_DEBUG_FS */ 967#endif /* CONFIG_DEBUG_FS */
968 968
969static struct file_operations o2hb_debug_fops = { 969static const struct file_operations o2hb_debug_fops = {
970 .open = o2hb_debug_open, 970 .open = o2hb_debug_open,
971 .release = o2hb_debug_release, 971 .release = o2hb_debug_release,
972 .read = o2hb_debug_read, 972 .read = o2hb_debug_read,
diff --git a/fs/ocfs2/cluster/netdebug.c b/fs/ocfs2/cluster/netdebug.c
index cfb2be708abe..da794bc07a6c 100644
--- a/fs/ocfs2/cluster/netdebug.c
+++ b/fs/ocfs2/cluster/netdebug.c
@@ -207,7 +207,7 @@ static int nst_fop_release(struct inode *inode, struct file *file)
207 return seq_release_private(inode, file); 207 return seq_release_private(inode, file);
208} 208}
209 209
210static struct file_operations nst_seq_fops = { 210static const struct file_operations nst_seq_fops = {
211 .open = nst_fop_open, 211 .open = nst_fop_open,
212 .read = seq_read, 212 .read = seq_read,
213 .llseek = seq_lseek, 213 .llseek = seq_lseek,
@@ -388,7 +388,7 @@ static int sc_fop_release(struct inode *inode, struct file *file)
388 return seq_release_private(inode, file); 388 return seq_release_private(inode, file);
389} 389}
390 390
391static struct file_operations sc_seq_fops = { 391static const struct file_operations sc_seq_fops = {
392 .open = sc_fop_open, 392 .open = sc_fop_open,
393 .read = seq_read, 393 .read = seq_read,
394 .llseek = seq_lseek, 394 .llseek = seq_lseek,
diff --git a/fs/ocfs2/dlm/dlmdebug.c b/fs/ocfs2/dlm/dlmdebug.c
index ca46002ec10e..42b0bad7a612 100644
--- a/fs/ocfs2/dlm/dlmdebug.c
+++ b/fs/ocfs2/dlm/dlmdebug.c
@@ -478,7 +478,7 @@ bail:
478 return -ENOMEM; 478 return -ENOMEM;
479} 479}
480 480
481static struct file_operations debug_purgelist_fops = { 481static const struct file_operations debug_purgelist_fops = {
482 .open = debug_purgelist_open, 482 .open = debug_purgelist_open,
483 .release = debug_buffer_release, 483 .release = debug_buffer_release,
484 .read = debug_buffer_read, 484 .read = debug_buffer_read,
@@ -538,7 +538,7 @@ bail:
538 return -ENOMEM; 538 return -ENOMEM;
539} 539}
540 540
541static struct file_operations debug_mle_fops = { 541static const struct file_operations debug_mle_fops = {
542 .open = debug_mle_open, 542 .open = debug_mle_open,
543 .release = debug_buffer_release, 543 .release = debug_buffer_release,
544 .read = debug_buffer_read, 544 .read = debug_buffer_read,
@@ -741,7 +741,7 @@ static int debug_lockres_release(struct inode *inode, struct file *file)
741 return seq_release_private(inode, file); 741 return seq_release_private(inode, file);
742} 742}
743 743
744static struct file_operations debug_lockres_fops = { 744static const struct file_operations debug_lockres_fops = {
745 .open = debug_lockres_open, 745 .open = debug_lockres_open,
746 .release = debug_lockres_release, 746 .release = debug_lockres_release,
747 .read = seq_read, 747 .read = seq_read,
@@ -925,7 +925,7 @@ bail:
925 return -ENOMEM; 925 return -ENOMEM;
926} 926}
927 927
928static struct file_operations debug_state_fops = { 928static const struct file_operations debug_state_fops = {
929 .open = debug_state_open, 929 .open = debug_state_open,
930 .release = debug_buffer_release, 930 .release = debug_buffer_release,
931 .read = debug_buffer_read, 931 .read = debug_buffer_read,
diff --git a/fs/ocfs2/mmap.c b/fs/ocfs2/mmap.c
index b606496b72ec..39737613424a 100644
--- a/fs/ocfs2/mmap.c
+++ b/fs/ocfs2/mmap.c
@@ -202,7 +202,7 @@ out:
202 return ret; 202 return ret;
203} 203}
204 204
205static struct vm_operations_struct ocfs2_file_vm_ops = { 205static const struct vm_operations_struct ocfs2_file_vm_ops = {
206 .fault = ocfs2_fault, 206 .fault = ocfs2_fault,
207 .page_mkwrite = ocfs2_page_mkwrite, 207 .page_mkwrite = ocfs2_page_mkwrite,
208}; 208};
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index 4cc3c890a2cd..c0e48aeebb1c 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -373,7 +373,7 @@ static ssize_t ocfs2_debug_read(struct file *file, char __user *buf,
373} 373}
374#endif /* CONFIG_DEBUG_FS */ 374#endif /* CONFIG_DEBUG_FS */
375 375
376static struct file_operations ocfs2_osb_debug_fops = { 376static const struct file_operations ocfs2_osb_debug_fops = {
377 .open = ocfs2_osb_debug_open, 377 .open = ocfs2_osb_debug_open,
378 .release = ocfs2_debug_release, 378 .release = ocfs2_debug_release,
379 .read = ocfs2_debug_read, 379 .read = ocfs2_debug_read,
diff --git a/fs/omfs/dir.c b/fs/omfs/dir.c
index 3680bae335b5..b42d62419034 100644
--- a/fs/omfs/dir.c
+++ b/fs/omfs/dir.c
@@ -498,7 +498,7 @@ const struct inode_operations omfs_dir_inops = {
498 .rmdir = omfs_rmdir, 498 .rmdir = omfs_rmdir,
499}; 499};
500 500
501struct file_operations omfs_dir_operations = { 501const struct file_operations omfs_dir_operations = {
502 .read = generic_read_dir, 502 .read = generic_read_dir,
503 .readdir = omfs_readdir, 503 .readdir = omfs_readdir,
504 .llseek = generic_file_llseek, 504 .llseek = generic_file_llseek,
diff --git a/fs/omfs/file.c b/fs/omfs/file.c
index 4845fbb18e6e..399487c09364 100644
--- a/fs/omfs/file.c
+++ b/fs/omfs/file.c
@@ -322,7 +322,7 @@ static sector_t omfs_bmap(struct address_space *mapping, sector_t block)
322 return generic_block_bmap(mapping, block, omfs_get_block); 322 return generic_block_bmap(mapping, block, omfs_get_block);
323} 323}
324 324
325struct file_operations omfs_file_operations = { 325const struct file_operations omfs_file_operations = {
326 .llseek = generic_file_llseek, 326 .llseek = generic_file_llseek,
327 .read = do_sync_read, 327 .read = do_sync_read,
328 .write = do_sync_write, 328 .write = do_sync_write,
diff --git a/fs/omfs/omfs.h b/fs/omfs/omfs.h
index df71039945ac..ebe2fdbe535e 100644
--- a/fs/omfs/omfs.h
+++ b/fs/omfs/omfs.h
@@ -44,14 +44,14 @@ extern int omfs_allocate_range(struct super_block *sb, int min_request,
44extern int omfs_clear_range(struct super_block *sb, u64 block, int count); 44extern int omfs_clear_range(struct super_block *sb, u64 block, int count);
45 45
46/* dir.c */ 46/* dir.c */
47extern struct file_operations omfs_dir_operations; 47extern const struct file_operations omfs_dir_operations;
48extern const struct inode_operations omfs_dir_inops; 48extern const struct inode_operations omfs_dir_inops;
49extern int omfs_make_empty(struct inode *inode, struct super_block *sb); 49extern int omfs_make_empty(struct inode *inode, struct super_block *sb);
50extern int omfs_is_bad(struct omfs_sb_info *sbi, struct omfs_header *header, 50extern int omfs_is_bad(struct omfs_sb_info *sbi, struct omfs_header *header,
51 u64 fsblock); 51 u64 fsblock);
52 52
53/* file.c */ 53/* file.c */
54extern struct file_operations omfs_file_operations; 54extern const struct file_operations omfs_file_operations;
55extern const struct inode_operations omfs_file_inops; 55extern const struct inode_operations omfs_file_inops;
56extern const struct address_space_operations omfs_aops; 56extern const struct address_space_operations omfs_aops;
57extern void omfs_make_empty_table(struct buffer_head *bh, int offset); 57extern void omfs_make_empty_table(struct buffer_head *bh, int offset);
diff --git a/fs/sysfs/bin.c b/fs/sysfs/bin.c
index 2524714bece1..60c702bc10ae 100644
--- a/fs/sysfs/bin.c
+++ b/fs/sysfs/bin.c
@@ -40,7 +40,7 @@ struct bin_buffer {
40 struct mutex mutex; 40 struct mutex mutex;
41 void *buffer; 41 void *buffer;
42 int mmapped; 42 int mmapped;
43 struct vm_operations_struct *vm_ops; 43 const struct vm_operations_struct *vm_ops;
44 struct file *file; 44 struct file *file;
45 struct hlist_node list; 45 struct hlist_node list;
46}; 46};
@@ -331,7 +331,7 @@ static int bin_migrate(struct vm_area_struct *vma, const nodemask_t *from,
331} 331}
332#endif 332#endif
333 333
334static struct vm_operations_struct bin_vm_ops = { 334static const struct vm_operations_struct bin_vm_ops = {
335 .open = bin_vma_open, 335 .open = bin_vma_open,
336 .close = bin_vma_close, 336 .close = bin_vma_close,
337 .fault = bin_fault, 337 .fault = bin_fault,
diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c
index 2e6481a7701c..1009adc8d602 100644
--- a/fs/ubifs/file.c
+++ b/fs/ubifs/file.c
@@ -1534,7 +1534,7 @@ out_unlock:
1534 return err; 1534 return err;
1535} 1535}
1536 1536
1537static struct vm_operations_struct ubifs_file_vm_ops = { 1537static const struct vm_operations_struct ubifs_file_vm_ops = {
1538 .fault = filemap_fault, 1538 .fault = filemap_fault,
1539 .page_mkwrite = ubifs_vm_page_mkwrite, 1539 .page_mkwrite = ubifs_vm_page_mkwrite,
1540}; 1540};
diff --git a/fs/xfs/linux-2.6/xfs_file.c b/fs/xfs/linux-2.6/xfs_file.c
index 988d8f87bc0f..629370974e57 100644
--- a/fs/xfs/linux-2.6/xfs_file.c
+++ b/fs/xfs/linux-2.6/xfs_file.c
@@ -42,7 +42,7 @@
42 42
43#include <linux/dcache.h> 43#include <linux/dcache.h>
44 44
45static struct vm_operations_struct xfs_file_vm_ops; 45static const struct vm_operations_struct xfs_file_vm_ops;
46 46
47STATIC ssize_t 47STATIC ssize_t
48xfs_file_aio_read( 48xfs_file_aio_read(
@@ -280,7 +280,7 @@ const struct file_operations xfs_dir_file_operations = {
280 .fsync = xfs_file_fsync, 280 .fsync = xfs_file_fsync,
281}; 281};
282 282
283static struct vm_operations_struct xfs_file_vm_ops = { 283static const struct vm_operations_struct xfs_file_vm_ops = {
284 .fault = filemap_fault, 284 .fault = filemap_fault,
285 .page_mkwrite = xfs_vm_page_mkwrite, 285 .page_mkwrite = xfs_vm_page_mkwrite,
286}; 286};
diff --git a/include/asm-generic/gpio.h b/include/asm-generic/gpio.h
index 9cca3785cab8..66d6106a2067 100644
--- a/include/asm-generic/gpio.h
+++ b/include/asm-generic/gpio.h
@@ -1,6 +1,7 @@
1#ifndef _ASM_GENERIC_GPIO_H 1#ifndef _ASM_GENERIC_GPIO_H
2#define _ASM_GENERIC_GPIO_H 2#define _ASM_GENERIC_GPIO_H
3 3
4#include <linux/kernel.h>
4#include <linux/types.h> 5#include <linux/types.h>
5#include <linux/errno.h> 6#include <linux/errno.h>
6 7
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index ae1e9e166959..b69347b8904f 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -387,6 +387,7 @@ struct drm_crtc {
387 * @get_modes: get mode list for this connector 387 * @get_modes: get mode list for this connector
388 * @set_property: property for this connector may need update 388 * @set_property: property for this connector may need update
389 * @destroy: make object go away 389 * @destroy: make object go away
390 * @force: notify the driver the connector is forced on
390 * 391 *
391 * Each CRTC may have one or more connectors attached to it. The functions 392 * Each CRTC may have one or more connectors attached to it. The functions
392 * below allow the core DRM code to control connectors, enumerate available modes, 393 * below allow the core DRM code to control connectors, enumerate available modes,
@@ -401,6 +402,7 @@ struct drm_connector_funcs {
401 int (*set_property)(struct drm_connector *connector, struct drm_property *property, 402 int (*set_property)(struct drm_connector *connector, struct drm_property *property,
402 uint64_t val); 403 uint64_t val);
403 void (*destroy)(struct drm_connector *connector); 404 void (*destroy)(struct drm_connector *connector);
405 void (*force)(struct drm_connector *connector);
404}; 406};
405 407
406struct drm_encoder_funcs { 408struct drm_encoder_funcs {
@@ -429,6 +431,13 @@ struct drm_encoder {
429 void *helper_private; 431 void *helper_private;
430}; 432};
431 433
434enum drm_connector_force {
435 DRM_FORCE_UNSPECIFIED,
436 DRM_FORCE_OFF,
437 DRM_FORCE_ON, /* force on analog part normally */
438 DRM_FORCE_ON_DIGITAL, /* for DVI-I use digital connector */
439};
440
432/** 441/**
433 * drm_connector - central DRM connector control structure 442 * drm_connector - central DRM connector control structure
434 * @crtc: CRTC this connector is currently connected to, NULL if none 443 * @crtc: CRTC this connector is currently connected to, NULL if none
@@ -478,9 +487,12 @@ struct drm_connector {
478 487
479 void *helper_private; 488 void *helper_private;
480 489
490 /* forced on connector */
491 enum drm_connector_force force;
481 uint32_t encoder_ids[DRM_CONNECTOR_MAX_ENCODER]; 492 uint32_t encoder_ids[DRM_CONNECTOR_MAX_ENCODER];
482 uint32_t force_encoder_id; 493 uint32_t force_encoder_id;
483 struct drm_encoder *encoder; /* currently active encoder */ 494 struct drm_encoder *encoder; /* currently active encoder */
495 void *fb_helper_private;
484}; 496};
485 497
486/** 498/**
@@ -746,7 +758,7 @@ extern int drm_mode_gamma_set_ioctl(struct drm_device *dev,
746extern bool drm_detect_hdmi_monitor(struct edid *edid); 758extern bool drm_detect_hdmi_monitor(struct edid *edid);
747extern struct drm_display_mode *drm_cvt_mode(struct drm_device *dev, 759extern struct drm_display_mode *drm_cvt_mode(struct drm_device *dev,
748 int hdisplay, int vdisplay, int vrefresh, 760 int hdisplay, int vdisplay, int vrefresh,
749 bool reduced, bool interlaced); 761 bool reduced, bool interlaced, bool margins);
750extern struct drm_display_mode *drm_gtf_mode(struct drm_device *dev, 762extern struct drm_display_mode *drm_gtf_mode(struct drm_device *dev,
751 int hdisplay, int vdisplay, int vrefresh, 763 int hdisplay, int vdisplay, int vrefresh,
752 bool interlaced, int margins); 764 bool interlaced, int margins);
diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
index 4c8dacaf4f58..ef47dfd8e5e9 100644
--- a/include/drm/drm_crtc_helper.h
+++ b/include/drm/drm_crtc_helper.h
@@ -39,6 +39,7 @@
39 39
40#include <linux/fb.h> 40#include <linux/fb.h>
41 41
42#include "drm_fb_helper.h"
42struct drm_crtc_helper_funcs { 43struct drm_crtc_helper_funcs {
43 /* 44 /*
44 * Control power levels on the CRTC. If the mode passed in is 45 * Control power levels on the CRTC. If the mode passed in is
@@ -119,10 +120,11 @@ static inline void drm_encoder_helper_add(struct drm_encoder *encoder,
119 encoder->helper_private = (void *)funcs; 120 encoder->helper_private = (void *)funcs;
120} 121}
121 122
122static inline void drm_connector_helper_add(struct drm_connector *connector, 123static inline int drm_connector_helper_add(struct drm_connector *connector,
123 const struct drm_connector_helper_funcs *funcs) 124 const struct drm_connector_helper_funcs *funcs)
124{ 125{
125 connector->helper_private = (void *)funcs; 126 connector->helper_private = (void *)funcs;
127 return drm_fb_helper_add_connector(connector);
126} 128}
127 129
128extern int drm_helper_resume_force_mode(struct drm_device *dev); 130extern int drm_helper_resume_force_mode(struct drm_device *dev);
diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h
index 88fffbdfa26f..4aa5740ce59f 100644
--- a/include/drm/drm_fb_helper.h
+++ b/include/drm/drm_fb_helper.h
@@ -35,11 +35,30 @@ struct drm_fb_helper_crtc {
35 struct drm_mode_set mode_set; 35 struct drm_mode_set mode_set;
36}; 36};
37 37
38
38struct drm_fb_helper_funcs { 39struct drm_fb_helper_funcs {
39 void (*gamma_set)(struct drm_crtc *crtc, u16 red, u16 green, 40 void (*gamma_set)(struct drm_crtc *crtc, u16 red, u16 green,
40 u16 blue, int regno); 41 u16 blue, int regno);
41}; 42};
42 43
44/* mode specified on the command line */
45struct drm_fb_helper_cmdline_mode {
46 bool specified;
47 bool refresh_specified;
48 bool bpp_specified;
49 int xres, yres;
50 int bpp;
51 int refresh;
52 bool rb;
53 bool interlace;
54 bool cvt;
55 bool margins;
56};
57
58struct drm_fb_helper_connector {
59 struct drm_fb_helper_cmdline_mode cmdline_mode;
60};
61
43struct drm_fb_helper { 62struct drm_fb_helper {
44 struct drm_framebuffer *fb; 63 struct drm_framebuffer *fb;
45 struct drm_device *dev; 64 struct drm_device *dev;
@@ -57,6 +76,8 @@ int drm_fb_helper_single_fb_probe(struct drm_device *dev,
57 uint32_t fb_height, 76 uint32_t fb_height,
58 uint32_t surface_width, 77 uint32_t surface_width,
59 uint32_t surface_height, 78 uint32_t surface_height,
79 uint32_t surface_depth,
80 uint32_t surface_bpp,
60 struct drm_framebuffer **fb_ptr)); 81 struct drm_framebuffer **fb_ptr));
61int drm_fb_helper_init_crtc_count(struct drm_fb_helper *helper, int crtc_count, 82int drm_fb_helper_init_crtc_count(struct drm_fb_helper *helper, int crtc_count,
62 int max_conn); 83 int max_conn);
@@ -79,4 +100,7 @@ void drm_fb_helper_fill_var(struct fb_info *info, struct drm_framebuffer *fb,
79 uint32_t fb_width, uint32_t fb_height); 100 uint32_t fb_width, uint32_t fb_height);
80void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch); 101void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch);
81 102
103int drm_fb_helper_add_connector(struct drm_connector *connector);
104int drm_fb_helper_parse_command_line(struct drm_device *dev);
105
82#endif 106#endif
diff --git a/include/linux/agp_backend.h b/include/linux/agp_backend.h
index 880130f7311f..9101ed64f803 100644
--- a/include/linux/agp_backend.h
+++ b/include/linux/agp_backend.h
@@ -53,7 +53,7 @@ struct agp_kern_info {
53 int current_memory; 53 int current_memory;
54 bool cant_use_aperture; 54 bool cant_use_aperture;
55 unsigned long page_mask; 55 unsigned long page_mask;
56 struct vm_operations_struct *vm_ops; 56 const struct vm_operations_struct *vm_ops;
57}; 57};
58 58
59/* 59/*
diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
index 086e5c362d3a..817b23705c91 100644
--- a/include/linux/atmdev.h
+++ b/include/linux/atmdev.h
@@ -397,7 +397,7 @@ struct atmdev_ops { /* only send is required */
397 int (*getsockopt)(struct atm_vcc *vcc,int level,int optname, 397 int (*getsockopt)(struct atm_vcc *vcc,int level,int optname,
398 void __user *optval,int optlen); 398 void __user *optval,int optlen);
399 int (*setsockopt)(struct atm_vcc *vcc,int level,int optname, 399 int (*setsockopt)(struct atm_vcc *vcc,int level,int optname,
400 void __user *optval,int optlen); 400 void __user *optval,unsigned int optlen);
401 int (*send)(struct atm_vcc *vcc,struct sk_buff *skb); 401 int (*send)(struct atm_vcc *vcc,struct sk_buff *skb);
402 int (*send_oam)(struct atm_vcc *vcc,void *cell,int flags); 402 int (*send_oam)(struct atm_vcc *vcc,void *cell,int flags);
403 void (*phy_put)(struct atm_dev *dev,unsigned char value, 403 void (*phy_put)(struct atm_dev *dev,unsigned char value,
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index b62bb9294d0c..0008dee66514 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -37,7 +37,7 @@ extern void cgroup_exit(struct task_struct *p, int run_callbacks);
37extern int cgroupstats_build(struct cgroupstats *stats, 37extern int cgroupstats_build(struct cgroupstats *stats,
38 struct dentry *dentry); 38 struct dentry *dentry);
39 39
40extern struct file_operations proc_cgroup_operations; 40extern const struct file_operations proc_cgroup_operations;
41 41
42/* Define the enumeration of all cgroup subsystems */ 42/* Define the enumeration of all cgroup subsystems */
43#define SUBSYS(_x) _x ## _subsys_id, 43#define SUBSYS(_x) _x ## _subsys_id,
diff --git a/include/linux/connector.h b/include/linux/connector.h
index 47ebf416f512..3a14615fd35c 100644
--- a/include/linux/connector.h
+++ b/include/linux/connector.h
@@ -132,11 +132,8 @@ struct cn_callback_id {
132}; 132};
133 133
134struct cn_callback_data { 134struct cn_callback_data {
135 void (*destruct_data) (void *); 135 struct sk_buff *skb;
136 void *ddata; 136 void (*callback) (struct cn_msg *, struct netlink_skb_parms *);
137
138 void *callback_priv;
139 void (*callback) (struct cn_msg *);
140 137
141 void *free; 138 void *free;
142}; 139};
@@ -167,11 +164,11 @@ struct cn_dev {
167 struct cn_queue_dev *cbdev; 164 struct cn_queue_dev *cbdev;
168}; 165};
169 166
170int cn_add_callback(struct cb_id *, char *, void (*callback) (struct cn_msg *)); 167int cn_add_callback(struct cb_id *, char *, void (*callback) (struct cn_msg *, struct netlink_skb_parms *));
171void cn_del_callback(struct cb_id *); 168void cn_del_callback(struct cb_id *);
172int cn_netlink_send(struct cn_msg *, u32, gfp_t); 169int cn_netlink_send(struct cn_msg *, u32, gfp_t);
173 170
174int cn_queue_add_callback(struct cn_queue_dev *dev, char *name, struct cb_id *id, void (*callback)(struct cn_msg *)); 171int cn_queue_add_callback(struct cn_queue_dev *dev, char *name, struct cb_id *id, void (*callback)(struct cn_msg *, struct netlink_skb_parms *));
175void cn_queue_del_callback(struct cn_queue_dev *dev, struct cb_id *id); 172void cn_queue_del_callback(struct cn_queue_dev *dev, struct cb_id *id);
176 173
177int queue_cn_work(struct cn_callback_entry *cbq, struct work_struct *work); 174int queue_cn_work(struct cn_callback_entry *cbq, struct work_struct *work);
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 2adaa2529f18..a1e6899d4b6c 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -2446,7 +2446,7 @@ static int __fops ## _open(struct inode *inode, struct file *file) \
2446 __simple_attr_check_format(__fmt, 0ull); \ 2446 __simple_attr_check_format(__fmt, 0ull); \
2447 return simple_attr_open(inode, file, __get, __set, __fmt); \ 2447 return simple_attr_open(inode, file, __get, __set, __fmt); \
2448} \ 2448} \
2449static struct file_operations __fops = { \ 2449static const struct file_operations __fops = { \
2450 .owner = THIS_MODULE, \ 2450 .owner = THIS_MODULE, \
2451 .open = __fops ## _open, \ 2451 .open = __fops ## _open, \
2452 .release = simple_attr_release, \ 2452 .release = simple_attr_release, \
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 16937995abd4..41a59afc70fa 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -163,7 +163,7 @@ static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
163} 163}
164 164
165extern const struct file_operations hugetlbfs_file_operations; 165extern const struct file_operations hugetlbfs_file_operations;
166extern struct vm_operations_struct hugetlb_vm_ops; 166extern const struct vm_operations_struct hugetlb_vm_ops;
167struct file *hugetlb_file_setup(const char *name, size_t size, int acct, 167struct file *hugetlb_file_setup(const char *name, size_t size, int acct,
168 struct user_struct **user, int creat_flags); 168 struct user_struct **user, int creat_flags);
169int hugetlb_get_quota(struct address_space *mapping, long delta); 169int hugetlb_get_quota(struct address_space *mapping, long delta);
diff --git a/include/linux/if_tunnel.h b/include/linux/if_tunnel.h
index 5eb9b0f857e0..5a9aae4adb44 100644
--- a/include/linux/if_tunnel.h
+++ b/include/linux/if_tunnel.h
@@ -44,7 +44,7 @@ struct ip_tunnel_prl {
44 __u16 flags; 44 __u16 flags;
45 __u16 __reserved; 45 __u16 __reserved;
46 __u32 datalen; 46 __u32 datalen;
47 __u32 rs_delay; 47 __u32 __reserved2;
48 /* data follows */ 48 /* data follows */
49}; 49};
50 50
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index 52695d3dfd0b..f1011f7f3d41 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -464,9 +464,9 @@ struct handle_s
464 */ 464 */
465struct transaction_chp_stats_s { 465struct transaction_chp_stats_s {
466 unsigned long cs_chp_time; 466 unsigned long cs_chp_time;
467 unsigned long cs_forced_to_close; 467 __u32 cs_forced_to_close;
468 unsigned long cs_written; 468 __u32 cs_written;
469 unsigned long cs_dropped; 469 __u32 cs_dropped;
470}; 470};
471 471
472/* The transaction_t type is the guts of the journaling mechanism. It 472/* The transaction_t type is the guts of the journaling mechanism. It
@@ -668,23 +668,16 @@ struct transaction_run_stats_s {
668 unsigned long rs_flushing; 668 unsigned long rs_flushing;
669 unsigned long rs_logging; 669 unsigned long rs_logging;
670 670
671 unsigned long rs_handle_count; 671 __u32 rs_handle_count;
672 unsigned long rs_blocks; 672 __u32 rs_blocks;
673 unsigned long rs_blocks_logged; 673 __u32 rs_blocks_logged;
674}; 674};
675 675
676struct transaction_stats_s { 676struct transaction_stats_s {
677 int ts_type;
678 unsigned long ts_tid; 677 unsigned long ts_tid;
679 union { 678 struct transaction_run_stats_s run;
680 struct transaction_run_stats_s run;
681 struct transaction_chp_stats_s chp;
682 } u;
683}; 679};
684 680
685#define JBD2_STATS_RUN 1
686#define JBD2_STATS_CHECKPOINT 2
687
688static inline unsigned long 681static inline unsigned long
689jbd2_time_diff(unsigned long start, unsigned long end) 682jbd2_time_diff(unsigned long start, unsigned long end)
690{ 683{
@@ -988,12 +981,6 @@ struct journal_s
988 /* 981 /*
989 * Journal statistics 982 * Journal statistics
990 */ 983 */
991 struct transaction_stats_s *j_history;
992 int j_history_max;
993 int j_history_cur;
994 /*
995 * Protect the transactions statistics history
996 */
997 spinlock_t j_history_lock; 984 spinlock_t j_history_lock;
998 struct proc_dir_entry *j_proc_entry; 985 struct proc_dir_entry *j_proc_entry;
999 struct transaction_stats_s j_stats; 986 struct transaction_stats_s j_stats;
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 21d6aa45206a..84a524afb3dc 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -171,7 +171,7 @@ struct vm_area_struct {
171 struct anon_vma *anon_vma; /* Serialized by page_table_lock */ 171 struct anon_vma *anon_vma; /* Serialized by page_table_lock */
172 172
173 /* Function pointers to deal with this struct. */ 173 /* Function pointers to deal with this struct. */
174 struct vm_operations_struct * vm_ops; 174 const struct vm_operations_struct *vm_ops;
175 175
176 /* Information about our backing store: */ 176 /* Information about our backing store: */
177 unsigned long vm_pgoff; /* Offset (within vm_file) in PAGE_SIZE 177 unsigned long vm_pgoff; /* Offset (within vm_file) in PAGE_SIZE
diff --git a/include/linux/mroute.h b/include/linux/mroute.h
index 0d45b4e8d367..08bc776d05e2 100644
--- a/include/linux/mroute.h
+++ b/include/linux/mroute.h
@@ -145,14 +145,14 @@ static inline int ip_mroute_opt(int opt)
145#endif 145#endif
146 146
147#ifdef CONFIG_IP_MROUTE 147#ifdef CONFIG_IP_MROUTE
148extern int ip_mroute_setsockopt(struct sock *, int, char __user *, int); 148extern int ip_mroute_setsockopt(struct sock *, int, char __user *, unsigned int);
149extern int ip_mroute_getsockopt(struct sock *, int, char __user *, int __user *); 149extern int ip_mroute_getsockopt(struct sock *, int, char __user *, int __user *);
150extern int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg); 150extern int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg);
151extern int ip_mr_init(void); 151extern int ip_mr_init(void);
152#else 152#else
153static inline 153static inline
154int ip_mroute_setsockopt(struct sock *sock, 154int ip_mroute_setsockopt(struct sock *sock,
155 int optname, char __user *optval, int optlen) 155 int optname, char __user *optval, unsigned int optlen)
156{ 156{
157 return -ENOPROTOOPT; 157 return -ENOPROTOOPT;
158} 158}
diff --git a/include/linux/mroute6.h b/include/linux/mroute6.h
index 43dc97e32183..b191865a6ca3 100644
--- a/include/linux/mroute6.h
+++ b/include/linux/mroute6.h
@@ -134,7 +134,7 @@ static inline int ip6_mroute_opt(int opt)
134struct sock; 134struct sock;
135 135
136#ifdef CONFIG_IPV6_MROUTE 136#ifdef CONFIG_IPV6_MROUTE
137extern int ip6_mroute_setsockopt(struct sock *, int, char __user *, int); 137extern int ip6_mroute_setsockopt(struct sock *, int, char __user *, unsigned int);
138extern int ip6_mroute_getsockopt(struct sock *, int, char __user *, int __user *); 138extern int ip6_mroute_getsockopt(struct sock *, int, char __user *, int __user *);
139extern int ip6_mr_input(struct sk_buff *skb); 139extern int ip6_mr_input(struct sk_buff *skb);
140extern int ip6mr_ioctl(struct sock *sk, int cmd, void __user *arg); 140extern int ip6mr_ioctl(struct sock *sk, int cmd, void __user *arg);
@@ -143,7 +143,7 @@ extern void ip6_mr_cleanup(void);
143#else 143#else
144static inline 144static inline
145int ip6_mroute_setsockopt(struct sock *sock, 145int ip6_mroute_setsockopt(struct sock *sock,
146 int optname, char __user *optval, int optlen) 146 int optname, char __user *optval, unsigned int optlen)
147{ 147{
148 return -ENOPROTOOPT; 148 return -ENOPROTOOPT;
149} 149}
diff --git a/include/linux/net.h b/include/linux/net.h
index 9040a10584f7..529a0931711d 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -178,11 +178,11 @@ struct proto_ops {
178 int (*listen) (struct socket *sock, int len); 178 int (*listen) (struct socket *sock, int len);
179 int (*shutdown) (struct socket *sock, int flags); 179 int (*shutdown) (struct socket *sock, int flags);
180 int (*setsockopt)(struct socket *sock, int level, 180 int (*setsockopt)(struct socket *sock, int level,
181 int optname, char __user *optval, int optlen); 181 int optname, char __user *optval, unsigned int optlen);
182 int (*getsockopt)(struct socket *sock, int level, 182 int (*getsockopt)(struct socket *sock, int level,
183 int optname, char __user *optval, int __user *optlen); 183 int optname, char __user *optval, int __user *optlen);
184 int (*compat_setsockopt)(struct socket *sock, int level, 184 int (*compat_setsockopt)(struct socket *sock, int level,
185 int optname, char __user *optval, int optlen); 185 int optname, char __user *optval, unsigned int optlen);
186 int (*compat_getsockopt)(struct socket *sock, int level, 186 int (*compat_getsockopt)(struct socket *sock, int level,
187 int optname, char __user *optval, int __user *optlen); 187 int optname, char __user *optval, int __user *optlen);
188 int (*sendmsg) (struct kiocb *iocb, struct socket *sock, 188 int (*sendmsg) (struct kiocb *iocb, struct socket *sock,
@@ -256,7 +256,7 @@ extern int kernel_getpeername(struct socket *sock, struct sockaddr *addr,
256extern int kernel_getsockopt(struct socket *sock, int level, int optname, 256extern int kernel_getsockopt(struct socket *sock, int level, int optname,
257 char *optval, int *optlen); 257 char *optval, int *optlen);
258extern int kernel_setsockopt(struct socket *sock, int level, int optname, 258extern int kernel_setsockopt(struct socket *sock, int level, int optname,
259 char *optval, int optlen); 259 char *optval, unsigned int optlen);
260extern int kernel_sendpage(struct socket *sock, struct page *page, int offset, 260extern int kernel_sendpage(struct socket *sock, struct page *page, int offset,
261 size_t size, int flags); 261 size_t size, int flags);
262extern int kernel_sock_ioctl(struct socket *sock, int cmd, unsigned long arg); 262extern int kernel_sock_ioctl(struct socket *sock, int cmd, unsigned long arg);
@@ -313,7 +313,7 @@ SOCKCALL_WRAP(name, compat_ioctl, (struct socket *sock, unsigned int cmd, \
313SOCKCALL_WRAP(name, listen, (struct socket *sock, int len), (sock, len)) \ 313SOCKCALL_WRAP(name, listen, (struct socket *sock, int len), (sock, len)) \
314SOCKCALL_WRAP(name, shutdown, (struct socket *sock, int flags), (sock, flags)) \ 314SOCKCALL_WRAP(name, shutdown, (struct socket *sock, int flags), (sock, flags)) \
315SOCKCALL_WRAP(name, setsockopt, (struct socket *sock, int level, int optname, \ 315SOCKCALL_WRAP(name, setsockopt, (struct socket *sock, int level, int optname, \
316 char __user *optval, int optlen), (sock, level, optname, optval, optlen)) \ 316 char __user *optval, unsigned int optlen), (sock, level, optname, optval, optlen)) \
317SOCKCALL_WRAP(name, getsockopt, (struct socket *sock, int level, int optname, \ 317SOCKCALL_WRAP(name, getsockopt, (struct socket *sock, int level, int optname, \
318 char __user *optval, int __user *optlen), (sock, level, optname, optval, optlen)) \ 318 char __user *optval, int __user *optlen), (sock, level, optname, optval, optlen)) \
319SOCKCALL_WRAP(name, sendmsg, (struct kiocb *iocb, struct socket *sock, struct msghdr *m, size_t len), \ 319SOCKCALL_WRAP(name, sendmsg, (struct kiocb *iocb, struct socket *sock, struct msghdr *m, size_t len), \
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
index 48cfe51bfddc..6132b5e6d9d3 100644
--- a/include/linux/netfilter.h
+++ b/include/linux/netfilter.h
@@ -221,12 +221,12 @@ __ret;})
221 221
222/* Call setsockopt() */ 222/* Call setsockopt() */
223int nf_setsockopt(struct sock *sk, u_int8_t pf, int optval, char __user *opt, 223int nf_setsockopt(struct sock *sk, u_int8_t pf, int optval, char __user *opt,
224 int len); 224 unsigned int len);
225int nf_getsockopt(struct sock *sk, u_int8_t pf, int optval, char __user *opt, 225int nf_getsockopt(struct sock *sk, u_int8_t pf, int optval, char __user *opt,
226 int *len); 226 int *len);
227 227
228int compat_nf_setsockopt(struct sock *sk, u_int8_t pf, int optval, 228int compat_nf_setsockopt(struct sock *sk, u_int8_t pf, int optval,
229 char __user *opt, int len); 229 char __user *opt, unsigned int len);
230int compat_nf_getsockopt(struct sock *sk, u_int8_t pf, int optval, 230int compat_nf_getsockopt(struct sock *sk, u_int8_t pf, int optval,
231 char __user *opt, int *len); 231 char __user *opt, int *len);
232 232
diff --git a/include/linux/ramfs.h b/include/linux/ramfs.h
index 37aaf2b39863..4e768dda87b0 100644
--- a/include/linux/ramfs.h
+++ b/include/linux/ramfs.h
@@ -17,7 +17,7 @@ extern int ramfs_nommu_mmap(struct file *file, struct vm_area_struct *vma);
17#endif 17#endif
18 18
19extern const struct file_operations ramfs_file_operations; 19extern const struct file_operations ramfs_file_operations;
20extern struct vm_operations_struct generic_file_vm_ops; 20extern const struct vm_operations_struct generic_file_vm_ops;
21extern int __init init_rootfs(void); 21extern int __init init_rootfs(void);
22 22
23#endif 23#endif
diff --git a/include/linux/res_counter.h b/include/linux/res_counter.h
index 731af71cddc9..fcb9884df618 100644
--- a/include/linux/res_counter.h
+++ b/include/linux/res_counter.h
@@ -114,8 +114,7 @@ void res_counter_init(struct res_counter *counter, struct res_counter *parent);
114int __must_check res_counter_charge_locked(struct res_counter *counter, 114int __must_check res_counter_charge_locked(struct res_counter *counter,
115 unsigned long val); 115 unsigned long val);
116int __must_check res_counter_charge(struct res_counter *counter, 116int __must_check res_counter_charge(struct res_counter *counter,
117 unsigned long val, struct res_counter **limit_fail_at, 117 unsigned long val, struct res_counter **limit_fail_at);
118 struct res_counter **soft_limit_at);
119 118
120/* 119/*
121 * uncharge - tell that some portion of the resource is released 120 * uncharge - tell that some portion of the resource is released
@@ -128,8 +127,7 @@ int __must_check res_counter_charge(struct res_counter *counter,
128 */ 127 */
129 128
130void res_counter_uncharge_locked(struct res_counter *counter, unsigned long val); 129void res_counter_uncharge_locked(struct res_counter *counter, unsigned long val);
131void res_counter_uncharge(struct res_counter *counter, unsigned long val, 130void res_counter_uncharge(struct res_counter *counter, unsigned long val);
132 bool *was_soft_limit_excess);
133 131
134static inline bool res_counter_limit_check_locked(struct res_counter *cnt) 132static inline bool res_counter_limit_check_locked(struct res_counter *cnt)
135{ 133{
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index fe661afe0713..db532ce288be 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -176,6 +176,9 @@
176/* Qualcomm MSM SoCs */ 176/* Qualcomm MSM SoCs */
177#define PORT_MSM 88 177#define PORT_MSM 88
178 178
179/* BCM63xx family SoCs */
180#define PORT_BCM63XX 89
181
179#ifdef __KERNEL__ 182#ifdef __KERNEL__
180 183
181#include <linux/compiler.h> 184#include <linux/compiler.h>
diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h
index 3566129384a4..b08677982525 100644
--- a/include/linux/tty_driver.h
+++ b/include/linux/tty_driver.h
@@ -45,8 +45,16 @@
45 * 45 *
46 * void (*shutdown)(struct tty_struct * tty); 46 * void (*shutdown)(struct tty_struct * tty);
47 * 47 *
48 * This routine is called when a particular tty device is closed for 48 * This routine is called synchronously when a particular tty device
49 * the last time freeing up the resources. 49 * is closed for the last time freeing up the resources.
50 *
51 *
52 * void (*cleanup)(struct tty_struct * tty);
53 *
54 * This routine is called asynchronously when a particular tty device
55 * is closed for the last time freeing up the resources. This is
56 * actually the second part of shutdown for routines that might sleep.
57 *
50 * 58 *
51 * int (*write)(struct tty_struct * tty, 59 * int (*write)(struct tty_struct * tty,
52 * const unsigned char *buf, int count); 60 * const unsigned char *buf, int count);
@@ -233,6 +241,7 @@ struct tty_operations {
233 int (*open)(struct tty_struct * tty, struct file * filp); 241 int (*open)(struct tty_struct * tty, struct file * filp);
234 void (*close)(struct tty_struct * tty, struct file * filp); 242 void (*close)(struct tty_struct * tty, struct file * filp);
235 void (*shutdown)(struct tty_struct *tty); 243 void (*shutdown)(struct tty_struct *tty);
244 void (*cleanup)(struct tty_struct *tty);
236 int (*write)(struct tty_struct * tty, 245 int (*write)(struct tty_struct * tty,
237 const unsigned char *buf, int count); 246 const unsigned char *buf, int count);
238 int (*put_char)(struct tty_struct *tty, unsigned char ch); 247 int (*put_char)(struct tty_struct *tty, unsigned char ch);
diff --git a/include/net/compat.h b/include/net/compat.h
index 5bbf8bf9efea..7c3002832d05 100644
--- a/include/net/compat.h
+++ b/include/net/compat.h
@@ -40,8 +40,8 @@ extern int put_cmsg_compat(struct msghdr*, int, int, int, void *);
40 40
41extern int cmsghdr_from_user_compat_to_kern(struct msghdr *, struct sock *, unsigned char *, int); 41extern int cmsghdr_from_user_compat_to_kern(struct msghdr *, struct sock *, unsigned char *, int);
42 42
43extern int compat_mc_setsockopt(struct sock *, int, int, char __user *, int, 43extern int compat_mc_setsockopt(struct sock *, int, int, char __user *, unsigned int,
44 int (*)(struct sock *, int, int, char __user *, int)); 44 int (*)(struct sock *, int, int, char __user *, unsigned int));
45extern int compat_mc_getsockopt(struct sock *, int, int, char __user *, 45extern int compat_mc_getsockopt(struct sock *, int, int, char __user *,
46 int __user *, int (*)(struct sock *, int, int, char __user *, 46 int __user *, int (*)(struct sock *, int, int, char __user *,
47 int __user *)); 47 int __user *));
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
index 03cffd9f64e3..696d6e4ce68a 100644
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -48,13 +48,13 @@ struct inet_connection_sock_af_ops {
48 u16 net_header_len; 48 u16 net_header_len;
49 u16 sockaddr_len; 49 u16 sockaddr_len;
50 int (*setsockopt)(struct sock *sk, int level, int optname, 50 int (*setsockopt)(struct sock *sk, int level, int optname,
51 char __user *optval, int optlen); 51 char __user *optval, unsigned int optlen);
52 int (*getsockopt)(struct sock *sk, int level, int optname, 52 int (*getsockopt)(struct sock *sk, int level, int optname,
53 char __user *optval, int __user *optlen); 53 char __user *optval, int __user *optlen);
54#ifdef CONFIG_COMPAT 54#ifdef CONFIG_COMPAT
55 int (*compat_setsockopt)(struct sock *sk, 55 int (*compat_setsockopt)(struct sock *sk,
56 int level, int optname, 56 int level, int optname,
57 char __user *optval, int optlen); 57 char __user *optval, unsigned int optlen);
58 int (*compat_getsockopt)(struct sock *sk, 58 int (*compat_getsockopt)(struct sock *sk,
59 int level, int optname, 59 int level, int optname,
60 char __user *optval, int __user *optlen); 60 char __user *optval, int __user *optlen);
@@ -332,5 +332,5 @@ extern void inet_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr);
332extern int inet_csk_compat_getsockopt(struct sock *sk, int level, int optname, 332extern int inet_csk_compat_getsockopt(struct sock *sk, int level, int optname,
333 char __user *optval, int __user *optlen); 333 char __user *optval, int __user *optlen);
334extern int inet_csk_compat_setsockopt(struct sock *sk, int level, int optname, 334extern int inet_csk_compat_setsockopt(struct sock *sk, int level, int optname,
335 char __user *optval, int optlen); 335 char __user *optval, unsigned int optlen);
336#endif /* _INET_CONNECTION_SOCK_H */ 336#endif /* _INET_CONNECTION_SOCK_H */
diff --git a/include/net/ip.h b/include/net/ip.h
index 5b26a0bd178e..2f47e5482b55 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -381,10 +381,10 @@ extern int ip_options_rcv_srr(struct sk_buff *skb);
381extern void ip_cmsg_recv(struct msghdr *msg, struct sk_buff *skb); 381extern void ip_cmsg_recv(struct msghdr *msg, struct sk_buff *skb);
382extern int ip_cmsg_send(struct net *net, 382extern int ip_cmsg_send(struct net *net,
383 struct msghdr *msg, struct ipcm_cookie *ipc); 383 struct msghdr *msg, struct ipcm_cookie *ipc);
384extern int ip_setsockopt(struct sock *sk, int level, int optname, char __user *optval, int optlen); 384extern int ip_setsockopt(struct sock *sk, int level, int optname, char __user *optval, unsigned int optlen);
385extern int ip_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen); 385extern int ip_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen);
386extern int compat_ip_setsockopt(struct sock *sk, int level, 386extern int compat_ip_setsockopt(struct sock *sk, int level,
387 int optname, char __user *optval, int optlen); 387 int optname, char __user *optval, unsigned int optlen);
388extern int compat_ip_getsockopt(struct sock *sk, int level, 388extern int compat_ip_getsockopt(struct sock *sk, int level,
389 int optname, char __user *optval, int __user *optlen); 389 int optname, char __user *optval, int __user *optlen);
390extern int ip_ra_control(struct sock *sk, unsigned char on, void (*destructor)(struct sock *)); 390extern int ip_ra_control(struct sock *sk, unsigned char on, void (*destructor)(struct sock *));
diff --git a/include/net/ipip.h b/include/net/ipip.h
index 76e3ea6e2fe5..87acf8f3a155 100644
--- a/include/net/ipip.h
+++ b/include/net/ipip.h
@@ -27,18 +27,11 @@ struct ip_tunnel
27 unsigned int prl_count; /* # of entries in PRL */ 27 unsigned int prl_count; /* # of entries in PRL */
28}; 28};
29 29
30/* ISATAP: default interval between RS in secondy */
31#define IPTUNNEL_RS_DEFAULT_DELAY (900)
32
33struct ip_tunnel_prl_entry 30struct ip_tunnel_prl_entry
34{ 31{
35 struct ip_tunnel_prl_entry *next; 32 struct ip_tunnel_prl_entry *next;
36 __be32 addr; 33 __be32 addr;
37 u16 flags; 34 u16 flags;
38 unsigned long rs_delay;
39 struct timer_list rs_timer;
40 struct ip_tunnel *tunnel;
41 spinlock_t lock;
42}; 35};
43 36
44#define IPTUNNEL_XMIT() do { \ 37#define IPTUNNEL_XMIT() do { \
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index ad9a51130254..8c31d8a0c1fe 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -550,7 +550,7 @@ extern int ipv6_find_tlv(struct sk_buff *skb, int offset, int type);
550extern int ipv6_setsockopt(struct sock *sk, int level, 550extern int ipv6_setsockopt(struct sock *sk, int level,
551 int optname, 551 int optname,
552 char __user *optval, 552 char __user *optval,
553 int optlen); 553 unsigned int optlen);
554extern int ipv6_getsockopt(struct sock *sk, int level, 554extern int ipv6_getsockopt(struct sock *sk, int level,
555 int optname, 555 int optname,
556 char __user *optval, 556 char __user *optval,
@@ -559,7 +559,7 @@ extern int compat_ipv6_setsockopt(struct sock *sk,
559 int level, 559 int level,
560 int optname, 560 int optname,
561 char __user *optval, 561 char __user *optval,
562 int optlen); 562 unsigned int optlen);
563extern int compat_ipv6_getsockopt(struct sock *sk, 563extern int compat_ipv6_getsockopt(struct sock *sk,
564 int level, 564 int level,
565 int optname, 565 int optname,
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index 42d00ced5eb8..6e5f0e0c7967 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -544,7 +544,7 @@ struct sctp_af {
544 int level, 544 int level,
545 int optname, 545 int optname,
546 char __user *optval, 546 char __user *optval,
547 int optlen); 547 unsigned int optlen);
548 int (*getsockopt) (struct sock *sk, 548 int (*getsockopt) (struct sock *sk,
549 int level, 549 int level,
550 int optname, 550 int optname,
@@ -554,7 +554,7 @@ struct sctp_af {
554 int level, 554 int level,
555 int optname, 555 int optname,
556 char __user *optval, 556 char __user *optval,
557 int optlen); 557 unsigned int optlen);
558 int (*compat_getsockopt) (struct sock *sk, 558 int (*compat_getsockopt) (struct sock *sk,
559 int level, 559 int level,
560 int optname, 560 int optname,
diff --git a/include/net/sock.h b/include/net/sock.h
index 950409dcec3d..1621935aad5b 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -624,7 +624,7 @@ struct proto {
624 void (*shutdown)(struct sock *sk, int how); 624 void (*shutdown)(struct sock *sk, int how);
625 int (*setsockopt)(struct sock *sk, int level, 625 int (*setsockopt)(struct sock *sk, int level,
626 int optname, char __user *optval, 626 int optname, char __user *optval,
627 int optlen); 627 unsigned int optlen);
628 int (*getsockopt)(struct sock *sk, int level, 628 int (*getsockopt)(struct sock *sk, int level,
629 int optname, char __user *optval, 629 int optname, char __user *optval,
630 int __user *option); 630 int __user *option);
@@ -632,7 +632,7 @@ struct proto {
632 int (*compat_setsockopt)(struct sock *sk, 632 int (*compat_setsockopt)(struct sock *sk,
633 int level, 633 int level,
634 int optname, char __user *optval, 634 int optname, char __user *optval,
635 int optlen); 635 unsigned int optlen);
636 int (*compat_getsockopt)(struct sock *sk, 636 int (*compat_getsockopt)(struct sock *sk,
637 int level, 637 int level,
638 int optname, char __user *optval, 638 int optname, char __user *optval,
@@ -951,7 +951,7 @@ extern void sock_rfree(struct sk_buff *skb);
951 951
952extern int sock_setsockopt(struct socket *sock, int level, 952extern int sock_setsockopt(struct socket *sock, int level,
953 int op, char __user *optval, 953 int op, char __user *optval,
954 int optlen); 954 unsigned int optlen);
955 955
956extern int sock_getsockopt(struct socket *sock, int level, 956extern int sock_getsockopt(struct socket *sock, int level,
957 int op, char __user *optval, 957 int op, char __user *optval,
@@ -993,7 +993,7 @@ extern int sock_no_shutdown(struct socket *, int);
993extern int sock_no_getsockopt(struct socket *, int , int, 993extern int sock_no_getsockopt(struct socket *, int , int,
994 char __user *, int __user *); 994 char __user *, int __user *);
995extern int sock_no_setsockopt(struct socket *, int, int, 995extern int sock_no_setsockopt(struct socket *, int, int,
996 char __user *, int); 996 char __user *, unsigned int);
997extern int sock_no_sendmsg(struct kiocb *, struct socket *, 997extern int sock_no_sendmsg(struct kiocb *, struct socket *,
998 struct msghdr *, size_t); 998 struct msghdr *, size_t);
999extern int sock_no_recvmsg(struct kiocb *, struct socket *, 999extern int sock_no_recvmsg(struct kiocb *, struct socket *,
@@ -1015,11 +1015,11 @@ extern int sock_common_getsockopt(struct socket *sock, int level, int optname,
1015extern int sock_common_recvmsg(struct kiocb *iocb, struct socket *sock, 1015extern int sock_common_recvmsg(struct kiocb *iocb, struct socket *sock,
1016 struct msghdr *msg, size_t size, int flags); 1016 struct msghdr *msg, size_t size, int flags);
1017extern int sock_common_setsockopt(struct socket *sock, int level, int optname, 1017extern int sock_common_setsockopt(struct socket *sock, int level, int optname,
1018 char __user *optval, int optlen); 1018 char __user *optval, unsigned int optlen);
1019extern int compat_sock_common_getsockopt(struct socket *sock, int level, 1019extern int compat_sock_common_getsockopt(struct socket *sock, int level,
1020 int optname, char __user *optval, int __user *optlen); 1020 int optname, char __user *optval, int __user *optlen);
1021extern int compat_sock_common_setsockopt(struct socket *sock, int level, 1021extern int compat_sock_common_setsockopt(struct socket *sock, int level,
1022 int optname, char __user *optval, int optlen); 1022 int optname, char __user *optval, unsigned int optlen);
1023 1023
1024extern void sk_common_release(struct sock *sk); 1024extern void sk_common_release(struct sock *sk);
1025 1025
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 56b76027b85e..03a49c703377 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -394,13 +394,13 @@ extern int tcp_getsockopt(struct sock *sk, int level,
394 int __user *optlen); 394 int __user *optlen);
395extern int tcp_setsockopt(struct sock *sk, int level, 395extern int tcp_setsockopt(struct sock *sk, int level,
396 int optname, char __user *optval, 396 int optname, char __user *optval,
397 int optlen); 397 unsigned int optlen);
398extern int compat_tcp_getsockopt(struct sock *sk, 398extern int compat_tcp_getsockopt(struct sock *sk,
399 int level, int optname, 399 int level, int optname,
400 char __user *optval, int __user *optlen); 400 char __user *optval, int __user *optlen);
401extern int compat_tcp_setsockopt(struct sock *sk, 401extern int compat_tcp_setsockopt(struct sock *sk,
402 int level, int optname, 402 int level, int optname,
403 char __user *optval, int optlen); 403 char __user *optval, unsigned int optlen);
404extern void tcp_set_keepalive(struct sock *sk, int val); 404extern void tcp_set_keepalive(struct sock *sk, int val);
405extern int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, 405extern int tcp_recvmsg(struct kiocb *iocb, struct sock *sk,
406 struct msghdr *msg, 406 struct msghdr *msg,
diff --git a/include/net/udp.h b/include/net/udp.h
index 5fb029f817a3..f98abd2ce709 100644
--- a/include/net/udp.h
+++ b/include/net/udp.h
@@ -144,7 +144,7 @@ extern unsigned int udp_poll(struct file *file, struct socket *sock,
144extern int udp_lib_getsockopt(struct sock *sk, int level, int optname, 144extern int udp_lib_getsockopt(struct sock *sk, int level, int optname,
145 char __user *optval, int __user *optlen); 145 char __user *optval, int __user *optlen);
146extern int udp_lib_setsockopt(struct sock *sk, int level, int optname, 146extern int udp_lib_setsockopt(struct sock *sk, int level, int optname,
147 char __user *optval, int optlen, 147 char __user *optval, unsigned int optlen,
148 int (*push_pending_frames)(struct sock *)); 148 int (*push_pending_frames)(struct sock *));
149 149
150extern struct sock *udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport, 150extern struct sock *udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
diff --git a/include/net/wext.h b/include/net/wext.h
index 6d76a39a9c5b..3f2b94de2cfa 100644
--- a/include/net/wext.h
+++ b/include/net/wext.h
@@ -14,6 +14,7 @@ extern int wext_handle_ioctl(struct net *net, struct ifreq *ifr, unsigned int cm
14 void __user *arg); 14 void __user *arg);
15extern int compat_wext_handle_ioctl(struct net *net, unsigned int cmd, 15extern int compat_wext_handle_ioctl(struct net *net, unsigned int cmd,
16 unsigned long arg); 16 unsigned long arg);
17extern struct iw_statistics *get_wireless_stats(struct net_device *dev);
17#else 18#else
18static inline int wext_proc_init(struct net *net) 19static inline int wext_proc_init(struct net *net)
19{ 20{
diff --git a/include/pcmcia/ss.h b/include/pcmcia/ss.h
index 9a3b49865173..d696a692d94a 100644
--- a/include/pcmcia/ss.h
+++ b/include/pcmcia/ss.h
@@ -279,7 +279,7 @@ extern struct pccard_resource_ops pccard_iodyn_ops;
279extern struct pccard_resource_ops pccard_nonstatic_ops; 279extern struct pccard_resource_ops pccard_nonstatic_ops;
280 280
281/* socket drivers are expected to use these callbacks in their .drv struct */ 281/* socket drivers are expected to use these callbacks in their .drv struct */
282extern int pcmcia_socket_dev_suspend(struct device *dev, pm_message_t state); 282extern int pcmcia_socket_dev_suspend(struct device *dev);
283extern int pcmcia_socket_dev_resume(struct device *dev); 283extern int pcmcia_socket_dev_resume(struct device *dev);
284 284
285/* socket drivers use this callback in their IRQ handler */ 285/* socket drivers use this callback in their IRQ handler */
diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h
index c1bd8f1e8b94..d09550bf3f95 100644
--- a/include/trace/events/ext4.h
+++ b/include/trace/events/ext4.h
@@ -11,6 +11,7 @@ struct ext4_allocation_context;
11struct ext4_allocation_request; 11struct ext4_allocation_request;
12struct ext4_prealloc_space; 12struct ext4_prealloc_space;
13struct ext4_inode_info; 13struct ext4_inode_info;
14struct mpage_da_data;
14 15
15#define EXT4_I(inode) (container_of(inode, struct ext4_inode_info, vfs_inode)) 16#define EXT4_I(inode) (container_of(inode, struct ext4_inode_info, vfs_inode))
16 17
@@ -236,6 +237,7 @@ TRACE_EVENT(ext4_da_writepages,
236 __field( char, for_kupdate ) 237 __field( char, for_kupdate )
237 __field( char, for_reclaim ) 238 __field( char, for_reclaim )
238 __field( char, range_cyclic ) 239 __field( char, range_cyclic )
240 __field( pgoff_t, writeback_index )
239 ), 241 ),
240 242
241 TP_fast_assign( 243 TP_fast_assign(
@@ -249,15 +251,17 @@ TRACE_EVENT(ext4_da_writepages,
249 __entry->for_kupdate = wbc->for_kupdate; 251 __entry->for_kupdate = wbc->for_kupdate;
250 __entry->for_reclaim = wbc->for_reclaim; 252 __entry->for_reclaim = wbc->for_reclaim;
251 __entry->range_cyclic = wbc->range_cyclic; 253 __entry->range_cyclic = wbc->range_cyclic;
254 __entry->writeback_index = inode->i_mapping->writeback_index;
252 ), 255 ),
253 256
254 TP_printk("dev %s ino %lu nr_to_write %ld pages_skipped %ld range_start %llu range_end %llu nonblocking %d for_kupdate %d for_reclaim %d range_cyclic %d", 257 TP_printk("dev %s ino %lu nr_to_write %ld pages_skipped %ld range_start %llu range_end %llu nonblocking %d for_kupdate %d for_reclaim %d range_cyclic %d writeback_index %lu",
255 jbd2_dev_to_name(__entry->dev), 258 jbd2_dev_to_name(__entry->dev),
256 (unsigned long) __entry->ino, __entry->nr_to_write, 259 (unsigned long) __entry->ino, __entry->nr_to_write,
257 __entry->pages_skipped, __entry->range_start, 260 __entry->pages_skipped, __entry->range_start,
258 __entry->range_end, __entry->nonblocking, 261 __entry->range_end, __entry->nonblocking,
259 __entry->for_kupdate, __entry->for_reclaim, 262 __entry->for_kupdate, __entry->for_reclaim,
260 __entry->range_cyclic) 263 __entry->range_cyclic,
264 (unsigned long) __entry->writeback_index)
261); 265);
262 266
263TRACE_EVENT(ext4_da_write_pages, 267TRACE_EVENT(ext4_da_write_pages,
@@ -309,6 +313,7 @@ TRACE_EVENT(ext4_da_writepages_result,
309 __field( char, encountered_congestion ) 313 __field( char, encountered_congestion )
310 __field( char, more_io ) 314 __field( char, more_io )
311 __field( char, no_nrwrite_index_update ) 315 __field( char, no_nrwrite_index_update )
316 __field( pgoff_t, writeback_index )
312 ), 317 ),
313 318
314 TP_fast_assign( 319 TP_fast_assign(
@@ -320,14 +325,16 @@ TRACE_EVENT(ext4_da_writepages_result,
320 __entry->encountered_congestion = wbc->encountered_congestion; 325 __entry->encountered_congestion = wbc->encountered_congestion;
321 __entry->more_io = wbc->more_io; 326 __entry->more_io = wbc->more_io;
322 __entry->no_nrwrite_index_update = wbc->no_nrwrite_index_update; 327 __entry->no_nrwrite_index_update = wbc->no_nrwrite_index_update;
328 __entry->writeback_index = inode->i_mapping->writeback_index;
323 ), 329 ),
324 330
325 TP_printk("dev %s ino %lu ret %d pages_written %d pages_skipped %ld congestion %d more_io %d no_nrwrite_index_update %d", 331 TP_printk("dev %s ino %lu ret %d pages_written %d pages_skipped %ld congestion %d more_io %d no_nrwrite_index_update %d writeback_index %lu",
326 jbd2_dev_to_name(__entry->dev), 332 jbd2_dev_to_name(__entry->dev),
327 (unsigned long) __entry->ino, __entry->ret, 333 (unsigned long) __entry->ino, __entry->ret,
328 __entry->pages_written, __entry->pages_skipped, 334 __entry->pages_written, __entry->pages_skipped,
329 __entry->encountered_congestion, __entry->more_io, 335 __entry->encountered_congestion, __entry->more_io,
330 __entry->no_nrwrite_index_update) 336 __entry->no_nrwrite_index_update,
337 (unsigned long) __entry->writeback_index)
331); 338);
332 339
333TRACE_EVENT(ext4_da_write_begin, 340TRACE_EVENT(ext4_da_write_begin,
@@ -737,6 +744,169 @@ TRACE_EVENT(ext4_alloc_da_blocks,
737 __entry->data_blocks, __entry->meta_blocks) 744 __entry->data_blocks, __entry->meta_blocks)
738); 745);
739 746
747TRACE_EVENT(ext4_mballoc_alloc,
748 TP_PROTO(struct ext4_allocation_context *ac),
749
750 TP_ARGS(ac),
751
752 TP_STRUCT__entry(
753 __field( dev_t, dev )
754 __field( ino_t, ino )
755 __field( __u16, found )
756 __field( __u16, groups )
757 __field( __u16, buddy )
758 __field( __u16, flags )
759 __field( __u16, tail )
760 __field( __u8, cr )
761 __field( __u32, orig_logical )
762 __field( int, orig_start )
763 __field( __u32, orig_group )
764 __field( int, orig_len )
765 __field( __u32, goal_logical )
766 __field( int, goal_start )
767 __field( __u32, goal_group )
768 __field( int, goal_len )
769 __field( __u32, result_logical )
770 __field( int, result_start )
771 __field( __u32, result_group )
772 __field( int, result_len )
773 ),
774
775 TP_fast_assign(
776 __entry->dev = ac->ac_inode->i_sb->s_dev;
777 __entry->ino = ac->ac_inode->i_ino;
778 __entry->found = ac->ac_found;
779 __entry->flags = ac->ac_flags;
780 __entry->groups = ac->ac_groups_scanned;
781 __entry->buddy = ac->ac_buddy;
782 __entry->tail = ac->ac_tail;
783 __entry->cr = ac->ac_criteria;
784 __entry->orig_logical = ac->ac_o_ex.fe_logical;
785 __entry->orig_start = ac->ac_o_ex.fe_start;
786 __entry->orig_group = ac->ac_o_ex.fe_group;
787 __entry->orig_len = ac->ac_o_ex.fe_len;
788 __entry->goal_logical = ac->ac_g_ex.fe_logical;
789 __entry->goal_start = ac->ac_g_ex.fe_start;
790 __entry->goal_group = ac->ac_g_ex.fe_group;
791 __entry->goal_len = ac->ac_g_ex.fe_len;
792 __entry->result_logical = ac->ac_f_ex.fe_logical;
793 __entry->result_start = ac->ac_f_ex.fe_start;
794 __entry->result_group = ac->ac_f_ex.fe_group;
795 __entry->result_len = ac->ac_f_ex.fe_len;
796 ),
797
798 TP_printk("dev %s inode %lu orig %u/%d/%u@%u goal %u/%d/%u@%u "
799 "result %u/%d/%u@%u blks %u grps %u cr %u flags 0x%04x "
800 "tail %u broken %u",
801 jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino,
802 __entry->orig_group, __entry->orig_start,
803 __entry->orig_len, __entry->orig_logical,
804 __entry->goal_group, __entry->goal_start,
805 __entry->goal_len, __entry->goal_logical,
806 __entry->result_group, __entry->result_start,
807 __entry->result_len, __entry->result_logical,
808 __entry->found, __entry->groups, __entry->cr,
809 __entry->flags, __entry->tail,
810 __entry->buddy ? 1 << __entry->buddy : 0)
811);
812
813TRACE_EVENT(ext4_mballoc_prealloc,
814 TP_PROTO(struct ext4_allocation_context *ac),
815
816 TP_ARGS(ac),
817
818 TP_STRUCT__entry(
819 __field( dev_t, dev )
820 __field( ino_t, ino )
821 __field( __u32, orig_logical )
822 __field( int, orig_start )
823 __field( __u32, orig_group )
824 __field( int, orig_len )
825 __field( __u32, result_logical )
826 __field( int, result_start )
827 __field( __u32, result_group )
828 __field( int, result_len )
829 ),
830
831 TP_fast_assign(
832 __entry->dev = ac->ac_inode->i_sb->s_dev;
833 __entry->ino = ac->ac_inode->i_ino;
834 __entry->orig_logical = ac->ac_o_ex.fe_logical;
835 __entry->orig_start = ac->ac_o_ex.fe_start;
836 __entry->orig_group = ac->ac_o_ex.fe_group;
837 __entry->orig_len = ac->ac_o_ex.fe_len;
838 __entry->result_logical = ac->ac_b_ex.fe_logical;
839 __entry->result_start = ac->ac_b_ex.fe_start;
840 __entry->result_group = ac->ac_b_ex.fe_group;
841 __entry->result_len = ac->ac_b_ex.fe_len;
842 ),
843
844 TP_printk("dev %s inode %lu orig %u/%d/%u@%u result %u/%d/%u@%u",
845 jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino,
846 __entry->orig_group, __entry->orig_start,
847 __entry->orig_len, __entry->orig_logical,
848 __entry->result_group, __entry->result_start,
849 __entry->result_len, __entry->result_logical)
850);
851
852TRACE_EVENT(ext4_mballoc_discard,
853 TP_PROTO(struct ext4_allocation_context *ac),
854
855 TP_ARGS(ac),
856
857 TP_STRUCT__entry(
858 __field( dev_t, dev )
859 __field( ino_t, ino )
860 __field( __u32, result_logical )
861 __field( int, result_start )
862 __field( __u32, result_group )
863 __field( int, result_len )
864 ),
865
866 TP_fast_assign(
867 __entry->dev = ac->ac_inode->i_sb->s_dev;
868 __entry->ino = ac->ac_inode->i_ino;
869 __entry->result_logical = ac->ac_b_ex.fe_logical;
870 __entry->result_start = ac->ac_b_ex.fe_start;
871 __entry->result_group = ac->ac_b_ex.fe_group;
872 __entry->result_len = ac->ac_b_ex.fe_len;
873 ),
874
875 TP_printk("dev %s inode %lu extent %u/%d/%u@%u ",
876 jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino,
877 __entry->result_group, __entry->result_start,
878 __entry->result_len, __entry->result_logical)
879);
880
881TRACE_EVENT(ext4_mballoc_free,
882 TP_PROTO(struct ext4_allocation_context *ac),
883
884 TP_ARGS(ac),
885
886 TP_STRUCT__entry(
887 __field( dev_t, dev )
888 __field( ino_t, ino )
889 __field( __u32, result_logical )
890 __field( int, result_start )
891 __field( __u32, result_group )
892 __field( int, result_len )
893 ),
894
895 TP_fast_assign(
896 __entry->dev = ac->ac_inode->i_sb->s_dev;
897 __entry->ino = ac->ac_inode->i_ino;
898 __entry->result_logical = ac->ac_b_ex.fe_logical;
899 __entry->result_start = ac->ac_b_ex.fe_start;
900 __entry->result_group = ac->ac_b_ex.fe_group;
901 __entry->result_len = ac->ac_b_ex.fe_len;
902 ),
903
904 TP_printk("dev %s inode %lu extent %u/%d/%u@%u ",
905 jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino,
906 __entry->result_group, __entry->result_start,
907 __entry->result_len, __entry->result_logical)
908);
909
740#endif /* _TRACE_EXT4_H */ 910#endif /* _TRACE_EXT4_H */
741 911
742/* This part must be outside protection */ 912/* This part must be outside protection */
diff --git a/include/trace/events/jbd2.h b/include/trace/events/jbd2.h
index b851f0b4701c..3c60b75adb9e 100644
--- a/include/trace/events/jbd2.h
+++ b/include/trace/events/jbd2.h
@@ -7,6 +7,9 @@
7#include <linux/jbd2.h> 7#include <linux/jbd2.h>
8#include <linux/tracepoint.h> 8#include <linux/tracepoint.h>
9 9
10struct transaction_chp_stats_s;
11struct transaction_run_stats_s;
12
10TRACE_EVENT(jbd2_checkpoint, 13TRACE_EVENT(jbd2_checkpoint,
11 14
12 TP_PROTO(journal_t *journal, int result), 15 TP_PROTO(journal_t *journal, int result),
@@ -162,6 +165,81 @@ TRACE_EVENT(jbd2_submit_inode_data,
162 jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino) 165 jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino)
163); 166);
164 167
168TRACE_EVENT(jbd2_run_stats,
169 TP_PROTO(dev_t dev, unsigned long tid,
170 struct transaction_run_stats_s *stats),
171
172 TP_ARGS(dev, tid, stats),
173
174 TP_STRUCT__entry(
175 __field( dev_t, dev )
176 __field( unsigned long, tid )
177 __field( unsigned long, wait )
178 __field( unsigned long, running )
179 __field( unsigned long, locked )
180 __field( unsigned long, flushing )
181 __field( unsigned long, logging )
182 __field( __u32, handle_count )
183 __field( __u32, blocks )
184 __field( __u32, blocks_logged )
185 ),
186
187 TP_fast_assign(
188 __entry->dev = dev;
189 __entry->tid = tid;
190 __entry->wait = stats->rs_wait;
191 __entry->running = stats->rs_running;
192 __entry->locked = stats->rs_locked;
193 __entry->flushing = stats->rs_flushing;
194 __entry->logging = stats->rs_logging;
195 __entry->handle_count = stats->rs_handle_count;
196 __entry->blocks = stats->rs_blocks;
197 __entry->blocks_logged = stats->rs_blocks_logged;
198 ),
199
200 TP_printk("dev %s tid %lu wait %u running %u locked %u flushing %u "
201 "logging %u handle_count %u blocks %u blocks_logged %u",
202 jbd2_dev_to_name(__entry->dev), __entry->tid,
203 jiffies_to_msecs(__entry->wait),
204 jiffies_to_msecs(__entry->running),
205 jiffies_to_msecs(__entry->locked),
206 jiffies_to_msecs(__entry->flushing),
207 jiffies_to_msecs(__entry->logging),
208 __entry->handle_count, __entry->blocks,
209 __entry->blocks_logged)
210);
211
212TRACE_EVENT(jbd2_checkpoint_stats,
213 TP_PROTO(dev_t dev, unsigned long tid,
214 struct transaction_chp_stats_s *stats),
215
216 TP_ARGS(dev, tid, stats),
217
218 TP_STRUCT__entry(
219 __field( dev_t, dev )
220 __field( unsigned long, tid )
221 __field( unsigned long, chp_time )
222 __field( __u32, forced_to_close )
223 __field( __u32, written )
224 __field( __u32, dropped )
225 ),
226
227 TP_fast_assign(
228 __entry->dev = dev;
229 __entry->tid = tid;
230 __entry->chp_time = stats->cs_chp_time;
231 __entry->forced_to_close= stats->cs_forced_to_close;
232 __entry->written = stats->cs_written;
233 __entry->dropped = stats->cs_dropped;
234 ),
235
236 TP_printk("dev %s tid %lu chp_time %u forced_to_close %u "
237 "written %u dropped %u",
238 jbd2_dev_to_name(__entry->dev), __entry->tid,
239 jiffies_to_msecs(__entry->chp_time),
240 __entry->forced_to_close, __entry->written, __entry->dropped)
241);
242
165#endif /* _TRACE_JBD2_H */ 243#endif /* _TRACE_JBD2_H */
166 244
167/* This part must be outside protection */ 245/* This part must be outside protection */
diff --git a/ipc/shm.c b/ipc/shm.c
index 9eb1488b543b..464694e0aa4a 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -55,7 +55,7 @@ struct shm_file_data {
55#define shm_file_data(file) (*((struct shm_file_data **)&(file)->private_data)) 55#define shm_file_data(file) (*((struct shm_file_data **)&(file)->private_data))
56 56
57static const struct file_operations shm_file_operations; 57static const struct file_operations shm_file_operations;
58static struct vm_operations_struct shm_vm_ops; 58static const struct vm_operations_struct shm_vm_ops;
59 59
60#define shm_ids(ns) ((ns)->ids[IPC_SHM_IDS]) 60#define shm_ids(ns) ((ns)->ids[IPC_SHM_IDS])
61 61
@@ -312,7 +312,7 @@ static const struct file_operations shm_file_operations = {
312 .get_unmapped_area = shm_get_unmapped_area, 312 .get_unmapped_area = shm_get_unmapped_area,
313}; 313};
314 314
315static struct vm_operations_struct shm_vm_ops = { 315static const struct vm_operations_struct shm_vm_ops = {
316 .open = shm_open, /* callback for a new vm-area open */ 316 .open = shm_open, /* callback for a new vm-area open */
317 .close = shm_close, /* callback for when the vm-area is released */ 317 .close = shm_close, /* callback for when the vm-area is released */
318 .fault = shm_fault, 318 .fault = shm_fault,
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 7ccba4bc5e3b..ca83b73fba19 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -703,7 +703,7 @@ static int cgroup_mkdir(struct inode *dir, struct dentry *dentry, int mode);
703static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry); 703static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry);
704static int cgroup_populate_dir(struct cgroup *cgrp); 704static int cgroup_populate_dir(struct cgroup *cgrp);
705static const struct inode_operations cgroup_dir_inode_operations; 705static const struct inode_operations cgroup_dir_inode_operations;
706static struct file_operations proc_cgroupstats_operations; 706static const struct file_operations proc_cgroupstats_operations;
707 707
708static struct backing_dev_info cgroup_backing_dev_info = { 708static struct backing_dev_info cgroup_backing_dev_info = {
709 .name = "cgroup", 709 .name = "cgroup",
@@ -1863,7 +1863,7 @@ static int cgroup_seqfile_release(struct inode *inode, struct file *file)
1863 return single_release(inode, file); 1863 return single_release(inode, file);
1864} 1864}
1865 1865
1866static struct file_operations cgroup_seqfile_operations = { 1866static const struct file_operations cgroup_seqfile_operations = {
1867 .read = seq_read, 1867 .read = seq_read,
1868 .write = cgroup_file_write, 1868 .write = cgroup_file_write,
1869 .llseek = seq_lseek, 1869 .llseek = seq_lseek,
@@ -1922,7 +1922,7 @@ static int cgroup_rename(struct inode *old_dir, struct dentry *old_dentry,
1922 return simple_rename(old_dir, old_dentry, new_dir, new_dentry); 1922 return simple_rename(old_dir, old_dentry, new_dir, new_dentry);
1923} 1923}
1924 1924
1925static struct file_operations cgroup_file_operations = { 1925static const struct file_operations cgroup_file_operations = {
1926 .read = cgroup_file_read, 1926 .read = cgroup_file_read,
1927 .write = cgroup_file_write, 1927 .write = cgroup_file_write,
1928 .llseek = generic_file_llseek, 1928 .llseek = generic_file_llseek,
@@ -3369,7 +3369,7 @@ static int cgroup_open(struct inode *inode, struct file *file)
3369 return single_open(file, proc_cgroup_show, pid); 3369 return single_open(file, proc_cgroup_show, pid);
3370} 3370}
3371 3371
3372struct file_operations proc_cgroup_operations = { 3372const struct file_operations proc_cgroup_operations = {
3373 .open = cgroup_open, 3373 .open = cgroup_open,
3374 .read = seq_read, 3374 .read = seq_read,
3375 .llseek = seq_lseek, 3375 .llseek = seq_lseek,
@@ -3398,7 +3398,7 @@ static int cgroupstats_open(struct inode *inode, struct file *file)
3398 return single_open(file, proc_cgroupstats_show, NULL); 3398 return single_open(file, proc_cgroupstats_show, NULL);
3399} 3399}
3400 3400
3401static struct file_operations proc_cgroupstats_operations = { 3401static const struct file_operations proc_cgroupstats_operations = {
3402 .open = cgroupstats_open, 3402 .open = cgroupstats_open,
3403 .read = seq_read, 3403 .read = seq_read,
3404 .llseek = seq_lseek, 3404 .llseek = seq_lseek,
@@ -3708,8 +3708,10 @@ static void check_for_release(struct cgroup *cgrp)
3708void __css_put(struct cgroup_subsys_state *css) 3708void __css_put(struct cgroup_subsys_state *css)
3709{ 3709{
3710 struct cgroup *cgrp = css->cgroup; 3710 struct cgroup *cgrp = css->cgroup;
3711 int val;
3711 rcu_read_lock(); 3712 rcu_read_lock();
3712 if (atomic_dec_return(&css->refcnt) == 1) { 3713 val = atomic_dec_return(&css->refcnt);
3714 if (val == 1) {
3713 if (notify_on_release(cgrp)) { 3715 if (notify_on_release(cgrp)) {
3714 set_bit(CGRP_RELEASABLE, &cgrp->flags); 3716 set_bit(CGRP_RELEASABLE, &cgrp->flags);
3715 check_for_release(cgrp); 3717 check_for_release(cgrp);
@@ -3717,6 +3719,7 @@ void __css_put(struct cgroup_subsys_state *css)
3717 cgroup_wakeup_rmdir_waiter(cgrp); 3719 cgroup_wakeup_rmdir_waiter(cgrp);
3718 } 3720 }
3719 rcu_read_unlock(); 3721 rcu_read_unlock();
3722 WARN_ON_ONCE(val < 1);
3720} 3723}
3721 3724
3722/* 3725/*
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index e5d98ce50f89..6d7020490f94 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -509,13 +509,14 @@ static inline int hrtimer_hres_active(void)
509 * next event 509 * next event
510 * Called with interrupts disabled and base->lock held 510 * Called with interrupts disabled and base->lock held
511 */ 511 */
512static void hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base) 512static void
513hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal)
513{ 514{
514 int i; 515 int i;
515 struct hrtimer_clock_base *base = cpu_base->clock_base; 516 struct hrtimer_clock_base *base = cpu_base->clock_base;
516 ktime_t expires; 517 ktime_t expires, expires_next;
517 518
518 cpu_base->expires_next.tv64 = KTIME_MAX; 519 expires_next.tv64 = KTIME_MAX;
519 520
520 for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++, base++) { 521 for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++, base++) {
521 struct hrtimer *timer; 522 struct hrtimer *timer;
@@ -531,10 +532,15 @@ static void hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base)
531 */ 532 */
532 if (expires.tv64 < 0) 533 if (expires.tv64 < 0)
533 expires.tv64 = 0; 534 expires.tv64 = 0;
534 if (expires.tv64 < cpu_base->expires_next.tv64) 535 if (expires.tv64 < expires_next.tv64)
535 cpu_base->expires_next = expires; 536 expires_next = expires;
536 } 537 }
537 538
539 if (skip_equal && expires_next.tv64 == cpu_base->expires_next.tv64)
540 return;
541
542 cpu_base->expires_next.tv64 = expires_next.tv64;
543
538 if (cpu_base->expires_next.tv64 != KTIME_MAX) 544 if (cpu_base->expires_next.tv64 != KTIME_MAX)
539 tick_program_event(cpu_base->expires_next, 1); 545 tick_program_event(cpu_base->expires_next, 1);
540} 546}
@@ -617,7 +623,7 @@ static void retrigger_next_event(void *arg)
617 base->clock_base[CLOCK_REALTIME].offset = 623 base->clock_base[CLOCK_REALTIME].offset =
618 timespec_to_ktime(realtime_offset); 624 timespec_to_ktime(realtime_offset);
619 625
620 hrtimer_force_reprogram(base); 626 hrtimer_force_reprogram(base, 0);
621 spin_unlock(&base->lock); 627 spin_unlock(&base->lock);
622} 628}
623 629
@@ -730,7 +736,8 @@ static int hrtimer_switch_to_hres(void)
730static inline int hrtimer_hres_active(void) { return 0; } 736static inline int hrtimer_hres_active(void) { return 0; }
731static inline int hrtimer_is_hres_enabled(void) { return 0; } 737static inline int hrtimer_is_hres_enabled(void) { return 0; }
732static inline int hrtimer_switch_to_hres(void) { return 0; } 738static inline int hrtimer_switch_to_hres(void) { return 0; }
733static inline void hrtimer_force_reprogram(struct hrtimer_cpu_base *base) { } 739static inline void
740hrtimer_force_reprogram(struct hrtimer_cpu_base *base, int skip_equal) { }
734static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer, 741static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
735 struct hrtimer_clock_base *base, 742 struct hrtimer_clock_base *base,
736 int wakeup) 743 int wakeup)
@@ -873,19 +880,29 @@ static void __remove_hrtimer(struct hrtimer *timer,
873 struct hrtimer_clock_base *base, 880 struct hrtimer_clock_base *base,
874 unsigned long newstate, int reprogram) 881 unsigned long newstate, int reprogram)
875{ 882{
876 if (timer->state & HRTIMER_STATE_ENQUEUED) { 883 if (!(timer->state & HRTIMER_STATE_ENQUEUED))
877 /* 884 goto out;
878 * Remove the timer from the rbtree and replace the 885
879 * first entry pointer if necessary. 886 /*
880 */ 887 * Remove the timer from the rbtree and replace the first
881 if (base->first == &timer->node) { 888 * entry pointer if necessary.
882 base->first = rb_next(&timer->node); 889 */
883 /* Reprogram the clock event device. if enabled */ 890 if (base->first == &timer->node) {
884 if (reprogram && hrtimer_hres_active()) 891 base->first = rb_next(&timer->node);
885 hrtimer_force_reprogram(base->cpu_base); 892#ifdef CONFIG_HIGH_RES_TIMERS
893 /* Reprogram the clock event device. if enabled */
894 if (reprogram && hrtimer_hres_active()) {
895 ktime_t expires;
896
897 expires = ktime_sub(hrtimer_get_expires(timer),
898 base->offset);
899 if (base->cpu_base->expires_next.tv64 == expires.tv64)
900 hrtimer_force_reprogram(base->cpu_base, 1);
886 } 901 }
887 rb_erase(&timer->node, &base->active); 902#endif
888 } 903 }
904 rb_erase(&timer->node, &base->active);
905out:
889 timer->state = newstate; 906 timer->state = newstate;
890} 907}
891 908
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index cfadc1291d0b..5240d75f4c60 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -1333,7 +1333,7 @@ static int __kprobes kprobes_open(struct inode *inode, struct file *filp)
1333 return seq_open(filp, &kprobes_seq_ops); 1333 return seq_open(filp, &kprobes_seq_ops);
1334} 1334}
1335 1335
1336static struct file_operations debugfs_kprobes_operations = { 1336static const struct file_operations debugfs_kprobes_operations = {
1337 .open = kprobes_open, 1337 .open = kprobes_open,
1338 .read = seq_read, 1338 .read = seq_read,
1339 .llseek = seq_lseek, 1339 .llseek = seq_lseek,
@@ -1515,7 +1515,7 @@ static ssize_t write_enabled_file_bool(struct file *file,
1515 return count; 1515 return count;
1516} 1516}
1517 1517
1518static struct file_operations fops_kp = { 1518static const struct file_operations fops_kp = {
1519 .read = read_enabled_file_bool, 1519 .read = read_enabled_file_bool,
1520 .write = write_enabled_file_bool, 1520 .write = write_enabled_file_bool,
1521}; 1521};
diff --git a/kernel/module.c b/kernel/module.c
index fe748a86d452..8b7d8805819d 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -1992,12 +1992,14 @@ static inline unsigned long layout_symtab(struct module *mod,
1992 Elf_Shdr *sechdrs, 1992 Elf_Shdr *sechdrs,
1993 unsigned int symindex, 1993 unsigned int symindex,
1994 unsigned int strindex, 1994 unsigned int strindex,
1995 const Elf_Hdr *hdr, 1995 const Elf_Ehdr *hdr,
1996 const char *secstrings, 1996 const char *secstrings,
1997 unsigned long *pstroffs, 1997 unsigned long *pstroffs,
1998 unsigned long *strmap) 1998 unsigned long *strmap)
1999{ 1999{
2000 return 0;
2000} 2001}
2002
2001static inline void add_kallsyms(struct module *mod, 2003static inline void add_kallsyms(struct module *mod,
2002 Elf_Shdr *sechdrs, 2004 Elf_Shdr *sechdrs,
2003 unsigned int shnum, 2005 unsigned int shnum,
@@ -2081,9 +2083,8 @@ static noinline struct module *load_module(void __user *umod,
2081 struct module *mod; 2083 struct module *mod;
2082 long err = 0; 2084 long err = 0;
2083 void *percpu = NULL, *ptr = NULL; /* Stops spurious gcc warning */ 2085 void *percpu = NULL, *ptr = NULL; /* Stops spurious gcc warning */
2084#ifdef CONFIG_KALLSYMS
2085 unsigned long symoffs, stroffs, *strmap; 2086 unsigned long symoffs, stroffs, *strmap;
2086#endif 2087
2087 mm_segment_t old_fs; 2088 mm_segment_t old_fs;
2088 2089
2089 DEBUGP("load_module: umod=%p, len=%lu, uargs=%p\n", 2090 DEBUGP("load_module: umod=%p, len=%lu, uargs=%p\n",
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 76ac4db405e9..0f86feb6db0c 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -2253,7 +2253,7 @@ static void perf_mmap_close(struct vm_area_struct *vma)
2253 } 2253 }
2254} 2254}
2255 2255
2256static struct vm_operations_struct perf_mmap_vmops = { 2256static const struct vm_operations_struct perf_mmap_vmops = {
2257 .open = perf_mmap_open, 2257 .open = perf_mmap_open,
2258 .close = perf_mmap_close, 2258 .close = perf_mmap_close,
2259 .fault = perf_mmap_fault, 2259 .fault = perf_mmap_fault,
diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c
index c89f5e9fd173..179e6ad80dc0 100644
--- a/kernel/rcutree_trace.c
+++ b/kernel/rcutree_trace.c
@@ -93,7 +93,7 @@ static int rcudata_open(struct inode *inode, struct file *file)
93 return single_open(file, show_rcudata, NULL); 93 return single_open(file, show_rcudata, NULL);
94} 94}
95 95
96static struct file_operations rcudata_fops = { 96static const struct file_operations rcudata_fops = {
97 .owner = THIS_MODULE, 97 .owner = THIS_MODULE,
98 .open = rcudata_open, 98 .open = rcudata_open,
99 .read = seq_read, 99 .read = seq_read,
@@ -145,7 +145,7 @@ static int rcudata_csv_open(struct inode *inode, struct file *file)
145 return single_open(file, show_rcudata_csv, NULL); 145 return single_open(file, show_rcudata_csv, NULL);
146} 146}
147 147
148static struct file_operations rcudata_csv_fops = { 148static const struct file_operations rcudata_csv_fops = {
149 .owner = THIS_MODULE, 149 .owner = THIS_MODULE,
150 .open = rcudata_csv_open, 150 .open = rcudata_csv_open,
151 .read = seq_read, 151 .read = seq_read,
@@ -196,7 +196,7 @@ static int rcuhier_open(struct inode *inode, struct file *file)
196 return single_open(file, show_rcuhier, NULL); 196 return single_open(file, show_rcuhier, NULL);
197} 197}
198 198
199static struct file_operations rcuhier_fops = { 199static const struct file_operations rcuhier_fops = {
200 .owner = THIS_MODULE, 200 .owner = THIS_MODULE,
201 .open = rcuhier_open, 201 .open = rcuhier_open,
202 .read = seq_read, 202 .read = seq_read,
@@ -222,7 +222,7 @@ static int rcugp_open(struct inode *inode, struct file *file)
222 return single_open(file, show_rcugp, NULL); 222 return single_open(file, show_rcugp, NULL);
223} 223}
224 224
225static struct file_operations rcugp_fops = { 225static const struct file_operations rcugp_fops = {
226 .owner = THIS_MODULE, 226 .owner = THIS_MODULE,
227 .open = rcugp_open, 227 .open = rcugp_open,
228 .read = seq_read, 228 .read = seq_read,
@@ -276,7 +276,7 @@ static int rcu_pending_open(struct inode *inode, struct file *file)
276 return single_open(file, show_rcu_pending, NULL); 276 return single_open(file, show_rcu_pending, NULL);
277} 277}
278 278
279static struct file_operations rcu_pending_fops = { 279static const struct file_operations rcu_pending_fops = {
280 .owner = THIS_MODULE, 280 .owner = THIS_MODULE,
281 .open = rcu_pending_open, 281 .open = rcu_pending_open,
282 .read = seq_read, 282 .read = seq_read,
diff --git a/kernel/relay.c b/kernel/relay.c
index bc188549788f..760c26209a3c 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -60,7 +60,7 @@ static int relay_buf_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
60/* 60/*
61 * vm_ops for relay file mappings. 61 * vm_ops for relay file mappings.
62 */ 62 */
63static struct vm_operations_struct relay_file_mmap_ops = { 63static const struct vm_operations_struct relay_file_mmap_ops = {
64 .fault = relay_buf_fault, 64 .fault = relay_buf_fault,
65 .close = relay_file_mmap_close, 65 .close = relay_file_mmap_close,
66}; 66};
diff --git a/kernel/res_counter.c b/kernel/res_counter.c
index 88faec23e833..bcdabf37c40b 100644
--- a/kernel/res_counter.c
+++ b/kernel/res_counter.c
@@ -37,27 +37,17 @@ int res_counter_charge_locked(struct res_counter *counter, unsigned long val)
37} 37}
38 38
39int res_counter_charge(struct res_counter *counter, unsigned long val, 39int res_counter_charge(struct res_counter *counter, unsigned long val,
40 struct res_counter **limit_fail_at, 40 struct res_counter **limit_fail_at)
41 struct res_counter **soft_limit_fail_at)
42{ 41{
43 int ret; 42 int ret;
44 unsigned long flags; 43 unsigned long flags;
45 struct res_counter *c, *u; 44 struct res_counter *c, *u;
46 45
47 *limit_fail_at = NULL; 46 *limit_fail_at = NULL;
48 if (soft_limit_fail_at)
49 *soft_limit_fail_at = NULL;
50 local_irq_save(flags); 47 local_irq_save(flags);
51 for (c = counter; c != NULL; c = c->parent) { 48 for (c = counter; c != NULL; c = c->parent) {
52 spin_lock(&c->lock); 49 spin_lock(&c->lock);
53 ret = res_counter_charge_locked(c, val); 50 ret = res_counter_charge_locked(c, val);
54 /*
55 * With soft limits, we return the highest ancestor
56 * that exceeds its soft limit
57 */
58 if (soft_limit_fail_at &&
59 !res_counter_soft_limit_check_locked(c))
60 *soft_limit_fail_at = c;
61 spin_unlock(&c->lock); 51 spin_unlock(&c->lock);
62 if (ret < 0) { 52 if (ret < 0) {
63 *limit_fail_at = c; 53 *limit_fail_at = c;
@@ -85,8 +75,7 @@ void res_counter_uncharge_locked(struct res_counter *counter, unsigned long val)
85 counter->usage -= val; 75 counter->usage -= val;
86} 76}
87 77
88void res_counter_uncharge(struct res_counter *counter, unsigned long val, 78void res_counter_uncharge(struct res_counter *counter, unsigned long val)
89 bool *was_soft_limit_excess)
90{ 79{
91 unsigned long flags; 80 unsigned long flags;
92 struct res_counter *c; 81 struct res_counter *c;
@@ -94,9 +83,6 @@ void res_counter_uncharge(struct res_counter *counter, unsigned long val,
94 local_irq_save(flags); 83 local_irq_save(flags);
95 for (c = counter; c != NULL; c = c->parent) { 84 for (c = counter; c != NULL; c = c->parent) {
96 spin_lock(&c->lock); 85 spin_lock(&c->lock);
97 if (was_soft_limit_excess)
98 *was_soft_limit_excess =
99 !res_counter_soft_limit_check_locked(c);
100 res_counter_uncharge_locked(c, val); 86 res_counter_uncharge_locked(c, val);
101 spin_unlock(&c->lock); 87 spin_unlock(&c->lock);
102 } 88 }
diff --git a/kernel/sched.c b/kernel/sched.c
index ee61f454a98b..1535f3884b88 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -780,7 +780,7 @@ static int sched_feat_open(struct inode *inode, struct file *filp)
780 return single_open(filp, sched_feat_show, NULL); 780 return single_open(filp, sched_feat_show, NULL);
781} 781}
782 782
783static struct file_operations sched_feat_fops = { 783static const struct file_operations sched_feat_fops = {
784 .open = sched_feat_open, 784 .open = sched_feat_open,
785 .write = sched_feat_write, 785 .write = sched_feat_write,
786 .read = seq_read, 786 .read = seq_read,
diff --git a/kernel/sched_clock.c b/kernel/sched_clock.c
index ac2e1dc708bd..479ce5682d7c 100644
--- a/kernel/sched_clock.c
+++ b/kernel/sched_clock.c
@@ -127,7 +127,7 @@ again:
127 clock = wrap_max(clock, min_clock); 127 clock = wrap_max(clock, min_clock);
128 clock = wrap_min(clock, max_clock); 128 clock = wrap_min(clock, max_clock);
129 129
130 if (cmpxchg(&scd->clock, old_clock, clock) != old_clock) 130 if (cmpxchg64(&scd->clock, old_clock, clock) != old_clock)
131 goto again; 131 goto again;
132 132
133 return clock; 133 return clock;
@@ -163,7 +163,7 @@ again:
163 val = remote_clock; 163 val = remote_clock;
164 } 164 }
165 165
166 if (cmpxchg(ptr, old_val, val) != old_val) 166 if (cmpxchg64(ptr, old_val, val) != old_val)
167 goto again; 167 goto again;
168 168
169 return val; 169 return val;
diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
index fddd69d16e03..1b5b7aa2fdfd 100644
--- a/kernel/time/timer_list.c
+++ b/kernel/time/timer_list.c
@@ -275,7 +275,7 @@ static int timer_list_open(struct inode *inode, struct file *filp)
275 return single_open(filp, timer_list_show, NULL); 275 return single_open(filp, timer_list_show, NULL);
276} 276}
277 277
278static struct file_operations timer_list_fops = { 278static const struct file_operations timer_list_fops = {
279 .open = timer_list_open, 279 .open = timer_list_open,
280 .read = seq_read, 280 .read = seq_read,
281 .llseek = seq_lseek, 281 .llseek = seq_lseek,
diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
index 4cde8b9c716f..ee5681f8d7ec 100644
--- a/kernel/time/timer_stats.c
+++ b/kernel/time/timer_stats.c
@@ -395,7 +395,7 @@ static int tstats_open(struct inode *inode, struct file *filp)
395 return single_open(filp, tstats_show, NULL); 395 return single_open(filp, tstats_show, NULL);
396} 396}
397 397
398static struct file_operations tstats_fops = { 398static const struct file_operations tstats_fops = {
399 .open = tstats_open, 399 .open = tstats_open,
400 .read = seq_read, 400 .read = seq_read,
401 .write = tstats_write, 401 .write = tstats_write,
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index b91839e9e892..33bed5e67a21 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -1771,7 +1771,7 @@ int vsscanf(const char * buf, const char * fmt, va_list args)
1771 * advance both strings to next white space 1771 * advance both strings to next white space
1772 */ 1772 */
1773 if (*fmt == '*') { 1773 if (*fmt == '*') {
1774 while (!isspace(*fmt) && *fmt) 1774 while (!isspace(*fmt) && *fmt != '%' && *fmt)
1775 fmt++; 1775 fmt++;
1776 while (!isspace(*str) && *str) 1776 while (!isspace(*str) && *str)
1777 str++; 1777 str++;
diff --git a/mm/Kconfig b/mm/Kconfig
index 247760729593..edd300aca173 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -244,10 +244,12 @@ config DEFAULT_MMAP_MIN_ADDR
244 This value can be changed after boot using the 244 This value can be changed after boot using the
245 /proc/sys/vm/mmap_min_addr tunable. 245 /proc/sys/vm/mmap_min_addr tunable.
246 246
247config ARCH_SUPPORTS_MEMORY_FAILURE
248 bool
247 249
248config MEMORY_FAILURE 250config MEMORY_FAILURE
249 depends on MMU 251 depends on MMU
250 depends on X86_MCE 252 depends on ARCH_SUPPORTS_MEMORY_FAILURE
251 bool "Enable recovery from hardware memory errors" 253 bool "Enable recovery from hardware memory errors"
252 help 254 help
253 Enables code to recover from some memory failures on systems 255 Enables code to recover from some memory failures on systems
diff --git a/mm/filemap.c b/mm/filemap.c
index 6c84e598b4a9..ef169f37156d 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1611,7 +1611,7 @@ page_not_uptodate:
1611} 1611}
1612EXPORT_SYMBOL(filemap_fault); 1612EXPORT_SYMBOL(filemap_fault);
1613 1613
1614struct vm_operations_struct generic_file_vm_ops = { 1614const struct vm_operations_struct generic_file_vm_ops = {
1615 .fault = filemap_fault, 1615 .fault = filemap_fault,
1616}; 1616};
1617 1617
diff --git a/mm/filemap_xip.c b/mm/filemap_xip.c
index 427dfe3ce78c..1888b2d71bb8 100644
--- a/mm/filemap_xip.c
+++ b/mm/filemap_xip.c
@@ -296,7 +296,7 @@ out:
296 } 296 }
297} 297}
298 298
299static struct vm_operations_struct xip_file_vm_ops = { 299static const struct vm_operations_struct xip_file_vm_ops = {
300 .fault = xip_file_fault, 300 .fault = xip_file_fault,
301}; 301};
302 302
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 6f048fcc749c..5d7601b02874 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1721,7 +1721,7 @@ static int hugetlb_vm_op_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1721 return 0; 1721 return 0;
1722} 1722}
1723 1723
1724struct vm_operations_struct hugetlb_vm_ops = { 1724const struct vm_operations_struct hugetlb_vm_ops = {
1725 .fault = hugetlb_vm_op_fault, 1725 .fault = hugetlb_vm_op_fault,
1726 .open = hugetlb_vm_op_open, 1726 .open = hugetlb_vm_op_open,
1727 .close = hugetlb_vm_op_close, 1727 .close = hugetlb_vm_op_close,
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index e2b98a6875c0..f99f5991d6bb 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -313,7 +313,8 @@ soft_limit_tree_from_page(struct page *page)
313static void 313static void
314__mem_cgroup_insert_exceeded(struct mem_cgroup *mem, 314__mem_cgroup_insert_exceeded(struct mem_cgroup *mem,
315 struct mem_cgroup_per_zone *mz, 315 struct mem_cgroup_per_zone *mz,
316 struct mem_cgroup_tree_per_zone *mctz) 316 struct mem_cgroup_tree_per_zone *mctz,
317 unsigned long long new_usage_in_excess)
317{ 318{
318 struct rb_node **p = &mctz->rb_root.rb_node; 319 struct rb_node **p = &mctz->rb_root.rb_node;
319 struct rb_node *parent = NULL; 320 struct rb_node *parent = NULL;
@@ -322,7 +323,9 @@ __mem_cgroup_insert_exceeded(struct mem_cgroup *mem,
322 if (mz->on_tree) 323 if (mz->on_tree)
323 return; 324 return;
324 325
325 mz->usage_in_excess = res_counter_soft_limit_excess(&mem->res); 326 mz->usage_in_excess = new_usage_in_excess;
327 if (!mz->usage_in_excess)
328 return;
326 while (*p) { 329 while (*p) {
327 parent = *p; 330 parent = *p;
328 mz_node = rb_entry(parent, struct mem_cgroup_per_zone, 331 mz_node = rb_entry(parent, struct mem_cgroup_per_zone,
@@ -353,16 +356,6 @@ __mem_cgroup_remove_exceeded(struct mem_cgroup *mem,
353} 356}
354 357
355static void 358static void
356mem_cgroup_insert_exceeded(struct mem_cgroup *mem,
357 struct mem_cgroup_per_zone *mz,
358 struct mem_cgroup_tree_per_zone *mctz)
359{
360 spin_lock(&mctz->lock);
361 __mem_cgroup_insert_exceeded(mem, mz, mctz);
362 spin_unlock(&mctz->lock);
363}
364
365static void
366mem_cgroup_remove_exceeded(struct mem_cgroup *mem, 359mem_cgroup_remove_exceeded(struct mem_cgroup *mem,
367 struct mem_cgroup_per_zone *mz, 360 struct mem_cgroup_per_zone *mz,
368 struct mem_cgroup_tree_per_zone *mctz) 361 struct mem_cgroup_tree_per_zone *mctz)
@@ -392,34 +385,36 @@ static bool mem_cgroup_soft_limit_check(struct mem_cgroup *mem)
392 385
393static void mem_cgroup_update_tree(struct mem_cgroup *mem, struct page *page) 386static void mem_cgroup_update_tree(struct mem_cgroup *mem, struct page *page)
394{ 387{
395 unsigned long long prev_usage_in_excess, new_usage_in_excess; 388 unsigned long long excess;
396 bool updated_tree = false;
397 struct mem_cgroup_per_zone *mz; 389 struct mem_cgroup_per_zone *mz;
398 struct mem_cgroup_tree_per_zone *mctz; 390 struct mem_cgroup_tree_per_zone *mctz;
399 391 int nid = page_to_nid(page);
400 mz = mem_cgroup_zoneinfo(mem, page_to_nid(page), page_zonenum(page)); 392 int zid = page_zonenum(page);
401 mctz = soft_limit_tree_from_page(page); 393 mctz = soft_limit_tree_from_page(page);
402 394
403 /* 395 /*
404 * We do updates in lazy mode, mem's are removed 396 * Necessary to update all ancestors when hierarchy is used.
405 * lazily from the per-zone, per-node rb tree 397 * because their event counter is not touched.
406 */ 398 */
407 prev_usage_in_excess = mz->usage_in_excess; 399 for (; mem; mem = parent_mem_cgroup(mem)) {
408 400 mz = mem_cgroup_zoneinfo(mem, nid, zid);
409 new_usage_in_excess = res_counter_soft_limit_excess(&mem->res); 401 excess = res_counter_soft_limit_excess(&mem->res);
410 if (prev_usage_in_excess) { 402 /*
411 mem_cgroup_remove_exceeded(mem, mz, mctz); 403 * We have to update the tree if mz is on RB-tree or
412 updated_tree = true; 404 * mem is over its softlimit.
413 } 405 */
414 if (!new_usage_in_excess) 406 if (excess || mz->on_tree) {
415 goto done; 407 spin_lock(&mctz->lock);
416 mem_cgroup_insert_exceeded(mem, mz, mctz); 408 /* if on-tree, remove it */
417 409 if (mz->on_tree)
418done: 410 __mem_cgroup_remove_exceeded(mem, mz, mctz);
419 if (updated_tree) { 411 /*
420 spin_lock(&mctz->lock); 412 * Insert again. mz->usage_in_excess will be updated.
421 mz->usage_in_excess = new_usage_in_excess; 413 * If excess is 0, no tree ops.
422 spin_unlock(&mctz->lock); 414 */
415 __mem_cgroup_insert_exceeded(mem, mz, mctz, excess);
416 spin_unlock(&mctz->lock);
417 }
423 } 418 }
424} 419}
425 420
@@ -447,9 +442,10 @@ static struct mem_cgroup_per_zone *
447__mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz) 442__mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
448{ 443{
449 struct rb_node *rightmost = NULL; 444 struct rb_node *rightmost = NULL;
450 struct mem_cgroup_per_zone *mz = NULL; 445 struct mem_cgroup_per_zone *mz;
451 446
452retry: 447retry:
448 mz = NULL;
453 rightmost = rb_last(&mctz->rb_root); 449 rightmost = rb_last(&mctz->rb_root);
454 if (!rightmost) 450 if (!rightmost)
455 goto done; /* Nothing to reclaim from */ 451 goto done; /* Nothing to reclaim from */
@@ -1270,9 +1266,9 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm,
1270 gfp_t gfp_mask, struct mem_cgroup **memcg, 1266 gfp_t gfp_mask, struct mem_cgroup **memcg,
1271 bool oom, struct page *page) 1267 bool oom, struct page *page)
1272{ 1268{
1273 struct mem_cgroup *mem, *mem_over_limit, *mem_over_soft_limit; 1269 struct mem_cgroup *mem, *mem_over_limit;
1274 int nr_retries = MEM_CGROUP_RECLAIM_RETRIES; 1270 int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
1275 struct res_counter *fail_res, *soft_fail_res = NULL; 1271 struct res_counter *fail_res;
1276 1272
1277 if (unlikely(test_thread_flag(TIF_MEMDIE))) { 1273 if (unlikely(test_thread_flag(TIF_MEMDIE))) {
1278 /* Don't account this! */ 1274 /* Don't account this! */
@@ -1304,17 +1300,16 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm,
1304 1300
1305 if (mem_cgroup_is_root(mem)) 1301 if (mem_cgroup_is_root(mem))
1306 goto done; 1302 goto done;
1307 ret = res_counter_charge(&mem->res, PAGE_SIZE, &fail_res, 1303 ret = res_counter_charge(&mem->res, PAGE_SIZE, &fail_res);
1308 &soft_fail_res);
1309 if (likely(!ret)) { 1304 if (likely(!ret)) {
1310 if (!do_swap_account) 1305 if (!do_swap_account)
1311 break; 1306 break;
1312 ret = res_counter_charge(&mem->memsw, PAGE_SIZE, 1307 ret = res_counter_charge(&mem->memsw, PAGE_SIZE,
1313 &fail_res, NULL); 1308 &fail_res);
1314 if (likely(!ret)) 1309 if (likely(!ret))
1315 break; 1310 break;
1316 /* mem+swap counter fails */ 1311 /* mem+swap counter fails */
1317 res_counter_uncharge(&mem->res, PAGE_SIZE, NULL); 1312 res_counter_uncharge(&mem->res, PAGE_SIZE);
1318 flags |= MEM_CGROUP_RECLAIM_NOSWAP; 1313 flags |= MEM_CGROUP_RECLAIM_NOSWAP;
1319 mem_over_limit = mem_cgroup_from_res_counter(fail_res, 1314 mem_over_limit = mem_cgroup_from_res_counter(fail_res,
1320 memsw); 1315 memsw);
@@ -1353,16 +1348,11 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm,
1353 } 1348 }
1354 } 1349 }
1355 /* 1350 /*
1356 * Insert just the ancestor, we should trickle down to the correct 1351 * Insert ancestor (and ancestor's ancestors), to softlimit RB-tree.
1357 * cgroup for reclaim, since the other nodes will be below their 1352 * if they exceeds softlimit.
1358 * soft limit
1359 */ 1353 */
1360 if (soft_fail_res) { 1354 if (mem_cgroup_soft_limit_check(mem))
1361 mem_over_soft_limit = 1355 mem_cgroup_update_tree(mem, page);
1362 mem_cgroup_from_res_counter(soft_fail_res, res);
1363 if (mem_cgroup_soft_limit_check(mem_over_soft_limit))
1364 mem_cgroup_update_tree(mem_over_soft_limit, page);
1365 }
1366done: 1356done:
1367 return 0; 1357 return 0;
1368nomem: 1358nomem:
@@ -1437,10 +1427,9 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *mem,
1437 if (unlikely(PageCgroupUsed(pc))) { 1427 if (unlikely(PageCgroupUsed(pc))) {
1438 unlock_page_cgroup(pc); 1428 unlock_page_cgroup(pc);
1439 if (!mem_cgroup_is_root(mem)) { 1429 if (!mem_cgroup_is_root(mem)) {
1440 res_counter_uncharge(&mem->res, PAGE_SIZE, NULL); 1430 res_counter_uncharge(&mem->res, PAGE_SIZE);
1441 if (do_swap_account) 1431 if (do_swap_account)
1442 res_counter_uncharge(&mem->memsw, PAGE_SIZE, 1432 res_counter_uncharge(&mem->memsw, PAGE_SIZE);
1443 NULL);
1444 } 1433 }
1445 css_put(&mem->css); 1434 css_put(&mem->css);
1446 return; 1435 return;
@@ -1519,7 +1508,7 @@ static int mem_cgroup_move_account(struct page_cgroup *pc,
1519 goto out; 1508 goto out;
1520 1509
1521 if (!mem_cgroup_is_root(from)) 1510 if (!mem_cgroup_is_root(from))
1522 res_counter_uncharge(&from->res, PAGE_SIZE, NULL); 1511 res_counter_uncharge(&from->res, PAGE_SIZE);
1523 mem_cgroup_charge_statistics(from, pc, false); 1512 mem_cgroup_charge_statistics(from, pc, false);
1524 1513
1525 page = pc->page; 1514 page = pc->page;
@@ -1539,7 +1528,7 @@ static int mem_cgroup_move_account(struct page_cgroup *pc,
1539 } 1528 }
1540 1529
1541 if (do_swap_account && !mem_cgroup_is_root(from)) 1530 if (do_swap_account && !mem_cgroup_is_root(from))
1542 res_counter_uncharge(&from->memsw, PAGE_SIZE, NULL); 1531 res_counter_uncharge(&from->memsw, PAGE_SIZE);
1543 css_put(&from->css); 1532 css_put(&from->css);
1544 1533
1545 css_get(&to->css); 1534 css_get(&to->css);
@@ -1610,9 +1599,9 @@ uncharge:
1610 css_put(&parent->css); 1599 css_put(&parent->css);
1611 /* uncharge if move fails */ 1600 /* uncharge if move fails */
1612 if (!mem_cgroup_is_root(parent)) { 1601 if (!mem_cgroup_is_root(parent)) {
1613 res_counter_uncharge(&parent->res, PAGE_SIZE, NULL); 1602 res_counter_uncharge(&parent->res, PAGE_SIZE);
1614 if (do_swap_account) 1603 if (do_swap_account)
1615 res_counter_uncharge(&parent->memsw, PAGE_SIZE, NULL); 1604 res_counter_uncharge(&parent->memsw, PAGE_SIZE);
1616 } 1605 }
1617 return ret; 1606 return ret;
1618} 1607}
@@ -1803,8 +1792,7 @@ __mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr,
1803 * calling css_tryget 1792 * calling css_tryget
1804 */ 1793 */
1805 if (!mem_cgroup_is_root(memcg)) 1794 if (!mem_cgroup_is_root(memcg))
1806 res_counter_uncharge(&memcg->memsw, PAGE_SIZE, 1795 res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
1807 NULL);
1808 mem_cgroup_swap_statistics(memcg, false); 1796 mem_cgroup_swap_statistics(memcg, false);
1809 mem_cgroup_put(memcg); 1797 mem_cgroup_put(memcg);
1810 } 1798 }
@@ -1831,9 +1819,9 @@ void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *mem)
1831 if (!mem) 1819 if (!mem)
1832 return; 1820 return;
1833 if (!mem_cgroup_is_root(mem)) { 1821 if (!mem_cgroup_is_root(mem)) {
1834 res_counter_uncharge(&mem->res, PAGE_SIZE, NULL); 1822 res_counter_uncharge(&mem->res, PAGE_SIZE);
1835 if (do_swap_account) 1823 if (do_swap_account)
1836 res_counter_uncharge(&mem->memsw, PAGE_SIZE, NULL); 1824 res_counter_uncharge(&mem->memsw, PAGE_SIZE);
1837 } 1825 }
1838 css_put(&mem->css); 1826 css_put(&mem->css);
1839} 1827}
@@ -1848,7 +1836,6 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
1848 struct page_cgroup *pc; 1836 struct page_cgroup *pc;
1849 struct mem_cgroup *mem = NULL; 1837 struct mem_cgroup *mem = NULL;
1850 struct mem_cgroup_per_zone *mz; 1838 struct mem_cgroup_per_zone *mz;
1851 bool soft_limit_excess = false;
1852 1839
1853 if (mem_cgroup_disabled()) 1840 if (mem_cgroup_disabled())
1854 return NULL; 1841 return NULL;
@@ -1888,10 +1875,10 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
1888 } 1875 }
1889 1876
1890 if (!mem_cgroup_is_root(mem)) { 1877 if (!mem_cgroup_is_root(mem)) {
1891 res_counter_uncharge(&mem->res, PAGE_SIZE, &soft_limit_excess); 1878 res_counter_uncharge(&mem->res, PAGE_SIZE);
1892 if (do_swap_account && 1879 if (do_swap_account &&
1893 (ctype != MEM_CGROUP_CHARGE_TYPE_SWAPOUT)) 1880 (ctype != MEM_CGROUP_CHARGE_TYPE_SWAPOUT))
1894 res_counter_uncharge(&mem->memsw, PAGE_SIZE, NULL); 1881 res_counter_uncharge(&mem->memsw, PAGE_SIZE);
1895 } 1882 }
1896 if (ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT) 1883 if (ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT)
1897 mem_cgroup_swap_statistics(mem, true); 1884 mem_cgroup_swap_statistics(mem, true);
@@ -1908,7 +1895,7 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
1908 mz = page_cgroup_zoneinfo(pc); 1895 mz = page_cgroup_zoneinfo(pc);
1909 unlock_page_cgroup(pc); 1896 unlock_page_cgroup(pc);
1910 1897
1911 if (soft_limit_excess && mem_cgroup_soft_limit_check(mem)) 1898 if (mem_cgroup_soft_limit_check(mem))
1912 mem_cgroup_update_tree(mem, page); 1899 mem_cgroup_update_tree(mem, page);
1913 /* at swapout, this memcg will be accessed to record to swap */ 1900 /* at swapout, this memcg will be accessed to record to swap */
1914 if (ctype != MEM_CGROUP_CHARGE_TYPE_SWAPOUT) 1901 if (ctype != MEM_CGROUP_CHARGE_TYPE_SWAPOUT)
@@ -1986,7 +1973,7 @@ void mem_cgroup_uncharge_swap(swp_entry_t ent)
1986 * This memcg can be obsolete one. We avoid calling css_tryget 1973 * This memcg can be obsolete one. We avoid calling css_tryget
1987 */ 1974 */
1988 if (!mem_cgroup_is_root(memcg)) 1975 if (!mem_cgroup_is_root(memcg))
1989 res_counter_uncharge(&memcg->memsw, PAGE_SIZE, NULL); 1976 res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
1990 mem_cgroup_swap_statistics(memcg, false); 1977 mem_cgroup_swap_statistics(memcg, false);
1991 mem_cgroup_put(memcg); 1978 mem_cgroup_put(memcg);
1992 } 1979 }
@@ -2233,6 +2220,7 @@ unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
2233 unsigned long reclaimed; 2220 unsigned long reclaimed;
2234 int loop = 0; 2221 int loop = 0;
2235 struct mem_cgroup_tree_per_zone *mctz; 2222 struct mem_cgroup_tree_per_zone *mctz;
2223 unsigned long long excess;
2236 2224
2237 if (order > 0) 2225 if (order > 0)
2238 return 0; 2226 return 0;
@@ -2284,9 +2272,8 @@ unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
2284 break; 2272 break;
2285 } while (1); 2273 } while (1);
2286 } 2274 }
2287 mz->usage_in_excess =
2288 res_counter_soft_limit_excess(&mz->mem->res);
2289 __mem_cgroup_remove_exceeded(mz->mem, mz, mctz); 2275 __mem_cgroup_remove_exceeded(mz->mem, mz, mctz);
2276 excess = res_counter_soft_limit_excess(&mz->mem->res);
2290 /* 2277 /*
2291 * One school of thought says that we should not add 2278 * One school of thought says that we should not add
2292 * back the node to the tree if reclaim returns 0. 2279 * back the node to the tree if reclaim returns 0.
@@ -2295,8 +2282,8 @@ unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
2295 * memory to reclaim from. Consider this as a longer 2282 * memory to reclaim from. Consider this as a longer
2296 * term TODO. 2283 * term TODO.
2297 */ 2284 */
2298 if (mz->usage_in_excess) 2285 /* If excess == 0, no tree ops */
2299 __mem_cgroup_insert_exceeded(mz->mem, mz, mctz); 2286 __mem_cgroup_insert_exceeded(mz->mem, mz, mctz, excess);
2300 spin_unlock(&mctz->lock); 2287 spin_unlock(&mctz->lock);
2301 css_put(&mz->mem->css); 2288 css_put(&mz->mem->css);
2302 loop++; 2289 loop++;
diff --git a/mm/mmap.c b/mm/mmap.c
index 21d4029a07b3..73f5e4b64010 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2282,7 +2282,7 @@ static void special_mapping_close(struct vm_area_struct *vma)
2282{ 2282{
2283} 2283}
2284 2284
2285static struct vm_operations_struct special_mapping_vmops = { 2285static const struct vm_operations_struct special_mapping_vmops = {
2286 .close = special_mapping_close, 2286 .close = special_mapping_close,
2287 .fault = special_mapping_fault, 2287 .fault = special_mapping_fault,
2288}; 2288};
diff --git a/mm/nommu.c b/mm/nommu.c
index c73aa4753d79..5189b5aed8c0 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -79,7 +79,7 @@ static struct kmem_cache *vm_region_jar;
79struct rb_root nommu_region_tree = RB_ROOT; 79struct rb_root nommu_region_tree = RB_ROOT;
80DECLARE_RWSEM(nommu_region_sem); 80DECLARE_RWSEM(nommu_region_sem);
81 81
82struct vm_operations_struct generic_file_vm_ops = { 82const struct vm_operations_struct generic_file_vm_ops = {
83}; 83};
84 84
85/* 85/*
diff --git a/mm/percpu.c b/mm/percpu.c
index 43d8cacfdaa5..4a048abad043 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -1043,7 +1043,9 @@ static struct pcpu_chunk *alloc_pcpu_chunk(void)
1043 */ 1043 */
1044static void *pcpu_alloc(size_t size, size_t align, bool reserved) 1044static void *pcpu_alloc(size_t size, size_t align, bool reserved)
1045{ 1045{
1046 static int warn_limit = 10;
1046 struct pcpu_chunk *chunk; 1047 struct pcpu_chunk *chunk;
1048 const char *err;
1047 int slot, off; 1049 int slot, off;
1048 1050
1049 if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE)) { 1051 if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE)) {
@@ -1059,11 +1061,14 @@ static void *pcpu_alloc(size_t size, size_t align, bool reserved)
1059 if (reserved && pcpu_reserved_chunk) { 1061 if (reserved && pcpu_reserved_chunk) {
1060 chunk = pcpu_reserved_chunk; 1062 chunk = pcpu_reserved_chunk;
1061 if (size > chunk->contig_hint || 1063 if (size > chunk->contig_hint ||
1062 pcpu_extend_area_map(chunk) < 0) 1064 pcpu_extend_area_map(chunk) < 0) {
1065 err = "failed to extend area map of reserved chunk";
1063 goto fail_unlock; 1066 goto fail_unlock;
1067 }
1064 off = pcpu_alloc_area(chunk, size, align); 1068 off = pcpu_alloc_area(chunk, size, align);
1065 if (off >= 0) 1069 if (off >= 0)
1066 goto area_found; 1070 goto area_found;
1071 err = "alloc from reserved chunk failed";
1067 goto fail_unlock; 1072 goto fail_unlock;
1068 } 1073 }
1069 1074
@@ -1080,6 +1085,7 @@ restart:
1080 case 1: 1085 case 1:
1081 goto restart; /* pcpu_lock dropped, restart */ 1086 goto restart; /* pcpu_lock dropped, restart */
1082 default: 1087 default:
1088 err = "failed to extend area map";
1083 goto fail_unlock; 1089 goto fail_unlock;
1084 } 1090 }
1085 1091
@@ -1093,8 +1099,10 @@ restart:
1093 spin_unlock_irq(&pcpu_lock); 1099 spin_unlock_irq(&pcpu_lock);
1094 1100
1095 chunk = alloc_pcpu_chunk(); 1101 chunk = alloc_pcpu_chunk();
1096 if (!chunk) 1102 if (!chunk) {
1103 err = "failed to allocate new chunk";
1097 goto fail_unlock_mutex; 1104 goto fail_unlock_mutex;
1105 }
1098 1106
1099 spin_lock_irq(&pcpu_lock); 1107 spin_lock_irq(&pcpu_lock);
1100 pcpu_chunk_relocate(chunk, -1); 1108 pcpu_chunk_relocate(chunk, -1);
@@ -1107,6 +1115,7 @@ area_found:
1107 if (pcpu_populate_chunk(chunk, off, size)) { 1115 if (pcpu_populate_chunk(chunk, off, size)) {
1108 spin_lock_irq(&pcpu_lock); 1116 spin_lock_irq(&pcpu_lock);
1109 pcpu_free_area(chunk, off); 1117 pcpu_free_area(chunk, off);
1118 err = "failed to populate";
1110 goto fail_unlock; 1119 goto fail_unlock;
1111 } 1120 }
1112 1121
@@ -1119,6 +1128,13 @@ fail_unlock:
1119 spin_unlock_irq(&pcpu_lock); 1128 spin_unlock_irq(&pcpu_lock);
1120fail_unlock_mutex: 1129fail_unlock_mutex:
1121 mutex_unlock(&pcpu_alloc_mutex); 1130 mutex_unlock(&pcpu_alloc_mutex);
1131 if (warn_limit) {
1132 pr_warning("PERCPU: allocation failed, size=%zu align=%zu, "
1133 "%s\n", size, align, err);
1134 dump_stack();
1135 if (!--warn_limit)
1136 pr_info("PERCPU: limit reached, disable warning\n");
1137 }
1122 return NULL; 1138 return NULL;
1123} 1139}
1124 1140
@@ -1347,6 +1363,10 @@ struct pcpu_alloc_info * __init pcpu_build_alloc_info(
1347 struct pcpu_alloc_info *ai; 1363 struct pcpu_alloc_info *ai;
1348 unsigned int *cpu_map; 1364 unsigned int *cpu_map;
1349 1365
1366 /* this function may be called multiple times */
1367 memset(group_map, 0, sizeof(group_map));
1368 memset(group_cnt, 0, sizeof(group_map));
1369
1350 /* 1370 /*
1351 * Determine min_unit_size, alloc_size and max_upa such that 1371 * Determine min_unit_size, alloc_size and max_upa such that
1352 * alloc_size is multiple of atom_size and is the smallest 1372 * alloc_size is multiple of atom_size and is the smallest
@@ -1574,6 +1594,7 @@ static void pcpu_dump_alloc_info(const char *lvl,
1574int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai, 1594int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
1575 void *base_addr) 1595 void *base_addr)
1576{ 1596{
1597 static char cpus_buf[4096] __initdata;
1577 static int smap[2], dmap[2]; 1598 static int smap[2], dmap[2];
1578 size_t dyn_size = ai->dyn_size; 1599 size_t dyn_size = ai->dyn_size;
1579 size_t size_sum = ai->static_size + ai->reserved_size + dyn_size; 1600 size_t size_sum = ai->static_size + ai->reserved_size + dyn_size;
@@ -1585,17 +1606,26 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
1585 int *unit_map; 1606 int *unit_map;
1586 int group, unit, i; 1607 int group, unit, i;
1587 1608
1609 cpumask_scnprintf(cpus_buf, sizeof(cpus_buf), cpu_possible_mask);
1610
1611#define PCPU_SETUP_BUG_ON(cond) do { \
1612 if (unlikely(cond)) { \
1613 pr_emerg("PERCPU: failed to initialize, %s", #cond); \
1614 pr_emerg("PERCPU: cpu_possible_mask=%s\n", cpus_buf); \
1615 pcpu_dump_alloc_info(KERN_EMERG, ai); \
1616 BUG(); \
1617 } \
1618} while (0)
1619
1588 /* sanity checks */ 1620 /* sanity checks */
1589 BUILD_BUG_ON(ARRAY_SIZE(smap) >= PCPU_DFL_MAP_ALLOC || 1621 BUILD_BUG_ON(ARRAY_SIZE(smap) >= PCPU_DFL_MAP_ALLOC ||
1590 ARRAY_SIZE(dmap) >= PCPU_DFL_MAP_ALLOC); 1622 ARRAY_SIZE(dmap) >= PCPU_DFL_MAP_ALLOC);
1591 BUG_ON(ai->nr_groups <= 0); 1623 PCPU_SETUP_BUG_ON(ai->nr_groups <= 0);
1592 BUG_ON(!ai->static_size); 1624 PCPU_SETUP_BUG_ON(!ai->static_size);
1593 BUG_ON(!base_addr); 1625 PCPU_SETUP_BUG_ON(!base_addr);
1594 BUG_ON(ai->unit_size < size_sum); 1626 PCPU_SETUP_BUG_ON(ai->unit_size < size_sum);
1595 BUG_ON(ai->unit_size & ~PAGE_MASK); 1627 PCPU_SETUP_BUG_ON(ai->unit_size & ~PAGE_MASK);
1596 BUG_ON(ai->unit_size < PCPU_MIN_UNIT_SIZE); 1628 PCPU_SETUP_BUG_ON(ai->unit_size < PCPU_MIN_UNIT_SIZE);
1597
1598 pcpu_dump_alloc_info(KERN_DEBUG, ai);
1599 1629
1600 /* process group information and build config tables accordingly */ 1630 /* process group information and build config tables accordingly */
1601 group_offsets = alloc_bootmem(ai->nr_groups * sizeof(group_offsets[0])); 1631 group_offsets = alloc_bootmem(ai->nr_groups * sizeof(group_offsets[0]));
@@ -1604,7 +1634,7 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
1604 unit_off = alloc_bootmem(nr_cpu_ids * sizeof(unit_off[0])); 1634 unit_off = alloc_bootmem(nr_cpu_ids * sizeof(unit_off[0]));
1605 1635
1606 for (cpu = 0; cpu < nr_cpu_ids; cpu++) 1636 for (cpu = 0; cpu < nr_cpu_ids; cpu++)
1607 unit_map[cpu] = NR_CPUS; 1637 unit_map[cpu] = UINT_MAX;
1608 pcpu_first_unit_cpu = NR_CPUS; 1638 pcpu_first_unit_cpu = NR_CPUS;
1609 1639
1610 for (group = 0, unit = 0; group < ai->nr_groups; group++, unit += i) { 1640 for (group = 0, unit = 0; group < ai->nr_groups; group++, unit += i) {
@@ -1618,8 +1648,9 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
1618 if (cpu == NR_CPUS) 1648 if (cpu == NR_CPUS)
1619 continue; 1649 continue;
1620 1650
1621 BUG_ON(cpu > nr_cpu_ids || !cpu_possible(cpu)); 1651 PCPU_SETUP_BUG_ON(cpu > nr_cpu_ids);
1622 BUG_ON(unit_map[cpu] != NR_CPUS); 1652 PCPU_SETUP_BUG_ON(!cpu_possible(cpu));
1653 PCPU_SETUP_BUG_ON(unit_map[cpu] != UINT_MAX);
1623 1654
1624 unit_map[cpu] = unit + i; 1655 unit_map[cpu] = unit + i;
1625 unit_off[cpu] = gi->base_offset + i * ai->unit_size; 1656 unit_off[cpu] = gi->base_offset + i * ai->unit_size;
@@ -1632,7 +1663,11 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
1632 pcpu_nr_units = unit; 1663 pcpu_nr_units = unit;
1633 1664
1634 for_each_possible_cpu(cpu) 1665 for_each_possible_cpu(cpu)
1635 BUG_ON(unit_map[cpu] == NR_CPUS); 1666 PCPU_SETUP_BUG_ON(unit_map[cpu] == UINT_MAX);
1667
1668 /* we're done parsing the input, undefine BUG macro and dump config */
1669#undef PCPU_SETUP_BUG_ON
1670 pcpu_dump_alloc_info(KERN_INFO, ai);
1636 1671
1637 pcpu_nr_groups = ai->nr_groups; 1672 pcpu_nr_groups = ai->nr_groups;
1638 pcpu_group_offsets = group_offsets; 1673 pcpu_group_offsets = group_offsets;
@@ -1782,7 +1817,7 @@ int __init pcpu_embed_first_chunk(size_t reserved_size, ssize_t dyn_size,
1782 void *base = (void *)ULONG_MAX; 1817 void *base = (void *)ULONG_MAX;
1783 void **areas = NULL; 1818 void **areas = NULL;
1784 struct pcpu_alloc_info *ai; 1819 struct pcpu_alloc_info *ai;
1785 size_t size_sum, areas_size; 1820 size_t size_sum, areas_size, max_distance;
1786 int group, i, rc; 1821 int group, i, rc;
1787 1822
1788 ai = pcpu_build_alloc_info(reserved_size, dyn_size, atom_size, 1823 ai = pcpu_build_alloc_info(reserved_size, dyn_size, atom_size,
@@ -1832,8 +1867,24 @@ int __init pcpu_embed_first_chunk(size_t reserved_size, ssize_t dyn_size,
1832 } 1867 }
1833 1868
1834 /* base address is now known, determine group base offsets */ 1869 /* base address is now known, determine group base offsets */
1835 for (group = 0; group < ai->nr_groups; group++) 1870 max_distance = 0;
1871 for (group = 0; group < ai->nr_groups; group++) {
1836 ai->groups[group].base_offset = areas[group] - base; 1872 ai->groups[group].base_offset = areas[group] - base;
1873 max_distance = max(max_distance, ai->groups[group].base_offset);
1874 }
1875 max_distance += ai->unit_size;
1876
1877 /* warn if maximum distance is further than 75% of vmalloc space */
1878 if (max_distance > (VMALLOC_END - VMALLOC_START) * 3 / 4) {
1879 pr_warning("PERCPU: max_distance=0x%lx too large for vmalloc "
1880 "space 0x%lx\n",
1881 max_distance, VMALLOC_END - VMALLOC_START);
1882#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
1883 /* and fail if we have fallback */
1884 rc = -EINVAL;
1885 goto out_free;
1886#endif
1887 }
1837 1888
1838 pr_info("PERCPU: Embedded %zu pages/cpu @%p s%zu r%zu d%zu u%zu\n", 1889 pr_info("PERCPU: Embedded %zu pages/cpu @%p s%zu r%zu d%zu u%zu\n",
1839 PFN_DOWN(size_sum), base, ai->static_size, ai->reserved_size, 1890 PFN_DOWN(size_sum), base, ai->static_size, ai->reserved_size,
diff --git a/mm/rmap.c b/mm/rmap.c
index 28aafe2b5306..dd43373a483f 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -242,8 +242,8 @@ vma_address(struct page *page, struct vm_area_struct *vma)
242} 242}
243 243
244/* 244/*
245 * At what user virtual address is page expected in vma? checking that the 245 * At what user virtual address is page expected in vma?
246 * page matches the vma: currently only used on anon pages, by unuse_vma; 246 * checking that the page matches the vma.
247 */ 247 */
248unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma) 248unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
249{ 249{
diff --git a/mm/shmem.c b/mm/shmem.c
index ccf446a9faa1..356dd99566ec 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -218,7 +218,7 @@ static const struct file_operations shmem_file_operations;
218static const struct inode_operations shmem_inode_operations; 218static const struct inode_operations shmem_inode_operations;
219static const struct inode_operations shmem_dir_inode_operations; 219static const struct inode_operations shmem_dir_inode_operations;
220static const struct inode_operations shmem_special_inode_operations; 220static const struct inode_operations shmem_special_inode_operations;
221static struct vm_operations_struct shmem_vm_ops; 221static const struct vm_operations_struct shmem_vm_ops;
222 222
223static struct backing_dev_info shmem_backing_dev_info __read_mostly = { 223static struct backing_dev_info shmem_backing_dev_info __read_mostly = {
224 .ra_pages = 0, /* No readahead */ 224 .ra_pages = 0, /* No readahead */
@@ -2498,7 +2498,7 @@ static const struct super_operations shmem_ops = {
2498 .put_super = shmem_put_super, 2498 .put_super = shmem_put_super,
2499}; 2499};
2500 2500
2501static struct vm_operations_struct shmem_vm_ops = { 2501static const struct vm_operations_struct shmem_vm_ops = {
2502 .fault = shmem_fault, 2502 .fault = shmem_fault,
2503#ifdef CONFIG_NUMA 2503#ifdef CONFIG_NUMA
2504 .set_policy = shmem_set_policy, 2504 .set_policy = shmem_set_policy,
diff --git a/net/8021q/vlan_netlink.c b/net/8021q/vlan_netlink.c
index 343146e1bceb..a91504850195 100644
--- a/net/8021q/vlan_netlink.c
+++ b/net/8021q/vlan_netlink.c
@@ -169,6 +169,7 @@ static size_t vlan_get_size(const struct net_device *dev)
169 struct vlan_dev_info *vlan = vlan_dev_info(dev); 169 struct vlan_dev_info *vlan = vlan_dev_info(dev);
170 170
171 return nla_total_size(2) + /* IFLA_VLAN_ID */ 171 return nla_total_size(2) + /* IFLA_VLAN_ID */
172 sizeof(struct ifla_vlan_flags) + /* IFLA_VLAN_FLAGS */
172 vlan_qos_map_size(vlan->nr_ingress_mappings) + 173 vlan_qos_map_size(vlan->nr_ingress_mappings) +
173 vlan_qos_map_size(vlan->nr_egress_mappings); 174 vlan_qos_map_size(vlan->nr_egress_mappings);
174} 175}
diff --git a/net/atm/common.c b/net/atm/common.c
index 8c4d843eb17f..950bd16d2383 100644
--- a/net/atm/common.c
+++ b/net/atm/common.c
@@ -679,7 +679,7 @@ static int check_qos(const struct atm_qos *qos)
679} 679}
680 680
681int vcc_setsockopt(struct socket *sock, int level, int optname, 681int vcc_setsockopt(struct socket *sock, int level, int optname,
682 char __user *optval, int optlen) 682 char __user *optval, unsigned int optlen)
683{ 683{
684 struct atm_vcc *vcc; 684 struct atm_vcc *vcc;
685 unsigned long value; 685 unsigned long value;
diff --git a/net/atm/common.h b/net/atm/common.h
index 92e2981f479f..f48a76b6cdf4 100644
--- a/net/atm/common.h
+++ b/net/atm/common.h
@@ -21,7 +21,7 @@ unsigned int vcc_poll(struct file *file, struct socket *sock, poll_table *wait);
21int vcc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg); 21int vcc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
22int vcc_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg); 22int vcc_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
23int vcc_setsockopt(struct socket *sock, int level, int optname, 23int vcc_setsockopt(struct socket *sock, int level, int optname,
24 char __user *optval, int optlen); 24 char __user *optval, unsigned int optlen);
25int vcc_getsockopt(struct socket *sock, int level, int optname, 25int vcc_getsockopt(struct socket *sock, int level, int optname,
26 char __user *optval, int __user *optlen); 26 char __user *optval, int __user *optlen);
27 27
diff --git a/net/atm/pvc.c b/net/atm/pvc.c
index e1d22d9430dd..d4c024504f99 100644
--- a/net/atm/pvc.c
+++ b/net/atm/pvc.c
@@ -59,7 +59,7 @@ static int pvc_connect(struct socket *sock,struct sockaddr *sockaddr,
59} 59}
60 60
61static int pvc_setsockopt(struct socket *sock, int level, int optname, 61static int pvc_setsockopt(struct socket *sock, int level, int optname,
62 char __user *optval, int optlen) 62 char __user *optval, unsigned int optlen)
63{ 63{
64 struct sock *sk = sock->sk; 64 struct sock *sk = sock->sk;
65 int error; 65 int error;
diff --git a/net/atm/svc.c b/net/atm/svc.c
index 7b831b526d0b..f90d143c4b25 100644
--- a/net/atm/svc.c
+++ b/net/atm/svc.c
@@ -446,7 +446,7 @@ int svc_change_qos(struct atm_vcc *vcc,struct atm_qos *qos)
446 446
447 447
448static int svc_setsockopt(struct socket *sock, int level, int optname, 448static int svc_setsockopt(struct socket *sock, int level, int optname,
449 char __user *optval, int optlen) 449 char __user *optval, unsigned int optlen)
450{ 450{
451 struct sock *sk = sock->sk; 451 struct sock *sk = sock->sk;
452 struct atm_vcc *vcc = ATM_SD(sock); 452 struct atm_vcc *vcc = ATM_SD(sock);
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index fbcac76fdc0d..f45460730371 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -534,7 +534,7 @@ ax25_cb *ax25_create_cb(void)
534 */ 534 */
535 535
536static int ax25_setsockopt(struct socket *sock, int level, int optname, 536static int ax25_setsockopt(struct socket *sock, int level, int optname,
537 char __user *optval, int optlen) 537 char __user *optval, unsigned int optlen)
538{ 538{
539 struct sock *sk = sock->sk; 539 struct sock *sk = sock->sk;
540 ax25_cb *ax25; 540 ax25_cb *ax25;
@@ -641,15 +641,10 @@ static int ax25_setsockopt(struct socket *sock, int level, int optname,
641 641
642 case SO_BINDTODEVICE: 642 case SO_BINDTODEVICE:
643 if (optlen > IFNAMSIZ) 643 if (optlen > IFNAMSIZ)
644 optlen=IFNAMSIZ; 644 optlen = IFNAMSIZ;
645 if (copy_from_user(devname, optval, optlen)) {
646 res = -EFAULT;
647 break;
648 }
649 645
650 dev = dev_get_by_name(&init_net, devname); 646 if (copy_from_user(devname, optval, optlen)) {
651 if (dev == NULL) { 647 res = -EFAULT;
652 res = -ENODEV;
653 break; 648 break;
654 } 649 }
655 650
@@ -657,12 +652,18 @@ static int ax25_setsockopt(struct socket *sock, int level, int optname,
657 (sock->state != SS_UNCONNECTED || 652 (sock->state != SS_UNCONNECTED ||
658 sk->sk_state == TCP_LISTEN)) { 653 sk->sk_state == TCP_LISTEN)) {
659 res = -EADDRNOTAVAIL; 654 res = -EADDRNOTAVAIL;
660 dev_put(dev); 655 break;
656 }
657
658 dev = dev_get_by_name(&init_net, devname);
659 if (!dev) {
660 res = -ENODEV;
661 break; 661 break;
662 } 662 }
663 663
664 ax25->ax25_dev = ax25_dev_ax25dev(dev); 664 ax25->ax25_dev = ax25_dev_ax25dev(dev);
665 ax25_fillin_cb(ax25, ax25->ax25_dev); 665 ax25_fillin_cb(ax25, ax25->ax25_dev);
666 dev_put(dev);
666 break; 667 break;
667 668
668 default: 669 default:
@@ -900,7 +901,6 @@ struct sock *ax25_make_new(struct sock *osk, struct ax25_dev *ax25_dev)
900 901
901 sock_init_data(NULL, sk); 902 sock_init_data(NULL, sk);
902 903
903 sk->sk_destruct = ax25_free_sock;
904 sk->sk_type = osk->sk_type; 904 sk->sk_type = osk->sk_type;
905 sk->sk_priority = osk->sk_priority; 905 sk->sk_priority = osk->sk_priority;
906 sk->sk_protocol = osk->sk_protocol; 906 sk->sk_protocol = osk->sk_protocol;
@@ -938,6 +938,7 @@ struct sock *ax25_make_new(struct sock *osk, struct ax25_dev *ax25_dev)
938 } 938 }
939 939
940 sk->sk_protinfo = ax25; 940 sk->sk_protinfo = ax25;
941 sk->sk_destruct = ax25_free_sock;
941 ax25->sk = sk; 942 ax25->sk = sk;
942 943
943 return sk; 944 return sk;
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index 4f9621f759a0..75302a986067 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -466,7 +466,7 @@ drop:
466 goto done; 466 goto done;
467} 467}
468 468
469static int hci_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, int len) 469static int hci_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int len)
470{ 470{
471 struct hci_ufilter uf = { .opcode = 0 }; 471 struct hci_ufilter uf = { .opcode = 0 };
472 struct sock *sk = sock->sk; 472 struct sock *sk = sock->sk;
diff --git a/net/bluetooth/l2cap.c b/net/bluetooth/l2cap.c
index b03012564647..555d9da1869b 100644
--- a/net/bluetooth/l2cap.c
+++ b/net/bluetooth/l2cap.c
@@ -1698,7 +1698,7 @@ static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct ms
1698 return bt_sock_recvmsg(iocb, sock, msg, len, flags); 1698 return bt_sock_recvmsg(iocb, sock, msg, len, flags);
1699} 1699}
1700 1700
1701static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, int optlen) 1701static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen)
1702{ 1702{
1703 struct sock *sk = sock->sk; 1703 struct sock *sk = sock->sk;
1704 struct l2cap_options opts; 1704 struct l2cap_options opts;
@@ -1755,7 +1755,7 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __us
1755 return err; 1755 return err;
1756} 1756}
1757 1757
1758static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, int optlen) 1758static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
1759{ 1759{
1760 struct sock *sk = sock->sk; 1760 struct sock *sk = sock->sk;
1761 struct bt_security sec; 1761 struct bt_security sec;
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
index 0b85e8116859..8a20aaf1f231 100644
--- a/net/bluetooth/rfcomm/sock.c
+++ b/net/bluetooth/rfcomm/sock.c
@@ -730,7 +730,7 @@ out:
730 return copied ? : err; 730 return copied ? : err;
731} 731}
732 732
733static int rfcomm_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, int optlen) 733static int rfcomm_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen)
734{ 734{
735 struct sock *sk = sock->sk; 735 struct sock *sk = sock->sk;
736 int err = 0; 736 int err = 0;
@@ -766,7 +766,7 @@ static int rfcomm_sock_setsockopt_old(struct socket *sock, int optname, char __u
766 return err; 766 return err;
767} 767}
768 768
769static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, int optlen) 769static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
770{ 770{
771 struct sock *sk = sock->sk; 771 struct sock *sk = sock->sk;
772 struct bt_security sec; 772 struct bt_security sec;
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index 13c27f17192c..77f4153bdb5e 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -644,7 +644,7 @@ static int sco_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
644 return err; 644 return err;
645} 645}
646 646
647static int sco_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, int optlen) 647static int sco_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
648{ 648{
649 struct sock *sk = sock->sk; 649 struct sock *sk = sock->sk;
650 int err = 0; 650 int err = 0;
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index 142ebac14176..b1b3b0fbf41c 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -432,6 +432,7 @@ err2:
432 br_fdb_delete_by_port(br, p, 1); 432 br_fdb_delete_by_port(br, p, 1);
433err1: 433err1:
434 kobject_put(&p->kobj); 434 kobject_put(&p->kobj);
435 p = NULL; /* kobject_put frees */
435err0: 436err0:
436 dev_set_promiscuity(dev, -1); 437 dev_set_promiscuity(dev, -1);
437put_back: 438put_back:
diff --git a/net/can/raw.c b/net/can/raw.c
index db3152df7d2b..b5e897922d32 100644
--- a/net/can/raw.c
+++ b/net/can/raw.c
@@ -411,7 +411,7 @@ static int raw_getname(struct socket *sock, struct sockaddr *uaddr,
411} 411}
412 412
413static int raw_setsockopt(struct socket *sock, int level, int optname, 413static int raw_setsockopt(struct socket *sock, int level, int optname,
414 char __user *optval, int optlen) 414 char __user *optval, unsigned int optlen)
415{ 415{
416 struct sock *sk = sock->sk; 416 struct sock *sk = sock->sk;
417 struct raw_sock *ro = raw_sk(sk); 417 struct raw_sock *ro = raw_sk(sk);
diff --git a/net/compat.c b/net/compat.c
index 12728b17a226..a407c3addbae 100644
--- a/net/compat.c
+++ b/net/compat.c
@@ -331,7 +331,7 @@ struct compat_sock_fprog {
331}; 331};
332 332
333static int do_set_attach_filter(struct socket *sock, int level, int optname, 333static int do_set_attach_filter(struct socket *sock, int level, int optname,
334 char __user *optval, int optlen) 334 char __user *optval, unsigned int optlen)
335{ 335{
336 struct compat_sock_fprog __user *fprog32 = (struct compat_sock_fprog __user *)optval; 336 struct compat_sock_fprog __user *fprog32 = (struct compat_sock_fprog __user *)optval;
337 struct sock_fprog __user *kfprog = compat_alloc_user_space(sizeof(struct sock_fprog)); 337 struct sock_fprog __user *kfprog = compat_alloc_user_space(sizeof(struct sock_fprog));
@@ -351,7 +351,7 @@ static int do_set_attach_filter(struct socket *sock, int level, int optname,
351} 351}
352 352
353static int do_set_sock_timeout(struct socket *sock, int level, 353static int do_set_sock_timeout(struct socket *sock, int level,
354 int optname, char __user *optval, int optlen) 354 int optname, char __user *optval, unsigned int optlen)
355{ 355{
356 struct compat_timeval __user *up = (struct compat_timeval __user *) optval; 356 struct compat_timeval __user *up = (struct compat_timeval __user *) optval;
357 struct timeval ktime; 357 struct timeval ktime;
@@ -373,7 +373,7 @@ static int do_set_sock_timeout(struct socket *sock, int level,
373} 373}
374 374
375static int compat_sock_setsockopt(struct socket *sock, int level, int optname, 375static int compat_sock_setsockopt(struct socket *sock, int level, int optname,
376 char __user *optval, int optlen) 376 char __user *optval, unsigned int optlen)
377{ 377{
378 if (optname == SO_ATTACH_FILTER) 378 if (optname == SO_ATTACH_FILTER)
379 return do_set_attach_filter(sock, level, optname, 379 return do_set_attach_filter(sock, level, optname,
@@ -385,7 +385,7 @@ static int compat_sock_setsockopt(struct socket *sock, int level, int optname,
385} 385}
386 386
387asmlinkage long compat_sys_setsockopt(int fd, int level, int optname, 387asmlinkage long compat_sys_setsockopt(int fd, int level, int optname,
388 char __user *optval, int optlen) 388 char __user *optval, unsigned int optlen)
389{ 389{
390 int err; 390 int err;
391 struct socket *sock; 391 struct socket *sock;
@@ -558,8 +558,8 @@ struct compat_group_filter {
558 558
559 559
560int compat_mc_setsockopt(struct sock *sock, int level, int optname, 560int compat_mc_setsockopt(struct sock *sock, int level, int optname,
561 char __user *optval, int optlen, 561 char __user *optval, unsigned int optlen,
562 int (*setsockopt)(struct sock *,int,int,char __user *,int)) 562 int (*setsockopt)(struct sock *,int,int,char __user *,unsigned int))
563{ 563{
564 char __user *koptval = optval; 564 char __user *koptval = optval;
565 int koptlen = optlen; 565 int koptlen = optlen;
diff --git a/net/core/dev.c b/net/core/dev.c
index 560c8c9c03ab..b8f74cfb1bfd 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2288,6 +2288,9 @@ int netif_receive_skb(struct sk_buff *skb)
2288 int ret = NET_RX_DROP; 2288 int ret = NET_RX_DROP;
2289 __be16 type; 2289 __be16 type;
2290 2290
2291 if (!skb->tstamp.tv64)
2292 net_timestamp(skb);
2293
2291 if (skb->vlan_tci && vlan_hwaccel_do_receive(skb)) 2294 if (skb->vlan_tci && vlan_hwaccel_do_receive(skb))
2292 return NET_RX_SUCCESS; 2295 return NET_RX_SUCCESS;
2293 2296
@@ -2295,9 +2298,6 @@ int netif_receive_skb(struct sk_buff *skb)
2295 if (netpoll_receive_skb(skb)) 2298 if (netpoll_receive_skb(skb))
2296 return NET_RX_DROP; 2299 return NET_RX_DROP;
2297 2300
2298 if (!skb->tstamp.tv64)
2299 net_timestamp(skb);
2300
2301 if (!skb->iif) 2301 if (!skb->iif)
2302 skb->iif = skb->dev->ifindex; 2302 skb->iif = skb->dev->ifindex;
2303 2303
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 7d4c57523b09..821d30918cfc 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -16,7 +16,7 @@
16#include <net/sock.h> 16#include <net/sock.h>
17#include <linux/rtnetlink.h> 17#include <linux/rtnetlink.h>
18#include <linux/wireless.h> 18#include <linux/wireless.h>
19#include <net/iw_handler.h> 19#include <net/wext.h>
20 20
21#include "net-sysfs.h" 21#include "net-sysfs.h"
22 22
@@ -363,15 +363,13 @@ static ssize_t wireless_show(struct device *d, char *buf,
363 char *)) 363 char *))
364{ 364{
365 struct net_device *dev = to_net_dev(d); 365 struct net_device *dev = to_net_dev(d);
366 const struct iw_statistics *iw = NULL; 366 const struct iw_statistics *iw;
367 ssize_t ret = -EINVAL; 367 ssize_t ret = -EINVAL;
368 368
369 read_lock(&dev_base_lock); 369 read_lock(&dev_base_lock);
370 if (dev_isalive(dev)) { 370 if (dev_isalive(dev)) {
371 if (dev->wireless_handlers && 371 iw = get_wireless_stats(dev);
372 dev->wireless_handlers->get_wireless_stats) 372 if (iw)
373 iw = dev->wireless_handlers->get_wireless_stats(dev);
374 if (iw != NULL)
375 ret = (*format)(iw, buf); 373 ret = (*format)(iw, buf);
376 } 374 }
377 read_unlock(&dev_base_lock); 375 read_unlock(&dev_base_lock);
@@ -505,7 +503,7 @@ int netdev_register_kobject(struct net_device *net)
505 *groups++ = &netstat_group; 503 *groups++ = &netstat_group;
506 504
507#ifdef CONFIG_WIRELESS_EXT_SYSFS 505#ifdef CONFIG_WIRELESS_EXT_SYSFS
508 if (net->wireless_handlers && net->wireless_handlers->get_wireless_stats) 506 if (net->wireless_handlers || net->ieee80211_ptr)
509 *groups++ = &wireless_group; 507 *groups++ = &wireless_group;
510#endif 508#endif
511#endif /* CONFIG_SYSFS */ 509#endif /* CONFIG_SYSFS */
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 4d11c28ca8ca..b69455217ed6 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -2105,15 +2105,17 @@ static void pktgen_setup_inject(struct pktgen_dev *pkt_dev)
2105static void spin(struct pktgen_dev *pkt_dev, ktime_t spin_until) 2105static void spin(struct pktgen_dev *pkt_dev, ktime_t spin_until)
2106{ 2106{
2107 ktime_t start_time, end_time; 2107 ktime_t start_time, end_time;
2108 s32 remaining; 2108 s64 remaining;
2109 struct hrtimer_sleeper t; 2109 struct hrtimer_sleeper t;
2110 2110
2111 hrtimer_init_on_stack(&t.timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); 2111 hrtimer_init_on_stack(&t.timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
2112 hrtimer_set_expires(&t.timer, spin_until); 2112 hrtimer_set_expires(&t.timer, spin_until);
2113 2113
2114 remaining = ktime_to_us(hrtimer_expires_remaining(&t.timer)); 2114 remaining = ktime_to_us(hrtimer_expires_remaining(&t.timer));
2115 if (remaining <= 0) 2115 if (remaining <= 0) {
2116 pkt_dev->next_tx = ktime_add_ns(spin_until, pkt_dev->delay);
2116 return; 2117 return;
2118 }
2117 2119
2118 start_time = ktime_now(); 2120 start_time = ktime_now();
2119 if (remaining < 100) 2121 if (remaining < 100)
diff --git a/net/core/sock.c b/net/core/sock.c
index 524712a7b154..7626b6aacd68 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -446,7 +446,7 @@ static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool)
446 */ 446 */
447 447
448int sock_setsockopt(struct socket *sock, int level, int optname, 448int sock_setsockopt(struct socket *sock, int level, int optname,
449 char __user *optval, int optlen) 449 char __user *optval, unsigned int optlen)
450{ 450{
451 struct sock *sk = sock->sk; 451 struct sock *sk = sock->sk;
452 int val; 452 int val;
@@ -1228,17 +1228,22 @@ void __init sk_init(void)
1228void sock_wfree(struct sk_buff *skb) 1228void sock_wfree(struct sk_buff *skb)
1229{ 1229{
1230 struct sock *sk = skb->sk; 1230 struct sock *sk = skb->sk;
1231 int res; 1231 unsigned int len = skb->truesize;
1232 1232
1233 /* In case it might be waiting for more memory. */ 1233 if (!sock_flag(sk, SOCK_USE_WRITE_QUEUE)) {
1234 res = atomic_sub_return(skb->truesize, &sk->sk_wmem_alloc); 1234 /*
1235 if (!sock_flag(sk, SOCK_USE_WRITE_QUEUE)) 1235 * Keep a reference on sk_wmem_alloc, this will be released
1236 * after sk_write_space() call
1237 */
1238 atomic_sub(len - 1, &sk->sk_wmem_alloc);
1236 sk->sk_write_space(sk); 1239 sk->sk_write_space(sk);
1240 len = 1;
1241 }
1237 /* 1242 /*
1238 * if sk_wmem_alloc reached 0, we are last user and should 1243 * if sk_wmem_alloc reaches 0, we must finish what sk_free()
1239 * free this sock, as sk_free() call could not do it. 1244 * could not do because of in-flight packets
1240 */ 1245 */
1241 if (res == 0) 1246 if (atomic_sub_and_test(len, &sk->sk_wmem_alloc))
1242 __sk_free(sk); 1247 __sk_free(sk);
1243} 1248}
1244EXPORT_SYMBOL(sock_wfree); 1249EXPORT_SYMBOL(sock_wfree);
@@ -1697,7 +1702,7 @@ int sock_no_shutdown(struct socket *sock, int how)
1697EXPORT_SYMBOL(sock_no_shutdown); 1702EXPORT_SYMBOL(sock_no_shutdown);
1698 1703
1699int sock_no_setsockopt(struct socket *sock, int level, int optname, 1704int sock_no_setsockopt(struct socket *sock, int level, int optname,
1700 char __user *optval, int optlen) 1705 char __user *optval, unsigned int optlen)
1701{ 1706{
1702 return -EOPNOTSUPP; 1707 return -EOPNOTSUPP;
1703} 1708}
@@ -2018,7 +2023,7 @@ EXPORT_SYMBOL(sock_common_recvmsg);
2018 * Set socket options on an inet socket. 2023 * Set socket options on an inet socket.
2019 */ 2024 */
2020int sock_common_setsockopt(struct socket *sock, int level, int optname, 2025int sock_common_setsockopt(struct socket *sock, int level, int optname,
2021 char __user *optval, int optlen) 2026 char __user *optval, unsigned int optlen)
2022{ 2027{
2023 struct sock *sk = sock->sk; 2028 struct sock *sk = sock->sk;
2024 2029
@@ -2028,7 +2033,7 @@ EXPORT_SYMBOL(sock_common_setsockopt);
2028 2033
2029#ifdef CONFIG_COMPAT 2034#ifdef CONFIG_COMPAT
2030int compat_sock_common_setsockopt(struct socket *sock, int level, int optname, 2035int compat_sock_common_setsockopt(struct socket *sock, int level, int optname,
2031 char __user *optval, int optlen) 2036 char __user *optval, unsigned int optlen)
2032{ 2037{
2033 struct sock *sk = sock->sk; 2038 struct sock *sk = sock->sk;
2034 2039
diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c
index e0879bfb7dd5..ac1205df6c86 100644
--- a/net/dcb/dcbnl.c
+++ b/net/dcb/dcbnl.c
@@ -194,7 +194,7 @@ static int dcbnl_reply(u8 value, u8 event, u8 cmd, u8 attr, u32 pid,
194 nlmsg_end(dcbnl_skb, nlh); 194 nlmsg_end(dcbnl_skb, nlh);
195 ret = rtnl_unicast(dcbnl_skb, &init_net, pid); 195 ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
196 if (ret) 196 if (ret)
197 goto err; 197 return -EINVAL;
198 198
199 return 0; 199 return 0;
200nlmsg_failure: 200nlmsg_failure:
@@ -275,7 +275,7 @@ static int dcbnl_getpfccfg(struct net_device *netdev, struct nlattr **tb,
275 275
276 ret = rtnl_unicast(dcbnl_skb, &init_net, pid); 276 ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
277 if (ret) 277 if (ret)
278 goto err; 278 goto err_out;
279 279
280 return 0; 280 return 0;
281nlmsg_failure: 281nlmsg_failure:
@@ -316,12 +316,11 @@ static int dcbnl_getperm_hwaddr(struct net_device *netdev, struct nlattr **tb,
316 316
317 ret = rtnl_unicast(dcbnl_skb, &init_net, pid); 317 ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
318 if (ret) 318 if (ret)
319 goto err; 319 goto err_out;
320 320
321 return 0; 321 return 0;
322 322
323nlmsg_failure: 323nlmsg_failure:
324err:
325 kfree_skb(dcbnl_skb); 324 kfree_skb(dcbnl_skb);
326err_out: 325err_out:
327 return -EINVAL; 326 return -EINVAL;
@@ -383,7 +382,7 @@ static int dcbnl_getcap(struct net_device *netdev, struct nlattr **tb,
383 382
384 ret = rtnl_unicast(dcbnl_skb, &init_net, pid); 383 ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
385 if (ret) 384 if (ret)
386 goto err; 385 goto err_out;
387 386
388 return 0; 387 return 0;
389nlmsg_failure: 388nlmsg_failure:
@@ -460,7 +459,7 @@ static int dcbnl_getnumtcs(struct net_device *netdev, struct nlattr **tb,
460 ret = rtnl_unicast(dcbnl_skb, &init_net, pid); 459 ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
461 if (ret) { 460 if (ret) {
462 ret = -EINVAL; 461 ret = -EINVAL;
463 goto err; 462 goto err_out;
464 } 463 }
465 464
466 return 0; 465 return 0;
@@ -799,7 +798,7 @@ static int __dcbnl_pg_getcfg(struct net_device *netdev, struct nlattr **tb,
799 798
800 ret = rtnl_unicast(dcbnl_skb, &init_net, pid); 799 ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
801 if (ret) 800 if (ret)
802 goto err; 801 goto err_out;
803 802
804 return 0; 803 return 0;
805 804
@@ -1063,7 +1062,7 @@ static int dcbnl_bcn_getcfg(struct net_device *netdev, struct nlattr **tb,
1063 1062
1064 ret = rtnl_unicast(dcbnl_skb, &init_net, pid); 1063 ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
1065 if (ret) 1064 if (ret)
1066 goto err; 1065 goto err_out;
1067 1066
1068 return 0; 1067 return 0;
1069 1068
diff --git a/net/dccp/dccp.h b/net/dccp/dccp.h
index d6bc47363b1c..5ef32c2f0d6a 100644
--- a/net/dccp/dccp.h
+++ b/net/dccp/dccp.h
@@ -290,14 +290,14 @@ extern int dccp_disconnect(struct sock *sk, int flags);
290extern int dccp_getsockopt(struct sock *sk, int level, int optname, 290extern int dccp_getsockopt(struct sock *sk, int level, int optname,
291 char __user *optval, int __user *optlen); 291 char __user *optval, int __user *optlen);
292extern int dccp_setsockopt(struct sock *sk, int level, int optname, 292extern int dccp_setsockopt(struct sock *sk, int level, int optname,
293 char __user *optval, int optlen); 293 char __user *optval, unsigned int optlen);
294#ifdef CONFIG_COMPAT 294#ifdef CONFIG_COMPAT
295extern int compat_dccp_getsockopt(struct sock *sk, 295extern int compat_dccp_getsockopt(struct sock *sk,
296 int level, int optname, 296 int level, int optname,
297 char __user *optval, int __user *optlen); 297 char __user *optval, int __user *optlen);
298extern int compat_dccp_setsockopt(struct sock *sk, 298extern int compat_dccp_setsockopt(struct sock *sk,
299 int level, int optname, 299 int level, int optname,
300 char __user *optval, int optlen); 300 char __user *optval, unsigned int optlen);
301#endif 301#endif
302extern int dccp_ioctl(struct sock *sk, int cmd, unsigned long arg); 302extern int dccp_ioctl(struct sock *sk, int cmd, unsigned long arg);
303extern int dccp_sendmsg(struct kiocb *iocb, struct sock *sk, 303extern int dccp_sendmsg(struct kiocb *iocb, struct sock *sk,
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index bc4467082a00..a156319fd0ac 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -393,7 +393,7 @@ out:
393EXPORT_SYMBOL_GPL(dccp_ioctl); 393EXPORT_SYMBOL_GPL(dccp_ioctl);
394 394
395static int dccp_setsockopt_service(struct sock *sk, const __be32 service, 395static int dccp_setsockopt_service(struct sock *sk, const __be32 service,
396 char __user *optval, int optlen) 396 char __user *optval, unsigned int optlen)
397{ 397{
398 struct dccp_sock *dp = dccp_sk(sk); 398 struct dccp_sock *dp = dccp_sk(sk);
399 struct dccp_service_list *sl = NULL; 399 struct dccp_service_list *sl = NULL;
@@ -464,7 +464,7 @@ static int dccp_setsockopt_cscov(struct sock *sk, int cscov, bool rx)
464} 464}
465 465
466static int dccp_setsockopt_ccid(struct sock *sk, int type, 466static int dccp_setsockopt_ccid(struct sock *sk, int type,
467 char __user *optval, int optlen) 467 char __user *optval, unsigned int optlen)
468{ 468{
469 u8 *val; 469 u8 *val;
470 int rc = 0; 470 int rc = 0;
@@ -494,7 +494,7 @@ static int dccp_setsockopt_ccid(struct sock *sk, int type,
494} 494}
495 495
496static int do_dccp_setsockopt(struct sock *sk, int level, int optname, 496static int do_dccp_setsockopt(struct sock *sk, int level, int optname,
497 char __user *optval, int optlen) 497 char __user *optval, unsigned int optlen)
498{ 498{
499 struct dccp_sock *dp = dccp_sk(sk); 499 struct dccp_sock *dp = dccp_sk(sk);
500 int val, err = 0; 500 int val, err = 0;
@@ -546,7 +546,7 @@ static int do_dccp_setsockopt(struct sock *sk, int level, int optname,
546} 546}
547 547
548int dccp_setsockopt(struct sock *sk, int level, int optname, 548int dccp_setsockopt(struct sock *sk, int level, int optname,
549 char __user *optval, int optlen) 549 char __user *optval, unsigned int optlen)
550{ 550{
551 if (level != SOL_DCCP) 551 if (level != SOL_DCCP)
552 return inet_csk(sk)->icsk_af_ops->setsockopt(sk, level, 552 return inet_csk(sk)->icsk_af_ops->setsockopt(sk, level,
@@ -559,7 +559,7 @@ EXPORT_SYMBOL_GPL(dccp_setsockopt);
559 559
560#ifdef CONFIG_COMPAT 560#ifdef CONFIG_COMPAT
561int compat_dccp_setsockopt(struct sock *sk, int level, int optname, 561int compat_dccp_setsockopt(struct sock *sk, int level, int optname,
562 char __user *optval, int optlen) 562 char __user *optval, unsigned int optlen)
563{ 563{
564 if (level != SOL_DCCP) 564 if (level != SOL_DCCP)
565 return inet_csk_compat_setsockopt(sk, level, optname, 565 return inet_csk_compat_setsockopt(sk, level, optname,
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
index 77d40289653c..7a58c87baf17 100644
--- a/net/decnet/af_decnet.c
+++ b/net/decnet/af_decnet.c
@@ -157,7 +157,7 @@ static struct hlist_head dn_sk_hash[DN_SK_HASH_SIZE];
157static struct hlist_head dn_wild_sk; 157static struct hlist_head dn_wild_sk;
158static atomic_t decnet_memory_allocated; 158static atomic_t decnet_memory_allocated;
159 159
160static int __dn_setsockopt(struct socket *sock, int level, int optname, char __user *optval, int optlen, int flags); 160static int __dn_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen, int flags);
161static int __dn_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen, int flags); 161static int __dn_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen, int flags);
162 162
163static struct hlist_head *dn_find_list(struct sock *sk) 163static struct hlist_head *dn_find_list(struct sock *sk)
@@ -1325,7 +1325,7 @@ out:
1325 return err; 1325 return err;
1326} 1326}
1327 1327
1328static int dn_setsockopt(struct socket *sock, int level, int optname, char __user *optval, int optlen) 1328static int dn_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
1329{ 1329{
1330 struct sock *sk = sock->sk; 1330 struct sock *sk = sock->sk;
1331 int err; 1331 int err;
@@ -1337,7 +1337,7 @@ static int dn_setsockopt(struct socket *sock, int level, int optname, char __use
1337 return err; 1337 return err;
1338} 1338}
1339 1339
1340static int __dn_setsockopt(struct socket *sock, int level,int optname, char __user *optval, int optlen, int flags) 1340static int __dn_setsockopt(struct socket *sock, int level,int optname, char __user *optval, unsigned int optlen, int flags)
1341{ 1341{
1342 struct sock *sk = sock->sk; 1342 struct sock *sk = sock->sk;
1343 struct dn_scp *scp = DN_SK(sk); 1343 struct dn_scp *scp = DN_SK(sk);
diff --git a/net/ieee802154/dgram.c b/net/ieee802154/dgram.c
index 51593a48f2dd..a413b1bf4465 100644
--- a/net/ieee802154/dgram.c
+++ b/net/ieee802154/dgram.c
@@ -414,7 +414,7 @@ static int dgram_getsockopt(struct sock *sk, int level, int optname,
414} 414}
415 415
416static int dgram_setsockopt(struct sock *sk, int level, int optname, 416static int dgram_setsockopt(struct sock *sk, int level, int optname,
417 char __user *optval, int optlen) 417 char __user *optval, unsigned int optlen)
418{ 418{
419 struct dgram_sock *ro = dgram_sk(sk); 419 struct dgram_sock *ro = dgram_sk(sk);
420 int val; 420 int val;
diff --git a/net/ieee802154/raw.c b/net/ieee802154/raw.c
index 13198859982e..30e74eee07d6 100644
--- a/net/ieee802154/raw.c
+++ b/net/ieee802154/raw.c
@@ -244,7 +244,7 @@ static int raw_getsockopt(struct sock *sk, int level, int optname,
244} 244}
245 245
246static int raw_setsockopt(struct sock *sk, int level, int optname, 246static int raw_setsockopt(struct sock *sk, int level, int optname,
247 char __user *optval, int optlen) 247 char __user *optval, unsigned int optlen)
248{ 248{
249 return -EOPNOTSUPP; 249 return -EOPNOTSUPP;
250} 250}
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 58c4b0f7c4aa..57737b8d1711 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1119,6 +1119,7 @@ int inet_sk_rebuild_header(struct sock *sk)
1119{ 1119{
1120 struct flowi fl = { 1120 struct flowi fl = {
1121 .oif = sk->sk_bound_dev_if, 1121 .oif = sk->sk_bound_dev_if,
1122 .mark = sk->sk_mark,
1122 .nl_u = { 1123 .nl_u = {
1123 .ip4_u = { 1124 .ip4_u = {
1124 .daddr = daddr, 1125 .daddr = daddr,
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 22cd19ee44e5..4351ca2cf0b8 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -714,7 +714,7 @@ int inet_csk_compat_getsockopt(struct sock *sk, int level, int optname,
714EXPORT_SYMBOL_GPL(inet_csk_compat_getsockopt); 714EXPORT_SYMBOL_GPL(inet_csk_compat_getsockopt);
715 715
716int inet_csk_compat_setsockopt(struct sock *sk, int level, int optname, 716int inet_csk_compat_setsockopt(struct sock *sk, int level, int optname,
717 char __user *optval, int optlen) 717 char __user *optval, unsigned int optlen)
718{ 718{
719 const struct inet_connection_sock *icsk = inet_csk(sk); 719 const struct inet_connection_sock *icsk = inet_csk(sk);
720 720
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 9fe5d7b81580..f9895180f481 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -335,6 +335,7 @@ int ip_queue_xmit(struct sk_buff *skb, int ipfragok)
335 335
336 { 336 {
337 struct flowi fl = { .oif = sk->sk_bound_dev_if, 337 struct flowi fl = { .oif = sk->sk_bound_dev_if,
338 .mark = sk->sk_mark,
338 .nl_u = { .ip4_u = 339 .nl_u = { .ip4_u =
339 { .daddr = daddr, 340 { .daddr = daddr,
340 .saddr = inet->saddr, 341 .saddr = inet->saddr,
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 5a0693576e82..0c0b6e363a20 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -440,7 +440,7 @@ out:
440 */ 440 */
441 441
442static int do_ip_setsockopt(struct sock *sk, int level, 442static int do_ip_setsockopt(struct sock *sk, int level,
443 int optname, char __user *optval, int optlen) 443 int optname, char __user *optval, unsigned int optlen)
444{ 444{
445 struct inet_sock *inet = inet_sk(sk); 445 struct inet_sock *inet = inet_sk(sk);
446 int val = 0, err; 446 int val = 0, err;
@@ -950,7 +950,7 @@ e_inval:
950} 950}
951 951
952int ip_setsockopt(struct sock *sk, int level, 952int ip_setsockopt(struct sock *sk, int level,
953 int optname, char __user *optval, int optlen) 953 int optname, char __user *optval, unsigned int optlen)
954{ 954{
955 int err; 955 int err;
956 956
@@ -975,7 +975,7 @@ EXPORT_SYMBOL(ip_setsockopt);
975 975
976#ifdef CONFIG_COMPAT 976#ifdef CONFIG_COMPAT
977int compat_ip_setsockopt(struct sock *sk, int level, int optname, 977int compat_ip_setsockopt(struct sock *sk, int level, int optname,
978 char __user *optval, int optlen) 978 char __user *optval, unsigned int optlen)
979{ 979{
980 int err; 980 int err;
981 981
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index c43ec2d51ce2..630a56df7b47 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -931,7 +931,7 @@ static void mrtsock_destruct(struct sock *sk)
931 * MOSPF/PIM router set up we can clean this up. 931 * MOSPF/PIM router set up we can clean this up.
932 */ 932 */
933 933
934int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, int optlen) 934int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsigned int optlen)
935{ 935{
936 int ret; 936 int ret;
937 struct vifctl vif; 937 struct vifctl vif;
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index ebb1e5848bc6..757c9171e7c2 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -741,7 +741,7 @@ out: return ret;
741} 741}
742 742
743static int do_raw_setsockopt(struct sock *sk, int level, int optname, 743static int do_raw_setsockopt(struct sock *sk, int level, int optname,
744 char __user *optval, int optlen) 744 char __user *optval, unsigned int optlen)
745{ 745{
746 if (optname == ICMP_FILTER) { 746 if (optname == ICMP_FILTER) {
747 if (inet_sk(sk)->num != IPPROTO_ICMP) 747 if (inet_sk(sk)->num != IPPROTO_ICMP)
@@ -753,7 +753,7 @@ static int do_raw_setsockopt(struct sock *sk, int level, int optname,
753} 753}
754 754
755static int raw_setsockopt(struct sock *sk, int level, int optname, 755static int raw_setsockopt(struct sock *sk, int level, int optname,
756 char __user *optval, int optlen) 756 char __user *optval, unsigned int optlen)
757{ 757{
758 if (level != SOL_RAW) 758 if (level != SOL_RAW)
759 return ip_setsockopt(sk, level, optname, optval, optlen); 759 return ip_setsockopt(sk, level, optname, optval, optlen);
@@ -762,7 +762,7 @@ static int raw_setsockopt(struct sock *sk, int level, int optname,
762 762
763#ifdef CONFIG_COMPAT 763#ifdef CONFIG_COMPAT
764static int compat_raw_setsockopt(struct sock *sk, int level, int optname, 764static int compat_raw_setsockopt(struct sock *sk, int level, int optname,
765 char __user *optval, int optlen) 765 char __user *optval, unsigned int optlen)
766{ 766{
767 if (level != SOL_RAW) 767 if (level != SOL_RAW)
768 return compat_ip_setsockopt(sk, level, optname, optval, optlen); 768 return compat_ip_setsockopt(sk, level, optname, optval, optlen);
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 21387ebabf00..64d0af675823 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -580,7 +580,7 @@ ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos,
580 580
581 lock_sock(sk); 581 lock_sock(sk);
582 582
583 timeo = sock_rcvtimeo(sk, flags & SPLICE_F_NONBLOCK); 583 timeo = sock_rcvtimeo(sk, sock->file->f_flags & O_NONBLOCK);
584 while (tss.len) { 584 while (tss.len) {
585 ret = __tcp_splice_read(sk, &tss); 585 ret = __tcp_splice_read(sk, &tss);
586 if (ret < 0) 586 if (ret < 0)
@@ -2032,7 +2032,7 @@ int tcp_disconnect(struct sock *sk, int flags)
2032 * Socket option code for TCP. 2032 * Socket option code for TCP.
2033 */ 2033 */
2034static int do_tcp_setsockopt(struct sock *sk, int level, 2034static int do_tcp_setsockopt(struct sock *sk, int level,
2035 int optname, char __user *optval, int optlen) 2035 int optname, char __user *optval, unsigned int optlen)
2036{ 2036{
2037 struct tcp_sock *tp = tcp_sk(sk); 2037 struct tcp_sock *tp = tcp_sk(sk);
2038 struct inet_connection_sock *icsk = inet_csk(sk); 2038 struct inet_connection_sock *icsk = inet_csk(sk);
@@ -2047,7 +2047,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
2047 return -EINVAL; 2047 return -EINVAL;
2048 2048
2049 val = strncpy_from_user(name, optval, 2049 val = strncpy_from_user(name, optval,
2050 min(TCP_CA_NAME_MAX-1, optlen)); 2050 min_t(long, TCP_CA_NAME_MAX-1, optlen));
2051 if (val < 0) 2051 if (val < 0)
2052 return -EFAULT; 2052 return -EFAULT;
2053 name[val] = 0; 2053 name[val] = 0;
@@ -2220,7 +2220,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
2220} 2220}
2221 2221
2222int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval, 2222int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
2223 int optlen) 2223 unsigned int optlen)
2224{ 2224{
2225 struct inet_connection_sock *icsk = inet_csk(sk); 2225 struct inet_connection_sock *icsk = inet_csk(sk);
2226 2226
@@ -2232,7 +2232,7 @@ int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
2232 2232
2233#ifdef CONFIG_COMPAT 2233#ifdef CONFIG_COMPAT
2234int compat_tcp_setsockopt(struct sock *sk, int level, int optname, 2234int compat_tcp_setsockopt(struct sock *sk, int level, int optname,
2235 char __user *optval, int optlen) 2235 char __user *optval, unsigned int optlen)
2236{ 2236{
2237 if (level != SOL_TCP) 2237 if (level != SOL_TCP)
2238 return inet_csk_compat_setsockopt(sk, level, optname, 2238 return inet_csk_compat_setsockopt(sk, level, optname,
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 5200aab0ca97..fcd278a7080e 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -361,6 +361,7 @@ static inline int tcp_urg_mode(const struct tcp_sock *tp)
361#define OPTION_SACK_ADVERTISE (1 << 0) 361#define OPTION_SACK_ADVERTISE (1 << 0)
362#define OPTION_TS (1 << 1) 362#define OPTION_TS (1 << 1)
363#define OPTION_MD5 (1 << 2) 363#define OPTION_MD5 (1 << 2)
364#define OPTION_WSCALE (1 << 3)
364 365
365struct tcp_out_options { 366struct tcp_out_options {
366 u8 options; /* bit field of OPTION_* */ 367 u8 options; /* bit field of OPTION_* */
@@ -427,7 +428,7 @@ static void tcp_options_write(__be32 *ptr, struct tcp_sock *tp,
427 TCPOLEN_SACK_PERM); 428 TCPOLEN_SACK_PERM);
428 } 429 }
429 430
430 if (unlikely(opts->ws)) { 431 if (unlikely(OPTION_WSCALE & opts->options)) {
431 *ptr++ = htonl((TCPOPT_NOP << 24) | 432 *ptr++ = htonl((TCPOPT_NOP << 24) |
432 (TCPOPT_WINDOW << 16) | 433 (TCPOPT_WINDOW << 16) |
433 (TCPOLEN_WINDOW << 8) | 434 (TCPOLEN_WINDOW << 8) |
@@ -494,8 +495,8 @@ static unsigned tcp_syn_options(struct sock *sk, struct sk_buff *skb,
494 } 495 }
495 if (likely(sysctl_tcp_window_scaling)) { 496 if (likely(sysctl_tcp_window_scaling)) {
496 opts->ws = tp->rx_opt.rcv_wscale; 497 opts->ws = tp->rx_opt.rcv_wscale;
497 if (likely(opts->ws)) 498 opts->options |= OPTION_WSCALE;
498 size += TCPOLEN_WSCALE_ALIGNED; 499 size += TCPOLEN_WSCALE_ALIGNED;
499 } 500 }
500 if (likely(sysctl_tcp_sack)) { 501 if (likely(sysctl_tcp_sack)) {
501 opts->options |= OPTION_SACK_ADVERTISE; 502 opts->options |= OPTION_SACK_ADVERTISE;
@@ -537,8 +538,8 @@ static unsigned tcp_synack_options(struct sock *sk,
537 538
538 if (likely(ireq->wscale_ok)) { 539 if (likely(ireq->wscale_ok)) {
539 opts->ws = ireq->rcv_wscale; 540 opts->ws = ireq->rcv_wscale;
540 if (likely(opts->ws)) 541 opts->options |= OPTION_WSCALE;
541 size += TCPOLEN_WSCALE_ALIGNED; 542 size += TCPOLEN_WSCALE_ALIGNED;
542 } 543 }
543 if (likely(doing_ts)) { 544 if (likely(doing_ts)) {
544 opts->options |= OPTION_TS; 545 opts->options |= OPTION_TS;
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index ebaaa7f973d7..6ec6a8a4a224 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -696,6 +696,7 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
696 696
697 if (rt == NULL) { 697 if (rt == NULL) {
698 struct flowi fl = { .oif = ipc.oif, 698 struct flowi fl = { .oif = ipc.oif,
699 .mark = sk->sk_mark,
699 .nl_u = { .ip4_u = 700 .nl_u = { .ip4_u =
700 { .daddr = faddr, 701 { .daddr = faddr,
701 .saddr = saddr, 702 .saddr = saddr,
@@ -1359,7 +1360,7 @@ void udp_destroy_sock(struct sock *sk)
1359 * Socket option code for UDP 1360 * Socket option code for UDP
1360 */ 1361 */
1361int udp_lib_setsockopt(struct sock *sk, int level, int optname, 1362int udp_lib_setsockopt(struct sock *sk, int level, int optname,
1362 char __user *optval, int optlen, 1363 char __user *optval, unsigned int optlen,
1363 int (*push_pending_frames)(struct sock *)) 1364 int (*push_pending_frames)(struct sock *))
1364{ 1365{
1365 struct udp_sock *up = udp_sk(sk); 1366 struct udp_sock *up = udp_sk(sk);
@@ -1441,7 +1442,7 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname,
1441EXPORT_SYMBOL(udp_lib_setsockopt); 1442EXPORT_SYMBOL(udp_lib_setsockopt);
1442 1443
1443int udp_setsockopt(struct sock *sk, int level, int optname, 1444int udp_setsockopt(struct sock *sk, int level, int optname,
1444 char __user *optval, int optlen) 1445 char __user *optval, unsigned int optlen)
1445{ 1446{
1446 if (level == SOL_UDP || level == SOL_UDPLITE) 1447 if (level == SOL_UDP || level == SOL_UDPLITE)
1447 return udp_lib_setsockopt(sk, level, optname, optval, optlen, 1448 return udp_lib_setsockopt(sk, level, optname, optval, optlen,
@@ -1451,7 +1452,7 @@ int udp_setsockopt(struct sock *sk, int level, int optname,
1451 1452
1452#ifdef CONFIG_COMPAT 1453#ifdef CONFIG_COMPAT
1453int compat_udp_setsockopt(struct sock *sk, int level, int optname, 1454int compat_udp_setsockopt(struct sock *sk, int level, int optname,
1454 char __user *optval, int optlen) 1455 char __user *optval, unsigned int optlen)
1455{ 1456{
1456 if (level == SOL_UDP || level == SOL_UDPLITE) 1457 if (level == SOL_UDP || level == SOL_UDPLITE)
1457 return udp_lib_setsockopt(sk, level, optname, optval, optlen, 1458 return udp_lib_setsockopt(sk, level, optname, optval, optlen,
diff --git a/net/ipv4/udp_impl.h b/net/ipv4/udp_impl.h
index 9f4a6165f722..aaad650d47d9 100644
--- a/net/ipv4/udp_impl.h
+++ b/net/ipv4/udp_impl.h
@@ -11,13 +11,13 @@ extern void __udp4_lib_err(struct sk_buff *, u32, struct udp_table *);
11extern int udp_v4_get_port(struct sock *sk, unsigned short snum); 11extern int udp_v4_get_port(struct sock *sk, unsigned short snum);
12 12
13extern int udp_setsockopt(struct sock *sk, int level, int optname, 13extern int udp_setsockopt(struct sock *sk, int level, int optname,
14 char __user *optval, int optlen); 14 char __user *optval, unsigned int optlen);
15extern int udp_getsockopt(struct sock *sk, int level, int optname, 15extern int udp_getsockopt(struct sock *sk, int level, int optname,
16 char __user *optval, int __user *optlen); 16 char __user *optval, int __user *optlen);
17 17
18#ifdef CONFIG_COMPAT 18#ifdef CONFIG_COMPAT
19extern int compat_udp_setsockopt(struct sock *sk, int level, int optname, 19extern int compat_udp_setsockopt(struct sock *sk, int level, int optname,
20 char __user *optval, int optlen); 20 char __user *optval, unsigned int optlen);
21extern int compat_udp_getsockopt(struct sock *sk, int level, int optname, 21extern int compat_udp_getsockopt(struct sock *sk, int level, int optname,
22 char __user *optval, int __user *optlen); 22 char __user *optval, int __user *optlen);
23#endif 23#endif
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 090675e269ee..716153941fc4 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -1281,7 +1281,7 @@ int ip6mr_sk_done(struct sock *sk)
1281 * MOSPF/PIM router set up we can clean this up. 1281 * MOSPF/PIM router set up we can clean this up.
1282 */ 1282 */
1283 1283
1284int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, int optlen) 1284int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsigned int optlen)
1285{ 1285{
1286 int ret; 1286 int ret;
1287 struct mif6ctl vif; 1287 struct mif6ctl vif;
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index f5e0682b402d..14f54eb5a7fc 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -123,7 +123,7 @@ struct ipv6_txoptions *ipv6_update_options(struct sock *sk,
123} 123}
124 124
125static int do_ipv6_setsockopt(struct sock *sk, int level, int optname, 125static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
126 char __user *optval, int optlen) 126 char __user *optval, unsigned int optlen)
127{ 127{
128 struct ipv6_pinfo *np = inet6_sk(sk); 128 struct ipv6_pinfo *np = inet6_sk(sk);
129 struct net *net = sock_net(sk); 129 struct net *net = sock_net(sk);
@@ -773,7 +773,7 @@ e_inval:
773} 773}
774 774
775int ipv6_setsockopt(struct sock *sk, int level, int optname, 775int ipv6_setsockopt(struct sock *sk, int level, int optname,
776 char __user *optval, int optlen) 776 char __user *optval, unsigned int optlen)
777{ 777{
778 int err; 778 int err;
779 779
@@ -801,7 +801,7 @@ EXPORT_SYMBOL(ipv6_setsockopt);
801 801
802#ifdef CONFIG_COMPAT 802#ifdef CONFIG_COMPAT
803int compat_ipv6_setsockopt(struct sock *sk, int level, int optname, 803int compat_ipv6_setsockopt(struct sock *sk, int level, int optname,
804 char __user *optval, int optlen) 804 char __user *optval, unsigned int optlen)
805{ 805{
806 int err; 806 int err;
807 807
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 498b9b0b0fad..f74e4e2cdd06 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -658,7 +658,6 @@ void ndisc_send_rs(struct net_device *dev, const struct in6_addr *saddr,
658 &icmp6h, NULL, 658 &icmp6h, NULL,
659 send_sllao ? ND_OPT_SOURCE_LL_ADDR : 0); 659 send_sllao ? ND_OPT_SOURCE_LL_ADDR : 0);
660} 660}
661EXPORT_SYMBOL(ndisc_send_rs);
662 661
663 662
664static void ndisc_error_report(struct neighbour *neigh, struct sk_buff *skb) 663static void ndisc_error_report(struct neighbour *neigh, struct sk_buff *skb)
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 7d675b8d82d3..4f24570b0869 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -957,7 +957,7 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
957 957
958 958
959static int do_rawv6_setsockopt(struct sock *sk, int level, int optname, 959static int do_rawv6_setsockopt(struct sock *sk, int level, int optname,
960 char __user *optval, int optlen) 960 char __user *optval, unsigned int optlen)
961{ 961{
962 struct raw6_sock *rp = raw6_sk(sk); 962 struct raw6_sock *rp = raw6_sk(sk);
963 int val; 963 int val;
@@ -1000,7 +1000,7 @@ static int do_rawv6_setsockopt(struct sock *sk, int level, int optname,
1000} 1000}
1001 1001
1002static int rawv6_setsockopt(struct sock *sk, int level, int optname, 1002static int rawv6_setsockopt(struct sock *sk, int level, int optname,
1003 char __user *optval, int optlen) 1003 char __user *optval, unsigned int optlen)
1004{ 1004{
1005 switch(level) { 1005 switch(level) {
1006 case SOL_RAW: 1006 case SOL_RAW:
@@ -1024,7 +1024,7 @@ static int rawv6_setsockopt(struct sock *sk, int level, int optname,
1024 1024
1025#ifdef CONFIG_COMPAT 1025#ifdef CONFIG_COMPAT
1026static int compat_rawv6_setsockopt(struct sock *sk, int level, int optname, 1026static int compat_rawv6_setsockopt(struct sock *sk, int level, int optname,
1027 char __user *optval, int optlen) 1027 char __user *optval, unsigned int optlen)
1028{ 1028{
1029 switch (level) { 1029 switch (level) {
1030 case SOL_RAW: 1030 case SOL_RAW:
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index fcb539628847..dbd19a78ca73 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -15,7 +15,6 @@
15 * Roger Venning <r.venning@telstra.com>: 6to4 support 15 * Roger Venning <r.venning@telstra.com>: 6to4 support
16 * Nate Thompson <nate@thebog.net>: 6to4 support 16 * Nate Thompson <nate@thebog.net>: 6to4 support
17 * Fred Templin <fred.l.templin@boeing.com>: isatap support 17 * Fred Templin <fred.l.templin@boeing.com>: isatap support
18 * Sascha Hlusiak <mail@saschahlusiak.de>: stateless autoconf for isatap
19 */ 18 */
20 19
21#include <linux/module.h> 20#include <linux/module.h>
@@ -223,44 +222,6 @@ failed:
223 return NULL; 222 return NULL;
224} 223}
225 224
226static void ipip6_tunnel_rs_timer(unsigned long data)
227{
228 struct ip_tunnel_prl_entry *p = (struct ip_tunnel_prl_entry *) data;
229 struct inet6_dev *ifp;
230 struct inet6_ifaddr *addr;
231
232 spin_lock(&p->lock);
233 ifp = __in6_dev_get(p->tunnel->dev);
234
235 read_lock_bh(&ifp->lock);
236 for (addr = ifp->addr_list; addr; addr = addr->if_next) {
237 struct in6_addr rtr;
238
239 if (!(ipv6_addr_type(&addr->addr) & IPV6_ADDR_LINKLOCAL))
240 continue;
241
242 /* Send RS to guessed linklocal address of router
243 *
244 * Better: send to ff02::2 encapsuled in unicast directly
245 * to router-v4 instead of guessing the v6 address.
246 *
247 * Cisco/Windows seem to not set the u/l bit correctly,
248 * so we won't guess right.
249 */
250 ipv6_addr_set(&rtr, htonl(0xFE800000), 0, 0, 0);
251 if (!__ipv6_isatap_ifid(rtr.s6_addr + 8,
252 p->addr)) {
253 ndisc_send_rs(p->tunnel->dev, &addr->addr, &rtr);
254 }
255 }
256 read_unlock_bh(&ifp->lock);
257
258 mod_timer(&p->rs_timer, jiffies + HZ * p->rs_delay);
259 spin_unlock(&p->lock);
260
261 return;
262}
263
264static struct ip_tunnel_prl_entry * 225static struct ip_tunnel_prl_entry *
265__ipip6_tunnel_locate_prl(struct ip_tunnel *t, __be32 addr) 226__ipip6_tunnel_locate_prl(struct ip_tunnel *t, __be32 addr)
266{ 227{
@@ -313,13 +274,12 @@ static int ipip6_tunnel_get_prl(struct ip_tunnel *t,
313 274
314 c = 0; 275 c = 0;
315 for (prl = t->prl; prl; prl = prl->next) { 276 for (prl = t->prl; prl; prl = prl->next) {
316 if (c > cmax) 277 if (c >= cmax)
317 break; 278 break;
318 if (kprl.addr != htonl(INADDR_ANY) && prl->addr != kprl.addr) 279 if (kprl.addr != htonl(INADDR_ANY) && prl->addr != kprl.addr)
319 continue; 280 continue;
320 kp[c].addr = prl->addr; 281 kp[c].addr = prl->addr;
321 kp[c].flags = prl->flags; 282 kp[c].flags = prl->flags;
322 kp[c].rs_delay = prl->rs_delay;
323 c++; 283 c++;
324 if (kprl.addr != htonl(INADDR_ANY)) 284 if (kprl.addr != htonl(INADDR_ANY))
325 break; 285 break;
@@ -369,23 +329,11 @@ ipip6_tunnel_add_prl(struct ip_tunnel *t, struct ip_tunnel_prl *a, int chg)
369 } 329 }
370 330
371 p->next = t->prl; 331 p->next = t->prl;
372 p->tunnel = t;
373 t->prl = p; 332 t->prl = p;
374 t->prl_count++; 333 t->prl_count++;
375
376 spin_lock_init(&p->lock);
377 setup_timer(&p->rs_timer, ipip6_tunnel_rs_timer, (unsigned long) p);
378update: 334update:
379 p->addr = a->addr; 335 p->addr = a->addr;
380 p->flags = a->flags; 336 p->flags = a->flags;
381 p->rs_delay = a->rs_delay;
382 if (p->rs_delay == 0)
383 p->rs_delay = IPTUNNEL_RS_DEFAULT_DELAY;
384 spin_lock(&p->lock);
385 del_timer(&p->rs_timer);
386 if (p->flags & PRL_DEFAULT)
387 mod_timer(&p->rs_timer, jiffies + 1);
388 spin_unlock(&p->lock);
389out: 337out:
390 write_unlock(&ipip6_lock); 338 write_unlock(&ipip6_lock);
391 return err; 339 return err;
@@ -404,9 +352,6 @@ ipip6_tunnel_del_prl(struct ip_tunnel *t, struct ip_tunnel_prl *a)
404 if ((*p)->addr == a->addr) { 352 if ((*p)->addr == a->addr) {
405 x = *p; 353 x = *p;
406 *p = x->next; 354 *p = x->next;
407 spin_lock(&x->lock);
408 del_timer(&x->rs_timer);
409 spin_unlock(&x->lock);
410 kfree(x); 355 kfree(x);
411 t->prl_count--; 356 t->prl_count--;
412 goto out; 357 goto out;
@@ -417,9 +362,6 @@ ipip6_tunnel_del_prl(struct ip_tunnel *t, struct ip_tunnel_prl *a)
417 while (t->prl) { 362 while (t->prl) {
418 x = t->prl; 363 x = t->prl;
419 t->prl = t->prl->next; 364 t->prl = t->prl->next;
420 spin_lock(&x->lock);
421 del_timer(&x->rs_timer);
422 spin_unlock(&x->lock);
423 kfree(x); 365 kfree(x);
424 t->prl_count--; 366 t->prl_count--;
425 } 367 }
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index b265b7047d3e..3a60f12b34ed 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -1044,7 +1044,7 @@ void udpv6_destroy_sock(struct sock *sk)
1044 * Socket option code for UDP 1044 * Socket option code for UDP
1045 */ 1045 */
1046int udpv6_setsockopt(struct sock *sk, int level, int optname, 1046int udpv6_setsockopt(struct sock *sk, int level, int optname,
1047 char __user *optval, int optlen) 1047 char __user *optval, unsigned int optlen)
1048{ 1048{
1049 if (level == SOL_UDP || level == SOL_UDPLITE) 1049 if (level == SOL_UDP || level == SOL_UDPLITE)
1050 return udp_lib_setsockopt(sk, level, optname, optval, optlen, 1050 return udp_lib_setsockopt(sk, level, optname, optval, optlen,
@@ -1054,7 +1054,7 @@ int udpv6_setsockopt(struct sock *sk, int level, int optname,
1054 1054
1055#ifdef CONFIG_COMPAT 1055#ifdef CONFIG_COMPAT
1056int compat_udpv6_setsockopt(struct sock *sk, int level, int optname, 1056int compat_udpv6_setsockopt(struct sock *sk, int level, int optname,
1057 char __user *optval, int optlen) 1057 char __user *optval, unsigned int optlen)
1058{ 1058{
1059 if (level == SOL_UDP || level == SOL_UDPLITE) 1059 if (level == SOL_UDP || level == SOL_UDPLITE)
1060 return udp_lib_setsockopt(sk, level, optname, optval, optlen, 1060 return udp_lib_setsockopt(sk, level, optname, optval, optlen,
diff --git a/net/ipv6/udp_impl.h b/net/ipv6/udp_impl.h
index 6bb303471e20..d7571046bfc4 100644
--- a/net/ipv6/udp_impl.h
+++ b/net/ipv6/udp_impl.h
@@ -16,10 +16,10 @@ extern int udp_v6_get_port(struct sock *sk, unsigned short snum);
16extern int udpv6_getsockopt(struct sock *sk, int level, int optname, 16extern int udpv6_getsockopt(struct sock *sk, int level, int optname,
17 char __user *optval, int __user *optlen); 17 char __user *optval, int __user *optlen);
18extern int udpv6_setsockopt(struct sock *sk, int level, int optname, 18extern int udpv6_setsockopt(struct sock *sk, int level, int optname,
19 char __user *optval, int optlen); 19 char __user *optval, unsigned int optlen);
20#ifdef CONFIG_COMPAT 20#ifdef CONFIG_COMPAT
21extern int compat_udpv6_setsockopt(struct sock *sk, int level, int optname, 21extern int compat_udpv6_setsockopt(struct sock *sk, int level, int optname,
22 char __user *optval, int optlen); 22 char __user *optval, unsigned int optlen);
23extern int compat_udpv6_getsockopt(struct sock *sk, int level, int optname, 23extern int compat_udpv6_getsockopt(struct sock *sk, int level, int optname,
24 char __user *optval, int __user *optlen); 24 char __user *optval, int __user *optlen);
25#endif 25#endif
diff --git a/net/ipx/af_ipx.c b/net/ipx/af_ipx.c
index f1118d92a191..66c7a20011f3 100644
--- a/net/ipx/af_ipx.c
+++ b/net/ipx/af_ipx.c
@@ -1292,7 +1292,7 @@ const char *ipx_device_name(struct ipx_interface *intrfc)
1292 * socket object. */ 1292 * socket object. */
1293 1293
1294static int ipx_setsockopt(struct socket *sock, int level, int optname, 1294static int ipx_setsockopt(struct socket *sock, int level, int optname,
1295 char __user *optval, int optlen) 1295 char __user *optval, unsigned int optlen)
1296{ 1296{
1297 struct sock *sk = sock->sk; 1297 struct sock *sk = sock->sk;
1298 int opt; 1298 int opt;
diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
index 50b43c57d5d8..dd35641835f4 100644
--- a/net/irda/af_irda.c
+++ b/net/irda/af_irda.c
@@ -1826,7 +1826,7 @@ static int irda_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned lon
1826 * 1826 *
1827 */ 1827 */
1828static int irda_setsockopt(struct socket *sock, int level, int optname, 1828static int irda_setsockopt(struct socket *sock, int level, int optname,
1829 char __user *optval, int optlen) 1829 char __user *optval, unsigned int optlen)
1830{ 1830{
1831 struct sock *sk = sock->sk; 1831 struct sock *sk = sock->sk;
1832 struct irda_sock *self = irda_sk(sk); 1832 struct irda_sock *self = irda_sk(sk);
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index d985d163dcfc..bada1b9c670b 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -1387,7 +1387,7 @@ static int iucv_sock_release(struct socket *sock)
1387 1387
1388/* getsockopt and setsockopt */ 1388/* getsockopt and setsockopt */
1389static int iucv_sock_setsockopt(struct socket *sock, int level, int optname, 1389static int iucv_sock_setsockopt(struct socket *sock, int level, int optname,
1390 char __user *optval, int optlen) 1390 char __user *optval, unsigned int optlen)
1391{ 1391{
1392 struct sock *sk = sock->sk; 1392 struct sock *sk = sock->sk;
1393 struct iucv_sock *iucv = iucv_sk(sk); 1393 struct iucv_sock *iucv = iucv_sk(sk);
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index c45eee1c0e8d..7aa4fd170104 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -973,7 +973,7 @@ static int llc_ui_ioctl(struct socket *sock, unsigned int cmd,
973 * Set various connection specific parameters. 973 * Set various connection specific parameters.
974 */ 974 */
975static int llc_ui_setsockopt(struct socket *sock, int level, int optname, 975static int llc_ui_setsockopt(struct socket *sock, int level, int optname,
976 char __user *optval, int optlen) 976 char __user *optval, unsigned int optlen)
977{ 977{
978 struct sock *sk = sock->sk; 978 struct sock *sk = sock->sk;
979 struct llc_sock *llc = llc_sk(sk); 979 struct llc_sock *llc = llc_sk(sk);
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 97a278a2f48e..8d26e9bf8964 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -1388,8 +1388,8 @@ ieee80211_rx_mgmt_disassoc(struct ieee80211_sub_if_data *sdata,
1388 1388
1389 reason_code = le16_to_cpu(mgmt->u.disassoc.reason_code); 1389 reason_code = le16_to_cpu(mgmt->u.disassoc.reason_code);
1390 1390
1391 printk(KERN_DEBUG "%s: disassociated (Reason: %u)\n", 1391 printk(KERN_DEBUG "%s: disassociated from %pM (Reason: %u)\n",
1392 sdata->dev->name, reason_code); 1392 sdata->dev->name, mgmt->sa, reason_code);
1393 1393
1394 ieee80211_set_disassoc(sdata, false); 1394 ieee80211_set_disassoc(sdata, false);
1395 return RX_MGMT_CFG80211_DISASSOC; 1395 return RX_MGMT_CFG80211_DISASSOC;
@@ -1675,7 +1675,7 @@ static void ieee80211_rx_mgmt_probe_resp(struct ieee80211_sub_if_data *sdata,
1675 1675
1676 /* direct probe may be part of the association flow */ 1676 /* direct probe may be part of the association flow */
1677 if (wk && wk->state == IEEE80211_MGD_STATE_PROBE) { 1677 if (wk && wk->state == IEEE80211_MGD_STATE_PROBE) {
1678 printk(KERN_DEBUG "%s direct probe responded\n", 1678 printk(KERN_DEBUG "%s: direct probe responded\n",
1679 sdata->dev->name); 1679 sdata->dev->name);
1680 wk->tries = 0; 1680 wk->tries = 0;
1681 wk->state = IEEE80211_MGD_STATE_AUTH; 1681 wk->state = IEEE80211_MGD_STATE_AUTH;
@@ -2502,9 +2502,6 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
2502 struct ieee80211_mgd_work *wk; 2502 struct ieee80211_mgd_work *wk;
2503 const u8 *bssid = NULL; 2503 const u8 *bssid = NULL;
2504 2504
2505 printk(KERN_DEBUG "%s: deauthenticating by local choice (reason=%d)\n",
2506 sdata->dev->name, req->reason_code);
2507
2508 mutex_lock(&ifmgd->mtx); 2505 mutex_lock(&ifmgd->mtx);
2509 2506
2510 if (ifmgd->associated && &ifmgd->associated->cbss == req->bss) { 2507 if (ifmgd->associated && &ifmgd->associated->cbss == req->bss) {
@@ -2532,6 +2529,9 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
2532 2529
2533 mutex_unlock(&ifmgd->mtx); 2530 mutex_unlock(&ifmgd->mtx);
2534 2531
2532 printk(KERN_DEBUG "%s: deauthenticating from %pM by local choice (reason=%d)\n",
2533 sdata->dev->name, bssid, req->reason_code);
2534
2535 ieee80211_send_deauth_disassoc(sdata, bssid, 2535 ieee80211_send_deauth_disassoc(sdata, bssid,
2536 IEEE80211_STYPE_DEAUTH, req->reason_code, 2536 IEEE80211_STYPE_DEAUTH, req->reason_code,
2537 cookie); 2537 cookie);
@@ -2545,9 +2545,6 @@ int ieee80211_mgd_disassoc(struct ieee80211_sub_if_data *sdata,
2545{ 2545{
2546 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 2546 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
2547 2547
2548 printk(KERN_DEBUG "%s: disassociating by local choice (reason=%d)\n",
2549 sdata->dev->name, req->reason_code);
2550
2551 mutex_lock(&ifmgd->mtx); 2548 mutex_lock(&ifmgd->mtx);
2552 2549
2553 /* 2550 /*
@@ -2561,6 +2558,9 @@ int ieee80211_mgd_disassoc(struct ieee80211_sub_if_data *sdata,
2561 return -ENOLINK; 2558 return -ENOLINK;
2562 } 2559 }
2563 2560
2561 printk(KERN_DEBUG "%s: disassociating from %pM by local choice (reason=%d)\n",
2562 sdata->dev->name, req->bss->bssid, req->reason_code);
2563
2564 ieee80211_set_disassoc(sdata, false); 2564 ieee80211_set_disassoc(sdata, false);
2565 2565
2566 mutex_unlock(&ifmgd->mtx); 2566 mutex_unlock(&ifmgd->mtx);
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 5143d203256b..fd4028296613 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -367,7 +367,10 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
367 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data; 367 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
368 u32 staflags; 368 u32 staflags;
369 369
370 if (unlikely(!sta || ieee80211_is_probe_resp(hdr->frame_control))) 370 if (unlikely(!sta || ieee80211_is_probe_resp(hdr->frame_control)
371 || ieee80211_is_auth(hdr->frame_control)
372 || ieee80211_is_assoc_resp(hdr->frame_control)
373 || ieee80211_is_reassoc_resp(hdr->frame_control)))
371 return TX_CONTINUE; 374 return TX_CONTINUE;
372 375
373 staflags = get_sta_flags(sta); 376 staflags = get_sta_flags(sta);
diff --git a/net/netfilter/nf_sockopt.c b/net/netfilter/nf_sockopt.c
index 8ab829f86574..f042ae521557 100644
--- a/net/netfilter/nf_sockopt.c
+++ b/net/netfilter/nf_sockopt.c
@@ -113,7 +113,7 @@ static int nf_sockopt(struct sock *sk, u_int8_t pf, int val,
113} 113}
114 114
115int nf_setsockopt(struct sock *sk, u_int8_t pf, int val, char __user *opt, 115int nf_setsockopt(struct sock *sk, u_int8_t pf, int val, char __user *opt,
116 int len) 116 unsigned int len)
117{ 117{
118 return nf_sockopt(sk, pf, val, opt, &len, 0); 118 return nf_sockopt(sk, pf, val, opt, &len, 0);
119} 119}
@@ -154,7 +154,7 @@ static int compat_nf_sockopt(struct sock *sk, u_int8_t pf, int val,
154} 154}
155 155
156int compat_nf_setsockopt(struct sock *sk, u_int8_t pf, 156int compat_nf_setsockopt(struct sock *sk, u_int8_t pf,
157 int val, char __user *opt, int len) 157 int val, char __user *opt, unsigned int len)
158{ 158{
159 return compat_nf_sockopt(sk, pf, val, opt, &len, 0); 159 return compat_nf_sockopt(sk, pf, val, opt, &len, 0);
160} 160}
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index a4bafbf15097..19e98007691c 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -1150,7 +1150,7 @@ static void netlink_update_socket_mc(struct netlink_sock *nlk,
1150} 1150}
1151 1151
1152static int netlink_setsockopt(struct socket *sock, int level, int optname, 1152static int netlink_setsockopt(struct socket *sock, int level, int optname,
1153 char __user *optval, int optlen) 1153 char __user *optval, unsigned int optlen)
1154{ 1154{
1155 struct sock *sk = sock->sk; 1155 struct sock *sk = sock->sk;
1156 struct netlink_sock *nlk = nlk_sk(sk); 1156 struct netlink_sock *nlk = nlk_sk(sk);
@@ -1788,7 +1788,7 @@ void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err)
1788 } 1788 }
1789 1789
1790 rep = __nlmsg_put(skb, NETLINK_CB(in_skb).pid, nlh->nlmsg_seq, 1790 rep = __nlmsg_put(skb, NETLINK_CB(in_skb).pid, nlh->nlmsg_seq,
1791 NLMSG_ERROR, sizeof(struct nlmsgerr), 0); 1791 NLMSG_ERROR, payload, 0);
1792 errmsg = nlmsg_data(rep); 1792 errmsg = nlmsg_data(rep);
1793 errmsg->error = err; 1793 errmsg->error = err;
1794 memcpy(&errmsg->msg, nlh, err ? nlh->nlmsg_len : sizeof(*nlh)); 1794 memcpy(&errmsg->msg, nlh, err ? nlh->nlmsg_len : sizeof(*nlh));
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
index ce1a34b99c23..7a834952f67f 100644
--- a/net/netrom/af_netrom.c
+++ b/net/netrom/af_netrom.c
@@ -301,7 +301,7 @@ void nr_destroy_socket(struct sock *sk)
301 */ 301 */
302 302
303static int nr_setsockopt(struct socket *sock, int level, int optname, 303static int nr_setsockopt(struct socket *sock, int level, int optname,
304 char __user *optval, int optlen) 304 char __user *optval, unsigned int optlen)
305{ 305{
306 struct sock *sk = sock->sk; 306 struct sock *sk = sock->sk;
307 struct nr_sock *nr = nr_sk(sk); 307 struct nr_sock *nr = nr_sk(sk);
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index d3d52c66cdc2..d7ecca0a0c07 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -1701,7 +1701,7 @@ static void packet_flush_mclist(struct sock *sk)
1701} 1701}
1702 1702
1703static int 1703static int
1704packet_setsockopt(struct socket *sock, int level, int optname, char __user *optval, int optlen) 1704packet_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
1705{ 1705{
1706 struct sock *sk = sock->sk; 1706 struct sock *sk = sock->sk;
1707 struct packet_sock *po = pkt_sk(sk); 1707 struct packet_sock *po = pkt_sk(sk);
@@ -2084,7 +2084,7 @@ static void packet_mm_close(struct vm_area_struct *vma)
2084 atomic_dec(&pkt_sk(sk)->mapped); 2084 atomic_dec(&pkt_sk(sk)->mapped);
2085} 2085}
2086 2086
2087static struct vm_operations_struct packet_mmap_ops = { 2087static const struct vm_operations_struct packet_mmap_ops = {
2088 .open = packet_mm_open, 2088 .open = packet_mm_open,
2089 .close = packet_mm_close, 2089 .close = packet_mm_close,
2090}; 2090};
diff --git a/net/phonet/pep.c b/net/phonet/pep.c
index b8252d289cd7..5f32d217535b 100644
--- a/net/phonet/pep.c
+++ b/net/phonet/pep.c
@@ -742,7 +742,7 @@ static int pep_init(struct sock *sk)
742} 742}
743 743
744static int pep_setsockopt(struct sock *sk, int level, int optname, 744static int pep_setsockopt(struct sock *sk, int level, int optname,
745 char __user *optval, int optlen) 745 char __user *optval, unsigned int optlen)
746{ 746{
747 struct pep_sock *pn = pep_sk(sk); 747 struct pep_sock *pn = pep_sk(sk);
748 int val = 0, err = 0; 748 int val = 0, err = 0;
diff --git a/net/phonet/socket.c b/net/phonet/socket.c
index 07aa9f08d5fb..aa5b5a972bff 100644
--- a/net/phonet/socket.c
+++ b/net/phonet/socket.c
@@ -407,7 +407,6 @@ int pn_sock_get_port(struct sock *sk, unsigned short sport)
407 return -EADDRINUSE; 407 return -EADDRINUSE;
408 408
409found: 409found:
410 mutex_unlock(&port_mutex);
411 pn->sobject = pn_object(pn_addr(pn->sobject), sport); 410 pn->sobject = pn_object(pn_addr(pn->sobject), sport);
412 return 0; 411 return 0;
413} 412}
diff --git a/net/rds/af_rds.c b/net/rds/af_rds.c
index 6b58aeff4c7a..98e05382fd3c 100644
--- a/net/rds/af_rds.c
+++ b/net/rds/af_rds.c
@@ -248,7 +248,7 @@ static int rds_cong_monitor(struct rds_sock *rs, char __user *optval,
248} 248}
249 249
250static int rds_setsockopt(struct socket *sock, int level, int optname, 250static int rds_setsockopt(struct socket *sock, int level, int optname,
251 char __user *optval, int optlen) 251 char __user *optval, unsigned int optlen)
252{ 252{
253 struct rds_sock *rs = rds_sk_to_rs(sock->sk); 253 struct rds_sock *rs = rds_sk_to_rs(sock->sk);
254 int ret; 254 int ret;
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
index 1e166c9685aa..502cce76621d 100644
--- a/net/rose/af_rose.c
+++ b/net/rose/af_rose.c
@@ -370,7 +370,7 @@ void rose_destroy_socket(struct sock *sk)
370 */ 370 */
371 371
372static int rose_setsockopt(struct socket *sock, int level, int optname, 372static int rose_setsockopt(struct socket *sock, int level, int optname,
373 char __user *optval, int optlen) 373 char __user *optval, unsigned int optlen)
374{ 374{
375 struct sock *sk = sock->sk; 375 struct sock *sk = sock->sk;
376 struct rose_sock *rose = rose_sk(sk); 376 struct rose_sock *rose = rose_sk(sk);
diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
index bfe493ebf27c..a86afceaa94f 100644
--- a/net/rxrpc/af_rxrpc.c
+++ b/net/rxrpc/af_rxrpc.c
@@ -507,7 +507,7 @@ out:
507 * set RxRPC socket options 507 * set RxRPC socket options
508 */ 508 */
509static int rxrpc_setsockopt(struct socket *sock, int level, int optname, 509static int rxrpc_setsockopt(struct socket *sock, int level, int optname,
510 char __user *optval, int optlen) 510 char __user *optval, unsigned int optlen)
511{ 511{
512 struct rxrpc_sock *rx = rxrpc_sk(sock->sk); 512 struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
513 unsigned min_sec_level; 513 unsigned min_sec_level;
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 89af37a6c871..c8d05758661d 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -2027,7 +2027,8 @@ out:
2027 * instead a error will be indicated to the user. 2027 * instead a error will be indicated to the user.
2028 */ 2028 */
2029static int sctp_setsockopt_disable_fragments(struct sock *sk, 2029static int sctp_setsockopt_disable_fragments(struct sock *sk,
2030 char __user *optval, int optlen) 2030 char __user *optval,
2031 unsigned int optlen)
2031{ 2032{
2032 int val; 2033 int val;
2033 2034
@@ -2043,7 +2044,7 @@ static int sctp_setsockopt_disable_fragments(struct sock *sk,
2043} 2044}
2044 2045
2045static int sctp_setsockopt_events(struct sock *sk, char __user *optval, 2046static int sctp_setsockopt_events(struct sock *sk, char __user *optval,
2046 int optlen) 2047 unsigned int optlen)
2047{ 2048{
2048 if (optlen > sizeof(struct sctp_event_subscribe)) 2049 if (optlen > sizeof(struct sctp_event_subscribe))
2049 return -EINVAL; 2050 return -EINVAL;
@@ -2064,7 +2065,7 @@ static int sctp_setsockopt_events(struct sock *sk, char __user *optval,
2064 * association is closed. 2065 * association is closed.
2065 */ 2066 */
2066static int sctp_setsockopt_autoclose(struct sock *sk, char __user *optval, 2067static int sctp_setsockopt_autoclose(struct sock *sk, char __user *optval,
2067 int optlen) 2068 unsigned int optlen)
2068{ 2069{
2069 struct sctp_sock *sp = sctp_sk(sk); 2070 struct sctp_sock *sp = sctp_sk(sk);
2070 2071
@@ -2318,7 +2319,8 @@ static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params,
2318} 2319}
2319 2320
2320static int sctp_setsockopt_peer_addr_params(struct sock *sk, 2321static int sctp_setsockopt_peer_addr_params(struct sock *sk,
2321 char __user *optval, int optlen) 2322 char __user *optval,
2323 unsigned int optlen)
2322{ 2324{
2323 struct sctp_paddrparams params; 2325 struct sctp_paddrparams params;
2324 struct sctp_transport *trans = NULL; 2326 struct sctp_transport *trans = NULL;
@@ -2430,7 +2432,7 @@ static int sctp_setsockopt_peer_addr_params(struct sock *sk,
2430 */ 2432 */
2431 2433
2432static int sctp_setsockopt_delayed_ack(struct sock *sk, 2434static int sctp_setsockopt_delayed_ack(struct sock *sk,
2433 char __user *optval, int optlen) 2435 char __user *optval, unsigned int optlen)
2434{ 2436{
2435 struct sctp_sack_info params; 2437 struct sctp_sack_info params;
2436 struct sctp_transport *trans = NULL; 2438 struct sctp_transport *trans = NULL;
@@ -2546,7 +2548,7 @@ static int sctp_setsockopt_delayed_ack(struct sock *sk,
2546 * by the change). With TCP-style sockets, this option is inherited by 2548 * by the change). With TCP-style sockets, this option is inherited by
2547 * sockets derived from a listener socket. 2549 * sockets derived from a listener socket.
2548 */ 2550 */
2549static int sctp_setsockopt_initmsg(struct sock *sk, char __user *optval, int optlen) 2551static int sctp_setsockopt_initmsg(struct sock *sk, char __user *optval, unsigned int optlen)
2550{ 2552{
2551 struct sctp_initmsg sinit; 2553 struct sctp_initmsg sinit;
2552 struct sctp_sock *sp = sctp_sk(sk); 2554 struct sctp_sock *sp = sctp_sk(sk);
@@ -2583,7 +2585,8 @@ static int sctp_setsockopt_initmsg(struct sock *sk, char __user *optval, int opt
2583 * to this call if the caller is using the UDP model. 2585 * to this call if the caller is using the UDP model.
2584 */ 2586 */
2585static int sctp_setsockopt_default_send_param(struct sock *sk, 2587static int sctp_setsockopt_default_send_param(struct sock *sk,
2586 char __user *optval, int optlen) 2588 char __user *optval,
2589 unsigned int optlen)
2587{ 2590{
2588 struct sctp_sndrcvinfo info; 2591 struct sctp_sndrcvinfo info;
2589 struct sctp_association *asoc; 2592 struct sctp_association *asoc;
@@ -2622,7 +2625,7 @@ static int sctp_setsockopt_default_send_param(struct sock *sk,
2622 * association peer's addresses. 2625 * association peer's addresses.
2623 */ 2626 */
2624static int sctp_setsockopt_primary_addr(struct sock *sk, char __user *optval, 2627static int sctp_setsockopt_primary_addr(struct sock *sk, char __user *optval,
2625 int optlen) 2628 unsigned int optlen)
2626{ 2629{
2627 struct sctp_prim prim; 2630 struct sctp_prim prim;
2628 struct sctp_transport *trans; 2631 struct sctp_transport *trans;
@@ -2651,7 +2654,7 @@ static int sctp_setsockopt_primary_addr(struct sock *sk, char __user *optval,
2651 * integer boolean flag. 2654 * integer boolean flag.
2652 */ 2655 */
2653static int sctp_setsockopt_nodelay(struct sock *sk, char __user *optval, 2656static int sctp_setsockopt_nodelay(struct sock *sk, char __user *optval,
2654 int optlen) 2657 unsigned int optlen)
2655{ 2658{
2656 int val; 2659 int val;
2657 2660
@@ -2676,7 +2679,8 @@ static int sctp_setsockopt_nodelay(struct sock *sk, char __user *optval,
2676 * be changed. 2679 * be changed.
2677 * 2680 *
2678 */ 2681 */
2679static int sctp_setsockopt_rtoinfo(struct sock *sk, char __user *optval, int optlen) { 2682static int sctp_setsockopt_rtoinfo(struct sock *sk, char __user *optval, unsigned int optlen)
2683{
2680 struct sctp_rtoinfo rtoinfo; 2684 struct sctp_rtoinfo rtoinfo;
2681 struct sctp_association *asoc; 2685 struct sctp_association *asoc;
2682 2686
@@ -2728,7 +2732,7 @@ static int sctp_setsockopt_rtoinfo(struct sock *sk, char __user *optval, int opt
2728 * See [SCTP] for more information. 2732 * See [SCTP] for more information.
2729 * 2733 *
2730 */ 2734 */
2731static int sctp_setsockopt_associnfo(struct sock *sk, char __user *optval, int optlen) 2735static int sctp_setsockopt_associnfo(struct sock *sk, char __user *optval, unsigned int optlen)
2732{ 2736{
2733 2737
2734 struct sctp_assocparams assocparams; 2738 struct sctp_assocparams assocparams;
@@ -2800,7 +2804,7 @@ static int sctp_setsockopt_associnfo(struct sock *sk, char __user *optval, int o
2800 * addresses and a user will receive both PF_INET6 and PF_INET type 2804 * addresses and a user will receive both PF_INET6 and PF_INET type
2801 * addresses on the socket. 2805 * addresses on the socket.
2802 */ 2806 */
2803static int sctp_setsockopt_mappedv4(struct sock *sk, char __user *optval, int optlen) 2807static int sctp_setsockopt_mappedv4(struct sock *sk, char __user *optval, unsigned int optlen)
2804{ 2808{
2805 int val; 2809 int val;
2806 struct sctp_sock *sp = sctp_sk(sk); 2810 struct sctp_sock *sp = sctp_sk(sk);
@@ -2844,7 +2848,7 @@ static int sctp_setsockopt_mappedv4(struct sock *sk, char __user *optval, int op
2844 * changed (effecting future associations only). 2848 * changed (effecting future associations only).
2845 * assoc_value: This parameter specifies the maximum size in bytes. 2849 * assoc_value: This parameter specifies the maximum size in bytes.
2846 */ 2850 */
2847static int sctp_setsockopt_maxseg(struct sock *sk, char __user *optval, int optlen) 2851static int sctp_setsockopt_maxseg(struct sock *sk, char __user *optval, unsigned int optlen)
2848{ 2852{
2849 struct sctp_assoc_value params; 2853 struct sctp_assoc_value params;
2850 struct sctp_association *asoc; 2854 struct sctp_association *asoc;
@@ -2899,7 +2903,7 @@ static int sctp_setsockopt_maxseg(struct sock *sk, char __user *optval, int optl
2899 * set primary request: 2903 * set primary request:
2900 */ 2904 */
2901static int sctp_setsockopt_peer_primary_addr(struct sock *sk, char __user *optval, 2905static int sctp_setsockopt_peer_primary_addr(struct sock *sk, char __user *optval,
2902 int optlen) 2906 unsigned int optlen)
2903{ 2907{
2904 struct sctp_sock *sp; 2908 struct sctp_sock *sp;
2905 struct sctp_endpoint *ep; 2909 struct sctp_endpoint *ep;
@@ -2950,7 +2954,7 @@ static int sctp_setsockopt_peer_primary_addr(struct sock *sk, char __user *optva
2950} 2954}
2951 2955
2952static int sctp_setsockopt_adaptation_layer(struct sock *sk, char __user *optval, 2956static int sctp_setsockopt_adaptation_layer(struct sock *sk, char __user *optval,
2953 int optlen) 2957 unsigned int optlen)
2954{ 2958{
2955 struct sctp_setadaptation adaptation; 2959 struct sctp_setadaptation adaptation;
2956 2960
@@ -2979,7 +2983,7 @@ static int sctp_setsockopt_adaptation_layer(struct sock *sk, char __user *optval
2979 * saved with outbound messages. 2983 * saved with outbound messages.
2980 */ 2984 */
2981static int sctp_setsockopt_context(struct sock *sk, char __user *optval, 2985static int sctp_setsockopt_context(struct sock *sk, char __user *optval,
2982 int optlen) 2986 unsigned int optlen)
2983{ 2987{
2984 struct sctp_assoc_value params; 2988 struct sctp_assoc_value params;
2985 struct sctp_sock *sp; 2989 struct sctp_sock *sp;
@@ -3030,7 +3034,7 @@ static int sctp_setsockopt_context(struct sock *sk, char __user *optval,
3030 */ 3034 */
3031static int sctp_setsockopt_fragment_interleave(struct sock *sk, 3035static int sctp_setsockopt_fragment_interleave(struct sock *sk,
3032 char __user *optval, 3036 char __user *optval,
3033 int optlen) 3037 unsigned int optlen)
3034{ 3038{
3035 int val; 3039 int val;
3036 3040
@@ -3063,7 +3067,7 @@ static int sctp_setsockopt_fragment_interleave(struct sock *sk,
3063 */ 3067 */
3064static int sctp_setsockopt_partial_delivery_point(struct sock *sk, 3068static int sctp_setsockopt_partial_delivery_point(struct sock *sk,
3065 char __user *optval, 3069 char __user *optval,
3066 int optlen) 3070 unsigned int optlen)
3067{ 3071{
3068 u32 val; 3072 u32 val;
3069 3073
@@ -3096,7 +3100,7 @@ static int sctp_setsockopt_partial_delivery_point(struct sock *sk,
3096 */ 3100 */
3097static int sctp_setsockopt_maxburst(struct sock *sk, 3101static int sctp_setsockopt_maxburst(struct sock *sk,
3098 char __user *optval, 3102 char __user *optval,
3099 int optlen) 3103 unsigned int optlen)
3100{ 3104{
3101 struct sctp_assoc_value params; 3105 struct sctp_assoc_value params;
3102 struct sctp_sock *sp; 3106 struct sctp_sock *sp;
@@ -3140,8 +3144,8 @@ static int sctp_setsockopt_maxburst(struct sock *sk,
3140 * will only effect future associations on the socket. 3144 * will only effect future associations on the socket.
3141 */ 3145 */
3142static int sctp_setsockopt_auth_chunk(struct sock *sk, 3146static int sctp_setsockopt_auth_chunk(struct sock *sk,
3143 char __user *optval, 3147 char __user *optval,
3144 int optlen) 3148 unsigned int optlen)
3145{ 3149{
3146 struct sctp_authchunk val; 3150 struct sctp_authchunk val;
3147 3151
@@ -3172,8 +3176,8 @@ static int sctp_setsockopt_auth_chunk(struct sock *sk,
3172 * endpoint requires the peer to use. 3176 * endpoint requires the peer to use.
3173 */ 3177 */
3174static int sctp_setsockopt_hmac_ident(struct sock *sk, 3178static int sctp_setsockopt_hmac_ident(struct sock *sk,
3175 char __user *optval, 3179 char __user *optval,
3176 int optlen) 3180 unsigned int optlen)
3177{ 3181{
3178 struct sctp_hmacalgo *hmacs; 3182 struct sctp_hmacalgo *hmacs;
3179 u32 idents; 3183 u32 idents;
@@ -3215,7 +3219,7 @@ out:
3215 */ 3219 */
3216static int sctp_setsockopt_auth_key(struct sock *sk, 3220static int sctp_setsockopt_auth_key(struct sock *sk,
3217 char __user *optval, 3221 char __user *optval,
3218 int optlen) 3222 unsigned int optlen)
3219{ 3223{
3220 struct sctp_authkey *authkey; 3224 struct sctp_authkey *authkey;
3221 struct sctp_association *asoc; 3225 struct sctp_association *asoc;
@@ -3260,8 +3264,8 @@ out:
3260 * the association shared key. 3264 * the association shared key.
3261 */ 3265 */
3262static int sctp_setsockopt_active_key(struct sock *sk, 3266static int sctp_setsockopt_active_key(struct sock *sk,
3263 char __user *optval, 3267 char __user *optval,
3264 int optlen) 3268 unsigned int optlen)
3265{ 3269{
3266 struct sctp_authkeyid val; 3270 struct sctp_authkeyid val;
3267 struct sctp_association *asoc; 3271 struct sctp_association *asoc;
@@ -3288,8 +3292,8 @@ static int sctp_setsockopt_active_key(struct sock *sk,
3288 * This set option will delete a shared secret key from use. 3292 * This set option will delete a shared secret key from use.
3289 */ 3293 */
3290static int sctp_setsockopt_del_key(struct sock *sk, 3294static int sctp_setsockopt_del_key(struct sock *sk,
3291 char __user *optval, 3295 char __user *optval,
3292 int optlen) 3296 unsigned int optlen)
3293{ 3297{
3294 struct sctp_authkeyid val; 3298 struct sctp_authkeyid val;
3295 struct sctp_association *asoc; 3299 struct sctp_association *asoc;
@@ -3332,7 +3336,7 @@ static int sctp_setsockopt_del_key(struct sock *sk,
3332 * optlen - the size of the buffer. 3336 * optlen - the size of the buffer.
3333 */ 3337 */
3334SCTP_STATIC int sctp_setsockopt(struct sock *sk, int level, int optname, 3338SCTP_STATIC int sctp_setsockopt(struct sock *sk, int level, int optname,
3335 char __user *optval, int optlen) 3339 char __user *optval, unsigned int optlen)
3336{ 3340{
3337 int retval = 0; 3341 int retval = 0;
3338 3342
diff --git a/net/socket.c b/net/socket.c
index 49917a1cac7d..75655365b5fd 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -2098,12 +2098,17 @@ SYSCALL_DEFINE2(socketcall, int, call, unsigned long __user *, args)
2098 unsigned long a[6]; 2098 unsigned long a[6];
2099 unsigned long a0, a1; 2099 unsigned long a0, a1;
2100 int err; 2100 int err;
2101 unsigned int len;
2101 2102
2102 if (call < 1 || call > SYS_ACCEPT4) 2103 if (call < 1 || call > SYS_ACCEPT4)
2103 return -EINVAL; 2104 return -EINVAL;
2104 2105
2106 len = nargs[call];
2107 if (len > sizeof(a))
2108 return -EINVAL;
2109
2105 /* copy_from_user should be SMP safe. */ 2110 /* copy_from_user should be SMP safe. */
2106 if (copy_from_user(a, args, nargs[call])) 2111 if (copy_from_user(a, args, len))
2107 return -EFAULT; 2112 return -EFAULT;
2108 2113
2109 audit_socketcall(nargs[call] / sizeof(unsigned long), a); 2114 audit_socketcall(nargs[call] / sizeof(unsigned long), a);
@@ -2386,7 +2391,7 @@ int kernel_getsockopt(struct socket *sock, int level, int optname,
2386} 2391}
2387 2392
2388int kernel_setsockopt(struct socket *sock, int level, int optname, 2393int kernel_setsockopt(struct socket *sock, int level, int optname,
2389 char *optval, int optlen) 2394 char *optval, unsigned int optlen)
2390{ 2395{
2391 mm_segment_t oldfs = get_fs(); 2396 mm_segment_t oldfs = get_fs();
2392 int err; 2397 int err;
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index e8254e809b79..e6d9abf7440e 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -1658,7 +1658,7 @@ restart:
1658 */ 1658 */
1659 1659
1660static int setsockopt(struct socket *sock, 1660static int setsockopt(struct socket *sock,
1661 int lvl, int opt, char __user *ov, int ol) 1661 int lvl, int opt, char __user *ov, unsigned int ol)
1662{ 1662{
1663 struct sock *sk = sock->sk; 1663 struct sock *sk = sock->sk;
1664 struct tipc_port *tport = tipc_sk_port(sk); 1664 struct tipc_port *tport = tipc_sk_port(sk);
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index 7fae7eee65de..93c3ed329204 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -762,9 +762,8 @@ int __cfg80211_connect(struct cfg80211_registered_device *rdev,
762 wdev->conn->params.ssid = wdev->ssid; 762 wdev->conn->params.ssid = wdev->ssid;
763 wdev->conn->params.ssid_len = connect->ssid_len; 763 wdev->conn->params.ssid_len = connect->ssid_len;
764 764
765 /* don't care about result -- but fill bssid & channel */ 765 /* see if we have the bss already */
766 if (!wdev->conn->params.bssid || !wdev->conn->params.channel) 766 bss = cfg80211_get_conn_bss(wdev);
767 bss = cfg80211_get_conn_bss(wdev);
768 767
769 wdev->sme_state = CFG80211_SME_CONNECTING; 768 wdev->sme_state = CFG80211_SME_CONNECTING;
770 wdev->connect_keys = connkeys; 769 wdev->connect_keys = connkeys;
diff --git a/net/wireless/wext-sme.c b/net/wireless/wext-sme.c
index bf725275eb8d..5615a8802536 100644
--- a/net/wireless/wext-sme.c
+++ b/net/wireless/wext-sme.c
@@ -30,7 +30,8 @@ int cfg80211_mgd_wext_connect(struct cfg80211_registered_device *rdev,
30 if (wdev->wext.keys) { 30 if (wdev->wext.keys) {
31 wdev->wext.keys->def = wdev->wext.default_key; 31 wdev->wext.keys->def = wdev->wext.default_key;
32 wdev->wext.keys->defmgmt = wdev->wext.default_mgmt_key; 32 wdev->wext.keys->defmgmt = wdev->wext.default_mgmt_key;
33 wdev->wext.connect.privacy = true; 33 if (wdev->wext.default_key != -1)
34 wdev->wext.connect.privacy = true;
34 } 35 }
35 36
36 if (!wdev->wext.connect.ssid_len) 37 if (!wdev->wext.connect.ssid_len)
@@ -229,8 +230,7 @@ int cfg80211_mgd_wext_giwessid(struct net_device *dev,
229 data->flags = 1; 230 data->flags = 1;
230 data->length = wdev->wext.connect.ssid_len; 231 data->length = wdev->wext.connect.ssid_len;
231 memcpy(ssid, wdev->wext.connect.ssid, data->length); 232 memcpy(ssid, wdev->wext.connect.ssid, data->length);
232 } else 233 }
233 data->flags = 0;
234 wdev_unlock(wdev); 234 wdev_unlock(wdev);
235 235
236 return 0; 236 return 0;
@@ -306,8 +306,6 @@ int cfg80211_mgd_wext_giwap(struct net_device *dev,
306 wdev_lock(wdev); 306 wdev_lock(wdev);
307 if (wdev->current_bss) 307 if (wdev->current_bss)
308 memcpy(ap_addr->sa_data, wdev->current_bss->pub.bssid, ETH_ALEN); 308 memcpy(ap_addr->sa_data, wdev->current_bss->pub.bssid, ETH_ALEN);
309 else if (wdev->wext.connect.bssid)
310 memcpy(ap_addr->sa_data, wdev->wext.connect.bssid, ETH_ALEN);
311 else 309 else
312 memset(ap_addr->sa_data, 0, ETH_ALEN); 310 memset(ap_addr->sa_data, 0, ETH_ALEN);
313 wdev_unlock(wdev); 311 wdev_unlock(wdev);
diff --git a/net/wireless/wext.c b/net/wireless/wext.c
index 5b4a0cee4418..60fe57761ca9 100644
--- a/net/wireless/wext.c
+++ b/net/wireless/wext.c
@@ -470,7 +470,7 @@ static iw_handler get_handler(struct net_device *dev, unsigned int cmd)
470/* 470/*
471 * Get statistics out of the driver 471 * Get statistics out of the driver
472 */ 472 */
473static struct iw_statistics *get_wireless_stats(struct net_device *dev) 473struct iw_statistics *get_wireless_stats(struct net_device *dev)
474{ 474{
475 /* New location */ 475 /* New location */
476 if ((dev->wireless_handlers != NULL) && 476 if ((dev->wireless_handlers != NULL) &&
@@ -773,10 +773,13 @@ static int ioctl_standard_iw_point(struct iw_point *iwp, unsigned int cmd,
773 essid_compat = 1; 773 essid_compat = 1;
774 else if (IW_IS_SET(cmd) && (iwp->length != 0)) { 774 else if (IW_IS_SET(cmd) && (iwp->length != 0)) {
775 char essid[IW_ESSID_MAX_SIZE + 1]; 775 char essid[IW_ESSID_MAX_SIZE + 1];
776 unsigned int len;
777 len = iwp->length * descr->token_size;
776 778
777 err = copy_from_user(essid, iwp->pointer, 779 if (len > IW_ESSID_MAX_SIZE)
778 iwp->length * 780 return -EFAULT;
779 descr->token_size); 781
782 err = copy_from_user(essid, iwp->pointer, len);
780 if (err) 783 if (err)
781 return -EFAULT; 784 return -EFAULT;
782 785
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
index 5e6c072c64d3..7fa9c7ad3d3b 100644
--- a/net/x25/af_x25.c
+++ b/net/x25/af_x25.c
@@ -409,7 +409,7 @@ static void x25_destroy_socket(struct sock *sk)
409 */ 409 */
410 410
411static int x25_setsockopt(struct socket *sock, int level, int optname, 411static int x25_setsockopt(struct socket *sock, int level, int optname,
412 char __user *optval, int optlen) 412 char __user *optval, unsigned int optlen)
413{ 413{
414 int opt; 414 int opt;
415 struct sock *sk = sock->sk; 415 struct sock *sk = sock->sk;
diff --git a/samples/tracepoints/tracepoint-sample.c b/samples/tracepoints/tracepoint-sample.c
index 9cf80a11e8b6..26fab33ffa8c 100644
--- a/samples/tracepoints/tracepoint-sample.c
+++ b/samples/tracepoints/tracepoint-sample.c
@@ -28,7 +28,7 @@ static int my_open(struct inode *inode, struct file *file)
28 return -EPERM; 28 return -EPERM;
29} 29}
30 30
31static struct file_operations mark_ops = { 31static const struct file_operations mark_ops = {
32 .open = my_open, 32 .open = my_open,
33}; 33};
34 34
diff --git a/security/integrity/ima/ima_fs.c b/security/integrity/ima/ima_fs.c
index 8e9777b76405..0c72c9c38956 100644
--- a/security/integrity/ima/ima_fs.c
+++ b/security/integrity/ima/ima_fs.c
@@ -43,7 +43,7 @@ static ssize_t ima_show_htable_violations(struct file *filp,
43 return ima_show_htable_value(buf, count, ppos, &ima_htable.violations); 43 return ima_show_htable_value(buf, count, ppos, &ima_htable.violations);
44} 44}
45 45
46static struct file_operations ima_htable_violations_ops = { 46static const struct file_operations ima_htable_violations_ops = {
47 .read = ima_show_htable_violations 47 .read = ima_show_htable_violations
48}; 48};
49 49
@@ -55,7 +55,7 @@ static ssize_t ima_show_measurements_count(struct file *filp,
55 55
56} 56}
57 57
58static struct file_operations ima_measurements_count_ops = { 58static const struct file_operations ima_measurements_count_ops = {
59 .read = ima_show_measurements_count 59 .read = ima_show_measurements_count
60}; 60};
61 61
@@ -158,7 +158,7 @@ static int ima_measurements_open(struct inode *inode, struct file *file)
158 return seq_open(file, &ima_measurments_seqops); 158 return seq_open(file, &ima_measurments_seqops);
159} 159}
160 160
161static struct file_operations ima_measurements_ops = { 161static const struct file_operations ima_measurements_ops = {
162 .open = ima_measurements_open, 162 .open = ima_measurements_open,
163 .read = seq_read, 163 .read = seq_read,
164 .llseek = seq_lseek, 164 .llseek = seq_lseek,
@@ -233,7 +233,7 @@ static int ima_ascii_measurements_open(struct inode *inode, struct file *file)
233 return seq_open(file, &ima_ascii_measurements_seqops); 233 return seq_open(file, &ima_ascii_measurements_seqops);
234} 234}
235 235
236static struct file_operations ima_ascii_measurements_ops = { 236static const struct file_operations ima_ascii_measurements_ops = {
237 .open = ima_ascii_measurements_open, 237 .open = ima_ascii_measurements_open,
238 .read = seq_read, 238 .read = seq_read,
239 .llseek = seq_lseek, 239 .llseek = seq_lseek,
@@ -313,7 +313,7 @@ static int ima_release_policy(struct inode *inode, struct file *file)
313 return 0; 313 return 0;
314} 314}
315 315
316static struct file_operations ima_measure_policy_ops = { 316static const struct file_operations ima_measure_policy_ops = {
317 .open = ima_open_policy, 317 .open = ima_open_policy,
318 .write = ima_write_policy, 318 .write = ima_write_policy,
319 .release = ima_release_policy 319 .release = ima_release_policy
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index 561d6d95a2d3..ab73edf2c89a 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -2985,7 +2985,7 @@ static int snd_pcm_mmap_status_fault(struct vm_area_struct *area,
2985 return 0; 2985 return 0;
2986} 2986}
2987 2987
2988static struct vm_operations_struct snd_pcm_vm_ops_status = 2988static const struct vm_operations_struct snd_pcm_vm_ops_status =
2989{ 2989{
2990 .fault = snd_pcm_mmap_status_fault, 2990 .fault = snd_pcm_mmap_status_fault,
2991}; 2991};
@@ -3024,7 +3024,7 @@ static int snd_pcm_mmap_control_fault(struct vm_area_struct *area,
3024 return 0; 3024 return 0;
3025} 3025}
3026 3026
3027static struct vm_operations_struct snd_pcm_vm_ops_control = 3027static const struct vm_operations_struct snd_pcm_vm_ops_control =
3028{ 3028{
3029 .fault = snd_pcm_mmap_control_fault, 3029 .fault = snd_pcm_mmap_control_fault,
3030}; 3030};
@@ -3094,7 +3094,7 @@ static int snd_pcm_mmap_data_fault(struct vm_area_struct *area,
3094 return 0; 3094 return 0;
3095} 3095}
3096 3096
3097static struct vm_operations_struct snd_pcm_vm_ops_data = 3097static const struct vm_operations_struct snd_pcm_vm_ops_data =
3098{ 3098{
3099 .open = snd_pcm_mmap_data_open, 3099 .open = snd_pcm_mmap_data_open,
3100 .close = snd_pcm_mmap_data_close, 3100 .close = snd_pcm_mmap_data_close,
@@ -3118,7 +3118,7 @@ static int snd_pcm_default_mmap(struct snd_pcm_substream *substream,
3118 * mmap the DMA buffer on I/O memory area 3118 * mmap the DMA buffer on I/O memory area
3119 */ 3119 */
3120#if SNDRV_PCM_INFO_MMAP_IOMEM 3120#if SNDRV_PCM_INFO_MMAP_IOMEM
3121static struct vm_operations_struct snd_pcm_vm_ops_data_mmio = 3121static const struct vm_operations_struct snd_pcm_vm_ops_data_mmio =
3122{ 3122{
3123 .open = snd_pcm_mmap_data_open, 3123 .open = snd_pcm_mmap_data_open,
3124 .close = snd_pcm_mmap_data_close, 3124 .close = snd_pcm_mmap_data_close,
diff --git a/sound/usb/usx2y/us122l.c b/sound/usb/usx2y/us122l.c
index fd44946ce4b3..99f33766cd51 100644
--- a/sound/usb/usx2y/us122l.c
+++ b/sound/usb/usx2y/us122l.c
@@ -154,7 +154,7 @@ static void usb_stream_hwdep_vm_close(struct vm_area_struct *area)
154 snd_printdd(KERN_DEBUG "%i\n", atomic_read(&us122l->mmap_count)); 154 snd_printdd(KERN_DEBUG "%i\n", atomic_read(&us122l->mmap_count));
155} 155}
156 156
157static struct vm_operations_struct usb_stream_hwdep_vm_ops = { 157static const struct vm_operations_struct usb_stream_hwdep_vm_ops = {
158 .open = usb_stream_hwdep_vm_open, 158 .open = usb_stream_hwdep_vm_open,
159 .fault = usb_stream_hwdep_vm_fault, 159 .fault = usb_stream_hwdep_vm_fault,
160 .close = usb_stream_hwdep_vm_close, 160 .close = usb_stream_hwdep_vm_close,
diff --git a/sound/usb/usx2y/usX2Yhwdep.c b/sound/usb/usx2y/usX2Yhwdep.c
index f3d8f71265dd..52e04b2f35d3 100644
--- a/sound/usb/usx2y/usX2Yhwdep.c
+++ b/sound/usb/usx2y/usX2Yhwdep.c
@@ -53,7 +53,7 @@ static int snd_us428ctls_vm_fault(struct vm_area_struct *area,
53 return 0; 53 return 0;
54} 54}
55 55
56static struct vm_operations_struct us428ctls_vm_ops = { 56static const struct vm_operations_struct us428ctls_vm_ops = {
57 .fault = snd_us428ctls_vm_fault, 57 .fault = snd_us428ctls_vm_fault,
58}; 58};
59 59
diff --git a/sound/usb/usx2y/usx2yhwdeppcm.c b/sound/usb/usx2y/usx2yhwdeppcm.c
index 117946f2debb..4b2304c2e02d 100644
--- a/sound/usb/usx2y/usx2yhwdeppcm.c
+++ b/sound/usb/usx2y/usx2yhwdeppcm.c
@@ -697,7 +697,7 @@ static int snd_usX2Y_hwdep_pcm_vm_fault(struct vm_area_struct *area,
697} 697}
698 698
699 699
700static struct vm_operations_struct snd_usX2Y_hwdep_pcm_vm_ops = { 700static const struct vm_operations_struct snd_usX2Y_hwdep_pcm_vm_ops = {
701 .open = snd_usX2Y_hwdep_pcm_vm_open, 701 .open = snd_usX2Y_hwdep_pcm_vm_open,
702 .close = snd_usX2Y_hwdep_pcm_vm_close, 702 .close = snd_usX2Y_hwdep_pcm_vm_close,
703 .fault = snd_usX2Y_hwdep_pcm_vm_fault, 703 .fault = snd_usX2Y_hwdep_pcm_vm_fault,
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 034a798b0431..e79c54034bcd 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1713,7 +1713,7 @@ static int kvm_vcpu_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1713 return 0; 1713 return 0;
1714} 1714}
1715 1715
1716static struct vm_operations_struct kvm_vcpu_vm_ops = { 1716static const struct vm_operations_struct kvm_vcpu_vm_ops = {
1717 .fault = kvm_vcpu_fault, 1717 .fault = kvm_vcpu_fault,
1718}; 1718};
1719 1719
@@ -2317,7 +2317,7 @@ static int kvm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
2317 return 0; 2317 return 0;
2318} 2318}
2319 2319
2320static struct vm_operations_struct kvm_vm_vm_ops = { 2320static const struct vm_operations_struct kvm_vm_vm_ops = {
2321 .fault = kvm_vm_fault, 2321 .fault = kvm_vm_fault,
2322}; 2322};
2323 2323
@@ -2625,7 +2625,7 @@ static int vcpu_stat_get(void *_offset, u64 *val)
2625 2625
2626DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, NULL, "%llu\n"); 2626DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, NULL, "%llu\n");
2627 2627
2628static struct file_operations *stat_fops[] = { 2628static const struct file_operations *stat_fops[] = {
2629 [KVM_STAT_VCPU] = &vcpu_stat_fops, 2629 [KVM_STAT_VCPU] = &vcpu_stat_fops,
2630 [KVM_STAT_VM] = &vm_stat_fops, 2630 [KVM_STAT_VM] = &vm_stat_fops,
2631}; 2631};